+#include "wayland-client.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @page page_xdg_shell The xdg_shell protocol
+ * @section page_ifaces_xdg_shell Interfaces
+ * - @subpage page_iface_xdg_wm_base - create desktop-style surfaces
+ * - @subpage page_iface_xdg_positioner - child surface positioner
+ * - @subpage page_iface_xdg_surface - desktop user interface surface base interface
+ * - @subpage page_iface_xdg_toplevel - toplevel surface
+ * - @subpage page_iface_xdg_popup - short-lived, popup surfaces for menus
+ * @section page_copyright_xdg_shell Copyright
+ *
+ *
+ * Copyright © 2008-2013 Kristian Høgsberg
+ * Copyright © 2013 Rafael Antognolli
+ * Copyright © 2013 Jasper St. Pierre
+ * Copyright © 2010-2013 Intel Corporation
+ * Copyright © 2015-2017 Samsung Electronics Co., Ltd
+ * Copyright © 2015-2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ */
+struct wl_output;
+struct wl_seat;
+struct wl_surface;
+struct xdg_popup;
+struct xdg_positioner;
+struct xdg_surface;
+struct xdg_toplevel;
+struct xdg_wm_base;
+
+#ifndef XDG_WM_BASE_INTERFACE
+#define XDG_WM_BASE_INTERFACE
+/**
+ * @page page_iface_xdg_wm_base xdg_wm_base
+ * @section page_iface_xdg_wm_base_desc Description
+ *
+ * The xdg_wm_base interface is exposed as a global object enabling clients
+ * to turn their wl_surfaces into windows in a desktop environment. It
+ * defines the basic functionality needed for clients and the compositor to
+ * create windows that can be dragged, resized, maximized, etc, as well as
+ * creating transient windows such as popup menus.
+ * @section page_iface_xdg_wm_base_api API
+ * See @ref iface_xdg_wm_base.
+ */
+/**
+ * @defgroup iface_xdg_wm_base The xdg_wm_base interface
+ *
+ * The xdg_wm_base interface is exposed as a global object enabling clients
+ * to turn their wl_surfaces into windows in a desktop environment. It
+ * defines the basic functionality needed for clients and the compositor to
+ * create windows that can be dragged, resized, maximized, etc, as well as
+ * creating transient windows such as popup menus.
+ */
+extern const struct wl_interface xdg_wm_base_interface;
+#endif
+#ifndef XDG_POSITIONER_INTERFACE
+#define XDG_POSITIONER_INTERFACE
+/**
+ * @page page_iface_xdg_positioner xdg_positioner
+ * @section page_iface_xdg_positioner_desc Description
+ *
+ * The xdg_positioner provides a collection of rules for the placement of a
+ * child surface relative to a parent surface. Rules can be defined to ensure
+ * the child surface remains within the visible area's borders, and to
+ * specify how the child surface changes its position, such as sliding along
+ * an axis, or flipping around a rectangle. These positioner-created rules are
+ * constrained by the requirement that a child surface must intersect with or
+ * be at least partially adjacent to its parent surface.
+ *
+ * See the various requests for details about possible rules.
+ *
+ * At the time of the request, the compositor makes a copy of the rules
+ * specified by the xdg_positioner. Thus, after the request is complete the
+ * xdg_positioner object can be destroyed or reused; further changes to the
+ * object will have no effect on previous usages.
+ *
+ * For an xdg_positioner object to be considered complete, it must have a
+ * non-zero size set by set_size, and a non-zero anchor rectangle set by
+ * set_anchor_rect. Passing an incomplete xdg_positioner object when
+ * positioning a surface raises an error.
+ * @section page_iface_xdg_positioner_api API
+ * See @ref iface_xdg_positioner.
+ */
+/**
+ * @defgroup iface_xdg_positioner The xdg_positioner interface
+ *
+ * The xdg_positioner provides a collection of rules for the placement of a
+ * child surface relative to a parent surface. Rules can be defined to ensure
+ * the child surface remains within the visible area's borders, and to
+ * specify how the child surface changes its position, such as sliding along
+ * an axis, or flipping around a rectangle. These positioner-created rules are
+ * constrained by the requirement that a child surface must intersect with or
+ * be at least partially adjacent to its parent surface.
+ *
+ * See the various requests for details about possible rules.
+ *
+ * At the time of the request, the compositor makes a copy of the rules
+ * specified by the xdg_positioner. Thus, after the request is complete the
+ * xdg_positioner object can be destroyed or reused; further changes to the
+ * object will have no effect on previous usages.
+ *
+ * For an xdg_positioner object to be considered complete, it must have a
+ * non-zero size set by set_size, and a non-zero anchor rectangle set by
+ * set_anchor_rect. Passing an incomplete xdg_positioner object when
+ * positioning a surface raises an error.
+ */
+extern const struct wl_interface xdg_positioner_interface;
+#endif
+#ifndef XDG_SURFACE_INTERFACE
+#define XDG_SURFACE_INTERFACE
+/**
+ * @page page_iface_xdg_surface xdg_surface
+ * @section page_iface_xdg_surface_desc Description
+ *
+ * An interface that may be implemented by a wl_surface, for
+ * implementations that provide a desktop-style user interface.
+ *
+ * It provides a base set of functionality required to construct user
+ * interface elements requiring management by the compositor, such as
+ * toplevel windows, menus, etc. The types of functionality are split into
+ * xdg_surface roles.
+ *
+ * Creating an xdg_surface does not set the role for a wl_surface. In order
+ * to map an xdg_surface, the client must create a role-specific object
+ * using, e.g., get_toplevel, get_popup. The wl_surface for any given
+ * xdg_surface can have at most one role, and may not be assigned any role
+ * not based on xdg_surface.
+ *
+ * A role must be assigned before any other requests are made to the
+ * xdg_surface object.
+ *
+ * The client must call wl_surface.commit on the corresponding wl_surface
+ * for the xdg_surface state to take effect.
+ *
+ * Creating an xdg_surface from a wl_surface which has a buffer attached or
+ * committed is a client error, and any attempts by a client to attach or
+ * manipulate a buffer prior to the first xdg_surface.configure call must
+ * also be treated as errors.
+ *
+ * After creating a role-specific object and setting it up, the client must
+ * perform an initial commit without any buffer attached. The compositor
+ * will reply with an xdg_surface.configure event. The client must
+ * acknowledge it and is then allowed to attach a buffer to map the surface.
+ *
+ * Mapping an xdg_surface-based role surface is defined as making it
+ * possible for the surface to be shown by the compositor. Note that
+ * a mapped surface is not guaranteed to be visible once it is mapped.
+ *
+ * For an xdg_surface to be mapped by the compositor, the following
+ * conditions must be met:
+ * (1) the client has assigned an xdg_surface-based role to the surface
+ * (2) the client has set and committed the xdg_surface state and the
+ * role-dependent state to the surface
+ * (3) the client has committed a buffer to the surface
+ *
+ * A newly-unmapped surface is considered to have met condition (1) out
+ * of the 3 required conditions for mapping a surface if its role surface
+ * has not been destroyed.
+ * @section page_iface_xdg_surface_api API
+ * See @ref iface_xdg_surface.
+ */
+/**
+ * @defgroup iface_xdg_surface The xdg_surface interface
+ *
+ * An interface that may be implemented by a wl_surface, for
+ * implementations that provide a desktop-style user interface.
+ *
+ * It provides a base set of functionality required to construct user
+ * interface elements requiring management by the compositor, such as
+ * toplevel windows, menus, etc. The types of functionality are split into
+ * xdg_surface roles.
+ *
+ * Creating an xdg_surface does not set the role for a wl_surface. In order
+ * to map an xdg_surface, the client must create a role-specific object
+ * using, e.g., get_toplevel, get_popup. The wl_surface for any given
+ * xdg_surface can have at most one role, and may not be assigned any role
+ * not based on xdg_surface.
+ *
+ * A role must be assigned before any other requests are made to the
+ * xdg_surface object.
+ *
+ * The client must call wl_surface.commit on the corresponding wl_surface
+ * for the xdg_surface state to take effect.
+ *
+ * Creating an xdg_surface from a wl_surface which has a buffer attached or
+ * committed is a client error, and any attempts by a client to attach or
+ * manipulate a buffer prior to the first xdg_surface.configure call must
+ * also be treated as errors.
+ *
+ * After creating a role-specific object and setting it up, the client must
+ * perform an initial commit without any buffer attached. The compositor
+ * will reply with an xdg_surface.configure event. The client must
+ * acknowledge it and is then allowed to attach a buffer to map the surface.
+ *
+ * Mapping an xdg_surface-based role surface is defined as making it
+ * possible for the surface to be shown by the compositor. Note that
+ * a mapped surface is not guaranteed to be visible once it is mapped.
+ *
+ * For an xdg_surface to be mapped by the compositor, the following
+ * conditions must be met:
+ * (1) the client has assigned an xdg_surface-based role to the surface
+ * (2) the client has set and committed the xdg_surface state and the
+ * role-dependent state to the surface
+ * (3) the client has committed a buffer to the surface
+ *
+ * A newly-unmapped surface is considered to have met condition (1) out
+ * of the 3 required conditions for mapping a surface if its role surface
+ * has not been destroyed.
+ */
+extern const struct wl_interface xdg_surface_interface;
+#endif
+#ifndef XDG_TOPLEVEL_INTERFACE
+#define XDG_TOPLEVEL_INTERFACE
+/**
+ * @page page_iface_xdg_toplevel xdg_toplevel
+ * @section page_iface_xdg_toplevel_desc Description
+ *
+ * This interface defines an xdg_surface role which allows a surface to,
+ * among other things, set window-like properties such as maximize,
+ * fullscreen, and minimize, set application-specific metadata like title and
+ * id, as well as trigger user interactive operations such as interactive
+ * resize and move.
+ *
+ * Unmapping an xdg_toplevel means that the surface cannot be shown
+ * by the compositor until it is explicitly mapped again.
+ * All active operations (e.g., move, resize) are canceled and all
+ * attributes (e.g. title, state, stacking, ...) are discarded for
+ * an xdg_toplevel surface when it is unmapped. The xdg_toplevel returns to
+ * the state it had right after xdg_surface.get_toplevel. The client
+ * can re-map the toplevel by performing a commit without any buffer
+ * attached, waiting for a configure event and handling it as usual (see
+ * xdg_surface description).
+ *
+ * Attaching a null buffer to a toplevel unmaps the surface.
+ * @section page_iface_xdg_toplevel_api API
+ * See @ref iface_xdg_toplevel.
+ */
+/**
+ * @defgroup iface_xdg_toplevel The xdg_toplevel interface
+ *
+ * This interface defines an xdg_surface role which allows a surface to,
+ * among other things, set window-like properties such as maximize,
+ * fullscreen, and minimize, set application-specific metadata like title and
+ * id, as well as trigger user interactive operations such as interactive
+ * resize and move.
+ *
+ * Unmapping an xdg_toplevel means that the surface cannot be shown
+ * by the compositor until it is explicitly mapped again.
+ * All active operations (e.g., move, resize) are canceled and all
+ * attributes (e.g. title, state, stacking, ...) are discarded for
+ * an xdg_toplevel surface when it is unmapped. The xdg_toplevel returns to
+ * the state it had right after xdg_surface.get_toplevel. The client
+ * can re-map the toplevel by performing a commit without any buffer
+ * attached, waiting for a configure event and handling it as usual (see
+ * xdg_surface description).
+ *
+ * Attaching a null buffer to a toplevel unmaps the surface.
+ */
+extern const struct wl_interface xdg_toplevel_interface;
+#endif
+#ifndef XDG_POPUP_INTERFACE
+#define XDG_POPUP_INTERFACE
+/**
+ * @page page_iface_xdg_popup xdg_popup
+ * @section page_iface_xdg_popup_desc Description
+ *
+ * A popup surface is a short-lived, temporary surface. It can be used to
+ * implement for example menus, popovers, tooltips and other similar user
+ * interface concepts.
+ *
+ * A popup can be made to take an explicit grab. See xdg_popup.grab for
+ * details.
+ *
+ * When the popup is dismissed, a popup_done event will be sent out, and at
+ * the same time the surface will be unmapped. See the xdg_popup.popup_done
+ * event for details.
+ *
+ * Explicitly destroying the xdg_popup object will also dismiss the popup and
+ * unmap the surface. Clients that want to dismiss the popup when another
+ * surface of their own is clicked should dismiss the popup using the destroy
+ * request.
+ *
+ * A newly created xdg_popup will be stacked on top of all previously created
+ * xdg_popup surfaces associated with the same xdg_toplevel.
+ *
+ * The parent of an xdg_popup must be mapped (see the xdg_surface
+ * description) before the xdg_popup itself.
+ *
+ * The client must call wl_surface.commit on the corresponding wl_surface
+ * for the xdg_popup state to take effect.
+ * @section page_iface_xdg_popup_api API
+ * See @ref iface_xdg_popup.
+ */
+/**
+ * @defgroup iface_xdg_popup The xdg_popup interface
+ *
+ * A popup surface is a short-lived, temporary surface. It can be used to
+ * implement for example menus, popovers, tooltips and other similar user
+ * interface concepts.
+ *
+ * A popup can be made to take an explicit grab. See xdg_popup.grab for
+ * details.
+ *
+ * When the popup is dismissed, a popup_done event will be sent out, and at
+ * the same time the surface will be unmapped. See the xdg_popup.popup_done
+ * event for details.
+ *
+ * Explicitly destroying the xdg_popup object will also dismiss the popup and
+ * unmap the surface. Clients that want to dismiss the popup when another
+ * surface of their own is clicked should dismiss the popup using the destroy
+ * request.
+ *
+ * A newly created xdg_popup will be stacked on top of all previously created
+ * xdg_popup surfaces associated with the same xdg_toplevel.
+ *
+ * The parent of an xdg_popup must be mapped (see the xdg_surface
+ * description) before the xdg_popup itself.
+ *
+ * The client must call wl_surface.commit on the corresponding wl_surface
+ * for the xdg_popup state to take effect.
+ */
+extern const struct wl_interface xdg_popup_interface;
+#endif
+
+#ifndef XDG_WM_BASE_ERROR_ENUM
+#define XDG_WM_BASE_ERROR_ENUM
+/**
+ * @ingroup iface_xdg_wm_base
+ * Protocol errors that xdg_wm_base requests can raise.
+ */
+enum xdg_wm_base_error {
+ /**
+ * given wl_surface has another role
+ */
+ XDG_WM_BASE_ERROR_ROLE = 0,
+ /**
+ * xdg_wm_base was destroyed before children
+ */
+ XDG_WM_BASE_ERROR_DEFUNCT_SURFACES = 1,
+ /**
+ * the client tried to map or destroy a non-topmost popup
+ */
+ XDG_WM_BASE_ERROR_NOT_THE_TOPMOST_POPUP = 2,
+ /**
+ * the client specified an invalid popup parent surface
+ */
+ XDG_WM_BASE_ERROR_INVALID_POPUP_PARENT = 3,
+ /**
+ * the client provided an invalid surface state
+ */
+ XDG_WM_BASE_ERROR_INVALID_SURFACE_STATE = 4,
+ /**
+ * the client provided an invalid positioner
+ */
+ XDG_WM_BASE_ERROR_INVALID_POSITIONER = 5,
+};
+#endif /* XDG_WM_BASE_ERROR_ENUM */
+
+/**
+ * @ingroup iface_xdg_wm_base
+ * @struct xdg_wm_base_listener
+ *
+ * Event handler table for xdg_wm_base; install it with
+ * xdg_wm_base_add_listener().
+ */
+struct xdg_wm_base_listener {
+ /**
+ * check if the client is alive
+ *
+ * The ping event asks the client if it's still alive. Pass the
+ * serial specified in the event back to the compositor by sending
+ * a "pong" request back with the specified serial. See
+ * xdg_wm_base.pong.
+ *
+ * Compositors can use this to determine if the client is still
+ * alive. It's unspecified what will happen if the client doesn't
+ * respond to the ping request, or in what timeframe. Clients
+ * should try to respond in a reasonable amount of time.
+ *
+ * A compositor is free to ping in any way it wants, but a client
+ * must always respond to any xdg_wm_base object it created.
+ * @param serial pass this to the pong request
+ */
+ void (*ping)(void *data,
+ struct xdg_wm_base *xdg_wm_base,
+ uint32_t serial);
+};
+
+/**
+ * @ingroup iface_xdg_wm_base
+ *
+ * Register @p listener to receive events from @p xdg_wm_base.
+ *
+ * @param xdg_wm_base the xdg_wm_base proxy
+ * @param listener handler table; must remain valid while installed
+ * @param data user data passed to each handler
+ * @return 0 on success, -1 on failure (see wl_proxy_add_listener())
+ */
+static inline int
+xdg_wm_base_add_listener(struct xdg_wm_base *xdg_wm_base,
+ const struct xdg_wm_base_listener *listener, void *data)
+{
+ return wl_proxy_add_listener((struct wl_proxy *) xdg_wm_base,
+ (void (**)(void)) listener, data);
+}
+
+#define XDG_WM_BASE_DESTROY 0
+#define XDG_WM_BASE_CREATE_POSITIONER 1
+#define XDG_WM_BASE_GET_XDG_SURFACE 2
+#define XDG_WM_BASE_PONG 3
+
+/**
+ * @ingroup iface_xdg_wm_base
+ */
+#define XDG_WM_BASE_PING_SINCE_VERSION 1
+
+/**
+ * @ingroup iface_xdg_wm_base
+ */
+#define XDG_WM_BASE_DESTROY_SINCE_VERSION 1
+/**
+ * @ingroup iface_xdg_wm_base
+ */
+#define XDG_WM_BASE_CREATE_POSITIONER_SINCE_VERSION 1
+/**
+ * @ingroup iface_xdg_wm_base
+ */
+#define XDG_WM_BASE_GET_XDG_SURFACE_SINCE_VERSION 1
+/**
+ * @ingroup iface_xdg_wm_base
+ */
+#define XDG_WM_BASE_PONG_SINCE_VERSION 1
+
+/** @ingroup iface_xdg_wm_base
+ * Attach @p user_data to the proxy; read it back with
+ * xdg_wm_base_get_user_data().
+ */
+static inline void
+xdg_wm_base_set_user_data(struct xdg_wm_base *xdg_wm_base, void *user_data)
+{
+ wl_proxy_set_user_data((struct wl_proxy *) xdg_wm_base, user_data);
+}
+
+/** @ingroup iface_xdg_wm_base
+ * Return the pointer previously stored with xdg_wm_base_set_user_data().
+ */
+static inline void *
+xdg_wm_base_get_user_data(struct xdg_wm_base *xdg_wm_base)
+{
+ return wl_proxy_get_user_data((struct wl_proxy *) xdg_wm_base);
+}
+
+/** @ingroup iface_xdg_wm_base
+ * Return the protocol version bound for this xdg_wm_base proxy.
+ */
+static inline uint32_t
+xdg_wm_base_get_version(struct xdg_wm_base *xdg_wm_base)
+{
+ return wl_proxy_get_version((struct wl_proxy *) xdg_wm_base);
+}
+
+/**
+ * @ingroup iface_xdg_wm_base
+ *
+ * Destroy this xdg_wm_base object.
+ *
+ * Destroying a bound xdg_wm_base object while there are surfaces
+ * still alive created by this xdg_wm_base object instance is illegal
+ * and will result in a protocol error.
+ */
+static inline void
+xdg_wm_base_destroy(struct xdg_wm_base *xdg_wm_base)
+{
+ wl_proxy_marshal((struct wl_proxy *) xdg_wm_base,
+ XDG_WM_BASE_DESTROY);
+
+ /* Destructor request: the client-side proxy is freed as well. */
+ wl_proxy_destroy((struct wl_proxy *) xdg_wm_base);
+}
+
+/**
+ * @ingroup iface_xdg_wm_base
+ *
+ * Create a positioner object. A positioner object is used to position
+ * surfaces relative to some parent surface. See the interface description
+ * and xdg_surface.get_popup for details.
+ *
+ * @return the new xdg_positioner proxy
+ */
+static inline struct xdg_positioner *
+xdg_wm_base_create_positioner(struct xdg_wm_base *xdg_wm_base)
+{
+ struct wl_proxy *id;
+
+ id = wl_proxy_marshal_constructor((struct wl_proxy *) xdg_wm_base,
+ XDG_WM_BASE_CREATE_POSITIONER, &xdg_positioner_interface, NULL);
+
+ return (struct xdg_positioner *) id;
+}
+
+/**
+ * @ingroup iface_xdg_wm_base
+ *
+ * This creates an xdg_surface for the given surface. While xdg_surface
+ * itself is not a role, the corresponding surface may only be assigned
+ * a role extending xdg_surface, such as xdg_toplevel or xdg_popup.
+ *
+ * This creates an xdg_surface for the given surface. An xdg_surface is
+ * used as basis to define a role to a given surface, such as xdg_toplevel
+ * or xdg_popup. It also manages functionality shared between xdg_surface
+ * based surface roles.
+ *
+ * See the documentation of xdg_surface for more details about what an
+ * xdg_surface is and how it is used.
+ *
+ * @param surface the wl_surface to create an xdg_surface for
+ * @return the new xdg_surface proxy
+ */
+static inline struct xdg_surface *
+xdg_wm_base_get_xdg_surface(struct xdg_wm_base *xdg_wm_base, struct wl_surface *surface)
+{
+ struct wl_proxy *id;
+
+ id = wl_proxy_marshal_constructor((struct wl_proxy *) xdg_wm_base,
+ XDG_WM_BASE_GET_XDG_SURFACE, &xdg_surface_interface, NULL, surface);
+
+ return (struct xdg_surface *) id;
+}
+
+/**
+ * @ingroup iface_xdg_wm_base
+ *
+ * A client must respond to a ping event with a pong request or
+ * the client may be deemed unresponsive. See xdg_wm_base.ping.
+ *
+ * @param serial the serial received in the ping event
+ */
+static inline void
+xdg_wm_base_pong(struct xdg_wm_base *xdg_wm_base, uint32_t serial)
+{
+ wl_proxy_marshal((struct wl_proxy *) xdg_wm_base,
+ XDG_WM_BASE_PONG, serial);
+}
+
+#ifndef XDG_POSITIONER_ERROR_ENUM
+#define XDG_POSITIONER_ERROR_ENUM
+/**
+ * @ingroup iface_xdg_positioner
+ * Protocol errors that xdg_positioner requests can raise.
+ */
+enum xdg_positioner_error {
+ /**
+ * invalid input provided
+ */
+ XDG_POSITIONER_ERROR_INVALID_INPUT = 0,
+};
+#endif /* XDG_POSITIONER_ERROR_ENUM */
+
+#ifndef XDG_POSITIONER_ANCHOR_ENUM
+#define XDG_POSITIONER_ANCHOR_ENUM
+/**
+ * @ingroup iface_xdg_positioner
+ * Anchor point on the anchor rectangle; see xdg_positioner_set_anchor().
+ */
+enum xdg_positioner_anchor {
+ XDG_POSITIONER_ANCHOR_NONE = 0,
+ XDG_POSITIONER_ANCHOR_TOP = 1,
+ XDG_POSITIONER_ANCHOR_BOTTOM = 2,
+ XDG_POSITIONER_ANCHOR_LEFT = 3,
+ XDG_POSITIONER_ANCHOR_RIGHT = 4,
+ XDG_POSITIONER_ANCHOR_TOP_LEFT = 5,
+ XDG_POSITIONER_ANCHOR_BOTTOM_LEFT = 6,
+ XDG_POSITIONER_ANCHOR_TOP_RIGHT = 7,
+ XDG_POSITIONER_ANCHOR_BOTTOM_RIGHT = 8,
+};
+#endif /* XDG_POSITIONER_ANCHOR_ENUM */
+
+#ifndef XDG_POSITIONER_GRAVITY_ENUM
+#define XDG_POSITIONER_GRAVITY_ENUM
+/**
+ * @ingroup iface_xdg_positioner
+ * Direction the child surface is placed relative to the anchor point;
+ * see xdg_positioner_set_gravity().
+ */
+enum xdg_positioner_gravity {
+ XDG_POSITIONER_GRAVITY_NONE = 0,
+ XDG_POSITIONER_GRAVITY_TOP = 1,
+ XDG_POSITIONER_GRAVITY_BOTTOM = 2,
+ XDG_POSITIONER_GRAVITY_LEFT = 3,
+ XDG_POSITIONER_GRAVITY_RIGHT = 4,
+ XDG_POSITIONER_GRAVITY_TOP_LEFT = 5,
+ XDG_POSITIONER_GRAVITY_BOTTOM_LEFT = 6,
+ XDG_POSITIONER_GRAVITY_TOP_RIGHT = 7,
+ XDG_POSITIONER_GRAVITY_BOTTOM_RIGHT = 8,
+};
+#endif /* XDG_POSITIONER_GRAVITY_ENUM */
+
+#ifndef XDG_POSITIONER_CONSTRAINT_ADJUSTMENT_ENUM
+#define XDG_POSITIONER_CONSTRAINT_ADJUSTMENT_ENUM
+/**
+ * @ingroup iface_xdg_positioner
+ * constraint adjustments
+ *
+ * Bitmask values describing how a constrained surface may be adjusted:
+ * slide, flip, or resize, independently per axis. Combined and applied
+ * via xdg_positioner_set_constraint_adjustment().
+ */
+enum xdg_positioner_constraint_adjustment {
+ XDG_POSITIONER_CONSTRAINT_ADJUSTMENT_NONE = 0,
+ XDG_POSITIONER_CONSTRAINT_ADJUSTMENT_SLIDE_X = 1,
+ XDG_POSITIONER_CONSTRAINT_ADJUSTMENT_SLIDE_Y = 2,
+ XDG_POSITIONER_CONSTRAINT_ADJUSTMENT_FLIP_X = 4,
+ XDG_POSITIONER_CONSTRAINT_ADJUSTMENT_FLIP_Y = 8,
+ XDG_POSITIONER_CONSTRAINT_ADJUSTMENT_RESIZE_X = 16,
+ XDG_POSITIONER_CONSTRAINT_ADJUSTMENT_RESIZE_Y = 32,
+};
+#endif /* XDG_POSITIONER_CONSTRAINT_ADJUSTMENT_ENUM */
+
+#define XDG_POSITIONER_DESTROY 0
+#define XDG_POSITIONER_SET_SIZE 1
+#define XDG_POSITIONER_SET_ANCHOR_RECT 2
+#define XDG_POSITIONER_SET_ANCHOR 3
+#define XDG_POSITIONER_SET_GRAVITY 4
+#define XDG_POSITIONER_SET_CONSTRAINT_ADJUSTMENT 5
+#define XDG_POSITIONER_SET_OFFSET 6
+#define XDG_POSITIONER_SET_REACTIVE 7
+#define XDG_POSITIONER_SET_PARENT_SIZE 8
+#define XDG_POSITIONER_SET_PARENT_CONFIGURE 9
+
+
+/**
+ * @ingroup iface_xdg_positioner
+ */
+#define XDG_POSITIONER_DESTROY_SINCE_VERSION 1
+/**
+ * @ingroup iface_xdg_positioner
+ */
+#define XDG_POSITIONER_SET_SIZE_SINCE_VERSION 1
+/**
+ * @ingroup iface_xdg_positioner
+ */
+#define XDG_POSITIONER_SET_ANCHOR_RECT_SINCE_VERSION 1
+/**
+ * @ingroup iface_xdg_positioner
+ */
+#define XDG_POSITIONER_SET_ANCHOR_SINCE_VERSION 1
+/**
+ * @ingroup iface_xdg_positioner
+ */
+#define XDG_POSITIONER_SET_GRAVITY_SINCE_VERSION 1
+/**
+ * @ingroup iface_xdg_positioner
+ */
+#define XDG_POSITIONER_SET_CONSTRAINT_ADJUSTMENT_SINCE_VERSION 1
+/**
+ * @ingroup iface_xdg_positioner
+ */
+#define XDG_POSITIONER_SET_OFFSET_SINCE_VERSION 1
+/**
+ * @ingroup iface_xdg_positioner
+ */
+#define XDG_POSITIONER_SET_REACTIVE_SINCE_VERSION 3
+/**
+ * @ingroup iface_xdg_positioner
+ */
+#define XDG_POSITIONER_SET_PARENT_SIZE_SINCE_VERSION 3
+/**
+ * @ingroup iface_xdg_positioner
+ */
+#define XDG_POSITIONER_SET_PARENT_CONFIGURE_SINCE_VERSION 3
+
+/** @ingroup iface_xdg_positioner
+ * Attach @p user_data to the proxy; read it back with
+ * xdg_positioner_get_user_data().
+ */
+static inline void
+xdg_positioner_set_user_data(struct xdg_positioner *xdg_positioner, void *user_data)
+{
+ wl_proxy_set_user_data((struct wl_proxy *) xdg_positioner, user_data);
+}
+
+/** @ingroup iface_xdg_positioner
+ * Return the pointer previously stored with xdg_positioner_set_user_data().
+ */
+static inline void *
+xdg_positioner_get_user_data(struct xdg_positioner *xdg_positioner)
+{
+ return wl_proxy_get_user_data((struct wl_proxy *) xdg_positioner);
+}
+
+/** @ingroup iface_xdg_positioner
+ * Return the protocol version bound for this xdg_positioner proxy.
+ */
+static inline uint32_t
+xdg_positioner_get_version(struct xdg_positioner *xdg_positioner)
+{
+ return wl_proxy_get_version((struct wl_proxy *) xdg_positioner);
+}
+
+/**
+ * @ingroup iface_xdg_positioner
+ *
+ * Notify the compositor that the xdg_positioner will no longer be used.
+ */
+static inline void
+xdg_positioner_destroy(struct xdg_positioner *xdg_positioner)
+{
+ wl_proxy_marshal((struct wl_proxy *) xdg_positioner,
+ XDG_POSITIONER_DESTROY);
+
+ /* Destructor request: the client-side proxy is freed as well. */
+ wl_proxy_destroy((struct wl_proxy *) xdg_positioner);
+}
+
+/**
+ * @ingroup iface_xdg_positioner
+ *
+ * Set the size of the surface that is to be positioned with the positioner
+ * object. The size is in surface-local coordinates and corresponds to the
+ * window geometry. See xdg_surface.set_window_geometry.
+ *
+ * If a zero or negative size is set the invalid_input error is raised.
+ *
+ * @param width window geometry width; must be positive
+ * @param height window geometry height; must be positive
+ */
+static inline void
+xdg_positioner_set_size(struct xdg_positioner *xdg_positioner, int32_t width, int32_t height)
+{
+ wl_proxy_marshal((struct wl_proxy *) xdg_positioner,
+ XDG_POSITIONER_SET_SIZE, width, height);
+}
+
+/**
+ * @ingroup iface_xdg_positioner
+ *
+ * Specify the anchor rectangle within the parent surface that the child
+ * surface will be placed relative to. The rectangle is relative to the
+ * window geometry as defined by xdg_surface.set_window_geometry of the
+ * parent surface.
+ *
+ * When the xdg_positioner object is used to position a child surface, the
+ * anchor rectangle may not extend outside the window geometry of the
+ * positioned child's parent surface.
+ *
+ * If a negative size is set the invalid_input error is raised.
+ *
+ * @param x rectangle x position, parent window geometry coordinates
+ * @param y rectangle y position, parent window geometry coordinates
+ * @param width rectangle width; must be non-negative
+ * @param height rectangle height; must be non-negative
+ */
+static inline void
+xdg_positioner_set_anchor_rect(struct xdg_positioner *xdg_positioner, int32_t x, int32_t y, int32_t width, int32_t height)
+{
+ wl_proxy_marshal((struct wl_proxy *) xdg_positioner,
+ XDG_POSITIONER_SET_ANCHOR_RECT, x, y, width, height);
+}
+
+/**
+ * @ingroup iface_xdg_positioner
+ *
+ * Defines the anchor point for the anchor rectangle. The specified anchor
+ * is used to derive an anchor point that the child surface will be
+ * positioned relative to. If a corner anchor is set (e.g. 'top_left' or
+ * 'bottom_right'), the anchor point will be at the specified corner;
+ * otherwise, the derived anchor point will be centered on the specified
+ * edge, or in the center of the anchor rectangle if no edge is specified.
+ *
+ * @param anchor one of enum xdg_positioner_anchor
+ */
+static inline void
+xdg_positioner_set_anchor(struct xdg_positioner *xdg_positioner, uint32_t anchor)
+{
+ wl_proxy_marshal((struct wl_proxy *) xdg_positioner,
+ XDG_POSITIONER_SET_ANCHOR, anchor);
+}
+
+/**
+ * @ingroup iface_xdg_positioner
+ *
+ * Defines in what direction a surface should be positioned, relative to
+ * the anchor point of the parent surface. If a corner gravity is
+ * specified (e.g. 'bottom_right' or 'top_left'), then the child surface
+ * will be placed towards the specified gravity; otherwise, the child
+ * surface will be centered over the anchor point on any axis that had no
+ * gravity specified.
+ *
+ * @param gravity one of enum xdg_positioner_gravity
+ */
+static inline void
+xdg_positioner_set_gravity(struct xdg_positioner *xdg_positioner, uint32_t gravity)
+{
+ wl_proxy_marshal((struct wl_proxy *) xdg_positioner,
+ XDG_POSITIONER_SET_GRAVITY, gravity);
+}
+
+/**
+ * @ingroup iface_xdg_positioner
+ *
+ * Specify how the window should be positioned if the originally intended
+ * position caused the surface to be constrained, meaning at least
+ * partially outside positioning boundaries set by the compositor. The
+ * adjustment is set by constructing a bitmask describing the adjustment to
+ * be made when the surface is constrained on that axis.
+ *
+ * If no bit for one axis is set, the compositor will assume that the child
+ * surface should not change its position on that axis when constrained.
+ *
+ * If more than one bit for one axis is set, the order of how adjustments
+ * are applied is specified in the corresponding adjustment descriptions.
+ *
+ * The default adjustment is none.
+ *
+ * @param constraint_adjustment bitmask of enum
+ * xdg_positioner_constraint_adjustment values
+ */
+static inline void
+xdg_positioner_set_constraint_adjustment(struct xdg_positioner *xdg_positioner, uint32_t constraint_adjustment)
+{
+ wl_proxy_marshal((struct wl_proxy *) xdg_positioner,
+ XDG_POSITIONER_SET_CONSTRAINT_ADJUSTMENT, constraint_adjustment);
+}
+
+/**
+ * @ingroup iface_xdg_positioner
+ *
+ * Specify the surface position offset relative to the position of the
+ * anchor on the anchor rectangle and the anchor on the surface. For
+ * example if the anchor of the anchor rectangle is at (x, y), the surface
+ * has the gravity bottom|right, and the offset is (ox, oy), the calculated
+ * surface position will be (x + ox, y + oy). The offset position of the
+ * surface is the one used for constraint testing. See
+ * set_constraint_adjustment.
+ *
+ * An example use case is placing a popup menu on top of a user interface
+ * element, while aligning the user interface element of the parent surface
+ * with some user interface element placed somewhere in the popup surface.
+ *
+ * @param x surface position x offset
+ * @param y surface position y offset
+ */
+static inline void
+xdg_positioner_set_offset(struct xdg_positioner *xdg_positioner, int32_t x, int32_t y)
+{
+ wl_proxy_marshal((struct wl_proxy *) xdg_positioner,
+ XDG_POSITIONER_SET_OFFSET, x, y);
+}
+
+/**
+ * @ingroup iface_xdg_positioner
+ *
+ * When set reactive, the surface is reconstrained if the conditions used
+ * for constraining changed, e.g. the parent window moved.
+ *
+ * If the conditions changed and the popup was reconstrained, an
+ * xdg_popup.configure event is sent with updated geometry, followed by an
+ * xdg_surface.configure event.
+ *
+ * Available since version 3 (XDG_POSITIONER_SET_REACTIVE_SINCE_VERSION).
+ */
+static inline void
+xdg_positioner_set_reactive(struct xdg_positioner *xdg_positioner)
+{
+ wl_proxy_marshal((struct wl_proxy *) xdg_positioner,
+ XDG_POSITIONER_SET_REACTIVE);
+}
+
+/**
+ * @ingroup iface_xdg_positioner
+ *
+ * Set the parent window geometry the compositor should use when
+ * positioning the popup. The compositor may use this information to
+ * determine the future state the popup should be constrained using. If
+ * this doesn't match the dimension of the parent the popup is eventually
+ * positioned against, the behavior is undefined.
+ *
+ * The arguments are given in the surface-local coordinate space.
+ *
+ * Available since version 3 (XDG_POSITIONER_SET_PARENT_SIZE_SINCE_VERSION).
+ *
+ * @param parent_width future window geometry width of parent
+ * @param parent_height future window geometry height of parent
+ */
+static inline void
+xdg_positioner_set_parent_size(struct xdg_positioner *xdg_positioner, int32_t parent_width, int32_t parent_height)
+{
+ wl_proxy_marshal((struct wl_proxy *) xdg_positioner,
+ XDG_POSITIONER_SET_PARENT_SIZE, parent_width, parent_height);
+}
+
+/**
+ * @ingroup iface_xdg_positioner
+ *
+ * Set the serial of an xdg_surface.configure event this positioner will be
+ * used in response to. The compositor may use this information together
+ * with set_parent_size to determine what future state the popup should be
+ * constrained using.
+ *
+ * Available since version 3
+ * (XDG_POSITIONER_SET_PARENT_CONFIGURE_SINCE_VERSION).
+ *
+ * @param serial serial of an xdg_surface.configure event
+ */
+static inline void
+xdg_positioner_set_parent_configure(struct xdg_positioner *xdg_positioner, uint32_t serial)
+{
+ wl_proxy_marshal((struct wl_proxy *) xdg_positioner,
+ XDG_POSITIONER_SET_PARENT_CONFIGURE, serial);
+}
+
+#ifndef XDG_SURFACE_ERROR_ENUM
+#define XDG_SURFACE_ERROR_ENUM
+/**
+ * @ingroup iface_xdg_surface
+ * Protocol errors that xdg_surface requests can raise.
+ */
+enum xdg_surface_error {
+ /**
+ * request made before the surface was fully constructed
+ */
+ XDG_SURFACE_ERROR_NOT_CONSTRUCTED = 1,
+ /**
+ * surface was already constructed
+ */
+ XDG_SURFACE_ERROR_ALREADY_CONSTRUCTED = 2,
+ /**
+ * buffer attached/committed before the first configure
+ * (see the xdg_surface interface description)
+ */
+ XDG_SURFACE_ERROR_UNCONFIGURED_BUFFER = 3,
+};
+#endif /* XDG_SURFACE_ERROR_ENUM */
+
+/**
+ * @ingroup iface_xdg_surface
+ * @struct xdg_surface_listener
+ */
+struct xdg_surface_listener {
+ /**
+ * suggest a surface change
+ *
+ * The configure event marks the end of a configure sequence. A
+ * configure sequence is a set of one or more events configuring
+ * the state of the xdg_surface, including the final
+ * xdg_surface.configure event.
+ *
+ * Where applicable, xdg_surface surface roles will during a
+ * configure sequence extend this event as a latched state sent as
+ * events before the xdg_surface.configure event. Such events
+ * should be considered to make up a set of atomically applied
+ * configuration states, where the xdg_surface.configure commits
+ * the accumulated state.
+ *
+ * Clients should arrange their surface for the new states, and
+ * then send an ack_configure request with the serial sent in this
+ * configure event at some point before committing the new surface.
+ *
+ * If the client receives multiple configure events before it can
+ * respond to one, it is free to discard all but the last event it
+ * received.
+ * @param serial serial of the configure event
+ */
+ void (*configure)(void *data,
+ struct xdg_surface *xdg_surface,
+ uint32_t serial);
+};
+
+/**
+ * @ingroup iface_xdg_surface
+ */
+static inline int
+xdg_surface_add_listener(struct xdg_surface *xdg_surface,
+ const struct xdg_surface_listener *listener, void *data)
+{
+ return wl_proxy_add_listener((struct wl_proxy *) xdg_surface,
+ (void (**)(void)) listener, data);
+}
+
+#define XDG_SURFACE_DESTROY 0
+#define XDG_SURFACE_GET_TOPLEVEL 1
+#define XDG_SURFACE_GET_POPUP 2
+#define XDG_SURFACE_SET_WINDOW_GEOMETRY 3
+#define XDG_SURFACE_ACK_CONFIGURE 4
+
+/**
+ * @ingroup iface_xdg_surface
+ */
+#define XDG_SURFACE_CONFIGURE_SINCE_VERSION 1
+
+/**
+ * @ingroup iface_xdg_surface
+ */
+#define XDG_SURFACE_DESTROY_SINCE_VERSION 1
+/**
+ * @ingroup iface_xdg_surface
+ */
+#define XDG_SURFACE_GET_TOPLEVEL_SINCE_VERSION 1
+/**
+ * @ingroup iface_xdg_surface
+ */
+#define XDG_SURFACE_GET_POPUP_SINCE_VERSION 1
+/**
+ * @ingroup iface_xdg_surface
+ */
+#define XDG_SURFACE_SET_WINDOW_GEOMETRY_SINCE_VERSION 1
+/**
+ * @ingroup iface_xdg_surface
+ */
+#define XDG_SURFACE_ACK_CONFIGURE_SINCE_VERSION 1
+
+/** @ingroup iface_xdg_surface */
+static inline void
+xdg_surface_set_user_data(struct xdg_surface *xdg_surface, void *user_data)
+{
+ wl_proxy_set_user_data((struct wl_proxy *) xdg_surface, user_data);
+}
+
+/** @ingroup iface_xdg_surface */
+static inline void *
+xdg_surface_get_user_data(struct xdg_surface *xdg_surface)
+{
+ return wl_proxy_get_user_data((struct wl_proxy *) xdg_surface);
+}
+
+/** @ingroup iface_xdg_surface */
+static inline uint32_t
+xdg_surface_get_version(struct xdg_surface *xdg_surface)
+{
+ return wl_proxy_get_version((struct wl_proxy *) xdg_surface);
+}
+
+/**
+ * @ingroup iface_xdg_surface
+ *
+ * Destroy the xdg_surface object. An xdg_surface must only be destroyed
+ * after its role object has been destroyed.
+ */
+static inline void
+xdg_surface_destroy(struct xdg_surface *xdg_surface)
+{
+ wl_proxy_marshal((struct wl_proxy *) xdg_surface,
+ XDG_SURFACE_DESTROY);
+
+ wl_proxy_destroy((struct wl_proxy *) xdg_surface);
+}
+
+/**
+ * @ingroup iface_xdg_surface
+ *
+ * This creates an xdg_toplevel object for the given xdg_surface and gives
+ * the associated wl_surface the xdg_toplevel role.
+ *
+ * See the documentation of xdg_toplevel for more details about what an
+ * xdg_toplevel is and how it is used.
+ */
+static inline struct xdg_toplevel *
+xdg_surface_get_toplevel(struct xdg_surface *xdg_surface)
+{
+ struct wl_proxy *id;
+
+ id = wl_proxy_marshal_constructor((struct wl_proxy *) xdg_surface,
+ XDG_SURFACE_GET_TOPLEVEL, &xdg_toplevel_interface, NULL);
+
+ return (struct xdg_toplevel *) id;
+}
+
+/**
+ * @ingroup iface_xdg_surface
+ *
+ * This creates an xdg_popup object for the given xdg_surface and gives
+ * the associated wl_surface the xdg_popup role.
+ *
+ * If null is passed as a parent, a parent surface must be specified using
+ * some other protocol, before committing the initial state.
+ *
+ * See the documentation of xdg_popup for more details about what an
+ * xdg_popup is and how it is used.
+ */
+static inline struct xdg_popup *
+xdg_surface_get_popup(struct xdg_surface *xdg_surface, struct xdg_surface *parent, struct xdg_positioner *positioner)
+{
+ struct wl_proxy *id;
+
+ id = wl_proxy_marshal_constructor((struct wl_proxy *) xdg_surface,
+ XDG_SURFACE_GET_POPUP, &xdg_popup_interface, NULL, parent, positioner);
+
+ return (struct xdg_popup *) id;
+}
+
+/**
+ * @ingroup iface_xdg_surface
+ *
+ * The window geometry of a surface is its "visible bounds" from the
+ * user's perspective. Client-side decorations often have invisible
+ * portions like drop-shadows which should be ignored for the
+ * purposes of aligning, placing and constraining windows.
+ *
+ * The window geometry is double buffered, and will be applied at the
+ * time wl_surface.commit of the corresponding wl_surface is called.
+ *
+ * When maintaining a position, the compositor should treat the (x, y)
+ * coordinate of the window geometry as the top left corner of the window.
+ * A client changing the (x, y) window geometry coordinate should in
+ * general not alter the position of the window.
+ *
+ * Once the window geometry of the surface is set, it is not possible to
+ * unset it, and it will remain the same until set_window_geometry is
+ * called again, even if a new subsurface or buffer is attached.
+ *
+ * If never set, the value is the full bounds of the surface,
+ * including any subsurfaces. This updates dynamically on every
+ * commit. This unset is meant for extremely simple clients.
+ *
+ * The arguments are given in the surface-local coordinate space of
+ * the wl_surface associated with this xdg_surface.
+ *
+ * The width and height must be greater than zero. Setting an invalid size
+ * will raise an error. When applied, the effective window geometry will be
+ * the set window geometry clamped to the bounding rectangle of the
+ * combined geometry of the surface of the xdg_surface and the associated
+ * subsurfaces.
+ */
+static inline void
+xdg_surface_set_window_geometry(struct xdg_surface *xdg_surface, int32_t x, int32_t y, int32_t width, int32_t height)
+{
+ wl_proxy_marshal((struct wl_proxy *) xdg_surface,
+ XDG_SURFACE_SET_WINDOW_GEOMETRY, x, y, width, height);
+}
+
+/**
+ * @ingroup iface_xdg_surface
+ *
+ * When a configure event is received, if a client commits the
+ * surface in response to the configure event, then the client
+ * must make an ack_configure request sometime before the commit
+ * request, passing along the serial of the configure event.
+ *
+ * For instance, for toplevel surfaces the compositor might use this
+ * information to move a surface to the top left only when the client has
+ * drawn itself for the maximized or fullscreen state.
+ *
+ * If the client receives multiple configure events before it
+ * can respond to one, it only has to ack the last configure event.
+ *
+ * A client is not required to commit immediately after sending
+ * an ack_configure request - it may even ack_configure several times
+ * before its next surface commit.
+ *
+ * A client may send multiple ack_configure requests before committing, but
+ * only the last request sent before a commit indicates which configure
+ * event the client really is responding to.
+ */
+static inline void
+xdg_surface_ack_configure(struct xdg_surface *xdg_surface, uint32_t serial)
+{
+ wl_proxy_marshal((struct wl_proxy *) xdg_surface,
+ XDG_SURFACE_ACK_CONFIGURE, serial);
+}
+
+#ifndef XDG_TOPLEVEL_RESIZE_EDGE_ENUM
+#define XDG_TOPLEVEL_RESIZE_EDGE_ENUM
+/**
+ * @ingroup iface_xdg_toplevel
+ * edge values for resizing
+ *
+ * These values are used to indicate which edge of a surface
+ * is being dragged in a resize operation.
+ *
+ * The corner values are the bitwise OR of the corresponding
+ * horizontal and vertical edge values, e.g.
+ * TOP_LEFT (5) == TOP (1) | LEFT (4).
+ */
+enum xdg_toplevel_resize_edge {
+ XDG_TOPLEVEL_RESIZE_EDGE_NONE = 0,
+ XDG_TOPLEVEL_RESIZE_EDGE_TOP = 1,
+ XDG_TOPLEVEL_RESIZE_EDGE_BOTTOM = 2,
+ XDG_TOPLEVEL_RESIZE_EDGE_LEFT = 4,
+ XDG_TOPLEVEL_RESIZE_EDGE_TOP_LEFT = 5,
+ XDG_TOPLEVEL_RESIZE_EDGE_BOTTOM_LEFT = 6,
+ XDG_TOPLEVEL_RESIZE_EDGE_RIGHT = 8,
+ XDG_TOPLEVEL_RESIZE_EDGE_TOP_RIGHT = 9,
+ XDG_TOPLEVEL_RESIZE_EDGE_BOTTOM_RIGHT = 10,
+};
+#endif /* XDG_TOPLEVEL_RESIZE_EDGE_ENUM */
+
+#ifndef XDG_TOPLEVEL_STATE_ENUM
+#define XDG_TOPLEVEL_STATE_ENUM
+/**
+ * @ingroup iface_xdg_toplevel
+ * types of state on the surface
+ *
+ * The different state values used on the surface. This is designed for
+ * state values like maximized, fullscreen. It is paired with the
+ * configure event to ensure that both the client and the compositor
+ * setting the state can be synchronized.
+ */
+enum xdg_toplevel_state {
+ /**
+ * the surface is maximized
+ */
+ XDG_TOPLEVEL_STATE_MAXIMIZED = 1,
+ /**
+ * the surface is fullscreen
+ */
+ XDG_TOPLEVEL_STATE_FULLSCREEN = 2,
+ /**
+ * the surface is being resized
+ */
+ XDG_TOPLEVEL_STATE_RESIZING = 3,
+ /**
+ * the surface is now activated
+ */
+ XDG_TOPLEVEL_STATE_ACTIVATED = 4,
+ /**
+ * the window is tiled and its left edge is considered adjacent to
+ * another part of the tiling grid
+ * @since 2
+ */
+ XDG_TOPLEVEL_STATE_TILED_LEFT = 5,
+ /**
+ * the window is tiled and its right edge is considered adjacent to
+ * another part of the tiling grid
+ * @since 2
+ */
+ XDG_TOPLEVEL_STATE_TILED_RIGHT = 6,
+ /**
+ * the window is tiled and its top edge is considered adjacent to
+ * another part of the tiling grid
+ * @since 2
+ */
+ XDG_TOPLEVEL_STATE_TILED_TOP = 7,
+ /**
+ * the window is tiled and its bottom edge is considered adjacent to
+ * another part of the tiling grid
+ * @since 2
+ */
+ XDG_TOPLEVEL_STATE_TILED_BOTTOM = 8,
+};
+/**
+ * @ingroup iface_xdg_toplevel
+ */
+#define XDG_TOPLEVEL_STATE_TILED_LEFT_SINCE_VERSION 2
+/**
+ * @ingroup iface_xdg_toplevel
+ */
+#define XDG_TOPLEVEL_STATE_TILED_RIGHT_SINCE_VERSION 2
+/**
+ * @ingroup iface_xdg_toplevel
+ */
+#define XDG_TOPLEVEL_STATE_TILED_TOP_SINCE_VERSION 2
+/**
+ * @ingroup iface_xdg_toplevel
+ */
+#define XDG_TOPLEVEL_STATE_TILED_BOTTOM_SINCE_VERSION 2
+#endif /* XDG_TOPLEVEL_STATE_ENUM */
+
+/**
+ * @ingroup iface_xdg_toplevel
+ * @struct xdg_toplevel_listener
+ */
+struct xdg_toplevel_listener {
+ /**
+ * suggest a surface change
+ *
+ * This configure event asks the client to resize its toplevel
+ * surface or to change its state. The configured state should not
+ * be applied immediately. See xdg_surface.configure for details.
+ *
+ * The width and height arguments specify a hint to the window
+ * about how its surface should be resized in window geometry
+ * coordinates. See set_window_geometry.
+ *
+ * If the width or height arguments are zero, it means the client
+ * should decide its own window dimension. This may happen when the
+ * compositor needs to configure the state of the surface but
+ * doesn't have any information about any previous or expected
+ * dimension.
+ *
+ * The states listed in the event specify how the width/height
+ * arguments should be interpreted, and possibly how it should be
+ * drawn.
+ *
+ * Clients must send an ack_configure in response to this event.
+ * See xdg_surface.configure and xdg_surface.ack_configure for
+ * details.
+ */
+ void (*configure)(void *data,
+ struct xdg_toplevel *xdg_toplevel,
+ int32_t width,
+ int32_t height,
+ struct wl_array *states);
+ /**
+ * surface wants to be closed
+ *
+ * The close event is sent by the compositor when the user wants
+ * the surface to be closed. This should be equivalent to the user
+ * clicking the close button in client-side decorations, if your
+ * application has any.
+ *
+ * This is only a request that the user intends to close the
+ * window. The client may choose to ignore this request, or show a
+ * dialog to ask the user to save their data, etc.
+ */
+ void (*close)(void *data,
+ struct xdg_toplevel *xdg_toplevel);
+};
+
+/**
+ * @ingroup iface_xdg_toplevel
+ */
+static inline int
+xdg_toplevel_add_listener(struct xdg_toplevel *xdg_toplevel,
+ const struct xdg_toplevel_listener *listener, void *data)
+{
+ return wl_proxy_add_listener((struct wl_proxy *) xdg_toplevel,
+ (void (**)(void)) listener, data);
+}
+
+#define XDG_TOPLEVEL_DESTROY 0
+#define XDG_TOPLEVEL_SET_PARENT 1
+#define XDG_TOPLEVEL_SET_TITLE 2
+#define XDG_TOPLEVEL_SET_APP_ID 3
+#define XDG_TOPLEVEL_SHOW_WINDOW_MENU 4
+#define XDG_TOPLEVEL_MOVE 5
+#define XDG_TOPLEVEL_RESIZE 6
+#define XDG_TOPLEVEL_SET_MAX_SIZE 7
+#define XDG_TOPLEVEL_SET_MIN_SIZE 8
+#define XDG_TOPLEVEL_SET_MAXIMIZED 9
+#define XDG_TOPLEVEL_UNSET_MAXIMIZED 10
+#define XDG_TOPLEVEL_SET_FULLSCREEN 11
+#define XDG_TOPLEVEL_UNSET_FULLSCREEN 12
+#define XDG_TOPLEVEL_SET_MINIMIZED 13
+
+/**
+ * @ingroup iface_xdg_toplevel
+ */
+#define XDG_TOPLEVEL_CONFIGURE_SINCE_VERSION 1
+/**
+ * @ingroup iface_xdg_toplevel
+ */
+#define XDG_TOPLEVEL_CLOSE_SINCE_VERSION 1
+
+/**
+ * @ingroup iface_xdg_toplevel
+ */
+#define XDG_TOPLEVEL_DESTROY_SINCE_VERSION 1
+/**
+ * @ingroup iface_xdg_toplevel
+ */
+#define XDG_TOPLEVEL_SET_PARENT_SINCE_VERSION 1
+/**
+ * @ingroup iface_xdg_toplevel
+ */
+#define XDG_TOPLEVEL_SET_TITLE_SINCE_VERSION 1
+/**
+ * @ingroup iface_xdg_toplevel
+ */
+#define XDG_TOPLEVEL_SET_APP_ID_SINCE_VERSION 1
+/**
+ * @ingroup iface_xdg_toplevel
+ */
+#define XDG_TOPLEVEL_SHOW_WINDOW_MENU_SINCE_VERSION 1
+/**
+ * @ingroup iface_xdg_toplevel
+ */
+#define XDG_TOPLEVEL_MOVE_SINCE_VERSION 1
+/**
+ * @ingroup iface_xdg_toplevel
+ */
+#define XDG_TOPLEVEL_RESIZE_SINCE_VERSION 1
+/**
+ * @ingroup iface_xdg_toplevel
+ */
+#define XDG_TOPLEVEL_SET_MAX_SIZE_SINCE_VERSION 1
+/**
+ * @ingroup iface_xdg_toplevel
+ */
+#define XDG_TOPLEVEL_SET_MIN_SIZE_SINCE_VERSION 1
+/**
+ * @ingroup iface_xdg_toplevel
+ */
+#define XDG_TOPLEVEL_SET_MAXIMIZED_SINCE_VERSION 1
+/**
+ * @ingroup iface_xdg_toplevel
+ */
+#define XDG_TOPLEVEL_UNSET_MAXIMIZED_SINCE_VERSION 1
+/**
+ * @ingroup iface_xdg_toplevel
+ */
+#define XDG_TOPLEVEL_SET_FULLSCREEN_SINCE_VERSION 1
+/**
+ * @ingroup iface_xdg_toplevel
+ */
+#define XDG_TOPLEVEL_UNSET_FULLSCREEN_SINCE_VERSION 1
+/**
+ * @ingroup iface_xdg_toplevel
+ */
+#define XDG_TOPLEVEL_SET_MINIMIZED_SINCE_VERSION 1
+
+/** @ingroup iface_xdg_toplevel */
+static inline void
+xdg_toplevel_set_user_data(struct xdg_toplevel *xdg_toplevel, void *user_data)
+{
+ wl_proxy_set_user_data((struct wl_proxy *) xdg_toplevel, user_data);
+}
+
+/** @ingroup iface_xdg_toplevel */
+static inline void *
+xdg_toplevel_get_user_data(struct xdg_toplevel *xdg_toplevel)
+{
+ return wl_proxy_get_user_data((struct wl_proxy *) xdg_toplevel);
+}
+
+/** @ingroup iface_xdg_toplevel */
+static inline uint32_t
+xdg_toplevel_get_version(struct xdg_toplevel *xdg_toplevel)
+{
+ return wl_proxy_get_version((struct wl_proxy *) xdg_toplevel);
+}
+
+/**
+ * @ingroup iface_xdg_toplevel
+ *
+ * This request destroys the role surface and unmaps the surface;
+ * see "Unmapping" behavior in interface section for details.
+ */
+static inline void
+xdg_toplevel_destroy(struct xdg_toplevel *xdg_toplevel)
+{
+ wl_proxy_marshal((struct wl_proxy *) xdg_toplevel,
+ XDG_TOPLEVEL_DESTROY);
+
+ wl_proxy_destroy((struct wl_proxy *) xdg_toplevel);
+}
+
+/**
+ * @ingroup iface_xdg_toplevel
+ *
+ * Set the "parent" of this surface. This surface should be stacked
+ * above the parent surface and all other ancestor surfaces.
+ *
+ * Parent windows should be set on dialogs, toolboxes, or other
+ * "auxiliary" surfaces, so that the parent is raised when the dialog
+ * is raised.
+ *
+ * Setting a null parent for a child window removes any parent-child
+ * relationship for the child. Setting a null parent for a window which
+ * currently has no parent is a no-op.
+ *
+ * If the parent is unmapped then its children are managed as
+ * though the parent of the now-unmapped parent has become the
+ * parent of this surface. If no parent exists for the now-unmapped
+ * parent then the children are managed as though they have no
+ * parent surface.
+ */
+static inline void
+xdg_toplevel_set_parent(struct xdg_toplevel *xdg_toplevel, struct xdg_toplevel *parent)
+{
+ wl_proxy_marshal((struct wl_proxy *) xdg_toplevel,
+ XDG_TOPLEVEL_SET_PARENT, parent);
+}
+
+/**
+ * @ingroup iface_xdg_toplevel
+ *
+ * Set a short title for the surface.
+ *
+ * This string may be used to identify the surface in a task bar,
+ * window list, or other user interface elements provided by the
+ * compositor.
+ *
+ * The string must be encoded in UTF-8.
+ */
+static inline void
+xdg_toplevel_set_title(struct xdg_toplevel *xdg_toplevel, const char *title)
+{
+ wl_proxy_marshal((struct wl_proxy *) xdg_toplevel,
+ XDG_TOPLEVEL_SET_TITLE, title);
+}
+
+/**
+ * @ingroup iface_xdg_toplevel
+ *
+ * Set an application identifier for the surface.
+ *
+ * The app ID identifies the general class of applications to which
+ * the surface belongs. The compositor can use this to group multiple
+ * surfaces together, or to determine how to launch a new application.
+ *
+ * For D-Bus activatable applications, the app ID is used as the D-Bus
+ * service name.
+ *
+ * The compositor shell will try to group application surfaces together
+ * by their app ID. As a best practice, it is suggested to select app
+ * ID's that match the basename of the application's .desktop file.
+ * For example, "org.freedesktop.FooViewer" where the .desktop file is
+ * "org.freedesktop.FooViewer.desktop".
+ *
+ * Like other properties, a set_app_id request can be sent after the
+ * xdg_toplevel has been mapped to update the property.
+ *
+ * See the desktop-entry specification [0] for more details on
+ * application identifiers and how they relate to well-known D-Bus
+ * names and .desktop files.
+ *
+ * [0] http://standards.freedesktop.org/desktop-entry-spec/
+ */
+static inline void
+xdg_toplevel_set_app_id(struct xdg_toplevel *xdg_toplevel, const char *app_id)
+{
+ wl_proxy_marshal((struct wl_proxy *) xdg_toplevel,
+ XDG_TOPLEVEL_SET_APP_ID, app_id);
+}
+
+/**
+ * @ingroup iface_xdg_toplevel
+ *
+ * Clients implementing client-side decorations might want to show
+ * a context menu when right-clicking on the decorations, giving the
+ * user a menu that they can use to maximize or minimize the window.
+ *
+ * This request asks the compositor to pop up such a window menu at
+ * the given position, relative to the local surface coordinates of
+ * the parent surface. There are no guarantees as to what menu items
+ * the window menu contains.
+ *
+ * This request must be used in response to some sort of user action
+ * like a button press, key press, or touch down event.
+ */
+static inline void
+xdg_toplevel_show_window_menu(struct xdg_toplevel *xdg_toplevel, struct wl_seat *seat, uint32_t serial, int32_t x, int32_t y)
+{
+ wl_proxy_marshal((struct wl_proxy *) xdg_toplevel,
+ XDG_TOPLEVEL_SHOW_WINDOW_MENU, seat, serial, x, y);
+}
+
+/**
+ * @ingroup iface_xdg_toplevel
+ *
+ * Start an interactive, user-driven move of the surface.
+ *
+ * This request must be used in response to some sort of user action
+ * like a button press, key press, or touch down event. The passed
+ * serial is used to determine the type of interactive move (touch,
+ * pointer, etc).
+ *
+ * The server may ignore move requests depending on the state of
+ * the surface (e.g. fullscreen or maximized), or if the passed serial
+ * is no longer valid.
+ *
+ * If triggered, the surface will lose the focus of the device
+ * (wl_pointer, wl_touch, etc) used for the move. It is up to the
+ * compositor to visually indicate that the move is taking place, such as
+ * updating a pointer cursor, during the move. There is no guarantee
+ * that the device focus will return when the move is completed.
+ */
+static inline void
+xdg_toplevel_move(struct xdg_toplevel *xdg_toplevel, struct wl_seat *seat, uint32_t serial)
+{
+ wl_proxy_marshal((struct wl_proxy *) xdg_toplevel,
+ XDG_TOPLEVEL_MOVE, seat, serial);
+}
+
+/**
+ * @ingroup iface_xdg_toplevel
+ *
+ * Start a user-driven, interactive resize of the surface.
+ *
+ * This request must be used in response to some sort of user action
+ * like a button press, key press, or touch down event. The passed
+ * serial is used to determine the type of interactive resize (touch,
+ * pointer, etc).
+ *
+ * The server may ignore resize requests depending on the state of
+ * the surface (e.g. fullscreen or maximized).
+ *
+ * If triggered, the client will receive configure events with the
+ * "resize" state enum value and the expected sizes. See the "resize"
+ * enum value for more details about what is required. The client
+ * must also acknowledge configure events using "ack_configure". After
+ * the resize is completed, the client will receive another "configure"
+ * event without the resize state.
+ *
+ * If triggered, the surface also will lose the focus of the device
+ * (wl_pointer, wl_touch, etc) used for the resize. It is up to the
+ * compositor to visually indicate that the resize is taking place,
+ * such as updating a pointer cursor, during the resize. There is no
+ * guarantee that the device focus will return when the resize is
+ * completed.
+ *
+ * The edges parameter specifies how the surface should be resized,
+ * and is one of the values of the resize_edge enum. The compositor
+ * may use this information to update the surface position for
+ * example when dragging the top left corner. The compositor may also
+ * use this information to adapt its behavior, e.g. choose an
+ * appropriate cursor image.
+ */
+static inline void
+xdg_toplevel_resize(struct xdg_toplevel *xdg_toplevel, struct wl_seat *seat, uint32_t serial, uint32_t edges)
+{
+ wl_proxy_marshal((struct wl_proxy *) xdg_toplevel,
+ XDG_TOPLEVEL_RESIZE, seat, serial, edges);
+}
+
+/**
+ * @ingroup iface_xdg_toplevel
+ *
+ * Set a maximum size for the window.
+ *
+ * The client can specify a maximum size so that the compositor does
+ * not try to configure the window beyond this size.
+ *
+ * The width and height arguments are in window geometry coordinates.
+ * See xdg_surface.set_window_geometry.
+ *
+ * Values set in this way are double-buffered. They will get applied
+ * on the next commit.
+ *
+ * The compositor can use this information to allow or disallow
+ * different states like maximize or fullscreen and draw accurate
+ * animations.
+ *
+ * Similarly, a tiling window manager may use this information to
+ * place and resize client windows in a more effective way.
+ *
+ * The client should not rely on the compositor to obey the maximum
+ * size. The compositor may decide to ignore the values set by the
+ * client and request a larger size.
+ *
+ * If never set, or a value of zero in the request, the client has no
+ * expected maximum size in the given dimension. As a result, a client
+ * wishing to reset the maximum size to an unspecified state can use
+ * zero for width and height in the request.
+ *
+ * Requesting a maximum size to be smaller than the minimum size of
+ * a surface is illegal and will result in a protocol error.
+ *
+ * The width and height must be greater than or equal to zero. Using
+ * strictly negative values for width and height will result in a
+ * protocol error.
+ */
+static inline void
+xdg_toplevel_set_max_size(struct xdg_toplevel *xdg_toplevel, int32_t width, int32_t height)
+{
+ wl_proxy_marshal((struct wl_proxy *) xdg_toplevel,
+ XDG_TOPLEVEL_SET_MAX_SIZE, width, height);
+}
+
+/**
+ * @ingroup iface_xdg_toplevel
+ *
+ * Set a minimum size for the window.
+ *
+ * The client can specify a minimum size so that the compositor does
+ * not try to configure the window below this size.
+ *
+ * The width and height arguments are in window geometry coordinates.
+ * See xdg_surface.set_window_geometry.
+ *
+ * Values set in this way are double-buffered. They will get applied
+ * on the next commit.
+ *
+ * The compositor can use this information to allow or disallow
+ * different states like maximize or fullscreen and draw accurate
+ * animations.
+ *
+ * Similarly, a tiling window manager may use this information to
+ * place and resize client windows in a more effective way.
+ *
+ * The client should not rely on the compositor to obey the minimum
+ * size. The compositor may decide to ignore the values set by the
+ * client and request a smaller size.
+ *
+ * If never set, or a value of zero in the request, the client has no
+ * expected minimum size in the given dimension. As a result, a client
+ * wishing to reset the minimum size to an unspecified state can use
+ * zero for width and height in the request.
+ *
+ * Requesting a minimum size to be larger than the maximum size of
+ * a surface is illegal and will result in a protocol error.
+ *
+ * The width and height must be greater than or equal to zero. Using
+ * strictly negative values for width and height will result in a
+ * protocol error.
+ */
+static inline void
+xdg_toplevel_set_min_size(struct xdg_toplevel *xdg_toplevel, int32_t width, int32_t height)
+{
+ wl_proxy_marshal((struct wl_proxy *) xdg_toplevel,
+ XDG_TOPLEVEL_SET_MIN_SIZE, width, height);
+}
+
+/**
+ * @ingroup iface_xdg_toplevel
+ *
+ * Maximize the surface.
+ *
+ * After requesting that the surface should be maximized, the compositor
+ * will respond by emitting a configure event. Whether this configure
+ * actually sets the window maximized is subject to compositor policies.
+ * The client must then update its content, drawing in the configured
+ * state. The client must also acknowledge the configure when committing
+ * the new content (see ack_configure).
+ *
+ * It is up to the compositor to decide how and where to maximize the
+ * surface, for example which output and what region of the screen should
+ * be used.
+ *
+ * If the surface was already maximized, the compositor will still emit
+ * a configure event with the "maximized" state.
+ *
+ * If the surface is in a fullscreen state, this request has no direct
+ * effect. It may alter the state the surface is returned to when
+ * unmaximized unless overridden by the compositor.
+ */
+static inline void
+xdg_toplevel_set_maximized(struct xdg_toplevel *xdg_toplevel)
+{
+ wl_proxy_marshal((struct wl_proxy *) xdg_toplevel,
+ XDG_TOPLEVEL_SET_MAXIMIZED);
+}
+
+/**
+ * @ingroup iface_xdg_toplevel
+ *
+ * Unmaximize the surface.
+ *
+ * After requesting that the surface should be unmaximized, the compositor
+ * will respond by emitting a configure event. Whether this actually
+ * un-maximizes the window is subject to compositor policies.
+ * If available and applicable, the compositor will include the window
+ * geometry dimensions the window had prior to being maximized in the
+ * configure event. The client must then update its content, drawing it in
+ * the configured state. The client must also acknowledge the configure
+ * when committing the new content (see ack_configure).
+ *
+ * It is up to the compositor to position the surface after it was
+ * unmaximized; usually the position the surface had before maximizing, if
+ * applicable.
+ *
+ * If the surface was already not maximized, the compositor will still
+ * emit a configure event without the "maximized" state.
+ *
+ * If the surface is in a fullscreen state, this request has no direct
+ * effect. It may alter the state the surface is returned to when
+ * unmaximized unless overridden by the compositor.
+ */
+static inline void
+xdg_toplevel_unset_maximized(struct xdg_toplevel *xdg_toplevel)
+{
+ wl_proxy_marshal((struct wl_proxy *) xdg_toplevel,
+ XDG_TOPLEVEL_UNSET_MAXIMIZED);
+}
+
+/**
+ * @ingroup iface_xdg_toplevel
+ *
+ * Make the surface fullscreen.
+ *
+ * After requesting that the surface should be fullscreened, the
+ * compositor will respond by emitting a configure event. Whether the
+ * client is actually put into a fullscreen state is subject to compositor
+ * policies. The client must also acknowledge the configure when
+ * committing the new content (see ack_configure).
+ *
+ * The output passed by the request indicates the client's preference as
+ * to which display it should be set fullscreen on. If this value is NULL,
+ * it's up to the compositor to choose which display will be used to map
+ * this surface.
+ *
+ * If the surface doesn't cover the whole output, the compositor will
+ * position the surface in the center of the output and compensate with
+ * border fill covering the rest of the output. The content of the
+ * border fill is undefined, but should be assumed to be in some way that
+ * attempts to blend into the surrounding area (e.g. solid black).
+ *
+ * If the fullscreened surface is not opaque, the compositor must make
+ * sure that other screen content not part of the same surface tree (made
+ * up of subsurfaces, popups or similarly coupled surfaces) are not
+ * visible below the fullscreened surface.
+ */
+static inline void
+xdg_toplevel_set_fullscreen(struct xdg_toplevel *xdg_toplevel, struct wl_output *output)
+{
+ wl_proxy_marshal((struct wl_proxy *) xdg_toplevel,
+ XDG_TOPLEVEL_SET_FULLSCREEN, output);
+}
+
+/**
+ * @ingroup iface_xdg_toplevel
+ *
+ * Make the surface no longer fullscreen.
+ *
+ * After requesting that the surface should be unfullscreened, the
+ * compositor will respond by emitting a configure event.
+ * Whether this actually removes the fullscreen state of the client is
+ * subject to compositor policies.
+ *
+ * Making a surface unfullscreen sets states for the surface based on the following:
+ * * the state(s) it may have had before becoming fullscreen
+ * * any state(s) decided by the compositor
+ * * any state(s) requested by the client while the surface was fullscreen
+ *
+ * The compositor may include the previous window geometry dimensions in
+ * the configure event, if applicable.
+ *
+ * The client must also acknowledge the configure when committing the new
+ * content (see ack_configure).
+ */
+static inline void
+xdg_toplevel_unset_fullscreen(struct xdg_toplevel *xdg_toplevel)
+{
+ wl_proxy_marshal((struct wl_proxy *) xdg_toplevel,
+ XDG_TOPLEVEL_UNSET_FULLSCREEN);
+}
+
+/**
+ * @ingroup iface_xdg_toplevel
+ *
+ * Request that the compositor minimize your surface. There is no
+ * way to know if the surface is currently minimized, nor is there
+ * any way to unset minimization on this surface.
+ *
+ * If you are looking to throttle redrawing when minimized, please
+ * instead use the wl_surface.frame event for this, as this will
+ * also work with live previews on windows in Alt-Tab, Expose or
+ * similar compositor features.
+ */
+static inline void
+xdg_toplevel_set_minimized(struct xdg_toplevel *xdg_toplevel)
+{
+ wl_proxy_marshal((struct wl_proxy *) xdg_toplevel,
+ XDG_TOPLEVEL_SET_MINIMIZED);
+}
+
+#ifndef XDG_POPUP_ERROR_ENUM
+#define XDG_POPUP_ERROR_ENUM
+enum xdg_popup_error {
+ /**
+ * tried to grab after being mapped
+ */
+ XDG_POPUP_ERROR_INVALID_GRAB = 0,
+};
+#endif /* XDG_POPUP_ERROR_ENUM */
+
+/**
+ * @ingroup iface_xdg_popup
+ * @struct xdg_popup_listener
+ */
+struct xdg_popup_listener {
+ /**
+ * configure the popup surface
+ *
+ * This event asks the popup surface to configure itself given
+ * the configuration. The configured state should not be applied
+ * immediately. See xdg_surface.configure for details.
+ *
+ * The x and y arguments represent the position the popup was
+ * placed at given the xdg_positioner rule, relative to the upper
+ * left corner of the window geometry of the parent surface.
+ *
+ * For version 2 or older, the configure event for an xdg_popup is
+ * only ever sent once for the initial configuration. Starting with
+ * version 3, it may be sent again if the popup is setup with an
+ * xdg_positioner with set_reactive requested, or in response to
+ * xdg_popup.reposition requests.
+ * @param x x position relative to parent surface window geometry
+ * @param y y position relative to parent surface window geometry
+ * @param width window geometry width
+ * @param height window geometry height
+ */
+ void (*configure)(void *data,
+ struct xdg_popup *xdg_popup,
+ int32_t x,
+ int32_t y,
+ int32_t width,
+ int32_t height);
+ /**
+ * popup interaction is done
+ *
+ * The popup_done event is sent out when a popup is dismissed by
+ * the compositor. The client should destroy the xdg_popup object
+ * at this point.
+ */
+ void (*popup_done)(void *data,
+ struct xdg_popup *xdg_popup);
+ /**
+ * signal the completion of a repositioned request
+ *
+ * The repositioned event is sent as part of a popup
+ * configuration sequence, together with xdg_popup.configure and
+ * lastly xdg_surface.configure to notify the completion of a
+ * reposition request.
+ *
+ * The repositioned event is to notify about the completion of a
+ * xdg_popup.reposition request. The token argument is the token
+ * passed in the xdg_popup.reposition request.
+ *
+ * Immediately after this event is emitted, xdg_popup.configure and
+ * xdg_surface.configure will be sent with the updated size and
+ * position, as well as a new configure serial.
+ *
+ * The client should optionally update the content of the popup,
+ * but must acknowledge the new popup configuration for the new
+ * position to take effect. See xdg_surface.ack_configure for
+ * details.
+ * @param token reposition request token
+ * @since 3
+ */
+ void (*repositioned)(void *data,
+ struct xdg_popup *xdg_popup,
+ uint32_t token);
+};
+
+/**
+ * @ingroup iface_xdg_popup
+ */
+static inline int
+xdg_popup_add_listener(struct xdg_popup *xdg_popup,
+ const struct xdg_popup_listener *listener, void *data)
+{
+ return wl_proxy_add_listener((struct wl_proxy *) xdg_popup,
+ (void (**)(void)) listener, data);
+}
+
+#define XDG_POPUP_DESTROY 0
+#define XDG_POPUP_GRAB 1
+#define XDG_POPUP_REPOSITION 2
+
+/**
+ * @ingroup iface_xdg_popup
+ */
+#define XDG_POPUP_CONFIGURE_SINCE_VERSION 1
+/**
+ * @ingroup iface_xdg_popup
+ */
+#define XDG_POPUP_POPUP_DONE_SINCE_VERSION 1
+/**
+ * @ingroup iface_xdg_popup
+ */
+#define XDG_POPUP_REPOSITIONED_SINCE_VERSION 3
+
+/**
+ * @ingroup iface_xdg_popup
+ */
+#define XDG_POPUP_DESTROY_SINCE_VERSION 1
+/**
+ * @ingroup iface_xdg_popup
+ */
+#define XDG_POPUP_GRAB_SINCE_VERSION 1
+/**
+ * @ingroup iface_xdg_popup
+ */
+#define XDG_POPUP_REPOSITION_SINCE_VERSION 3
+
+/** @ingroup iface_xdg_popup */
+static inline void
+xdg_popup_set_user_data(struct xdg_popup *xdg_popup, void *user_data)
+{
+ wl_proxy_set_user_data((struct wl_proxy *) xdg_popup, user_data);
+}
+
+/** @ingroup iface_xdg_popup */
+static inline void *
+xdg_popup_get_user_data(struct xdg_popup *xdg_popup)
+{
+ return wl_proxy_get_user_data((struct wl_proxy *) xdg_popup);
+}
+
+static inline uint32_t
+xdg_popup_get_version(struct xdg_popup *xdg_popup)
+{
+ return wl_proxy_get_version((struct wl_proxy *) xdg_popup);
+}
+
+/**
+ * @ingroup iface_xdg_popup
+ *
+ * This destroys the popup. Explicitly destroying the xdg_popup
+ * object will also dismiss the popup, and unmap the surface.
+ *
+ * If this xdg_popup is not the "topmost" popup, a protocol error
+ * will be sent.
+ */
+static inline void
+xdg_popup_destroy(struct xdg_popup *xdg_popup)
+{
+ wl_proxy_marshal((struct wl_proxy *) xdg_popup,
+ XDG_POPUP_DESTROY);
+
+ wl_proxy_destroy((struct wl_proxy *) xdg_popup);
+}
+
+/**
+ * @ingroup iface_xdg_popup
+ *
+ * This request makes the created popup take an explicit grab. An explicit
+ * grab will be dismissed when the user dismisses the popup, or when the
+ * client destroys the xdg_popup. This can be done by the user clicking
+ * outside the surface, using the keyboard, or even locking the screen
+ * through closing the lid or a timeout.
+ *
+ * If the compositor denies the grab, the popup will be immediately
+ * dismissed.
+ *
+ * This request must be used in response to some sort of user action like a
+ * button press, key press, or touch down event. The serial number of the
+ * event should be passed as 'serial'.
+ *
+ * The parent of a grabbing popup must either be an xdg_toplevel surface or
+ * another xdg_popup with an explicit grab. If the parent is another
+ * xdg_popup it means that the popups are nested, with this popup now being
+ * the topmost popup.
+ *
+ * Nested popups must be destroyed in the reverse order they were created
+ * in, e.g. the only popup you are allowed to destroy at all times is the
+ * topmost one.
+ *
+ * When compositors choose to dismiss a popup, they may dismiss every
+ * nested grabbing popup as well. When a compositor dismisses popups, it
+ * will follow the same dismissing order as required from the client.
+ *
+ * The parent of a grabbing popup must either be another xdg_popup with an
+ * active explicit grab, or an xdg_popup or xdg_toplevel, if there are no
+ * explicit grabs already taken.
+ *
+ * If the topmost grabbing popup is destroyed, the grab will be returned to
+ * the parent of the popup, if that parent previously had an explicit grab.
+ *
+ * If the parent is a grabbing popup which has already been dismissed, this
+ * popup will be immediately dismissed. If the parent is a popup that did
+ * not take an explicit grab, an error will be raised.
+ *
+ * During a popup grab, the client owning the grab will receive pointer
+ * and touch events for all their surfaces as normal (similar to an
+ * "owner-events" grab in X11 parlance), while the top most grabbing popup
+ * will always have keyboard focus.
+ */
+static inline void
+xdg_popup_grab(struct xdg_popup *xdg_popup, struct wl_seat *seat, uint32_t serial)
+{
+ wl_proxy_marshal((struct wl_proxy *) xdg_popup,
+ XDG_POPUP_GRAB, seat, serial);
+}
+
+/**
+ * @ingroup iface_xdg_popup
+ *
+ * Reposition an already-mapped popup. The popup will be placed given the
+ * details in the passed xdg_positioner object, and a
+ * xdg_popup.repositioned followed by xdg_popup.configure and
+ * xdg_surface.configure will be emitted in response. Any parameters set
+ * by the previous positioner will be discarded.
+ *
+ * The passed token will be sent in the corresponding
+ * xdg_popup.repositioned event. The new popup position will not take
+ * effect until the corresponding configure event is acknowledged by the
+ * client. See xdg_popup.repositioned for details. The token itself is
+ * opaque, and has no other special meaning.
+ *
+ * If multiple reposition requests are sent, the compositor may skip all
+ * but the last one.
+ *
+ * If the popup is repositioned in response to a configure event for its
+ * parent, the client should send an xdg_positioner.set_parent_configure
+ * and possibly an xdg_positioner.set_parent_size request to allow the
+ * compositor to properly constrain the popup.
+ *
+ * If the popup is repositioned together with a parent that is being
+ * resized, but not in response to a configure event, the client should
+ * send an xdg_positioner.set_parent_size request.
+ */
+static inline void
+xdg_popup_reposition(struct xdg_popup *xdg_popup, struct xdg_positioner *positioner, uint32_t token)
+{
+ wl_proxy_marshal((struct wl_proxy *) xdg_popup,
+ XDG_POPUP_REPOSITION, positioner, token);
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/vendor/gioui.org/app/window.go b/vendor/gioui.org/app/window.go
new file mode 100644
index 0000000..95148d1
--- /dev/null
+++ b/vendor/gioui.org/app/window.go
@@ -0,0 +1,882 @@
+// SPDX-License-Identifier: Unlicense OR MIT
+
+package app
+
+import (
+ "errors"
+ "fmt"
+ "image"
+ "image/color"
+ "runtime"
+ "time"
+
+ "gioui.org/f32"
+ "gioui.org/gpu"
+ "gioui.org/io/event"
+ "gioui.org/io/pointer"
+ "gioui.org/io/profile"
+ "gioui.org/io/router"
+ "gioui.org/io/system"
+ "gioui.org/op"
+ "gioui.org/unit"
+
+ _ "gioui.org/app/internal/log"
+)
+
+// Option configures a window.
+type Option func(unit.Metric, *Config)
+
+// Window represents an operating system window.
+type Window struct {
+ ctx context
+ gpu gpu.GPU
+
+ // driverFuncs is a channel of functions to run when
+ // the Window has a valid driver.
+ driverFuncs chan func(d driver)
+ // driverDefers is like driverFuncs for functions that may
+ // block and shouldn't be waited for.
+ driverDefers chan func(d driver)
+ // wakeups wakes up the native event loop to send a
+ // WakeupEvent that flushes driverFuncs.
+ wakeups chan struct{}
+
+ out chan event.Event
+ in chan event.Event
+ ack chan struct{}
+ invalidates chan struct{}
+ frames chan *op.Ops
+ frameAck chan struct{}
+ // dead is closed when the window is destroyed.
+ dead chan struct{}
+
+ stage system.Stage
+ animating bool
+ hasNextFrame bool
+ nextFrame time.Time
+ delayedDraw *time.Timer
+
+ queue queue
+ cursor pointer.CursorName
+
+ callbacks callbacks
+
+ nocontext bool
+
+ // semantic data, lazily evaluated if requested by a backend to speed up
+ // the cases where semantic data is not needed.
+ semantic struct {
+ // requestDiffs is notified when a backend requests the list of changed
+ // semantic ids.
+ requestDiffs chan struct{}
+ // diffs is sent every changed semantic id when requestDiffs is signaled,
+ // ending with the zero id.
+ diffs chan router.SemanticID
+ // lookups is sent semantic IDs for lookup.
+ lookups chan router.SemanticID
+ // results is sent the responses for lookups queries.
+ results chan semanticResult
+ // requestRoots is sent request for the root ID.
+ requestRoots chan struct{}
+ // roots is sent root IDs when requested through requestRoots.
+ roots chan router.SemanticID
+ // positions is sent positional requests.
+ positions chan f32.Point
+ // positionIDs is sent results for positions requests.
+ positionIDs chan semanticID
+
+ // uptodate tracks whether the fields below are up to date.
+ uptodate bool
+ root router.SemanticID
+ prevTree []router.SemanticNode
+ tree []router.SemanticNode
+ ids map[router.SemanticID]router.SemanticNode
+ }
+}
+
+type semanticID struct {
+ found bool
+ id router.SemanticID
+}
+
+type semanticResult struct {
+ found bool
+ node router.SemanticNode
+}
+
+type callbacks struct {
+ w *Window
+ d driver
+}
+
+// queue is an event.Queue implementation that distributes system events
+// to the input handlers declared in the most recent frame.
+type queue struct {
+ q router.Router
+}
+
+// driverEvent is sent when the underlying driver changes.
+type driverEvent struct {
+ wakeup func()
+}
+
+// Pre-allocate the ack event to avoid garbage.
+var ackEvent event.Event
+
+// NewWindow creates a new window for a set of window
+// options. The options are hints; the platform is free to
+// ignore or adjust them.
+//
+// If the current program is running on iOS and Android,
+// NewWindow returns the window previously created by the
+// platform.
+//
+// Calling NewWindow more than once is not supported on
+// iOS, Android, WebAssembly.
+func NewWindow(options ...Option) *Window {
+ defaultOptions := []Option{
+ Size(unit.Dp(800), unit.Dp(600)),
+ Title("Gio"),
+ }
+ options = append(defaultOptions, options...)
+ var cnf Config
+ cnf.apply(unit.Metric{}, options)
+
+ w := &Window{
+ in: make(chan event.Event),
+ out: make(chan event.Event),
+ ack: make(chan struct{}),
+ invalidates: make(chan struct{}, 1),
+ frames: make(chan *op.Ops),
+ frameAck: make(chan struct{}),
+ driverFuncs: make(chan func(d driver), 1),
+ driverDefers: make(chan func(d driver), 1),
+ wakeups: make(chan struct{}, 1),
+ dead: make(chan struct{}),
+ nocontext: cnf.CustomRenderer,
+ }
+ w.semantic.ids = make(map[router.SemanticID]router.SemanticNode)
+ w.semantic.lookups = make(chan router.SemanticID)
+ w.semantic.results = make(chan semanticResult)
+ w.semantic.requestDiffs = make(chan struct{})
+ w.semantic.requestRoots = make(chan struct{})
+ w.semantic.roots = make(chan router.SemanticID)
+ w.semantic.positions = make(chan f32.Point)
+ w.semantic.positionIDs = make(chan semanticID)
+ // Add buffer to limit context switching when the diff is large.
+ w.semantic.diffs = make(chan router.SemanticID, 50)
+ w.callbacks.w = w
+ go w.run(options)
+ return w
+}
+
+// Events returns the channel where events are delivered.
+func (w *Window) Events() <-chan event.Event {
+ return w.out
+}
+
+// update updates the window contents, input operations declare input handlers,
+// and so on. The supplied operations list completely replaces the window state
+// from previous calls.
+func (w *Window) update(frame *op.Ops) {
+ w.frames <- frame
+ <-w.frameAck
+}
+
+func (w *Window) validateAndProcess(frameStart time.Time, size image.Point, sync bool, frame *op.Ops) error {
+ for {
+ if w.gpu == nil && !w.nocontext {
+ var err error
+ if w.ctx == nil {
+ w.driverRun(func(d driver) {
+ w.ctx, err = d.NewContext()
+ })
+ if err != nil {
+ return err
+ }
+ sync = true
+ }
+ }
+ if sync && w.ctx != nil {
+ var err error
+ w.driverRun(func(d driver) {
+ err = w.ctx.Refresh()
+ })
+ if err != nil {
+ if errors.Is(err, errOutOfDate) {
+ // Surface couldn't be created for transient reasons. Skip
+ // this frame and wait for the next.
+ return nil
+ }
+ w.destroyGPU()
+ if errors.Is(err, gpu.ErrDeviceLost) {
+ continue
+ }
+ return err
+ }
+ }
+ if w.gpu == nil && !w.nocontext {
+ if err := w.ctx.Lock(); err != nil {
+ w.destroyGPU()
+ return err
+ }
+ gpu, err := gpu.New(w.ctx.API())
+ w.ctx.Unlock()
+ if err != nil {
+ w.destroyGPU()
+ return err
+ }
+ w.gpu = gpu
+ }
+ if w.gpu != nil {
+ if err := w.render(frame, size); err != nil {
+ if errors.Is(err, errOutOfDate) {
+ // GPU surface needs refreshing.
+ sync = true
+ continue
+ }
+ w.destroyGPU()
+ if errors.Is(err, gpu.ErrDeviceLost) {
+ continue
+ }
+ return err
+ }
+ }
+ w.processFrame(frameStart, frame)
+ return nil
+ }
+}
+
+func (w *Window) render(frame *op.Ops, viewport image.Point) error {
+ if err := w.ctx.Lock(); err != nil {
+ return err
+ }
+ defer w.ctx.Unlock()
+ if runtime.GOOS == "js" {
+ // Use transparent black when Gio is embedded, to allow mixing of Gio and
+ // foreign content below.
+ w.gpu.Clear(color.NRGBA{A: 0x00, R: 0x00, G: 0x00, B: 0x00})
+ } else {
+ w.gpu.Clear(color.NRGBA{A: 0xff, R: 0xff, G: 0xff, B: 0xff})
+ }
+ target, err := w.ctx.RenderTarget()
+ if err != nil {
+ return err
+ }
+ if err := w.gpu.Frame(frame, target, viewport); err != nil {
+ return err
+ }
+ return w.ctx.Present()
+}
+
+func (w *Window) processFrame(frameStart time.Time, frame *op.Ops) {
+ w.queue.q.Frame(frame)
+ for k := range w.semantic.ids {
+ delete(w.semantic.ids, k)
+ }
+ w.semantic.uptodate = false
+ switch w.queue.q.TextInputState() {
+ case router.TextInputOpen:
+ w.driverDefer(func(d driver) { d.ShowTextInput(true) })
+ case router.TextInputClose:
+ w.driverDefer(func(d driver) { d.ShowTextInput(false) })
+ }
+ if hint, ok := w.queue.q.TextInputHint(); ok {
+ w.driverDefer(func(d driver) { d.SetInputHint(hint) })
+ }
+ if txt, ok := w.queue.q.WriteClipboard(); ok {
+ w.WriteClipboard(txt)
+ }
+ if w.queue.q.ReadClipboard() {
+ w.ReadClipboard()
+ }
+ if w.queue.q.Profiling() && w.gpu != nil {
+ frameDur := time.Since(frameStart)
+ frameDur = frameDur.Truncate(100 * time.Microsecond)
+ q := 100 * time.Microsecond
+ timings := fmt.Sprintf("tot:%7s %s", frameDur.Round(q), w.gpu.Profile())
+ w.queue.q.Queue(profile.Event{Timings: timings})
+ }
+ if t, ok := w.queue.q.WakeupTime(); ok {
+ w.setNextFrame(t)
+ }
+ // Opportunistically check whether Invalidate has been called, to avoid
+ // stopping and starting animation mode.
+ select {
+ case <-w.invalidates:
+ w.setNextFrame(time.Time{})
+ default:
+ }
+ w.updateAnimation()
+}
+
+// Invalidate the window such that a FrameEvent will be generated immediately.
+// If the window is inactive, the event is sent when the window becomes active.
+//
+// Note that Invalidate is intended for externally triggered updates, such as a
+// response from a network request. InvalidateOp is more efficient for animation
+// and similar internal updates.
+//
+// Invalidate is safe for concurrent use.
+func (w *Window) Invalidate() {
+ select {
+ case w.invalidates <- struct{}{}:
+ default:
+ }
+}
+
+// Option applies the options to the window.
+func (w *Window) Option(opts ...Option) {
+ w.driverDefer(func(d driver) {
+ d.Configure(opts)
+ })
+}
+
+// ReadClipboard initiates a read of the clipboard in the form
+// of a clipboard.Event. Multiple reads may be coalesced
+// to a single event.
+func (w *Window) ReadClipboard() {
+ w.driverDefer(func(d driver) {
+ d.ReadClipboard()
+ })
+}
+
+// WriteClipboard writes a string to the clipboard.
+func (w *Window) WriteClipboard(s string) {
+ w.driverDefer(func(d driver) {
+ d.WriteClipboard(s)
+ })
+}
+
+// SetCursorName changes the current window cursor to name.
+func (w *Window) SetCursorName(name pointer.CursorName) {
+ w.driverDefer(func(d driver) {
+ d.SetCursor(name)
+ })
+}
+
+// Close the window. The window's event loop should exit when it receives
+// system.DestroyEvent.
+//
+// Currently, only macOS, Windows, X11 and Wayland drivers implement this functionality,
+// all others are stubbed.
+func (w *Window) Close() {
+ w.driverDefer(func(d driver) {
+ d.Close()
+ })
+}
+
+// Maximize the window.
+// Note: only implemented on Windows, macOS and X11.
+func (w *Window) Maximize() {
+ w.driverDefer(func(d driver) {
+ d.Maximize()
+ })
+}
+
+// Center the window.
+// Note: only implemented on Windows, macOS and X11.
+func (w *Window) Center() {
+ w.driverDefer(func(d driver) {
+ d.Center()
+ })
+}
+
+// Run f in the same thread as the native window event loop, and wait for f to
+// return or the window to close. Run is guaranteed not to deadlock if it is
+// invoked during the handling of a ViewEvent, system.FrameEvent,
+// system.StageEvent; call Run in a separate goroutine to avoid deadlock in all
+// other cases.
+//
+// Note that most programs should not call Run; configuring a Window with
+// CustomRenderer is a notable exception.
+func (w *Window) Run(f func()) {
+ w.driverRun(func(_ driver) {
+ f()
+ })
+}
+
+// driverRun runs f on the driver event goroutine and returns when f has
+// completed. It can only be called during the processing of an event from
+// w.in.
+func (w *Window) driverRun(f func(d driver)) {
+ done := make(chan struct{})
+ wrapper := func(d driver) {
+ defer close(done)
+ f(d)
+ }
+ select {
+ case w.driverFuncs <- wrapper:
+ select {
+ case <-done:
+ case <-w.dead:
+ }
+ case <-w.dead:
+ }
+}
+
+// driverDefer is like driverRun but can be run from any context. It doesn't wait
+// for f to return.
+func (w *Window) driverDefer(f func(d driver)) {
+ select {
+ case w.driverDefers <- f:
+ w.wakeup()
+ case <-w.dead:
+ }
+}
+
+func (w *Window) updateAnimation() {
+ animate := false
+ if w.delayedDraw != nil {
+ w.delayedDraw.Stop()
+ w.delayedDraw = nil
+ }
+ if w.stage >= system.StageRunning && w.hasNextFrame {
+ if dt := time.Until(w.nextFrame); dt <= 0 {
+ animate = true
+ } else {
+ w.delayedDraw = time.NewTimer(dt)
+ }
+ }
+ if animate != w.animating {
+ w.animating = animate
+ if animate {
+ w.driverDefer(enableAnim)
+ } else {
+ w.driverDefer(disableAnim)
+ }
+ }
+}
+
+func enableAnim(d driver) {
+ d.SetAnimating(true)
+}
+
+func disableAnim(d driver) {
+ d.SetAnimating(false)
+}
+
+func (w *Window) wakeup() {
+ select {
+ case w.wakeups <- struct{}{}:
+ default:
+ }
+}
+
+func (w *Window) setNextFrame(at time.Time) {
+ if !w.hasNextFrame || at.Before(w.nextFrame) {
+ w.hasNextFrame = true
+ w.nextFrame = at
+ }
+}
+
+func (c *callbacks) SetDriver(d driver) {
+ c.d = d
+ var wakeup func()
+ if d != nil {
+ wakeup = d.Wakeup
+ }
+ c.Event(driverEvent{wakeup})
+}
+
+func (c *callbacks) Event(e event.Event) {
+ deferChan := c.w.driverDefers
+ if c.d == nil {
+ deferChan = nil
+ }
+ for {
+ select {
+ case f := <-deferChan:
+ f(c.d)
+ case c.w.in <- e:
+ c.w.runFuncs(c.d)
+ return
+ case <-c.w.dead:
+ return
+ }
+ }
+}
+
+// SemanticRoot returns the ID of the semantic root.
+func (c *callbacks) SemanticRoot() router.SemanticID {
+ c.w.semantic.requestRoots <- struct{}{}
+ return <-c.w.semantic.roots
+}
+
+// LookupSemantic looks up a semantic node from an ID. The zero ID denotes the root.
+func (c *callbacks) LookupSemantic(semID router.SemanticID) (router.SemanticNode, bool) {
+ c.w.semantic.lookups <- semID
+ res := <-c.w.semantic.results
+ return res.node, res.found
+}
+
+func (c *callbacks) RequestSemanticDiffs() <-chan router.SemanticID {
+ c.w.semantic.requestDiffs <- struct{}{}
+ return c.w.semantic.diffs
+}
+
+func (c *callbacks) SemanticAt(pos f32.Point) (router.SemanticID, bool) {
+ c.w.semantic.positions <- pos
+ res := <-c.w.semantic.positionIDs
+ return res.id, res.found
+}
+
+func (w *Window) runFuncs(d driver) {
+ // Don't run driver functions if there's no driver.
+ if d == nil {
+ <-w.ack
+ return
+ }
+ var defers []func(d driver)
+ // Don't miss deferred functions when ack arrives immediately. There is one
+ // wakeup event per function, so one select is enough.
+ select {
+ case f := <-w.driverDefers:
+ defers = append(defers, f)
+ default:
+ }
+ // Wait for ack while running incoming runnables.
+ for {
+ select {
+ case f := <-w.driverFuncs:
+ f(d)
+ case f := <-w.driverDefers:
+ defers = append(defers, f)
+ case <-w.ack:
+ for _, f := range defers {
+ f(d)
+ }
+ return
+ }
+ }
+}
+
+func (w *Window) waitAck() {
+ // Send a dummy event; when it gets through we
+ // know the application has processed the previous event.
+ w.out <- ackEvent
+}
+
+// Prematurely destroy the window and wait for the native window
+// destroy event.
+func (w *Window) destroy(err error) {
+ w.destroyGPU()
+ // Ack the current event.
+ w.ack <- struct{}{}
+ w.out <- system.DestroyEvent{Err: err}
+ close(w.dead)
+ close(w.out)
+ for e := range w.in {
+ w.ack <- struct{}{}
+ if _, ok := e.(system.DestroyEvent); ok {
+ return
+ }
+ }
+}
+
+func (w *Window) destroyGPU() {
+ if w.gpu != nil {
+ w.ctx.Lock()
+ w.gpu.Release()
+ w.ctx.Unlock()
+ w.gpu = nil
+ }
+ if w.ctx != nil {
+ w.ctx.Release()
+ w.ctx = nil
+ }
+}
+
+// waitFrame waits for the client to either call FrameEvent.Frame
+// or to continue event handling. It returns whether the client
+// called Frame or not.
+func (w *Window) waitFrame() (*op.Ops, bool) {
+ select {
+ case frame := <-w.frames:
+ // The client called FrameEvent.Frame.
+ return frame, true
+ case w.out <- ackEvent:
+ // The client ignored FrameEvent and continued processing
+ // events.
+ return nil, false
+ }
+}
+
+func (w *Window) lookupSemantic(id router.SemanticID) (router.SemanticNode, bool) {
+ w.updateSemantics()
+ n, ok := w.semantic.ids[id]
+ return n, ok
+}
+
+// updateSemantics refreshes the semantics tree, the id to node map and the ids of
+// updated nodes.
+func (w *Window) updateSemantics() {
+ if w.semantic.uptodate {
+ return
+ }
+ w.semantic.uptodate = true
+ w.semantic.prevTree, w.semantic.tree = w.semantic.tree, w.semantic.prevTree
+ w.semantic.tree = w.queue.q.AppendSemantics(w.semantic.tree[:0])
+ w.semantic.root = w.semantic.tree[0].ID
+ for _, n := range w.semantic.tree {
+ w.semantic.ids[n.ID] = n
+ }
+}
+
+// sendSemanticDiffs traverses the previous semantic tree and sends changed ids to
+// w.semantic.diffs.
+func (w *Window) sendSemanticDiffs() {
+ w.updateSemantics()
+ defer func() {
+ // Mark end of list.
+ w.semantic.diffs <- 0
+ }()
+ if tree := w.semantic.prevTree; len(tree) > 0 {
+ w.collectSemanticDiffs(w.semantic.prevTree[0])
+ }
+}
+
+// collectSemanticDiffs traverses the previous semantic tree, noting changed nodes.
+func (w *Window) collectSemanticDiffs(n router.SemanticNode) {
+ newNode, exists := w.semantic.ids[n.ID]
+ // Ignore deleted nodes, as their disappearance will be reported through an
+ // ancestor node.
+ if !exists {
+ return
+ }
+ diff := newNode.Desc != n.Desc || len(n.Children) != len(newNode.Children)
+ for i, ch := range n.Children {
+ if !diff {
+ newCh := newNode.Children[i]
+ diff = ch.ID != newCh.ID
+ }
+ w.collectSemanticDiffs(ch)
+ }
+ if diff {
+ w.semantic.diffs <- n.ID
+ }
+}
+
+func (w *Window) run(options []Option) {
+ // Some OpenGL drivers don't like being made current on many different
+ // OS threads. Force the Go runtime to map the event loop goroutine to
+ // only one thread.
+ runtime.LockOSThread()
+
+ defer close(w.out)
+ defer close(w.dead)
+ if err := newWindow(&w.callbacks, options); err != nil {
+ w.out <- system.DestroyEvent{Err: err}
+ return
+ }
+ var wakeup func()
+ for {
+ var (
+ wakeups <-chan struct{}
+ timer <-chan time.Time
+ )
+ if wakeup != nil {
+ wakeups = w.wakeups
+ // Make sure any pending deferred driver functions are processed before calling
+ // into driverFunc again; only one driver function can be queued at a time.
+ select {
+ case <-wakeups:
+ wakeup()
+ default:
+ }
+ }
+ if w.delayedDraw != nil {
+ timer = w.delayedDraw.C
+ }
+ select {
+ case <-timer:
+ w.setNextFrame(time.Time{})
+ w.updateAnimation()
+ case <-w.invalidates:
+ w.setNextFrame(time.Time{})
+ w.updateAnimation()
+ case <-wakeups:
+ wakeup()
+ case semID := <-w.semantic.lookups:
+ node, found := w.lookupSemantic(semID)
+ w.semantic.results <- semanticResult{
+ found: found,
+ node: node,
+ }
+ case <-w.semantic.requestDiffs:
+ w.sendSemanticDiffs()
+ case <-w.semantic.requestRoots:
+ w.semantic.roots <- w.semantic.root
+ case pos := <-w.semantic.positions:
+ sid, exists := w.queue.q.SemanticAt(pos)
+ w.semantic.positionIDs <- semanticID{
+ found: exists,
+ id: sid,
+ }
+ case e := <-w.in:
+ switch e2 := e.(type) {
+ case system.StageEvent:
+ if e2.Stage < system.StageRunning {
+ if w.gpu != nil {
+ w.ctx.Lock()
+ w.gpu.Release()
+ w.gpu = nil
+ w.ctx.Unlock()
+ }
+ }
+ w.stage = e2.Stage
+ w.updateAnimation()
+ w.out <- e
+ w.waitAck()
+ case frameEvent:
+ if e2.Size == (image.Point{}) {
+ panic(errors.New("internal error: zero-sized Draw"))
+ }
+ if w.stage < system.StageRunning {
+ // No drawing if not visible.
+ break
+ }
+ frameStart := time.Now()
+ w.hasNextFrame = false
+ e2.Frame = w.update
+ e2.Queue = &w.queue
+ w.out <- e2.FrameEvent
+ frame, gotFrame := w.waitFrame()
+ err := w.validateAndProcess(frameStart, e2.Size, e2.Sync, frame)
+ if gotFrame {
+ // We're done with frame, let the client continue.
+ w.frameAck <- struct{}{}
+ }
+ if err != nil {
+ w.destroyGPU()
+ w.destroy(err)
+ return
+ }
+ w.updateCursor()
+ case *system.CommandEvent:
+ w.out <- e
+ w.waitAck()
+ case driverEvent:
+ wakeup = e2.wakeup
+ case system.DestroyEvent:
+ w.destroyGPU()
+ w.out <- e2
+ w.ack <- struct{}{}
+ return
+ case ViewEvent:
+ w.out <- e2
+ w.waitAck()
+ case wakeupEvent:
+ case event.Event:
+ if w.queue.q.Queue(e2) {
+ w.setNextFrame(time.Time{})
+ w.updateAnimation()
+ }
+ w.updateCursor()
+ w.out <- e
+ }
+ w.ack <- struct{}{}
+ }
+ }
+}
+
+func (w *Window) updateCursor() {
+ if c := w.queue.q.Cursor(); c != w.cursor {
+ w.cursor = c
+ w.SetCursorName(c)
+ }
+}
+
+// Raise requests that the platform bring this window to the top of all open windows.
+// Some platforms do not allow this except under certain circumstances, such as when
+// a window from the same application already has focus. If the platform does not
+// support it, this method will do nothing.
+func (w *Window) Raise() {
+ w.driverDefer(func(d driver) {
+ d.Raise()
+ })
+}
+
+func (q *queue) Events(k event.Tag) []event.Event {
+ return q.q.Events(k)
+}
+
+// Title sets the title of the window.
+func Title(t string) Option {
+ return func(_ unit.Metric, cnf *Config) {
+ cnf.Title = t
+ }
+}
+
+// Size sets the size of the window. The option is ignored
+// in Fullscreen mode.
+func Size(w, h unit.Value) Option {
+ if w.V <= 0 {
+ panic("width must be larger than 0")
+ }
+ if h.V <= 0 {
+ panic("height must be larger than 0")
+ }
+ return func(m unit.Metric, cnf *Config) {
+ cnf.Size = image.Point{
+ X: m.Px(w),
+ Y: m.Px(h),
+ }
+ }
+}
+
+// MaxSize sets the maximum size of the window.
+func MaxSize(w, h unit.Value) Option {
+ if w.V <= 0 {
+ panic("width must be larger than 0")
+ }
+ if h.V <= 0 {
+ panic("height must be larger than 0")
+ }
+ return func(m unit.Metric, cnf *Config) {
+ cnf.MaxSize = image.Point{
+ X: m.Px(w),
+ Y: m.Px(h),
+ }
+ }
+}
+
+// MinSize sets the minimum size of the window.
+func MinSize(w, h unit.Value) Option {
+ if w.V <= 0 {
+ panic("width must be larger than 0")
+ }
+ if h.V <= 0 {
+ panic("height must be larger than 0")
+ }
+ return func(m unit.Metric, cnf *Config) {
+ cnf.MinSize = image.Point{
+ X: m.Px(w),
+ Y: m.Px(h),
+ }
+ }
+}
+
+// StatusColor sets the color of the Android status bar.
+func StatusColor(color color.NRGBA) Option {
+ return func(_ unit.Metric, cnf *Config) {
+ cnf.StatusColor = color
+ }
+}
+
+// NavigationColor sets the color of the navigation bar on Android, or the address bar in browsers.
+func NavigationColor(color color.NRGBA) Option {
+ return func(_ unit.Metric, cnf *Config) {
+ cnf.NavigationColor = color
+ }
+}
+
+// CustomRenderer controls whether the window contents is
+// rendered by the client. If true, no GPU context is created.
+func CustomRenderer(custom bool) Option {
+ return func(_ unit.Metric, cnf *Config) {
+ cnf.CustomRenderer = custom
+ }
+}
+
+func (driverEvent) ImplementsEvent() {}
diff --git a/vendor/gioui.org/cpu/LICENSE b/vendor/gioui.org/cpu/LICENSE
new file mode 100644
index 0000000..81f4733
--- /dev/null
+++ b/vendor/gioui.org/cpu/LICENSE
@@ -0,0 +1,63 @@
+This project is provided under the terms of the UNLICENSE or
+the MIT license denoted by the following SPDX identifier:
+
+SPDX-License-Identifier: Unlicense OR MIT
+
+You may use the project under the terms of either license.
+
+Both licenses are reproduced below.
+
+----
+The MIT License (MIT)
+
+Copyright (c) 2019 The Gio authors
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+---
+
+
+
+---
+The UNLICENSE
+
+This is free and unencumbered software released into the public domain.
+
+Anyone is free to copy, modify, publish, use, compile, sell, or
+distribute this software, either in source code form or as a compiled
+binary, for any purpose, commercial or non-commercial, and by any
+means.
+
+In jurisdictions that recognize copyright laws, the author or authors
+of this software dedicate any and all copyright interest in the
+software to the public domain. We make this dedication for the benefit
+of the public at large and to the detriment of our heirs and
+successors. We intend this dedication to be an overt act of
+relinquishment in perpetuity of all present and future rights to this
+software under copyright law.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+OTHER DEALINGS IN THE SOFTWARE.
+
+For more information, please refer to
+---
diff --git a/vendor/gioui.org/cpu/README.md b/vendor/gioui.org/cpu/README.md
new file mode 100644
index 0000000..4244fe0
--- /dev/null
+++ b/vendor/gioui.org/cpu/README.md
@@ -0,0 +1,25 @@
+# Compile and run compute programs on CPU
+
+This project contains the compiler for turning Vulkan SPIR-V compute shaders
+into binaries for arm64, arm or amd64, using
+[SwiftShader](https://github.com/eliasnaur/swiftshader) with a few
+modifications. A runtime implemented in C and Go is available for running the
+resulting binaries.
+
+The primary use is to support a CPU-based rendering fallback for
+[Gio](https://gioui.org). In particular, the `gioui.org/shader/piet` package
+contains arm, arm64, amd64 binaries for
+[piet-gpu](https://github.com/linebender/piet-gpu).
+
+# Compiling and running shaders
+
+The `init.sh` script clones the modified SwiftShader project and builds it for
+64-bit and 32-bit. SwiftShader is not designed to cross-compile which is why a
+32-bit build is needed to compile shaders for arm.
+
+The `example/run.sh` script demonstrates compiling and running a simple compute
+program.
+
+## Issues and contributions
+
+See the [Gio contribution guide](https://gioui.org/doc/contribute).
diff --git a/vendor/gioui.org/cpu/abi.h b/vendor/gioui.org/cpu/abi.h
new file mode 100644
index 0000000..365d936
--- /dev/null
+++ b/vendor/gioui.org/cpu/abi.h
@@ -0,0 +1,91 @@
+// SPDX-License-Identifier: Unlicense OR MIT
+
+#define ALIGN(bytes, type) type __attribute__((aligned(bytes)))
+
+typedef ALIGN(8, uint8_t) byte8[8];
+typedef ALIGN(8, uint16_t) word4[4];
+typedef ALIGN(4, uint32_t) dword;
+typedef ALIGN(16, uint32_t) dword4[4];
+typedef ALIGN(8, uint64_t) qword;
+typedef ALIGN(16, uint64_t) qword2[2];
+typedef ALIGN(16, unsigned int) uint4[4];
+typedef ALIGN(8, uint32_t) dword2[2];
+typedef ALIGN(8, unsigned short) ushort4[4];
+typedef ALIGN(16, float) float4[4];
+typedef ALIGN(16, int) int4[4];
+
+typedef unsigned short half;
+
+typedef unsigned char bool;
+
+enum {
+ MAX_BOUND_DESCRIPTOR_SETS = 4,
+ MAX_DESCRIPTOR_SET_UNIFORM_BUFFERS_DYNAMIC = 8,
+ MAX_DESCRIPTOR_SET_STORAGE_BUFFERS_DYNAMIC = 4,
+ MAX_DESCRIPTOR_SET_COMBINED_BUFFERS_DYNAMIC =
+ MAX_DESCRIPTOR_SET_UNIFORM_BUFFERS_DYNAMIC +
+ MAX_DESCRIPTOR_SET_STORAGE_BUFFERS_DYNAMIC,
+ MAX_PUSH_CONSTANT_SIZE = 128,
+
+ MIN_STORAGE_BUFFER_OFFSET_ALIGNMENT = 256,
+
+ REQUIRED_MEMORY_ALIGNMENT = 16,
+
+ SIMD_WIDTH = 4,
+};
+
+struct image_descriptor {
+ ALIGN(16, void *ptr);
+ int width;
+ int height;
+ int depth;
+ int row_pitch_bytes;
+ int slice_pitch_bytes;
+ int sample_pitch_bytes;
+ int sample_count;
+ int size_in_bytes;
+
+ void *stencil_ptr;
+ int stencil_row_pitch_bytes;
+ int stencil_slice_pitch_bytes;
+ int stencil_sample_pitch_bytes;
+
+ // TODO: unused?
+ void *memoryOwner;
+};
+
+struct buffer_descriptor {
+ ALIGN(16, void *ptr);
+ int size_in_bytes;
+ int robustness_size;
+};
+
+struct program_data {
+ uint8_t *descriptor_sets[MAX_BOUND_DESCRIPTOR_SETS];
+ uint32_t descriptor_dynamic_offsets[MAX_DESCRIPTOR_SET_COMBINED_BUFFERS_DYNAMIC];
+ uint4 num_workgroups;
+ uint4 workgroup_size;
+ uint32_t invocations_per_subgroup;
+ uint32_t subgroups_per_workgroup;
+ uint32_t invocations_per_workgroup;
+ unsigned char push_constants[MAX_PUSH_CONSTANT_SIZE];
+ // Unused.
+ void *constants;
+};
+
+typedef int32_t yield_result;
+
+typedef void * coroutine;
+
+typedef coroutine (*routine_begin)(struct program_data *data,
+ int32_t workgroupX,
+ int32_t workgroupY,
+ int32_t workgroupZ,
+ void *workgroupMemory,
+ int32_t firstSubgroup,
+ int32_t subgroupCount);
+
+typedef bool (*routine_await)(coroutine r, yield_result *res);
+
+typedef void (*routine_destroy)(coroutine r);
+
diff --git a/vendor/gioui.org/cpu/driver.go b/vendor/gioui.org/cpu/driver.go
new file mode 100644
index 0000000..d156e2b
--- /dev/null
+++ b/vendor/gioui.org/cpu/driver.go
@@ -0,0 +1,86 @@
+// SPDX-License-Identifier: Unlicense OR MIT
+
+//go:build linux && (arm64 || arm || amd64)
+// +build linux
+// +build arm64 arm amd64
+
+package cpu
+
+/*
+#cgo CFLAGS: -std=c11 -D_POSIX_C_SOURCE=200112L
+
+#include
+#include
+#include "abi.h"
+#include "runtime.h"
+*/
+import "C"
+import (
+ "unsafe"
+)
+
+type (
+ BufferDescriptor = C.struct_buffer_descriptor
+ ImageDescriptor = C.struct_image_descriptor
+ SamplerDescriptor = C.struct_sampler_descriptor
+
+ DispatchContext = C.struct_dispatch_context
+ ThreadContext = C.struct_thread_context
+ ProgramInfo = C.struct_program_info
+)
+
+const Supported = true
+
+func NewBuffer(size int) BufferDescriptor {
+ return C.alloc_buffer(C.size_t(size))
+}
+
+func (d *BufferDescriptor) Data() []byte {
+ return (*(*[1 << 30]byte)(d.ptr))[:d.size_in_bytes:d.size_in_bytes]
+}
+
+func (d *BufferDescriptor) Free() {
+ if d.ptr != nil {
+ C.free(d.ptr)
+ }
+ *d = BufferDescriptor{}
+}
+
+func NewImageRGBA(width, height int) ImageDescriptor {
+ return C.alloc_image_rgba(C.int(width), C.int(height))
+}
+
+func (d *ImageDescriptor) Data() []byte {
+ return (*(*[1 << 30]byte)(d.ptr))[:d.size_in_bytes:d.size_in_bytes]
+}
+
+func (d *ImageDescriptor) Free() {
+ if d.ptr != nil {
+ C.free(d.ptr)
+ }
+ *d = ImageDescriptor{}
+}
+
+func NewDispatchContext() *DispatchContext {
+ return C.alloc_dispatch_context()
+}
+
+func (c *DispatchContext) Free() {
+ C.free_dispatch_context(c)
+}
+
+func (c *DispatchContext) Prepare(numThreads int, prog *ProgramInfo, descSet unsafe.Pointer, x, y, z int) {
+ C.prepare_dispatch(c, C.int(numThreads), prog, (*C.uint8_t)(descSet), C.int(x), C.int(y), C.int(z))
+}
+
+func (c *DispatchContext) Dispatch(threadIdx int, ctx *ThreadContext) {
+ C.dispatch_thread(c, C.int(threadIdx), ctx)
+}
+
+func NewThreadContext() *ThreadContext {
+ return C.alloc_thread_context()
+}
+
+func (c *ThreadContext) Free() {
+ C.free_thread_context(c)
+}
diff --git a/vendor/gioui.org/cpu/driver_nosupport.go b/vendor/gioui.org/cpu/driver_nosupport.go
new file mode 100644
index 0000000..3a118f2
--- /dev/null
+++ b/vendor/gioui.org/cpu/driver_nosupport.go
@@ -0,0 +1,64 @@
+// SPDX-License-Identifier: Unlicense OR MIT
+
+//go:build !(linux && (arm64 || arm || amd64))
+// +build !linux !arm64,!arm,!amd64
+
+package cpu
+
+import "unsafe"
+
+type (
+ BufferDescriptor struct{}
+ ImageDescriptor struct{}
+ SamplerDescriptor struct{}
+
+ DispatchContext struct{}
+ ThreadContext struct{}
+ ProgramInfo struct{}
+)
+
+const Supported = false
+
+func NewBuffer(size int) BufferDescriptor {
+ panic("unsupported")
+}
+
+func (d *BufferDescriptor) Data() []byte {
+ panic("unsupported")
+}
+
+func (d *BufferDescriptor) Free() {
+}
+
+func NewImageRGBA(width, height int) ImageDescriptor {
+ panic("unsupported")
+}
+
+func (d *ImageDescriptor) Data() []byte {
+ panic("unsupported")
+}
+
+func (d *ImageDescriptor) Free() {
+}
+
+func NewDispatchContext() *DispatchContext {
+ panic("unsupported")
+}
+
+func (c *DispatchContext) Free() {
+}
+
+func (c *DispatchContext) Prepare(numThreads int, prog *ProgramInfo, descSet unsafe.Pointer, x, y, z int) {
+ panic("unsupported")
+}
+
+func (c *DispatchContext) Dispatch(threadIdx int, ctx *ThreadContext) {
+ panic("unsupported")
+}
+
+func NewThreadContext() *ThreadContext {
+ panic("unsupported")
+}
+
+func (c *ThreadContext) Free() {
+}
diff --git a/vendor/gioui.org/cpu/embed.go b/vendor/gioui.org/cpu/embed.go
new file mode 100644
index 0000000..9d3b944
--- /dev/null
+++ b/vendor/gioui.org/cpu/embed.go
@@ -0,0 +1,11 @@
+// SPDX-License-Identifier: Unlicense OR MIT
+
+package cpu
+
+import _ "embed"
+
+//go:embed abi.h
+var ABIH []byte
+
+//go:embed runtime.h
+var RuntimeH []byte
diff --git a/vendor/gioui.org/cpu/go.mod b/vendor/gioui.org/cpu/go.mod
new file mode 100644
index 0000000..46709a0
--- /dev/null
+++ b/vendor/gioui.org/cpu/go.mod
@@ -0,0 +1,3 @@
+module gioui.org/cpu
+
+go 1.17
diff --git a/vendor/gioui.org/cpu/go.sum b/vendor/gioui.org/cpu/go.sum
new file mode 100644
index 0000000..e69de29
diff --git a/vendor/gioui.org/cpu/init.sh b/vendor/gioui.org/cpu/init.sh
new file mode 100644
index 0000000..f0f0a9c
--- /dev/null
+++ b/vendor/gioui.org/cpu/init.sh
@@ -0,0 +1,23 @@
+#!/bin/sh
+
+# SPDX-License-Identifier: Unlicense OR MIT
+
+set -e
+
+cd ~/.cache
+git clone https://github.com/eliasnaur/swiftshader
+cd swiftshader
+
+# 32-bit build
+cp -a build build.32bit
+cd build.32bit
+CXX=clang++ CC=clang CFLAGS=-m32 CXXFLAGS=-m32 cmake -DREACTOR_EMIT_ASM_FILE=true -DSWIFTSHADER_BUILD_PVR=false -DSWIFTSHADER_BUILD_TESTS=false -DSWIFTSHADER_BUILD_GLESv2=false -DSWIFTSHADER_BUILD_EGL=false -DSWIFTSHADER_BUILD_ANGLE=false ..
+cmake --build . --parallel 4
+cd ..
+
+# 64-bit build
+cp -a build build.64bit
+cd build.64bit
+CXX=clang++ CC=clang cmake -DREACTOR_EMIT_ASM_FILE=true -DSWIFTSHADER_BUILD_PVR=false -DSWIFTSHADER_BUILD_TESTS=false -DSWIFTSHADER_BUILD_GLESv2=false -DSWIFTSHADER_BUILD_EGL=false -DSWIFTSHADER_BUILD_ANGLE=false ..
+cmake --build . --parallel 4
+cd ..
diff --git a/vendor/gioui.org/cpu/runtime.c b/vendor/gioui.org/cpu/runtime.c
new file mode 100644
index 0000000..f7f6108
--- /dev/null
+++ b/vendor/gioui.org/cpu/runtime.c
@@ -0,0 +1,245 @@
+// SPDX-License-Identifier: Unlicense OR MIT
+
+//go:build linux && (arm64 || arm || amd64)
+// +build linux
+// +build arm64 arm amd64
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include "abi.h"
+#include "runtime.h"
+#include "_cgo_export.h"
+
+// coroutines is a FIFO queue of coroutines implemented as a circular
+// buffer.
+struct coroutines {
+ coroutine *routines;
+ // start and end indexes into routines.
+ unsigned int start;
+ unsigned int end;
+ // cap is the capacity of routines.
+ unsigned int cap;
+};
+
+struct dispatch_context {
+ // descriptor_set is the aligned storage for the descriptor set.
+ void *descriptor_set;
+ int desc_set_size;
+
+ int nthreads;
+ bool has_cbarriers;
+ size_t memory_size;
+ // Program entrypoints.
+ routine_begin begin;
+ routine_await await;
+ routine_destroy destroy;
+
+ struct program_data data;
+};
+
+struct thread_context {
+ struct coroutines routines;
+ size_t memory_size;
+ uint8_t *memory;
+};
+
+static void *malloc_align(size_t alignment, size_t size) {
+ void *ptr;
+ int ret = posix_memalign(&ptr, alignment, size);
+ assert(ret == 0);
+ return ptr;
+}
+
+static void coroutines_dump(struct coroutines *routines) {
+ fprintf(stderr, "s: %d e: %d c: %d [", routines->start, routines->end, routines->cap);
+ unsigned int i = routines->start;
+ while (i != routines->end) {
+ fprintf(stderr, "%p,", routines->routines[routines->start]);
+ i = (i + 1)%routines->cap;
+ }
+ fprintf(stderr, "]\n");
+}
+
+static void coroutines_push(struct coroutines *routines, coroutine r) {
+ unsigned int next = routines->end + 1;
+ if (next >= routines->cap) {
+ next = 0;
+ }
+ if (next == routines->start) {
+ unsigned int newcap = routines->cap*2;
+ if (newcap < 10) {
+ newcap = 10;
+ }
+ routines->routines = realloc(routines->routines, newcap*sizeof(coroutine));
+ // Move elements wrapped around the old cap to the newly allocated space.
+ if (routines->end < routines->start) {
+ unsigned int nelems = routines->end;
+ unsigned int max = newcap - routines->cap;
+ // We doubled the space above, so we can assume enough room.
+ assert(nelems <= max);
+ memmove(&routines->routines[routines->cap], &routines->routines[0], nelems*sizeof(coroutine));
+ routines->end += routines->cap;
+ }
+ routines->cap = newcap;
+ next = (routines->end + 1)%routines->cap;
+ }
+ routines->routines[routines->end] = r;
+ routines->end = next;
+}
+
+static bool coroutines_pop(struct coroutines *routines, coroutine *r) {
+ if (routines->start == routines->end) {
+ return 0;
+ }
+ *r = routines->routines[routines->start];
+ routines->start = (routines->start + 1)%routines->cap;
+ return 1;
+}
+
+static void coroutines_free(struct coroutines *routines) {
+ if (routines->routines != NULL) {
+ free(routines->routines);
+ }
+ struct coroutines clr = { 0 }; *routines = clr;
+}
+
+struct dispatch_context *alloc_dispatch_context(void) {
+ struct dispatch_context *c = malloc(sizeof(*c));
+ assert(c != NULL);
+ struct dispatch_context clr = { 0 }; *c = clr;
+ return c;
+}
+
+void free_dispatch_context(struct dispatch_context *c) {
+ if (c->descriptor_set != NULL) {
+ free(c->descriptor_set);
+ c->descriptor_set = NULL;
+ }
+}
+
+struct thread_context *alloc_thread_context(void) {
+ struct thread_context *c = malloc(sizeof(*c));
+ assert(c != NULL);
+ struct thread_context clr = { 0 }; *c = clr;
+ return c;
+}
+
+void free_thread_context(struct thread_context *c) {
+ if (c->memory != NULL) {
+ free(c->memory);
+ }
+ coroutines_free(&c->routines);
+ struct thread_context clr = { 0 }; *c = clr;
+}
+
+struct buffer_descriptor alloc_buffer(size_t size) {
+ void *buf = malloc_align(MIN_STORAGE_BUFFER_OFFSET_ALIGNMENT, size);
+ struct buffer_descriptor desc = {
+ .ptr = buf,
+ .size_in_bytes = size,
+ .robustness_size = size,
+ };
+ return desc;
+}
+
+struct image_descriptor alloc_image_rgba(int width, int height) {
+ size_t size = width*height*4;
+ size = (size + 16 - 1)&~(16 - 1);
+ void *storage = malloc_align(REQUIRED_MEMORY_ALIGNMENT, size);
+ struct image_descriptor desc = { 0 };
+ desc.ptr = storage;
+ desc.width = width;
+ desc.height = height;
+ desc.depth = 1;
+ desc.row_pitch_bytes = width*4;
+ desc.slice_pitch_bytes = size;
+ desc.sample_pitch_bytes = size;
+ desc.sample_count = 1;
+ desc.size_in_bytes = size;
+ return desc;
+}
+
+void prepare_dispatch(struct dispatch_context *ctx, int nthreads, struct program_info *info, uint8_t *desc_set, int ngroupx, int ngroupy, int ngroupz) {
+ if (ctx->desc_set_size < info->desc_set_size) {
+ if (ctx->descriptor_set != NULL) {
+ free(ctx->descriptor_set);
+ }
+ ctx->descriptor_set = malloc_align(16, info->desc_set_size);
+ ctx->desc_set_size = info->desc_set_size;
+ }
+ memcpy(ctx->descriptor_set, desc_set, info->desc_set_size);
+
+ int invocations_per_subgroup = SIMD_WIDTH;
+ int invocations_per_workgroup = info->workgroup_size_x * info->workgroup_size_y * info->workgroup_size_z;
+ int subgroups_per_workgroup = (invocations_per_workgroup + invocations_per_subgroup - 1) / invocations_per_subgroup;
+
+ ctx->has_cbarriers = info->has_cbarriers;
+ ctx->begin = info->begin;
+ ctx->await = info->await;
+ ctx->destroy = info->destroy;
+ ctx->nthreads = nthreads;
+ ctx->memory_size = info->min_memory_size;
+
+ ctx->data.workgroup_size[0] = info->workgroup_size_x;
+ ctx->data.workgroup_size[1] = info->workgroup_size_y;
+ ctx->data.workgroup_size[2] = info->workgroup_size_z;
+ ctx->data.num_workgroups[0] = ngroupx;
+ ctx->data.num_workgroups[1] = ngroupy;
+ ctx->data.num_workgroups[2] = ngroupz;
+ ctx->data.invocations_per_subgroup = invocations_per_subgroup;
+ ctx->data.invocations_per_workgroup = invocations_per_workgroup;
+ ctx->data.subgroups_per_workgroup = subgroups_per_workgroup;
+ ctx->data.descriptor_sets[0] = ctx->descriptor_set;
+}
+
+void dispatch_thread(struct dispatch_context *ctx, int thread_idx, struct thread_context *thread) {
+ if (thread->memory_size < ctx->memory_size) {
+ if (thread->memory != NULL) {
+ free(thread->memory);
+ }
+ // SwiftShader doesn't seem to align shared memory. However, better safe
+ // than subtle errors. Note that the program info generator pads
+ // memory_size to ensure space for alignment.
+ thread->memory = malloc_align(16, ctx->memory_size);
+ thread->memory_size = ctx->memory_size;
+ }
+ uint8_t *memory = thread->memory;
+
+ struct program_data *data = &ctx->data;
+
+ int sx = data->num_workgroups[0];
+ int sy = data->num_workgroups[1];
+ int sz = data->num_workgroups[2];
+ int ngroups = sx * sy * sz;
+
+ for (int i = thread_idx; i < ngroups; i += ctx->nthreads) {
+ int group_id = i;
+ int z = group_id / (sx * sy);
+ group_id -= z * sx * sy;
+ int y = group_id / sx;
+ group_id -= y * sx;
+ int x = group_id;
+ if (ctx->has_cbarriers) {
+ for (int subgroup = 0; subgroup < data->subgroups_per_workgroup; subgroup++) {
+ coroutine r = ctx->begin(data, x, y, z, memory, subgroup, 1);
+ coroutines_push(&thread->routines, r);
+ }
+ } else {
+ coroutine r = ctx->begin(data, x, y, z, memory, 0, data->subgroups_per_workgroup);
+ coroutines_push(&thread->routines, r);
+ }
+ coroutine r;
+ while (coroutines_pop(&thread->routines, &r)) {
+ yield_result res;
+ if (ctx->await(r, &res)) {
+ coroutines_push(&thread->routines, r);
+ } else {
+ ctx->destroy(r);
+ }
+ }
+ }
+}
diff --git a/vendor/gioui.org/cpu/runtime.h b/vendor/gioui.org/cpu/runtime.h
new file mode 100644
index 0000000..cfae912
--- /dev/null
+++ b/vendor/gioui.org/cpu/runtime.h
@@ -0,0 +1,45 @@
+// SPDX-License-Identifier: Unlicense OR MIT
+
+#define ATTR_HIDDEN __attribute__ ((visibility ("hidden")))
+
+// program_info contains constant parameters for a program.
+struct program_info {
+ // MinMemorySize is the minimum size of memory passed to dispatch.
+ size_t min_memory_size;
+ // has_cbarriers is 1 when the program contains control barriers.
+ bool has_cbarriers;
+ // desc_set_size is the size of the first descriptor set for the program.
+ size_t desc_set_size;
+ int workgroup_size_x;
+ int workgroup_size_y;
+ int workgroup_size_z;
+ // Program entrypoints.
+ routine_begin begin;
+ routine_await await;
+ routine_destroy destroy;
+};
+
+// dispatch_context contains the information a program dispatch.
+struct dispatch_context;
+
+// thread_context contains the working memory of a batch. It may be
+// reused, but not concurrently.
+struct thread_context;
+
+extern struct buffer_descriptor alloc_buffer(size_t size) ATTR_HIDDEN;
+extern struct image_descriptor alloc_image_rgba(int width, int height) ATTR_HIDDEN;
+
+extern struct dispatch_context *alloc_dispatch_context(void) ATTR_HIDDEN;
+
+extern void free_dispatch_context(struct dispatch_context *c) ATTR_HIDDEN;
+
+extern struct thread_context *alloc_thread_context(void) ATTR_HIDDEN;
+
+extern void free_thread_context(struct thread_context *c) ATTR_HIDDEN;
+
+// prepare_dispatch initializes ctx to run a dispatch of a program distributed
+// among nthreads threads.
+extern void prepare_dispatch(struct dispatch_context *ctx, int nthreads, struct program_info *info, uint8_t *desc_set, int ngroupx, int ngroupy, int ngroupz) ATTR_HIDDEN;
+
+// dispatch_batch executes a dispatch batch.
+extern void dispatch_thread(struct dispatch_context *ctx, int thread_idx, struct thread_context *thread) ATTR_HIDDEN;
diff --git a/vendor/gioui.org/f32/affine.go b/vendor/gioui.org/f32/affine.go
new file mode 100644
index 0000000..667f7e9
--- /dev/null
+++ b/vendor/gioui.org/f32/affine.go
@@ -0,0 +1,143 @@
+// SPDX-License-Identifier: Unlicense OR MIT
+
+package f32
+
+import (
+ "fmt"
+ "math"
+)
+
+// Affine2D represents an affine 2D transformation. The zero value if Affine2D
+// represents the identity transform.
+type Affine2D struct {
+ // in order to make the zero value of Affine2D represent the identity
+ // transform we store it with the identity matrix subtracted, that is
+ // if the actual transformation matrix is:
+ // [sx, hx, ox]
+ // [hy, sy, oy]
+ // [ 0, 0, 1]
+ // we store a = sx-1 and e = sy-1
+ a, b, c float32
+ d, e, f float32
+}
+
+// NewAffine2D creates a new Affine2D transform from the matrix elements
+// in row major order. The rows are: [sx, hx, ox], [hy, sy, oy], [0, 0, 1].
+func NewAffine2D(sx, hx, ox, hy, sy, oy float32) Affine2D {
+ return Affine2D{
+ a: sx - 1, b: hx, c: ox,
+ d: hy, e: sy - 1, f: oy,
+ }
+}
+
+// Offset the transformation.
+func (a Affine2D) Offset(offset Point) Affine2D {
+ return Affine2D{
+ a.a, a.b, a.c + offset.X,
+ a.d, a.e, a.f + offset.Y,
+ }
+}
+
+// Scale the transformation around the given origin.
+func (a Affine2D) Scale(origin, factor Point) Affine2D {
+ if origin == (Point{}) {
+ return a.scale(factor)
+ }
+ a = a.Offset(origin.Mul(-1))
+ a = a.scale(factor)
+ return a.Offset(origin)
+}
+
+// Rotate the transformation by the given angle (in radians) counter clockwise around the given origin.
+func (a Affine2D) Rotate(origin Point, radians float32) Affine2D {
+ if origin == (Point{}) {
+ return a.rotate(radians)
+ }
+ a = a.Offset(origin.Mul(-1))
+ a = a.rotate(radians)
+ return a.Offset(origin)
+}
+
+// Shear the transformation by the given angle (in radians) around the given origin.
+func (a Affine2D) Shear(origin Point, radiansX, radiansY float32) Affine2D {
+ if origin == (Point{}) {
+ return a.shear(radiansX, radiansY)
+ }
+ a = a.Offset(origin.Mul(-1))
+ a = a.shear(radiansX, radiansY)
+ return a.Offset(origin)
+}
+
+// Mul returns A*B.
+func (A Affine2D) Mul(B Affine2D) (r Affine2D) {
+ r.a = (A.a+1)*(B.a+1) + A.b*B.d - 1
+ r.b = (A.a+1)*B.b + A.b*(B.e+1)
+ r.c = (A.a+1)*B.c + A.b*B.f + A.c
+ r.d = A.d*(B.a+1) + (A.e+1)*B.d
+ r.e = A.d*B.b + (A.e+1)*(B.e+1) - 1
+ r.f = A.d*B.c + (A.e+1)*B.f + A.f
+ return r
+}
+
+// Invert the transformation. Note that if the matrix is close to singular
+// numerical errors may become large or infinity.
+func (a Affine2D) Invert() Affine2D {
+ if a.a == 0 && a.b == 0 && a.d == 0 && a.e == 0 {
+ return Affine2D{a: 0, b: 0, c: -a.c, d: 0, e: 0, f: -a.f}
+ }
+ a.a += 1
+ a.e += 1
+ det := a.a*a.e - a.b*a.d
+ a.a, a.e = a.e/det, a.a/det
+ a.b, a.d = -a.b/det, -a.d/det
+ temp := a.c
+ a.c = -a.a*a.c - a.b*a.f
+ a.f = -a.d*temp - a.e*a.f
+ a.a -= 1
+ a.e -= 1
+ return a
+}
+
+// Transform p by returning a*p.
+func (a Affine2D) Transform(p Point) Point {
+ return Point{
+ X: p.X*(a.a+1) + p.Y*a.b + a.c,
+ Y: p.X*a.d + p.Y*(a.e+1) + a.f,
+ }
+}
+
+// Elems returns the matrix elements of the transform in row-major order. The
+// rows are: [sx, hx, ox], [hy, sy, oy], [0, 0, 1].
+func (a Affine2D) Elems() (sx, hx, ox, hy, sy, oy float32) {
+ return a.a + 1, a.b, a.c, a.d, a.e + 1, a.f
+}
+
+func (a Affine2D) scale(factor Point) Affine2D {
+ return Affine2D{
+ (a.a+1)*factor.X - 1, a.b * factor.X, a.c * factor.X,
+ a.d * factor.Y, (a.e+1)*factor.Y - 1, a.f * factor.Y,
+ }
+}
+
+func (a Affine2D) rotate(radians float32) Affine2D {
+ sin, cos := math.Sincos(float64(radians))
+ s, c := float32(sin), float32(cos)
+ return Affine2D{
+ (a.a+1)*c - a.d*s - 1, a.b*c - (a.e+1)*s, a.c*c - a.f*s,
+ (a.a+1)*s + a.d*c, a.b*s + (a.e+1)*c - 1, a.c*s + a.f*c,
+ }
+}
+
+func (a Affine2D) shear(radiansX, radiansY float32) Affine2D {
+ tx := float32(math.Tan(float64(radiansX)))
+ ty := float32(math.Tan(float64(radiansY)))
+ return Affine2D{
+ (a.a + 1) + a.d*tx - 1, a.b + (a.e+1)*tx, a.c + a.f*tx,
+ (a.a+1)*ty + a.d, a.b*ty + (a.e + 1) - 1, a.f*ty + a.f,
+ }
+}
+
+func (a Affine2D) String() string {
+ sx, hx, ox, hy, sy, oy := a.Elems()
+ return fmt.Sprintf("[[%f %f %f] [%f %f %f]]", sx, hx, ox, hy, sy, oy)
+}
diff --git a/vendor/gioui.org/f32/f32.go b/vendor/gioui.org/f32/f32.go
new file mode 100644
index 0000000..54882cb
--- /dev/null
+++ b/vendor/gioui.org/f32/f32.go
@@ -0,0 +1,169 @@
+// SPDX-License-Identifier: Unlicense OR MIT
+
+/*
+Package f32 is a float32 implementation of package image's
+Point and Rectangle.
+
+The coordinate space has the origin in the top left
+corner with the axes extending right and down.
+*/
+package f32
+
+import "strconv"
+
+// A Point is a two dimensional point.
+type Point struct {
+ X, Y float32
+}
+
+// String return a string representation of p.
+func (p Point) String() string {
+ return "(" + strconv.FormatFloat(float64(p.X), 'f', -1, 32) +
+ "," + strconv.FormatFloat(float64(p.Y), 'f', -1, 32) + ")"
+}
+
+// A Rectangle contains the points (X, Y) where Min.X <= X < Max.X,
+// Min.Y <= Y < Max.Y.
+type Rectangle struct {
+ Min, Max Point
+}
+
+// String return a string representation of r.
+func (r Rectangle) String() string {
+ return r.Min.String() + "-" + r.Max.String()
+}
+
+// Rect is a shorthand for Rectangle{Point{x0, y0}, Point{x1, y1}}.
+// The returned Rectangle has x0 and y0 swapped if necessary so that
+// it's correctly formed.
+func Rect(x0, y0, x1, y1 float32) Rectangle {
+ if x0 > x1 {
+ x0, x1 = x1, x0
+ }
+ if y0 > y1 {
+ y0, y1 = y1, y0
+ }
+ return Rectangle{Point{x0, y0}, Point{x1, y1}}
+}
+
+// Pt is shorthand for Point{X: x, Y: y}.
+func Pt(x, y float32) Point {
+ return Point{X: x, Y: y}
+}
+
+// Add return the point p+p2.
+func (p Point) Add(p2 Point) Point {
+ return Point{X: p.X + p2.X, Y: p.Y + p2.Y}
+}
+
+// Sub returns the vector p-p2.
+func (p Point) Sub(p2 Point) Point {
+ return Point{X: p.X - p2.X, Y: p.Y - p2.Y}
+}
+
+// Mul returns p scaled by s.
+func (p Point) Mul(s float32) Point {
+ return Point{X: p.X * s, Y: p.Y * s}
+}
+
+// Div returns the vector p/s.
+func (p Point) Div(s float32) Point {
+ return Point{X: p.X / s, Y: p.Y / s}
+}
+
+// In reports whether p is in r.
+func (p Point) In(r Rectangle) bool {
+ return r.Min.X <= p.X && p.X < r.Max.X &&
+ r.Min.Y <= p.Y && p.Y < r.Max.Y
+}
+
+// Size returns r's width and height.
+func (r Rectangle) Size() Point {
+ return Point{X: r.Dx(), Y: r.Dy()}
+}
+
+// Dx returns r's width.
+func (r Rectangle) Dx() float32 {
+ return r.Max.X - r.Min.X
+}
+
+// Dy returns r's Height.
+func (r Rectangle) Dy() float32 {
+ return r.Max.Y - r.Min.Y
+}
+
+// Intersect returns the intersection of r and s.
+func (r Rectangle) Intersect(s Rectangle) Rectangle {
+ if r.Min.X < s.Min.X {
+ r.Min.X = s.Min.X
+ }
+ if r.Min.Y < s.Min.Y {
+ r.Min.Y = s.Min.Y
+ }
+ if r.Max.X > s.Max.X {
+ r.Max.X = s.Max.X
+ }
+ if r.Max.Y > s.Max.Y {
+ r.Max.Y = s.Max.Y
+ }
+ if r.Empty() {
+ return Rectangle{}
+ }
+ return r
+}
+
+// Union returns the union of r and s.
+func (r Rectangle) Union(s Rectangle) Rectangle {
+ if r.Empty() {
+ return s
+ }
+ if s.Empty() {
+ return r
+ }
+ if r.Min.X > s.Min.X {
+ r.Min.X = s.Min.X
+ }
+ if r.Min.Y > s.Min.Y {
+ r.Min.Y = s.Min.Y
+ }
+ if r.Max.X < s.Max.X {
+ r.Max.X = s.Max.X
+ }
+ if r.Max.Y < s.Max.Y {
+ r.Max.Y = s.Max.Y
+ }
+ return r
+}
+
+// Canon returns the canonical version of r, where Min is to
+// the upper left of Max.
+func (r Rectangle) Canon() Rectangle {
+ if r.Max.X < r.Min.X {
+ r.Min.X, r.Max.X = r.Max.X, r.Min.X
+ }
+ if r.Max.Y < r.Min.Y {
+ r.Min.Y, r.Max.Y = r.Max.Y, r.Min.Y
+ }
+ return r
+}
+
+// Empty reports whether r represents the empty area.
+func (r Rectangle) Empty() bool {
+ return r.Min.X >= r.Max.X || r.Min.Y >= r.Max.Y
+}
+
+// Add offsets r with the vector p.
+func (r Rectangle) Add(p Point) Rectangle {
+ return Rectangle{
+ Point{r.Min.X + p.X, r.Min.Y + p.Y},
+ Point{r.Max.X + p.X, r.Max.Y + p.Y},
+ }
+}
+
+// Sub offsets r with the vector -p.
+func (r Rectangle) Sub(p Point) Rectangle {
+ return Rectangle{
+ Point{r.Min.X - p.X, r.Min.Y - p.Y},
+ Point{r.Max.X - p.X, r.Max.Y - p.Y},
+ }
+}
diff --git a/vendor/gioui.org/gesture/gesture.go b/vendor/gioui.org/gesture/gesture.go
new file mode 100644
index 0000000..aa3585b
--- /dev/null
+++ b/vendor/gioui.org/gesture/gesture.go
@@ -0,0 +1,465 @@
+// SPDX-License-Identifier: Unlicense OR MIT
+
+/*
+Package gesture implements common pointer gestures.
+
+Gestures accept low level pointer Events from an event
+Queue and detect higher level actions such as clicks
+and scrolling.
+*/
+package gesture
+
+import (
+ "image"
+ "math"
+ "runtime"
+ "time"
+
+ "gioui.org/f32"
+ "gioui.org/internal/fling"
+ "gioui.org/io/event"
+ "gioui.org/io/key"
+ "gioui.org/io/pointer"
+ "gioui.org/op"
+ "gioui.org/unit"
+)
+
+// The duration is somewhat arbitrary.
+const doubleClickDuration = 200 * time.Millisecond
+
+// Hover detects the hover gesture for a pointer area.
+type Hover struct {
+ // entered tracks whether the pointer is inside the gesture.
+ entered bool
+ // pid is the pointer.ID.
+ pid pointer.ID
+}
+
+// Add the gesture to detect hovering over the current pointer area.
+func (h *Hover) Add(ops *op.Ops) {
+ pointer.InputOp{
+ Tag: h,
+ Types: pointer.Enter | pointer.Leave,
+ }.Add(ops)
+}
+
+// Hovered returns whether a pointer is inside the area.
+func (h *Hover) Hovered(q event.Queue) bool {
+ for _, ev := range q.Events(h) {
+ e, ok := ev.(pointer.Event)
+ if !ok {
+ continue
+ }
+ switch e.Type {
+ case pointer.Leave:
+ if h.entered && h.pid == e.PointerID {
+ h.entered = false
+ }
+ case pointer.Enter:
+ if !h.entered {
+ h.pid = e.PointerID
+ }
+ if h.pid == e.PointerID {
+ h.entered = true
+ }
+ }
+ }
+ return h.entered
+}
+
+// Click detects click gestures in the form
+// of ClickEvents.
+type Click struct {
+ // clickedAt is the timestamp at which
+ // the last click occurred.
+ clickedAt time.Duration
+ // clicks is incremented if successive clicks
+ // are performed within a fixed duration.
+ clicks int
+ // pressed tracks whether the pointer is pressed.
+ pressed bool
+ // entered tracks whether the pointer is inside the gesture.
+ entered bool
+ // pid is the pointer.ID.
+ pid pointer.ID
+}
+
+// ClickEvent represent a click action, either a
+// TypePress for the beginning of a click or a
+// TypeClick for a completed click.
+type ClickEvent struct {
+ Type ClickType
+ Position f32.Point
+ Source pointer.Source
+ Modifiers key.Modifiers
+ // NumClicks records successive clicks occurring
+ // within a short duration of each other.
+ NumClicks int
+}
+
+type ClickType uint8
+
+// Drag detects drag gestures in the form of pointer.Drag events.
+type Drag struct {
+ dragging bool
+ pressed bool
+ pid pointer.ID
+ start f32.Point
+ grab bool
+}
+
+// Scroll detects scroll gestures and reduces them to
+// scroll distances. Scroll recognizes mouse wheel
+// movements as well as drag and fling touch gestures.
+type Scroll struct {
+ dragging bool
+ axis Axis
+ estimator fling.Extrapolation
+ flinger fling.Animation
+ pid pointer.ID
+ grab bool
+ last int
+ // Leftover scroll.
+ scroll float32
+}
+
+type ScrollState uint8
+
+type Axis uint8
+
+const (
+ Horizontal Axis = iota
+ Vertical
+ Both
+)
+
+const (
+ // TypePress is reported for the first pointer
+ // press.
+ TypePress ClickType = iota
+ // TypeClick is reported when a click action
+ // is complete.
+ TypeClick
+ // TypeCancel is reported when the gesture is
+ // cancelled.
+ TypeCancel
+)
+
+const (
+ // StateIdle is the default scroll state.
+ StateIdle ScrollState = iota
+ // StateDragging is reported during drag gestures.
+ StateDragging
+ // StateFlinging is reported when a fling is
+ // in progress.
+ StateFlinging
+)
+
+var touchSlop = unit.Dp(3)
+
+// Add the handler to the operation list to receive click events.
+func (c *Click) Add(ops *op.Ops) {
+ pointer.InputOp{
+ Tag: c,
+ Types: pointer.Press | pointer.Release | pointer.Enter | pointer.Leave,
+ }.Add(ops)
+}
+
+// Hovered returns whether a pointer is inside the area.
+func (c *Click) Hovered() bool {
+ return c.entered
+}
+
+// Pressed returns whether a pointer is pressing.
+func (c *Click) Pressed() bool {
+ return c.pressed
+}
+
+// Events returns the next click events, if any.
+func (c *Click) Events(q event.Queue) []ClickEvent {
+ var events []ClickEvent
+ for _, evt := range q.Events(c) {
+ e, ok := evt.(pointer.Event)
+ if !ok {
+ continue
+ }
+ switch e.Type {
+ case pointer.Release:
+ if !c.pressed || c.pid != e.PointerID {
+ break
+ }
+ c.pressed = false
+ if c.entered {
+ if e.Time-c.clickedAt < doubleClickDuration {
+ c.clicks++
+ } else {
+ c.clicks = 1
+ }
+ c.clickedAt = e.Time
+ events = append(events, ClickEvent{Type: TypeClick, Position: e.Position, Source: e.Source, Modifiers: e.Modifiers, NumClicks: c.clicks})
+ } else {
+ events = append(events, ClickEvent{Type: TypeCancel})
+ }
+ case pointer.Cancel:
+ wasPressed := c.pressed
+ c.pressed = false
+ c.entered = false
+ if wasPressed {
+ events = append(events, ClickEvent{Type: TypeCancel})
+ }
+ case pointer.Press:
+ if c.pressed {
+ break
+ }
+ if e.Source == pointer.Mouse && e.Buttons != pointer.ButtonPrimary {
+ break
+ }
+ if !c.entered {
+ c.pid = e.PointerID
+ }
+ if c.pid != e.PointerID {
+ break
+ }
+ c.pressed = true
+ events = append(events, ClickEvent{Type: TypePress, Position: e.Position, Source: e.Source, Modifiers: e.Modifiers})
+ case pointer.Leave:
+ if !c.pressed {
+ c.pid = e.PointerID
+ }
+ if c.pid == e.PointerID {
+ c.entered = false
+ }
+ case pointer.Enter:
+ if !c.pressed {
+ c.pid = e.PointerID
+ }
+ if c.pid == e.PointerID {
+ c.entered = true
+ }
+ }
+ }
+ return events
+}
+
+func (ClickEvent) ImplementsEvent() {}
+
+// Add the handler to the operation list to receive scroll events.
+func (s *Scroll) Add(ops *op.Ops, bounds image.Rectangle) {
+ oph := pointer.InputOp{
+ Tag: s,
+ Grab: s.grab,
+ Types: pointer.Press | pointer.Drag | pointer.Release | pointer.Scroll,
+ ScrollBounds: bounds,
+ }
+ oph.Add(ops)
+ if s.flinger.Active() {
+ op.InvalidateOp{}.Add(ops)
+ }
+}
+
+// Stop any remaining fling movement.
+func (s *Scroll) Stop() {
+ s.flinger = fling.Animation{}
+}
+
+// Scroll detects the scrolling distance from the available events and
+// ongoing fling gestures.
+func (s *Scroll) Scroll(cfg unit.Metric, q event.Queue, t time.Time, axis Axis) int {
+ if s.axis != axis {
+ s.axis = axis
+ return 0
+ }
+ total := 0
+ for _, evt := range q.Events(s) {
+ e, ok := evt.(pointer.Event)
+ if !ok {
+ continue
+ }
+ switch e.Type {
+ case pointer.Press:
+ if s.dragging {
+ break
+ }
+ // Only scroll on touch drags, or on Android where mice
+ // drags also scroll by convention.
+ if e.Source != pointer.Touch && runtime.GOOS != "android" {
+ break
+ }
+ s.Stop()
+ s.estimator = fling.Extrapolation{}
+ v := s.val(e.Position)
+ s.last = int(math.Round(float64(v)))
+ s.estimator.Sample(e.Time, v)
+ s.dragging = true
+ s.pid = e.PointerID
+ case pointer.Release:
+ if s.pid != e.PointerID {
+ break
+ }
+ fling := s.estimator.Estimate()
+ if slop, d := float32(cfg.Px(touchSlop)), fling.Distance; d < -slop || d > slop {
+ s.flinger.Start(cfg, t, fling.Velocity)
+ }
+ fallthrough
+ case pointer.Cancel:
+ s.dragging = false
+ s.grab = false
+ case pointer.Scroll:
+ switch s.axis {
+ case Horizontal:
+ s.scroll += e.Scroll.X
+ case Vertical:
+ s.scroll += e.Scroll.Y
+ }
+ iscroll := int(s.scroll)
+ s.scroll -= float32(iscroll)
+ total += iscroll
+ case pointer.Drag:
+ if !s.dragging || s.pid != e.PointerID {
+ continue
+ }
+ val := s.val(e.Position)
+ s.estimator.Sample(e.Time, val)
+ v := int(math.Round(float64(val)))
+ dist := s.last - v
+ if e.Priority < pointer.Grabbed {
+ slop := cfg.Px(touchSlop)
+ if dist := dist; dist >= slop || -slop >= dist {
+ s.grab = true
+ }
+ } else {
+ s.last = v
+ total += dist
+ }
+ }
+ }
+ total += s.flinger.Tick(t)
+ return total
+}
+
+func (s *Scroll) val(p f32.Point) float32 {
+ if s.axis == Horizontal {
+ return p.X
+ } else {
+ return p.Y
+ }
+}
+
+// State reports the scroll state.
+func (s *Scroll) State() ScrollState {
+ switch {
+ case s.flinger.Active():
+ return StateFlinging
+ case s.dragging:
+ return StateDragging
+ default:
+ return StateIdle
+ }
+}
+
+// Add the handler to the operation list to receive drag events.
+func (d *Drag) Add(ops *op.Ops) {
+ pointer.InputOp{
+ Tag: d,
+ Grab: d.grab,
+ Types: pointer.Press | pointer.Drag | pointer.Release,
+ }.Add(ops)
+}
+
+// Events returns the next drag events, if any.
+func (d *Drag) Events(cfg unit.Metric, q event.Queue, axis Axis) []pointer.Event {
+ var events []pointer.Event
+ for _, e := range q.Events(d) {
+ e, ok := e.(pointer.Event)
+ if !ok {
+ continue
+ }
+
+ switch e.Type {
+ case pointer.Press:
+ if !(e.Buttons == pointer.ButtonPrimary || e.Source == pointer.Touch) {
+ continue
+ }
+ d.pressed = true
+ if d.dragging {
+ continue
+ }
+ d.dragging = true
+ d.pid = e.PointerID
+ d.start = e.Position
+ case pointer.Drag:
+ if !d.dragging || e.PointerID != d.pid {
+ continue
+ }
+ switch axis {
+ case Horizontal:
+ e.Position.Y = d.start.Y
+ case Vertical:
+ e.Position.X = d.start.X
+ case Both:
+ // Do nothing
+ }
+ if e.Priority < pointer.Grabbed {
+ diff := e.Position.Sub(d.start)
+ slop := cfg.Px(touchSlop)
+ if diff.X*diff.X+diff.Y*diff.Y > float32(slop*slop) {
+ d.grab = true
+ }
+ }
+ case pointer.Release, pointer.Cancel:
+ d.pressed = false
+ if !d.dragging || e.PointerID != d.pid {
+ continue
+ }
+ d.dragging = false
+ d.grab = false
+ }
+
+ events = append(events, e)
+ }
+
+ return events
+}
+
+// Dragging reports whether it is currently in use.
+func (d *Drag) Dragging() bool { return d.dragging }
+
+// Pressed returns whether a pointer is pressing.
+func (d *Drag) Pressed() bool { return d.pressed }
+
+func (a Axis) String() string {
+ switch a {
+ case Horizontal:
+ return "Horizontal"
+ case Vertical:
+ return "Vertical"
+ default:
+ panic("invalid Axis")
+ }
+}
+
+func (ct ClickType) String() string {
+ switch ct {
+ case TypePress:
+ return "TypePress"
+ case TypeClick:
+ return "TypeClick"
+ case TypeCancel:
+ return "TypeCancel"
+ default:
+ panic("invalid ClickType")
+ }
+}
+
+func (s ScrollState) String() string {
+ switch s {
+ case StateIdle:
+ return "StateIdle"
+ case StateDragging:
+ return "StateDragging"
+ case StateFlinging:
+ return "StateFlinging"
+ default:
+ panic("unreachable")
+ }
+}
diff --git a/vendor/gioui.org/gpu/api.go b/vendor/gioui.org/gpu/api.go
new file mode 100644
index 0000000..d347e5a
--- /dev/null
+++ b/vendor/gioui.org/gpu/api.go
@@ -0,0 +1,40 @@
+// SPDX-License-Identifier: Unlicense OR MIT
+
+package gpu
+
+import "gioui.org/gpu/internal/driver"
+
+// An API carries the necessary GPU API specific resources to create a Device.
+// There is an API type for each supported GPU API such as OpenGL and Direct3D.
+type API = driver.API
+
+// A RenderTarget denotes the destination framebuffer for a frame.
+type RenderTarget = driver.RenderTarget
+
+// OpenGLRenderTarget is a render target suitable for the OpenGL backend.
+type OpenGLRenderTarget = driver.OpenGLRenderTarget
+
+// Direct3D11RenderTarget is a render target suitable for the Direct3D 11 backend.
+type Direct3D11RenderTarget = driver.Direct3D11RenderTarget
+
+// MetalRenderTarget is a render target suitable for the Metal backend.
+type MetalRenderTarget = driver.MetalRenderTarget
+
+// VulkanRenderTarget is a render target suitable for the Vulkan backend.
+type VulkanRenderTarget = driver.VulkanRenderTarget
+
+// OpenGL denotes the OpenGL or OpenGL ES API.
+type OpenGL = driver.OpenGL
+
+// Direct3D11 denotes the Direct3D API.
+type Direct3D11 = driver.Direct3D11
+
+// Metal denotes the Apple Metal API.
+type Metal = driver.Metal
+
+// Vulkan denotes the Vulkan API.
+type Vulkan = driver.Vulkan
+
+// ErrDeviceLost is returned from GPU operations when the underlying GPU device
+// is lost and should be recreated.
+var ErrDeviceLost = driver.ErrDeviceLost
diff --git a/vendor/gioui.org/gpu/caches.go b/vendor/gioui.org/gpu/caches.go
new file mode 100644
index 0000000..c6a71bc
--- /dev/null
+++ b/vendor/gioui.org/gpu/caches.go
@@ -0,0 +1,142 @@
+// SPDX-License-Identifier: Unlicense OR MIT
+
+package gpu
+
+import (
+ "fmt"
+
+ "gioui.org/f32"
+)
+
+type resourceCache struct {
+ res map[interface{}]resource
+ newRes map[interface{}]resource
+}
+
+// opCache is like a resourceCache but using concrete types and a
+// freelist instead of two maps to avoid runtime.mapaccess2 calls
+// since benchmarking showed them as a bottleneck.
+type opCache struct {
+ // store the index + 1 in cache this key is stored in
+ index map[opKey]int
+ // list of indexes in cache that are free and can be used
+ freelist []int
+ cache []opCacheValue
+}
+
+type opCacheValue struct {
+ data pathData
+
+ bounds f32.Rectangle
+ // the fields below are handled by opCache
+ key opKey
+ keep bool
+}
+
+func newResourceCache() *resourceCache {
+ return &resourceCache{
+ res: make(map[interface{}]resource),
+ newRes: make(map[interface{}]resource),
+ }
+}
+
+func (r *resourceCache) get(key interface{}) (resource, bool) {
+ v, exists := r.res[key]
+ if exists {
+ r.newRes[key] = v
+ }
+ return v, exists
+}
+
+func (r *resourceCache) put(key interface{}, val resource) {
+ if _, exists := r.newRes[key]; exists {
+ panic(fmt.Errorf("key exists, %p", key))
+ }
+ r.res[key] = val
+ r.newRes[key] = val
+}
+
+func (r *resourceCache) frame() {
+ for k, v := range r.res {
+ if _, exists := r.newRes[k]; !exists {
+ delete(r.res, k)
+ v.release()
+ }
+ }
+ for k, v := range r.newRes {
+ delete(r.newRes, k)
+ r.res[k] = v
+ }
+}
+
+func (r *resourceCache) release() {
+ r.frame()
+ for _, v := range r.res {
+ v.release()
+ }
+ r.newRes = nil
+ r.res = nil
+}
+
+func newOpCache() *opCache {
+ return &opCache{
+ index: make(map[opKey]int),
+ freelist: make([]int, 0),
+ cache: make([]opCacheValue, 0),
+ }
+}
+
+func (r *opCache) get(key opKey) (o opCacheValue, exist bool) {
+ v := r.index[key]
+ if v == 0 {
+ return
+ }
+ r.cache[v-1].keep = true
+ return r.cache[v-1], true
+}
+
+func (r *opCache) put(key opKey, val opCacheValue) {
+ v := r.index[key]
+ val.keep = true
+ val.key = key
+ if v == 0 {
+ // not in cache
+ i := len(r.cache)
+ if len(r.freelist) > 0 {
+ i = r.freelist[len(r.freelist)-1]
+ r.freelist = r.freelist[:len(r.freelist)-1]
+ r.cache[i] = val
+ } else {
+ r.cache = append(r.cache, val)
+ }
+ r.index[key] = i + 1
+ } else {
+ r.cache[v-1] = val
+ }
+}
+
+func (r *opCache) frame() {
+ r.freelist = r.freelist[:0]
+ for i, v := range r.cache {
+ r.cache[i].keep = false
+ if v.keep {
+ continue
+ }
+ if v.data.data != nil {
+ v.data.release()
+ r.cache[i].data.data = nil
+ }
+ delete(r.index, v.key)
+ r.freelist = append(r.freelist, i)
+ }
+}
+
+func (r *opCache) release() {
+ for i := range r.cache {
+ r.cache[i].keep = false
+ }
+ r.frame()
+ r.index = nil
+ r.freelist = nil
+ r.cache = nil
+}
diff --git a/vendor/gioui.org/gpu/clip.go b/vendor/gioui.org/gpu/clip.go
new file mode 100644
index 0000000..8ae25b5
--- /dev/null
+++ b/vendor/gioui.org/gpu/clip.go
@@ -0,0 +1,114 @@
+package gpu
+
+import (
+ "gioui.org/f32"
+ "gioui.org/internal/stroke"
+)
+
+type quadSplitter struct {
+ bounds f32.Rectangle
+ contour uint32
+ d *drawOps
+}
+
+func encodeQuadTo(data []byte, meta uint32, from, ctrl, to f32.Point) {
+ // NW.
+ encodeVertex(data, meta, -1, 1, from, ctrl, to)
+ // NE.
+ encodeVertex(data[vertStride:], meta, 1, 1, from, ctrl, to)
+ // SW.
+ encodeVertex(data[vertStride*2:], meta, -1, -1, from, ctrl, to)
+ // SE.
+ encodeVertex(data[vertStride*3:], meta, 1, -1, from, ctrl, to)
+}
+
+func encodeVertex(data []byte, meta uint32, cornerx, cornery int16, from, ctrl, to f32.Point) {
+ var corner float32
+ if cornerx == 1 {
+ corner += .5
+ }
+ if cornery == 1 {
+ corner += .25
+ }
+ v := vertex{
+ Corner: corner,
+ FromX: from.X,
+ FromY: from.Y,
+ CtrlX: ctrl.X,
+ CtrlY: ctrl.Y,
+ ToX: to.X,
+ ToY: to.Y,
+ }
+ v.encode(data, meta)
+}
+
+func (qs *quadSplitter) encodeQuadTo(from, ctrl, to f32.Point) {
+ data := qs.d.writeVertCache(vertStride * 4)
+ encodeQuadTo(data, qs.contour, from, ctrl, to)
+}
+
+func (qs *quadSplitter) splitAndEncode(quad stroke.QuadSegment) {
+ cbnd := f32.Rectangle{
+ Min: quad.From,
+ Max: quad.To,
+ }.Canon()
+ from, ctrl, to := quad.From, quad.Ctrl, quad.To
+
+ // If the curve contain areas where a vertical line
+ // intersects it twice, split the curve in two x monotone
+ // lower and upper curves. The stencil fragment program
+ // expects only one intersection per curve.
+
+ // Find the t where the derivative in x is 0.
+ v0 := ctrl.Sub(from)
+ v1 := to.Sub(ctrl)
+ d := v0.X - v1.X
+ // t = v0 / d. Split if t is in ]0;1[.
+ if v0.X > 0 && d > v0.X || v0.X < 0 && d < v0.X {
+ t := v0.X / d
+ ctrl0 := from.Mul(1 - t).Add(ctrl.Mul(t))
+ ctrl1 := ctrl.Mul(1 - t).Add(to.Mul(t))
+ mid := ctrl0.Mul(1 - t).Add(ctrl1.Mul(t))
+ qs.encodeQuadTo(from, ctrl0, mid)
+ qs.encodeQuadTo(mid, ctrl1, to)
+ if mid.X > cbnd.Max.X {
+ cbnd.Max.X = mid.X
+ }
+ if mid.X < cbnd.Min.X {
+ cbnd.Min.X = mid.X
+ }
+ } else {
+ qs.encodeQuadTo(from, ctrl, to)
+ }
+ // Find the y extremum, if any.
+ d = v0.Y - v1.Y
+ if v0.Y > 0 && d > v0.Y || v0.Y < 0 && d < v0.Y {
+ t := v0.Y / d
+ y := (1-t)*(1-t)*from.Y + 2*(1-t)*t*ctrl.Y + t*t*to.Y
+ if y > cbnd.Max.Y {
+ cbnd.Max.Y = y
+ }
+ if y < cbnd.Min.Y {
+ cbnd.Min.Y = y
+ }
+ }
+
+ qs.bounds = unionRect(qs.bounds, cbnd)
+}
+
+// Union is like f32.Rectangle.Union but ignores empty rectangles.
+func unionRect(r, s f32.Rectangle) f32.Rectangle {
+ if r.Min.X > s.Min.X {
+ r.Min.X = s.Min.X
+ }
+ if r.Min.Y > s.Min.Y {
+ r.Min.Y = s.Min.Y
+ }
+ if r.Max.X < s.Max.X {
+ r.Max.X = s.Max.X
+ }
+ if r.Max.Y < s.Max.Y {
+ r.Max.Y = s.Max.Y
+ }
+ return r
+}
diff --git a/vendor/gioui.org/gpu/compute.go b/vendor/gioui.org/gpu/compute.go
new file mode 100644
index 0000000..ce06add
--- /dev/null
+++ b/vendor/gioui.org/gpu/compute.go
@@ -0,0 +1,2215 @@
+// SPDX-License-Identifier: Unlicense OR MIT
+
+package gpu
+
+import (
+ "bytes"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "hash/maphash"
+ "image"
+ "image/color"
+ "image/png"
+ "io/ioutil"
+ "math"
+ "math/bits"
+ "runtime"
+ "sort"
+ "time"
+ "unsafe"
+
+ "gioui.org/cpu"
+ "gioui.org/f32"
+ "gioui.org/gpu/internal/driver"
+ "gioui.org/internal/byteslice"
+ "gioui.org/internal/f32color"
+ "gioui.org/internal/ops"
+ "gioui.org/internal/scene"
+ "gioui.org/layout"
+ "gioui.org/op"
+ "gioui.org/shader"
+ "gioui.org/shader/gio"
+ "gioui.org/shader/piet"
+)
+
+type compute struct {
+ ctx driver.Device
+
+ collector collector
+ enc encoder
+ texOps []textureOp
+ viewport image.Point
+ maxTextureDim int
+ srgb bool
+ atlases []*textureAtlas
+ frameCount uint
+ moves []atlasMove
+
+ programs struct {
+ elements computeProgram
+ tileAlloc computeProgram
+ pathCoarse computeProgram
+ backdrop computeProgram
+ binning computeProgram
+ coarse computeProgram
+ kernel4 computeProgram
+ }
+ buffers struct {
+ config sizedBuffer
+ scene sizedBuffer
+ state sizedBuffer
+ memory sizedBuffer
+ }
+ output struct {
+ blitPipeline driver.Pipeline
+
+ buffer sizedBuffer
+
+ uniforms *copyUniforms
+ uniBuf driver.Buffer
+
+ layerVertices []layerVertex
+ descriptors *piet.Kernel4DescriptorSetLayout
+
+ nullMaterials driver.Texture
+ }
+ // imgAllocs maps imageOpData.handles to allocs.
+ imgAllocs map[interface{}]*atlasAlloc
+ // materials contains the pre-processed materials (transformed images for
+ // now, gradients etc. later) packed in a texture atlas. The atlas is used
+ // as source in kernel4.
+ materials struct {
+ // allocs maps texture ops the their atlases and FillImage offsets.
+ allocs map[textureKey]materialAlloc
+
+ pipeline driver.Pipeline
+ buffer sizedBuffer
+ quads []materialVertex
+ uniforms struct {
+ u *materialUniforms
+ buf driver.Buffer
+ }
+ }
+ timers struct {
+ profile string
+ t *timers
+ compact *timer
+ render *timer
+ blit *timer
+ }
+
+ // CPU fallback fields.
+ useCPU bool
+ dispatcher *dispatcher
+
+ // The following fields hold scratch space to avoid garbage.
+ zeroSlice []byte
+ memHeader *memoryHeader
+ conf *config
+}
+
+type materialAlloc struct {
+ alloc *atlasAlloc
+ offset image.Point
+}
+
+type layer struct {
+ rect image.Rectangle
+ alloc *atlasAlloc
+ ops []paintOp
+ materials *textureAtlas
+}
+
+type allocQuery struct {
+ atlas *textureAtlas
+ size image.Point
+ empty bool
+ format driver.TextureFormat
+ bindings driver.BufferBinding
+ nocompact bool
+}
+
+type atlasAlloc struct {
+ atlas *textureAtlas
+ rect image.Rectangle
+ cpu bool
+ dead bool
+ frameCount uint
+}
+
+type atlasMove struct {
+ src *textureAtlas
+ dstPos image.Point
+ srcRect image.Rectangle
+ cpu bool
+}
+
+type textureAtlas struct {
+ image driver.Texture
+ format driver.TextureFormat
+ bindings driver.BufferBinding
+ hasCPU bool
+ cpuImage cpu.ImageDescriptor
+ size image.Point
+ allocs []*atlasAlloc
+ packer packer
+ realized bool
+ lastFrame uint
+ compact bool
+}
+
+type copyUniforms struct {
+ scale [2]float32
+ pos [2]float32
+ uvScale [2]float32
+ _ [8]byte // Pad to 16 bytes.
+}
+
+type materialUniforms struct {
+ scale [2]float32
+ pos [2]float32
+ emulatesRGB float32
+ _ [12]byte // Pad to 16 bytes
+}
+
+type collector struct {
+ hasher maphash.Hash
+ profile bool
+ reader ops.Reader
+ states []f32.Affine2D
+ clear bool
+ clearColor f32color.RGBA
+ clipStates []clipState
+ order []hashIndex
+ transStack []transEntry
+ prevFrame opsCollector
+ frame opsCollector
+}
+
+type transEntry struct {
+ t f32.Affine2D
+ relTrans f32.Affine2D
+}
+
+type hashIndex struct {
+ index int
+ hash uint64
+}
+
+type opsCollector struct {
+ paths []byte
+ clipCmds []clipCmd
+ ops []paintOp
+ layers []layer
+}
+
+type paintOp struct {
+ clipStack []clipCmd
+ offset image.Point
+ state paintKey
+ intersect f32.Rectangle
+ hash uint64
+ layer int
+ texOpIdx int
+}
+
+// clipCmd describes a clipping command ready to be used for the compute
+// pipeline.
+type clipCmd struct {
+ // union of the bounds of the operations that are clipped.
+ union f32.Rectangle
+ state clipKey
+ path []byte
+ pathKey ops.Key
+ absBounds f32.Rectangle
+}
+
+type encoderState struct {
+ relTrans f32.Affine2D
+ clip *clipState
+
+ paintKey
+}
+
+// clipKey completely describes a clip operation (along with its path) and is appropriate
+// for hashing and equality checks.
+type clipKey struct {
+ bounds f32.Rectangle
+ strokeWidth float32
+ relTrans f32.Affine2D
+ pathHash uint64
+}
+
+// paintKey completely defines a paint operation. It is suitable for hashing and
+// equality checks.
+type paintKey struct {
+ t f32.Affine2D
+ matType materialType
+ // Current paint.ImageOp
+ image imageOpData
+ // Current paint.ColorOp, if any.
+ color color.NRGBA
+
+ // Current paint.LinearGradientOp.
+ stop1 f32.Point
+ stop2 f32.Point
+ color1 color.NRGBA
+ color2 color.NRGBA
+}
+
+type clipState struct {
+ absBounds f32.Rectangle
+ parent *clipState
+ path []byte
+ pathKey ops.Key
+ intersect f32.Rectangle
+ push bool
+
+ clipKey
+}
+
+type layerVertex struct {
+ posX, posY float32
+ u, v float32
+}
+
+// materialVertex describes a vertex of a quad used to render a transformed
+// material.
+type materialVertex struct {
+ posX, posY float32
+ u, v float32
+}
+
+// textureKey identifies textureOp.
+type textureKey struct {
+ handle interface{}
+ transform f32.Affine2D
+ bounds image.Rectangle
+}
+
+// textureOp represents an paintOp that requires texture space.
+type textureOp struct {
+ img imageOpData
+ key textureKey
+ // offset is the integer offset separated from key.transform to increase cache hit rate.
+ off image.Point
+ // matAlloc is the atlas placement for material.
+ matAlloc materialAlloc
+ // imgAlloc is the atlas placement for the source image
+ imgAlloc *atlasAlloc
+}
+
+type encoder struct {
+ scene []scene.Command
+ npath int
+ npathseg int
+ ntrans int
+}
+
+type encodeState struct {
+ trans f32.Affine2D
+ clip f32.Rectangle
+}
+
+// sizedBuffer holds a GPU buffer, or its equivalent CPU memory.
+type sizedBuffer struct {
+ size int
+ buffer driver.Buffer
+ // cpuBuf is initialized when useCPU is true.
+ cpuBuf cpu.BufferDescriptor
+}
+
+// computeProgram holds a compute program, or its equivalent CPU implementation.
+type computeProgram struct {
+ prog driver.Program
+
+ // CPU fields.
+ progInfo *cpu.ProgramInfo
+ descriptors unsafe.Pointer
+ buffers []*cpu.BufferDescriptor
+}
+
+// config matches Config in setup.h
+type config struct {
+ n_elements uint32 // paths
+ n_pathseg uint32
+ width_in_tiles uint32
+ height_in_tiles uint32
+ tile_alloc memAlloc
+ bin_alloc memAlloc
+ ptcl_alloc memAlloc
+ pathseg_alloc memAlloc
+ anno_alloc memAlloc
+ trans_alloc memAlloc
+}
+
+// memAlloc matches Alloc in mem.h
+type memAlloc struct {
+ offset uint32
+ //size uint32
+}
+
+// memoryHeader matches the header of Memory in mem.h.
+type memoryHeader struct {
+ mem_offset uint32
+ mem_error uint32
+}
+
+// rect is a oriented rectangle.
+type rectangle [4]f32.Point
+
+const (
+ layersBindings = driver.BufferBindingShaderStorageWrite | driver.BufferBindingTexture
+ materialsBindings = driver.BufferBindingFramebuffer | driver.BufferBindingShaderStorageRead
+ // Materials and layers can share texture storage if their bindings match.
+ combinedBindings = layersBindings | materialsBindings
+)
+
+// GPU structure sizes and constants.
+const (
+ tileWidthPx = 32
+ tileHeightPx = 32
+ ptclInitialAlloc = 1024
+ kernel4OutputUnit = 2
+ kernel4AtlasUnit = 3
+
+ pathSize = 12
+ binSize = 8
+ pathsegSize = 52
+ annoSize = 32
+ transSize = 24
+ stateSize = 60
+ stateStride = 4 + 2*stateSize
+)
+
+// mem.h constants.
+const (
+ memNoError = 0 // NO_ERROR
+ memMallocFailed = 1 // ERR_MALLOC_FAILED
+)
+
+func newCompute(ctx driver.Device) (*compute, error) {
+ caps := ctx.Caps()
+ maxDim := caps.MaxTextureSize
+ // Large atlas textures cause artifacts due to precision loss in
+ // shaders.
+ if cap := 8192; maxDim > cap {
+ maxDim = cap
+ }
+ // The compute programs can only span 128x64 tiles. Limit to 64 for now, and leave the
+ // complexity of a rectangular limit for later.
+ if computeCap := 4096; maxDim > computeCap {
+ maxDim = computeCap
+ }
+ g := &compute{
+ ctx: ctx,
+ maxTextureDim: maxDim,
+ srgb: caps.Features.Has(driver.FeatureSRGB),
+ conf: new(config),
+ memHeader: new(memoryHeader),
+ }
+ null, err := ctx.NewTexture(driver.TextureFormatRGBA8, 1, 1, driver.FilterNearest, driver.FilterNearest, driver.BufferBindingShaderStorageRead)
+ if err != nil {
+ g.Release()
+ return nil, err
+ }
+ g.output.nullMaterials = null
+ shaders := []struct {
+ prog *computeProgram
+ src shader.Sources
+ info *cpu.ProgramInfo
+ }{
+ {&g.programs.elements, piet.Shader_elements_comp, piet.ElementsProgramInfo},
+ {&g.programs.tileAlloc, piet.Shader_tile_alloc_comp, piet.Tile_allocProgramInfo},
+ {&g.programs.pathCoarse, piet.Shader_path_coarse_comp, piet.Path_coarseProgramInfo},
+ {&g.programs.backdrop, piet.Shader_backdrop_comp, piet.BackdropProgramInfo},
+ {&g.programs.binning, piet.Shader_binning_comp, piet.BinningProgramInfo},
+ {&g.programs.coarse, piet.Shader_coarse_comp, piet.CoarseProgramInfo},
+ {&g.programs.kernel4, piet.Shader_kernel4_comp, piet.Kernel4ProgramInfo},
+ }
+ if !caps.Features.Has(driver.FeatureCompute) {
+ if !cpu.Supported {
+ return nil, errors.New("gpu: missing support for compute programs")
+ }
+ g.useCPU = true
+ }
+ if g.useCPU {
+ g.dispatcher = newDispatcher(runtime.NumCPU())
+ }
+
+ copyVert, copyFrag, err := newShaders(ctx, gio.Shader_copy_vert, gio.Shader_copy_frag)
+ if err != nil {
+ g.Release()
+ return nil, err
+ }
+ defer copyVert.Release()
+ defer copyFrag.Release()
+ pipe, err := ctx.NewPipeline(driver.PipelineDesc{
+ VertexShader: copyVert,
+ FragmentShader: copyFrag,
+ VertexLayout: driver.VertexLayout{
+ Inputs: []driver.InputDesc{
+ {Type: shader.DataTypeFloat, Size: 2, Offset: 0},
+ {Type: shader.DataTypeFloat, Size: 2, Offset: 4 * 2},
+ },
+ Stride: int(unsafe.Sizeof(g.output.layerVertices[0])),
+ },
+ PixelFormat: driver.TextureFormatOutput,
+ BlendDesc: driver.BlendDesc{
+ Enable: true,
+ SrcFactor: driver.BlendFactorOne,
+ DstFactor: driver.BlendFactorOneMinusSrcAlpha,
+ },
+ Topology: driver.TopologyTriangles,
+ })
+ if err != nil {
+ g.Release()
+ return nil, err
+ }
+ g.output.blitPipeline = pipe
+ g.output.uniforms = new(copyUniforms)
+
+ buf, err := ctx.NewBuffer(driver.BufferBindingUniforms, int(unsafe.Sizeof(*g.output.uniforms)))
+ if err != nil {
+ g.Release()
+ return nil, err
+ }
+ g.output.uniBuf = buf
+
+ materialVert, materialFrag, err := newShaders(ctx, gio.Shader_material_vert, gio.Shader_material_frag)
+ if err != nil {
+ g.Release()
+ return nil, err
+ }
+ defer materialVert.Release()
+ defer materialFrag.Release()
+ pipe, err = ctx.NewPipeline(driver.PipelineDesc{
+ VertexShader: materialVert,
+ FragmentShader: materialFrag,
+ VertexLayout: driver.VertexLayout{
+ Inputs: []driver.InputDesc{
+ {Type: shader.DataTypeFloat, Size: 2, Offset: 0},
+ {Type: shader.DataTypeFloat, Size: 2, Offset: 4 * 2},
+ },
+ Stride: int(unsafe.Sizeof(g.materials.quads[0])),
+ },
+ PixelFormat: driver.TextureFormatRGBA8,
+ Topology: driver.TopologyTriangles,
+ })
+ if err != nil {
+ g.Release()
+ return nil, err
+ }
+ g.materials.pipeline = pipe
+ g.materials.uniforms.u = new(materialUniforms)
+
+ buf, err = ctx.NewBuffer(driver.BufferBindingUniforms, int(unsafe.Sizeof(*g.materials.uniforms.u)))
+ if err != nil {
+ g.Release()
+ return nil, err
+ }
+ g.materials.uniforms.buf = buf
+
+ for _, shader := range shaders {
+ if !g.useCPU {
+ p, err := ctx.NewComputeProgram(shader.src)
+ if err != nil {
+ g.Release()
+ return nil, err
+ }
+ shader.prog.prog = p
+ } else {
+ shader.prog.progInfo = shader.info
+ }
+ }
+ if g.useCPU {
+ {
+ desc := new(piet.ElementsDescriptorSetLayout)
+ g.programs.elements.descriptors = unsafe.Pointer(desc)
+ g.programs.elements.buffers = []*cpu.BufferDescriptor{desc.Binding0(), desc.Binding1(), desc.Binding2(), desc.Binding3()}
+ }
+ {
+ desc := new(piet.Tile_allocDescriptorSetLayout)
+ g.programs.tileAlloc.descriptors = unsafe.Pointer(desc)
+ g.programs.tileAlloc.buffers = []*cpu.BufferDescriptor{desc.Binding0(), desc.Binding1()}
+ }
+ {
+ desc := new(piet.Path_coarseDescriptorSetLayout)
+ g.programs.pathCoarse.descriptors = unsafe.Pointer(desc)
+ g.programs.pathCoarse.buffers = []*cpu.BufferDescriptor{desc.Binding0(), desc.Binding1()}
+ }
+ {
+ desc := new(piet.BackdropDescriptorSetLayout)
+ g.programs.backdrop.descriptors = unsafe.Pointer(desc)
+ g.programs.backdrop.buffers = []*cpu.BufferDescriptor{desc.Binding0(), desc.Binding1()}
+ }
+ {
+ desc := new(piet.BinningDescriptorSetLayout)
+ g.programs.binning.descriptors = unsafe.Pointer(desc)
+ g.programs.binning.buffers = []*cpu.BufferDescriptor{desc.Binding0(), desc.Binding1()}
+ }
+ {
+ desc := new(piet.CoarseDescriptorSetLayout)
+ g.programs.coarse.descriptors = unsafe.Pointer(desc)
+ g.programs.coarse.buffers = []*cpu.BufferDescriptor{desc.Binding0(), desc.Binding1()}
+ }
+ {
+ desc := new(piet.Kernel4DescriptorSetLayout)
+ g.programs.kernel4.descriptors = unsafe.Pointer(desc)
+ g.programs.kernel4.buffers = []*cpu.BufferDescriptor{desc.Binding0(), desc.Binding1()}
+ g.output.descriptors = desc
+ }
+ }
+ return g, nil
+}
+
+func newShaders(ctx driver.Device, vsrc, fsrc shader.Sources) (vert driver.VertexShader, frag driver.FragmentShader, err error) {
+ vert, err = ctx.NewVertexShader(vsrc)
+ if err != nil {
+ return
+ }
+ frag, err = ctx.NewFragmentShader(fsrc)
+ if err != nil {
+ vert.Release()
+ }
+ return
+}
+
+func (g *compute) Frame(frameOps *op.Ops, target RenderTarget, viewport image.Point) error {
+ g.frameCount++
+ g.collect(viewport, frameOps)
+ return g.frame(target)
+}
+
+func (g *compute) collect(viewport image.Point, ops *op.Ops) {
+ g.viewport = viewport
+ g.collector.reset()
+
+ g.texOps = g.texOps[:0]
+ g.collector.collect(ops, viewport, &g.texOps)
+}
+
+func (g *compute) Clear(col color.NRGBA) {
+ g.collector.clear = true
+ g.collector.clearColor = f32color.LinearFromSRGB(col)
+}
+
+func (g *compute) frame(target RenderTarget) error {
+ viewport := g.viewport
+ defFBO := g.ctx.BeginFrame(target, g.collector.clear, viewport)
+ defer g.ctx.EndFrame()
+
+ t := &g.timers
+ if g.collector.profile && t.t == nil && g.ctx.Caps().Features.Has(driver.FeatureTimers) {
+ t.t = newTimers(g.ctx)
+ t.compact = t.t.newTimer()
+ t.render = t.t.newTimer()
+ t.blit = t.t.newTimer()
+ }
+
+ if err := g.uploadImages(); err != nil {
+ return err
+ }
+ if err := g.renderMaterials(); err != nil {
+ return err
+ }
+ g.layer(viewport, g.texOps)
+ t.render.begin()
+ if err := g.renderLayers(viewport); err != nil {
+ return err
+ }
+ t.render.end()
+ d := driver.LoadDesc{
+ ClearColor: g.collector.clearColor,
+ }
+ if g.collector.clear {
+ g.collector.clear = false
+ d.Action = driver.LoadActionClear
+ }
+ t.blit.begin()
+ g.blitLayers(d, defFBO, viewport)
+ t.blit.end()
+ t.compact.begin()
+ if err := g.compactAllocs(); err != nil {
+ return err
+ }
+ t.compact.end()
+ if g.collector.profile && t.t.ready() {
+ com, ren, blit := t.compact.Elapsed, t.render.Elapsed, t.blit.Elapsed
+ ft := com + ren + blit
+ q := 100 * time.Microsecond
+ ft = ft.Round(q)
+ com, ren, blit = com.Round(q), ren.Round(q), blit.Round(q)
+ t.profile = fmt.Sprintf("ft:%7s com: %7s ren:%7s blit:%7s", ft, com, ren, blit)
+ }
+ return nil
+}
+
+func (g *compute) dumpAtlases() {
+ for i, a := range g.atlases {
+ dump, err := driver.DownloadImage(g.ctx, a.image, image.Rectangle{Max: a.size})
+ if err != nil {
+ panic(err)
+ }
+ nrgba := image.NewNRGBA(dump.Bounds())
+ bnd := dump.Bounds()
+ for x := bnd.Min.X; x < bnd.Max.X; x++ {
+ for y := bnd.Min.Y; y < bnd.Max.Y; y++ {
+ nrgba.SetNRGBA(x, y, f32color.RGBAToNRGBA(dump.RGBAAt(x, y)))
+ }
+ }
+ var buf bytes.Buffer
+ if err := png.Encode(&buf, nrgba); err != nil {
+ panic(err)
+ }
+ if err := ioutil.WriteFile(fmt.Sprintf("dump-%d.png", i), buf.Bytes(), 0600); err != nil {
+ panic(err)
+ }
+ }
+}
+
+func (g *compute) Profile() string {
+ return g.timers.profile
+}
+
+func (g *compute) compactAllocs() error {
+ const (
+ maxAllocAge = 3
+ maxAtlasAge = 10
+ )
+ atlases := g.atlases
+ for _, a := range atlases {
+ if len(a.allocs) > 0 && g.frameCount-a.lastFrame > maxAtlasAge {
+ a.compact = true
+ }
+ }
+ for len(atlases) > 0 {
+ var (
+ dstAtlas *textureAtlas
+ format driver.TextureFormat
+ bindings driver.BufferBinding
+ )
+ g.moves = g.moves[:0]
+ addedLayers := false
+ useCPU := false
+ fill:
+ for len(atlases) > 0 {
+ srcAtlas := atlases[0]
+ allocs := srcAtlas.allocs
+ if !srcAtlas.compact {
+ atlases = atlases[1:]
+ continue
+ }
+ if addedLayers && (format != srcAtlas.format || srcAtlas.bindings&bindings != srcAtlas.bindings) {
+ break
+ }
+ format = srcAtlas.format
+ bindings = srcAtlas.bindings
+ for len(srcAtlas.allocs) > 0 {
+ a := srcAtlas.allocs[0]
+ n := len(srcAtlas.allocs)
+ if g.frameCount-a.frameCount > maxAllocAge {
+ a.dead = true
+ srcAtlas.allocs[0] = srcAtlas.allocs[n-1]
+ srcAtlas.allocs = srcAtlas.allocs[:n-1]
+ continue
+ }
+ size := a.rect.Size()
+ alloc, fits := g.atlasAlloc(allocQuery{
+ atlas: dstAtlas,
+ size: size,
+ format: format,
+ bindings: bindings,
+ nocompact: true,
+ })
+ if !fits {
+ break fill
+ }
+ dstAtlas = alloc.atlas
+ allocs = append(allocs, a)
+ addedLayers = true
+ useCPU = useCPU || a.cpu
+ dstAtlas.allocs = append(dstAtlas.allocs, a)
+ pos := alloc.rect.Min
+ g.moves = append(g.moves, atlasMove{
+ src: srcAtlas, dstPos: pos, srcRect: a.rect, cpu: a.cpu,
+ })
+ a.atlas = dstAtlas
+ a.rect = image.Rectangle{Min: pos, Max: pos.Add(a.rect.Size())}
+ srcAtlas.allocs[0] = srcAtlas.allocs[n-1]
+ srcAtlas.allocs = srcAtlas.allocs[:n-1]
+ }
+ srcAtlas.compact = false
+ srcAtlas.realized = false
+ srcAtlas.packer.clear()
+ srcAtlas.packer.newPage()
+ srcAtlas.packer.maxDims = image.Pt(g.maxTextureDim, g.maxTextureDim)
+ atlases = atlases[1:]
+ }
+ if !addedLayers {
+ break
+ }
+ outputSize := dstAtlas.packer.sizes[0]
+ if err := g.realizeAtlas(dstAtlas, useCPU, outputSize); err != nil {
+ return err
+ }
+ for _, move := range g.moves {
+ if !move.cpu {
+ g.ctx.CopyTexture(dstAtlas.image, move.dstPos, move.src.image, move.srcRect)
+ } else {
+ src := move.src.cpuImage.Data()
+ dst := dstAtlas.cpuImage.Data()
+ sstride := move.src.size.X * 4
+ dstride := dstAtlas.size.X * 4
+ copyImage(dst, dstride, move.dstPos, src, sstride, move.srcRect)
+ }
+ }
+ }
+ for i := len(g.atlases) - 1; i >= 0; i-- {
+ a := g.atlases[i]
+ if len(a.allocs) == 0 && g.frameCount-a.lastFrame > maxAtlasAge {
+ a.Release()
+ n := len(g.atlases)
+ g.atlases[i] = g.atlases[n-1]
+ g.atlases = g.atlases[:n-1]
+ }
+ }
+ return nil
+}
+
+func copyImage(dst []byte, dstStride int, dstPos image.Point, src []byte, srcStride int, srcRect image.Rectangle) {
+ sz := srcRect.Size()
+ soff := srcRect.Min.Y*srcStride + srcRect.Min.X*4
+ doff := dstPos.Y*dstStride + dstPos.X*4
+ rowLen := sz.X * 4
+ for y := 0; y < sz.Y; y++ {
+ srow := src[soff : soff+rowLen]
+ drow := dst[doff : doff+rowLen]
+ copy(drow, srow)
+ soff += srcStride
+ doff += dstStride
+ }
+}
+
+func (g *compute) renderLayers(viewport image.Point) error {
+ layers := g.collector.frame.layers
+ for len(layers) > 0 {
+ var materials, dst *textureAtlas
+ addedLayers := false
+ g.enc.reset()
+ for len(layers) > 0 {
+ l := &layers[0]
+ if l.alloc != nil {
+ layers = layers[1:]
+ continue
+ }
+ if materials != nil {
+ if l.materials != nil && materials != l.materials {
+ // Only one materials texture per compute pass.
+ break
+ }
+ } else {
+ materials = l.materials
+ }
+ size := l.rect.Size()
+ alloc, fits := g.atlasAlloc(allocQuery{
+ atlas: dst,
+ empty: true,
+ format: driver.TextureFormatRGBA8,
+ bindings: combinedBindings,
+ // Pad to avoid overlap.
+ size: size.Add(image.Pt(1, 1)),
+ })
+ if !fits {
+ // Only one output atlas per compute pass.
+ break
+ }
+ dst = alloc.atlas
+ dst.compact = true
+ addedLayers = true
+ l.alloc = &alloc
+ dst.allocs = append(dst.allocs, l.alloc)
+ encodeLayer(*l, alloc.rect.Min, viewport, &g.enc, g.texOps)
+ layers = layers[1:]
+ }
+ if !addedLayers {
+ break
+ }
+ outputSize := dst.packer.sizes[0]
+ tileDims := image.Point{
+ X: (outputSize.X + tileWidthPx - 1) / tileWidthPx,
+ Y: (outputSize.Y + tileHeightPx - 1) / tileHeightPx,
+ }
+ w, h := tileDims.X*tileWidthPx, tileDims.Y*tileHeightPx
+ if err := g.realizeAtlas(dst, g.useCPU, image.Pt(w, h)); err != nil {
+ return err
+ }
+ if err := g.render(materials, dst.image, dst.cpuImage, tileDims, dst.size.X*4); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (g *compute) blitLayers(d driver.LoadDesc, fbo driver.Texture, viewport image.Point) {
+ layers := g.collector.frame.layers
+ g.output.layerVertices = g.output.layerVertices[:0]
+ for _, l := range layers {
+ placef := layout.FPt(l.alloc.rect.Min)
+ sizef := layout.FPt(l.rect.Size())
+ r := layout.FRect(l.rect)
+ quad := [4]layerVertex{
+ {posX: float32(r.Min.X), posY: float32(r.Min.Y), u: placef.X, v: placef.Y},
+ {posX: float32(r.Max.X), posY: float32(r.Min.Y), u: placef.X + sizef.X, v: placef.Y},
+ {posX: float32(r.Max.X), posY: float32(r.Max.Y), u: placef.X + sizef.X, v: placef.Y + sizef.Y},
+ {posX: float32(r.Min.X), posY: float32(r.Max.Y), u: placef.X, v: placef.Y + sizef.Y},
+ }
+ g.output.layerVertices = append(g.output.layerVertices, quad[0], quad[1], quad[3], quad[3], quad[2], quad[1])
+ g.ctx.PrepareTexture(l.alloc.atlas.image)
+ }
+ if len(g.output.layerVertices) > 0 {
+ vertexData := byteslice.Slice(g.output.layerVertices)
+ g.output.buffer.ensureCapacity(false, g.ctx, driver.BufferBindingVertices, len(vertexData))
+ g.output.buffer.buffer.Upload(vertexData)
+ }
+ g.ctx.BeginRenderPass(fbo, d)
+ defer g.ctx.EndRenderPass()
+ if len(layers) == 0 {
+ return
+ }
+ g.ctx.Viewport(0, 0, viewport.X, viewport.Y)
+ g.ctx.BindPipeline(g.output.blitPipeline)
+ g.ctx.BindVertexBuffer(g.output.buffer.buffer, 0)
+ start := 0
+ for len(layers) > 0 {
+ count := 0
+ atlas := layers[0].alloc.atlas
+ for len(layers) > 0 {
+ l := layers[0]
+ if l.alloc.atlas != atlas {
+ break
+ }
+ layers = layers[1:]
+ const verticesPerQuad = 6
+ count += verticesPerQuad
+ }
+
+ // Transform positions to clip space: [-1, -1] - [1, 1], and texture
+ // coordinates to texture space: [0, 0] - [1, 1].
+ clip := f32.Affine2D{}.Scale(f32.Pt(0, 0), f32.Pt(2/float32(viewport.X), 2/float32(viewport.Y))).Offset(f32.Pt(-1, -1))
+ sx, _, ox, _, sy, oy := clip.Elems()
+ g.output.uniforms.scale = [2]float32{sx, sy}
+ g.output.uniforms.pos = [2]float32{ox, oy}
+ g.output.uniforms.uvScale = [2]float32{1 / float32(atlas.size.X), 1 / float32(atlas.size.Y)}
+ g.output.uniBuf.Upload(byteslice.Struct(g.output.uniforms))
+ g.ctx.BindUniforms(g.output.uniBuf)
+ g.ctx.BindTexture(0, atlas.image)
+ g.ctx.DrawArrays(start, count)
+ start += count
+ }
+}
+
+// renderMaterials renders clipped, transformed source images ("materials")
+// into material atlas textures, one render pass per atlas, reusing cached
+// materials whose atlas allocations are still alive.
+func (g *compute) renderMaterials() error {
+ m := &g.materials
+ // Evict cached materials whose backing atlas allocation has died.
+ for k, place := range m.allocs {
+ if place.alloc.dead {
+ delete(m.allocs, k)
+ }
+ }
+ texOps := g.texOps
+ for len(texOps) > 0 {
+ m.quads = m.quads[:0]
+ var (
+ atlas *textureAtlas
+ imgAtlas *textureAtlas
+ )
+ // A material is clipped to avoid drawing outside its atlas bounds.
+ // However, imprecision in the clipping may cause a single pixel
+ // overflow.
+ var padding = image.Pt(1, 1)
+ var allocStart int
+ for len(texOps) > 0 {
+ op := &texOps[0]
+ if a, exists := m.allocs[op.key]; exists {
+ // Cache hit: refresh the allocation and move on.
+ g.touchAlloc(a.alloc)
+ op.matAlloc = a
+ texOps = texOps[1:]
+ continue
+ }
+
+ if imgAtlas != nil && op.imgAlloc.atlas != imgAtlas {
+ // Only one image atlas per render pass.
+ break
+ }
+ imgAtlas = op.imgAlloc.atlas
+ quad := g.materialQuad(imgAtlas.size, op.key.transform, op.img, op.imgAlloc.rect.Min)
+ boundsf := quadBounds(quad)
+ bounds := boundRectF(boundsf)
+ bounds = bounds.Intersect(op.key.bounds)
+
+ size := bounds.Size()
+ alloc, fits := g.atlasAlloc(allocQuery{
+ atlas: atlas,
+ size: size.Add(padding),
+ format: driver.TextureFormatRGBA8,
+ bindings: combinedBindings,
+ })
+ if !fits {
+ // Atlas full: flush what we have and start a new pass.
+ break
+ }
+ if atlas == nil {
+ allocStart = len(alloc.atlas.allocs)
+ }
+ atlas = alloc.atlas
+ alloc.cpu = g.useCPU
+ offsetf := layout.FPt(bounds.Min.Mul(-1))
+ scale := f32.Pt(float32(size.X), float32(size.Y))
+ for i := range quad {
+ // Position quad to match place.
+ quad[i].posX += offsetf.X
+ quad[i].posY += offsetf.Y
+ // Scale to match viewport [0, 1].
+ quad[i].posX /= scale.X
+ quad[i].posY /= scale.Y
+ }
+ // Draw quad as two triangles.
+ m.quads = append(m.quads, quad[0], quad[1], quad[3], quad[3], quad[1], quad[2])
+ if m.allocs == nil {
+ m.allocs = make(map[textureKey]materialAlloc)
+ }
+ atlasAlloc := materialAlloc{
+ alloc: &alloc,
+ offset: bounds.Min.Mul(-1),
+ }
+ atlas.allocs = append(atlas.allocs, atlasAlloc.alloc)
+ m.allocs[op.key] = atlasAlloc
+ op.matAlloc = atlasAlloc
+ texOps = texOps[1:]
+ }
+ if len(m.quads) == 0 {
+ // All remaining ops were cache hits; nothing to draw.
+ break
+ }
+ realized := atlas.realized
+ if err := g.realizeAtlas(atlas, g.useCPU, atlas.packer.sizes[0]); err != nil {
+ return err
+ }
+ // Transform to clip space: [-1, -1] - [1, 1].
+ *m.uniforms.u = materialUniforms{
+ scale: [2]float32{2, 2},
+ pos: [2]float32{-1, -1},
+ }
+ if !g.srgb {
+ m.uniforms.u.emulatesRGB = 1.0
+ }
+ m.uniforms.buf.Upload(byteslice.Struct(m.uniforms.u))
+ vertexData := byteslice.Slice(m.quads)
+ n := pow2Ceil(len(vertexData))
+ m.buffer.ensureCapacity(false, g.ctx, driver.BufferBindingVertices, n)
+ m.buffer.buffer.Upload(vertexData)
+ var d driver.LoadDesc
+ if !realized {
+ // First use of this atlas texture: clear it.
+ d.Action = driver.LoadActionClear
+ }
+ g.ctx.PrepareTexture(imgAtlas.image)
+ g.ctx.BeginRenderPass(atlas.image, d)
+ g.ctx.BindTexture(0, imgAtlas.image)
+ g.ctx.BindPipeline(m.pipeline)
+ g.ctx.BindUniforms(m.uniforms.buf)
+ g.ctx.BindVertexBuffer(m.buffer.buffer, 0)
+ newAllocs := atlas.allocs[allocStart:]
+ for i, a := range newAllocs {
+ sz := a.rect.Size().Sub(padding)
+ g.ctx.Viewport(a.rect.Min.X, a.rect.Min.Y, sz.X, sz.Y)
+ g.ctx.DrawArrays(i*6, 6)
+ }
+ g.ctx.EndRenderPass()
+ if !g.useCPU {
+ continue
+ }
+ // CPU fallback: read the freshly rendered materials back into the
+ // atlas' CPU-side image.
+ src := atlas.image
+ data := atlas.cpuImage.Data()
+ for _, a := range newAllocs {
+ stride := atlas.size.X * 4
+ col := a.rect.Min.X * 4
+ row := stride * a.rect.Min.Y
+ off := col + row
+ src.ReadPixels(a.rect, data[off:], stride)
+ }
+ }
+ return nil
+}
+
+// uploadImages ensures every texture op's source image is resident in an
+// image atlas, uploading new images with a one-pixel zero border to avoid
+// filtering bleed between neighbors. Live cached allocations are reused.
+func (g *compute) uploadImages() error {
+ // Evict cached image allocations that have been freed.
+ for k, a := range g.imgAllocs {
+ if a.dead {
+ delete(g.imgAllocs, k)
+ }
+ }
+ type upload struct {
+ pos image.Point
+ img *image.RGBA
+ }
+ var uploads []upload
+ format := driver.TextureFormatSRGBA
+ if !g.srgb {
+ format = driver.TextureFormatRGBA8
+ }
+ // padding is the number of pixels added to the right and below
+ // images, to avoid atlas filtering artifacts.
+ const padding = 1
+ texOps := g.texOps
+ for len(texOps) > 0 {
+ uploads = uploads[:0]
+ var atlas *textureAtlas
+ for len(texOps) > 0 {
+ op := &texOps[0]
+ if a, exists := g.imgAllocs[op.img.handle]; exists {
+ // Cache hit: refresh the allocation and move on.
+ g.touchAlloc(a)
+ op.imgAlloc = a
+ texOps = texOps[1:]
+ continue
+ }
+ size := op.img.src.Bounds().Size().Add(image.Pt(padding, padding))
+ alloc, fits := g.atlasAlloc(allocQuery{
+ atlas: atlas,
+ size: size,
+ format: format,
+ bindings: driver.BufferBindingTexture | driver.BufferBindingFramebuffer,
+ })
+ if !fits {
+ // Atlas full: upload this batch, then continue with a new atlas.
+ break
+ }
+ atlas = alloc.atlas
+ if g.imgAllocs == nil {
+ g.imgAllocs = make(map[interface{}]*atlasAlloc)
+ }
+ op.imgAlloc = &alloc
+ atlas.allocs = append(atlas.allocs, op.imgAlloc)
+ g.imgAllocs[op.img.handle] = op.imgAlloc
+ uploads = append(uploads, upload{pos: alloc.rect.Min, img: op.img.src})
+ texOps = texOps[1:]
+ }
+ if len(uploads) == 0 {
+ break
+ }
+ if err := g.realizeAtlas(atlas, false, atlas.packer.sizes[0]); err != nil {
+ return err
+ }
+ for _, u := range uploads {
+ size := u.img.Bounds().Size()
+ driver.UploadImage(atlas.image, u.pos, u.img)
+ // Zero-fill the right and bottom padding strips.
+ rightPadding := image.Pt(padding, size.Y)
+ atlas.image.Upload(image.Pt(u.pos.X+size.X, u.pos.Y), rightPadding, g.zeros(rightPadding.X*rightPadding.Y*4), 0)
+ bottomPadding := image.Pt(size.X, padding)
+ atlas.image.Upload(image.Pt(u.pos.X, u.pos.Y+size.Y), bottomPadding, g.zeros(bottomPadding.X*bottomPadding.Y*4), 0)
+ }
+ }
+ return nil
+}
+
+// pow2Ceil returns the smallest power of two greater than or equal to v.
+// A v that is already a power of two is returned unchanged.
+func pow2Ceil(v int) int {
+	u := uint(v)
+	n := bits.Len(u)
+	if u != 0 && u&(u-1) == 0 {
+		// Exact power of two: don't round up.
+		n--
+	}
+	return 1 << n
+}
+
+// materialQuad constructs a quad of vertices that represents the image
+// transformed by M (located at uvPos within an atlas of size imgAtlasSize).
+// Vertex positions are in the transformed space; UVs are normalized atlas
+// coordinates.
+func (g *compute) materialQuad(imgAtlasSize image.Point, M f32.Affine2D, img imageOpData, uvPos image.Point) [4]materialVertex {
+ imgSize := layout.FPt(img.src.Bounds().Size())
+ sx, hx, ox, hy, sy, oy := M.Elems()
+ transOff := f32.Pt(ox, oy)
+ // The 4 corners of the image rectangle transformed by M, excluding its offset, are:
+ //
+ // q0: M * (0, 0) q3: M * (w, 0)
+ // q1: M * (0, h) q2: M * (w, h)
+ //
+ // Note that q0 = M*0 = 0, q2 = q1 + q3.
+ q0 := f32.Pt(0, 0)
+ q1 := f32.Pt(hx*imgSize.Y, sy*imgSize.Y)
+ q3 := f32.Pt(sx*imgSize.X, hy*imgSize.X)
+ q2 := q1.Add(q3)
+ q0 = q0.Add(transOff)
+ q1 = q1.Add(transOff)
+ q2 = q2.Add(transOff)
+ q3 = q3.Add(transOff)
+
+ // Normalize the image's atlas rectangle into [0, 1] UV space.
+ uvPosf := layout.FPt(uvPos)
+ atlasScale := f32.Pt(1/float32(imgAtlasSize.X), 1/float32(imgAtlasSize.Y))
+ uvBounds := f32.Rectangle{
+ Min: uvPosf,
+ Max: uvPosf.Add(imgSize),
+ }
+ uvBounds.Min.X *= atlasScale.X
+ uvBounds.Min.Y *= atlasScale.Y
+ uvBounds.Max.X *= atlasScale.X
+ uvBounds.Max.Y *= atlasScale.Y
+ quad := [4]materialVertex{
+ {posX: q0.X, posY: q0.Y, u: uvBounds.Min.X, v: uvBounds.Min.Y},
+ {posX: q1.X, posY: q1.Y, u: uvBounds.Min.X, v: uvBounds.Max.Y},
+ {posX: q2.X, posY: q2.Y, u: uvBounds.Max.X, v: uvBounds.Max.Y},
+ {posX: q3.X, posY: q3.Y, u: uvBounds.Max.X, v: uvBounds.Min.Y},
+ }
+ return quad
+}
+
+// quadBounds returns the axis-aligned bounding rectangle of the quad's
+// four vertex positions.
+func quadBounds(q [4]materialVertex) f32.Rectangle {
+	a := f32.Pt(q[0].posX, q[0].posY)
+	b := f32.Pt(q[1].posX, q[1].posY)
+	c := f32.Pt(q[2].posX, q[2].posY)
+	d := f32.Pt(q[3].posX, q[3].posY)
+	lo := min(min(a, b), min(c, d))
+	hi := max(max(a, b), max(c, d))
+	return f32.Rectangle{Min: lo, Max: hi}
+}
+
+// max returns the component-wise maximum of p1 and p2.
+func max(p1, p2 f32.Point) f32.Point {
+	x, y := p1.X, p1.Y
+	if p2.X > x {
+		x = p2.X
+	}
+	if p2.Y > y {
+		y = p2.Y
+	}
+	return f32.Pt(x, y)
+}
+
+// min returns the component-wise minimum of p1 and p2.
+func min(p1, p2 f32.Point) f32.Point {
+	x, y := p1.X, p1.Y
+	if p2.X < x {
+		x = p2.X
+	}
+	if p2.Y < y {
+		y = p2.Y
+	}
+	return f32.Pt(x, y)
+}
+
+func (enc *encoder) encodePath(verts []byte) {
+ for len(verts) >= scene.CommandSize+4 {
+ cmd := ops.DecodeCommand(verts[4:])
+ enc.scene = append(enc.scene, cmd)
+ enc.npathseg++
+ verts = verts[scene.CommandSize+4:]
+ }
+}
+
+// render runs the compute pipeline over the encoded scene and writes the
+// result into dst (or cpuDst when running on the CPU). It retries with a
+// larger GPU memory buffer whenever the shaders report an allocation
+// failure.
+func (g *compute) render(images *textureAtlas, dst driver.Texture, cpuDst cpu.ImageDescriptor, tileDims image.Point, stride int) error {
+ const (
+ // wgSize is the largest and most common workgroup size.
+ wgSize = 128
+ // PARTITION_SIZE from elements.comp
+ partitionSize = 32 * 4
+ )
+ widthInBins := (tileDims.X + 15) / 16
+ heightInBins := (tileDims.Y + 7) / 8
+ if widthInBins*heightInBins > wgSize {
+ return fmt.Errorf("gpu: output too large (%dx%d)", tileDims.X*tileWidthPx, tileDims.Y*tileHeightPx)
+ }
+
+ enc := &g.enc
+ // Pad scene with zeroes to avoid reading garbage in elements.comp.
+ scenePadding := partitionSize - len(enc.scene)%partitionSize
+ enc.scene = append(enc.scene, make([]scene.Command, scenePadding)...)
+
+ scene := byteslice.Slice(enc.scene)
+ if s := len(scene); s > g.buffers.scene.size {
+ // Grow by 10% to amortize re-allocations.
+ paddedCap := s * 11 / 10
+ if err := g.buffers.scene.ensureCapacity(g.useCPU, g.ctx, driver.BufferBindingShaderStorageRead, paddedCap); err != nil {
+ return err
+ }
+ }
+ g.buffers.scene.upload(scene)
+
+ // alloc is the number of allocated bytes for static buffers.
+ var alloc uint32
+ round := func(v, quantum int) int {
+ return (v + quantum - 1) &^ (quantum - 1)
+ }
+ malloc := func(size int) memAlloc {
+ size = round(size, 4)
+ offset := alloc
+ alloc += uint32(size)
+ return memAlloc{offset /*, uint32(size)*/}
+ }
+
+ // Lay out the static allocations inside the shared memory buffer.
+ *g.conf = config{
+ n_elements: uint32(enc.npath),
+ n_pathseg: uint32(enc.npathseg),
+ width_in_tiles: uint32(tileDims.X),
+ height_in_tiles: uint32(tileDims.Y),
+ tile_alloc: malloc(enc.npath * pathSize),
+ bin_alloc: malloc(round(enc.npath, wgSize) * binSize),
+ ptcl_alloc: malloc(tileDims.X * tileDims.Y * ptclInitialAlloc),
+ pathseg_alloc: malloc(enc.npathseg * pathsegSize),
+ anno_alloc: malloc(enc.npath * annoSize),
+ trans_alloc: malloc(enc.ntrans * transSize),
+ }
+
+ numPartitions := (enc.numElements() + 127) / 128
+ // clearSize is the atomic partition counter plus flag and 2 states per partition.
+ clearSize := 4 + numPartitions*stateStride
+ if clearSize > g.buffers.state.size {
+ paddedCap := clearSize * 11 / 10
+ if err := g.buffers.state.ensureCapacity(g.useCPU, g.ctx, driver.BufferBindingShaderStorageRead|driver.BufferBindingShaderStorageWrite, paddedCap); err != nil {
+ return err
+ }
+ }
+
+ confData := byteslice.Struct(g.conf)
+ g.buffers.config.ensureCapacity(g.useCPU, g.ctx, driver.BufferBindingShaderStorageRead, len(confData))
+ g.buffers.config.upload(confData)
+
+ minSize := int(unsafe.Sizeof(memoryHeader{})) + int(alloc)
+ if minSize > g.buffers.memory.size {
+ // Add space for dynamic GPU allocations.
+ const sizeBump = 4 * 1024 * 1024
+ minSize += sizeBump
+ if err := g.buffers.memory.ensureCapacity(g.useCPU, g.ctx, driver.BufferBindingShaderStorageRead|driver.BufferBindingShaderStorageWrite, minSize); err != nil {
+ return err
+ }
+ }
+
+ // Dispatch loop: retried when the shaders run out of dynamic memory.
+ for {
+ *g.memHeader = memoryHeader{
+ mem_offset: alloc,
+ }
+ g.buffers.memory.upload(byteslice.Struct(g.memHeader))
+ g.buffers.state.upload(g.zeros(clearSize))
+
+ if !g.useCPU {
+ g.ctx.BeginCompute()
+ g.ctx.BindImageTexture(kernel4OutputUnit, dst)
+ img := g.output.nullMaterials
+ if images != nil {
+ img = images.image
+ }
+ g.ctx.BindImageTexture(kernel4AtlasUnit, img)
+ } else {
+ *g.output.descriptors.Binding2() = cpuDst
+ if images != nil {
+ *g.output.descriptors.Binding3() = images.cpuImage
+ }
+ }
+
+ // Run the pipeline stages in order, with barriers between dependent
+ // stages.
+ g.bindBuffers()
+ g.memoryBarrier()
+ g.dispatch(g.programs.elements, numPartitions, 1, 1)
+ g.memoryBarrier()
+ g.dispatch(g.programs.tileAlloc, (enc.npath+wgSize-1)/wgSize, 1, 1)
+ g.memoryBarrier()
+ g.dispatch(g.programs.pathCoarse, (enc.npathseg+31)/32, 1, 1)
+ g.memoryBarrier()
+ g.dispatch(g.programs.backdrop, (enc.npath+wgSize-1)/wgSize, 1, 1)
+ // No barrier needed between backdrop and binning.
+ g.dispatch(g.programs.binning, (enc.npath+wgSize-1)/wgSize, 1, 1)
+ g.memoryBarrier()
+ g.dispatch(g.programs.coarse, widthInBins, heightInBins, 1)
+ g.memoryBarrier()
+ g.dispatch(g.programs.kernel4, tileDims.X, tileDims.Y, 1)
+ g.memoryBarrier()
+ if !g.useCPU {
+ g.ctx.EndCompute()
+ } else {
+ g.dispatcher.Sync()
+ }
+
+ if err := g.buffers.memory.download(byteslice.Struct(g.memHeader)); err != nil {
+ if err == driver.ErrContentLost {
+ continue
+ }
+ return err
+ }
+ switch errCode := g.memHeader.mem_error; errCode {
+ case memNoError:
+ if g.useCPU {
+ w, h := tileDims.X*tileWidthPx, tileDims.Y*tileHeightPx
+ dst.Upload(image.Pt(0, 0), image.Pt(w, h), cpuDst.Data(), stride)
+ }
+ return nil
+ case memMallocFailed:
+ // Resize memory and try again.
+ sz := g.buffers.memory.size * 15 / 10
+ if err := g.buffers.memory.ensureCapacity(g.useCPU, g.ctx, driver.BufferBindingShaderStorageRead|driver.BufferBindingShaderStorageWrite, sz); err != nil {
+ return err
+ }
+ continue
+ default:
+ return fmt.Errorf("compute: shader program failed with error %d", errCode)
+ }
+ }
+}
+
+// memoryBarrier issues a barrier between dependent compute stages.
+// It is a no-op unless the CPU fallback dispatcher is in use.
+func (g *compute) memoryBarrier() {
+	if !g.useCPU {
+		return
+	}
+	g.dispatcher.Barrier()
+}
+
+// dispatch runs compute program p over an x×y×z grid, either on the GPU
+// context or through the CPU dispatcher.
+func (g *compute) dispatch(p computeProgram, x, y, z int) {
+	if g.useCPU {
+		g.dispatcher.Dispatch(p.progInfo, p.descriptors, x, y, z)
+		return
+	}
+	g.ctx.BindProgram(p.prog)
+	g.ctx.DispatchCompute(x, y, z)
+}
+
+// zeros returns a byte slice with size bytes of zeros. The backing storage
+// is reused across calls, growing on demand.
+func (g *compute) zeros(size int) []byte {
+	if shortfall := size - cap(g.zeroSlice); shortfall > 0 {
+		g.zeroSlice = append(g.zeroSlice, make([]byte, size)...)
+	}
+	return g.zeroSlice[:size]
+}
+
+// touchAlloc marks a live allocation and its atlas as used in the current
+// frame. It panics on an allocation that has already been freed.
+func (g *compute) touchAlloc(a *atlasAlloc) {
+	if a.dead {
+		panic("re-use of dead allocation")
+	}
+	a.frameCount = g.frameCount
+	a.atlas.lastFrame = g.frameCount
+}
+
+// atlasAlloc allocates q.size pixels within an atlas: the explicitly
+// requested q.atlas if given, otherwise an existing atlas matching the
+// query's format/bindings, otherwise a freshly created atlas. It reports
+// whether the allocation fit; an atlas that failed to fit is marked for
+// compaction.
+func (g *compute) atlasAlloc(q allocQuery) (atlasAlloc, bool) {
+ var (
+ place placement
+ fits bool
+ atlas = q.atlas
+ )
+ if atlas != nil {
+ place, fits = atlas.packer.tryAdd(q.size)
+ if !fits {
+ atlas.compact = true
+ }
+ }
+ if atlas == nil {
+ // Look for matching atlas to re-use.
+ for _, a := range g.atlases {
+ if q.empty && len(a.allocs) > 0 {
+ continue
+ }
+ if q.nocompact && a.compact {
+ continue
+ }
+ if a.format != q.format || a.bindings&q.bindings != q.bindings {
+ continue
+ }
+ place, fits = a.packer.tryAdd(q.size)
+ if !fits {
+ a.compact = true
+ continue
+ }
+ atlas = a
+ break
+ }
+ }
+ if atlas == nil {
+ // No existing atlas fits; create a new one bounded by the maximum
+ // texture dimension.
+ atlas = &textureAtlas{
+ format: q.format,
+ bindings: q.bindings,
+ }
+ atlas.packer.maxDims = image.Pt(g.maxTextureDim, g.maxTextureDim)
+ atlas.packer.newPage()
+ g.atlases = append(g.atlases, atlas)
+ place, fits = atlas.packer.tryAdd(q.size)
+ if !fits {
+ panic(fmt.Errorf("compute: atlas allocation too large (%v)", q.size))
+ }
+ }
+ if !fits {
+ return atlasAlloc{}, false
+ }
+ atlas.lastFrame = g.frameCount
+ return atlasAlloc{
+ frameCount: g.frameCount,
+ atlas: atlas,
+ rect: image.Rectangle{Min: place.Pos, Max: place.Pos.Add(q.size)},
+ }, true
+}
+
+// realizeAtlas ensures atlas has backing storage of at least size, marking
+// it realized. A realized atlas can no longer grow.
+func (g *compute) realizeAtlas(atlas *textureAtlas, useCPU bool, size image.Point) error {
+	defer func() {
+		// Freeze the atlas dimensions and materialize the CPU image (when
+		// requested) regardless of whether a resize happened.
+		atlas.packer.maxDims = atlas.size
+		atlas.realized = true
+		atlas.ensureCPUImage(useCPU)
+	}()
+	bigEnough := atlas.size.X >= size.X && atlas.size.Y >= size.Y
+	if bigEnough {
+		return nil
+	}
+	if atlas.realized {
+		panic("resizing a realized atlas")
+	}
+	return atlas.resize(g.ctx, size)
+}
+
+// resize releases the atlas' current texture and allocates a new one of the
+// given size. The old contents are not preserved.
+func (a *textureAtlas) resize(ctx driver.Device, size image.Point) error {
+	a.Release()
+	tex, err := ctx.NewTexture(a.format, size.X, size.Y,
+		driver.FilterNearest,
+		driver.FilterNearest,
+		a.bindings)
+	if err != nil {
+		return err
+	}
+	a.image, a.size = tex, size
+	return nil
+}
+
+// ensureCPUImage lazily allocates the atlas' CPU-side image when the CPU
+// fallback is in use.
+func (a *textureAtlas) ensureCPUImage(useCPU bool) {
+	if !useCPU {
+		return
+	}
+	if a.hasCPU {
+		return
+	}
+	a.hasCPU = true
+	a.cpuImage = cpu.NewImageRGBA(a.size.X, a.size.Y)
+}
+
+// Release frees all GPU and CPU resources held by the renderer: the CPU
+// dispatcher, programs, pipelines, buffers, atlases and finally the driver
+// context. g is reset to the zero value afterwards.
+func (g *compute) Release() {
+ if g.useCPU {
+ g.dispatcher.Stop()
+ }
+ type resource interface {
+ Release()
+ }
+ // Nil entries are skipped, so partially initialized renderers can be
+ // released safely.
+ res := []resource{
+ g.output.nullMaterials,
+ &g.programs.elements,
+ &g.programs.tileAlloc,
+ &g.programs.pathCoarse,
+ &g.programs.backdrop,
+ &g.programs.binning,
+ &g.programs.coarse,
+ &g.programs.kernel4,
+ g.output.blitPipeline,
+ &g.output.buffer,
+ g.output.uniBuf,
+ &g.buffers.scene,
+ &g.buffers.state,
+ &g.buffers.memory,
+ &g.buffers.config,
+ g.materials.pipeline,
+ &g.materials.buffer,
+ g.materials.uniforms.buf,
+ g.timers.t,
+ }
+ for _, r := range res {
+ if r != nil {
+ r.Release()
+ }
+ }
+ for _, a := range g.atlases {
+ a.Release()
+ }
+ g.ctx.Release()
+ *g = compute{}
+}
+
+// Release frees the atlas' GPU texture and CPU-side image, if present.
+func (a *textureAtlas) Release() {
+	if tex := a.image; tex != nil {
+		tex.Release()
+		a.image = nil
+	}
+	a.cpuImage.Free()
+	a.hasCPU = false
+}
+
+// bindBuffers binds the storage buffers to every compute program. Only the
+// elements program needs the scene and state buffers in addition to the
+// shared memory and config buffers.
+func (g *compute) bindBuffers() {
+	g.bindStorageBuffers(g.programs.elements, g.buffers.memory, g.buffers.config, g.buffers.scene, g.buffers.state)
+	memConfPrograms := []computeProgram{
+		g.programs.tileAlloc,
+		g.programs.pathCoarse,
+		g.programs.backdrop,
+		g.programs.binning,
+		g.programs.coarse,
+		g.programs.kernel4,
+	}
+	for _, p := range memConfPrograms {
+		g.bindStorageBuffers(p, g.buffers.memory, g.buffers.config)
+	}
+}
+
+// Release frees the compiled program, if any, and resets p to its zero
+// value.
+func (p *computeProgram) Release() {
+	if prog := p.prog; prog != nil {
+		prog.Release()
+	}
+	*p = computeProgram{}
+}
+
+// Release frees both the GPU buffer and the CPU buffer, then resets b to
+// its zero value.
+func (b *sizedBuffer) Release() {
+	if buf := b.buffer; buf != nil {
+		buf.Release()
+	}
+	b.cpuBuf.Free()
+	*b = sizedBuffer{}
+}
+
+// ensureCapacity grows the buffer to hold at least size bytes, allocating a
+// GPU buffer (or a CPU buffer when useCPU is set). Existing contents are not
+// preserved across a grow.
+func (b *sizedBuffer) ensureCapacity(useCPU bool, ctx driver.Device, binding driver.BufferBinding, size int) error {
+ if b.size >= size {
+ return nil
+ }
+ if b.buffer != nil {
+ b.Release()
+ }
+ // Free any CPU buffer not already covered by Release above.
+ b.cpuBuf.Free()
+ if !useCPU {
+ buf, err := ctx.NewBuffer(binding, size)
+ if err != nil {
+ return err
+ }
+ b.buffer = buf
+ } else {
+ b.cpuBuf = cpu.NewBuffer(size)
+ }
+ b.size = size
+ return nil
+}
+
+// download copies the buffer contents into data, reading back from the GPU
+// buffer when present and from the CPU buffer otherwise.
+func (b *sizedBuffer) download(data []byte) error {
+	if b.buffer == nil {
+		copy(data, b.cpuBuf.Data())
+		return nil
+	}
+	return b.buffer.Download(data)
+}
+
+// upload copies data into the buffer, writing to the GPU buffer when
+// present and to the CPU buffer otherwise.
+func (b *sizedBuffer) upload(data []byte) {
+	if b.buffer == nil {
+		copy(b.cpuBuf.Data(), data)
+		return
+	}
+	b.buffer.Upload(data)
+}
+
+// bindStorageBuffers binds buffers to consecutive storage slots of prog,
+// either through the GPU context or by patching the CPU descriptor set.
+func (g *compute) bindStorageBuffers(prog computeProgram, buffers ...sizedBuffer) {
+	for slot, buf := range buffers {
+		if g.useCPU {
+			*prog.buffers[slot] = buf.cpuBuf
+		} else {
+			g.ctx.BindStorageBuffer(slot, buf.buffer)
+		}
+	}
+}
+
+// bo is the byte order used to decode values embedded in encoded op data.
+var bo = binary.LittleEndian
+
+// reset clears the encoder for a new scene, keeping the scene slice's
+// capacity.
+func (e *encoder) reset() {
+ e.scene = e.scene[:0]
+ e.npath = 0
+ e.npathseg = 0
+ e.ntrans = 0
+}
+
+// numElements returns the number of scene commands encoded so far.
+func (e *encoder) numElements() int {
+ return len(e.scene)
+}
+
+// append concatenates e2's scene and counters onto e.
+func (e *encoder) append(e2 encoder) {
+ e.scene = append(e.scene, e2.scene...)
+ e.npath += e2.npath
+ e.npathseg += e2.npathseg
+ e.ntrans += e2.ntrans
+}
+
+// transform appends a transform command and counts it.
+func (e *encoder) transform(m f32.Affine2D) {
+ e.scene = append(e.scene, scene.Transform(m))
+ e.ntrans++
+}
+
+// lineWidth appends a stroke line width command.
+func (e *encoder) lineWidth(width float32) {
+ e.scene = append(e.scene, scene.SetLineWidth(width))
+}
+
+// fillMode appends a fill mode (non-zero/stroke) command.
+func (e *encoder) fillMode(mode scene.FillMode) {
+ e.scene = append(e.scene, scene.SetFillMode(mode))
+}
+
+// beginClip appends a begin-clip command for bbox; each clip counts as a
+// path element.
+func (e *encoder) beginClip(bbox f32.Rectangle) {
+ e.scene = append(e.scene, scene.BeginClip(bbox))
+ e.npath++
+}
+
+// endClip appends the matching end-clip command for bbox.
+func (e *encoder) endClip(bbox f32.Rectangle) {
+ e.scene = append(e.scene, scene.EndClip(bbox))
+ e.npath++
+}
+
+// rect encodes r's outline as four line segments.
+func (e *encoder) rect(r f32.Rectangle) {
+ // Rectangle corners, clock-wise.
+ c0, c1, c2, c3 := r.Min, f32.Pt(r.Min.X, r.Max.Y), r.Max, f32.Pt(r.Max.X, r.Min.Y)
+ e.line(c0, c1)
+ e.line(c1, c2)
+ e.line(c2, c3)
+ e.line(c3, c0)
+}
+
+// fillColor appends a solid color fill; fills count as path elements.
+func (e *encoder) fillColor(col color.RGBA) {
+ e.scene = append(e.scene, scene.FillColor(col))
+ e.npath++
+}
+
+// fillImage appends an image fill referencing atlas offset.
+func (e *encoder) fillImage(index int, offset image.Point) {
+ e.scene = append(e.scene, scene.FillImage(index, offset))
+ e.npath++
+}
+
+// line appends a straight path segment.
+func (e *encoder) line(start, end f32.Point) {
+ e.scene = append(e.scene, scene.Line(start, end))
+ e.npathseg++
+}
+
+// quad appends a quadratic Bézier path segment.
+func (e *encoder) quad(start, ctrl, end f32.Point) {
+ e.scene = append(e.scene, scene.Quad(start, ctrl, end))
+ e.npathseg++
+}
+
+// reset prepares the collector for a new frame. The current frame becomes
+// prevFrame so unchanged op runs can be matched and their layers reused.
+func (c *collector) reset() {
+ c.prevFrame, c.frame = c.frame, c.prevFrame
+ c.profile = false
+ c.clipStates = c.clipStates[:0]
+ c.transStack = c.transStack[:0]
+ c.frame.reset()
+}
+
+// reset clears the per-frame collections while retaining their capacity.
+func (c *opsCollector) reset() {
+	c.paths, c.clipCmds = c.paths[:0], c.clipCmds[:0]
+	c.ops, c.layers = c.ops[:0], c.layers[:0]
+}
+
+// addClip pushes a clip region (bounds, optionally a path) onto state's clip
+// stack and records it in c.clipStates. A rectangular clip that already
+// contains an ancestor clip's bounds is redundant and is discarded.
+func (c *collector) addClip(state *encoderState, viewport, bounds f32.Rectangle, path []byte, key ops.Key, hash uint64, strokeWidth float32, push bool) {
+ // Rectangle clip regions.
+ if len(path) == 0 && !push {
+ // If the rectangular clip region contains a previous path it can be discarded.
+ p := state.clip
+ t := state.relTrans.Invert()
+ for p != nil {
+ // rect is the parent bounds transformed relative to the rectangle.
+ rect := transformBounds(t, p.bounds)
+ if rect.In(bounds) {
+ return
+ }
+ t = p.relTrans.Invert().Mul(t)
+ p = p.parent
+ }
+ }
+
+ // Track the running intersection of absolute clip bounds for culling.
+ absBounds := transformBounds(state.t, bounds).Bounds()
+ intersect := absBounds
+ if state.clip != nil {
+ intersect = state.clip.intersect.Intersect(intersect)
+ }
+ c.clipStates = append(c.clipStates, clipState{
+ parent: state.clip,
+ absBounds: absBounds,
+ path: path,
+ pathKey: key,
+ intersect: intersect,
+ clipKey: clipKey{
+ bounds: bounds,
+ relTrans: state.relTrans,
+ strokeWidth: strokeWidth,
+ pathHash: hash,
+ },
+ })
+ state.clip = &c.clipStates[len(c.clipStates)-1]
+ // relTrans is now relative to the new clip.
+ state.relTrans = f32.Affine2D{}
+}
+
+// collect walks the op tree rooted at root, tracking transform, clip and
+// material state, and records one paintOp per paint plus one textureOp per
+// image paint into texOps.
+func (c *collector) collect(root *op.Ops, viewport image.Point, texOps *[]textureOp) {
+ fview := f32.Rectangle{Max: layout.FPt(viewport)}
+ var intOps *ops.Ops
+ if root != nil {
+ intOps = &root.Internal
+ }
+ c.reader.Reset(intOps)
+ var state encoderState
+ reset := func() {
+ state = encoderState{
+ paintKey: paintKey{
+ color: color.NRGBA{A: 0xff},
+ },
+ }
+ }
+ reset()
+ r := &c.reader
+ var (
+ pathData struct {
+ data []byte
+ key ops.Key
+ hash uint64
+ }
+ strWidth float32
+ )
+ // Root clip covering the whole viewport.
+ c.addClip(&state, fview, fview, nil, ops.Key{}, 0, 0, false)
+ for encOp, ok := r.Decode(); ok; encOp, ok = r.Decode() {
+ switch ops.OpType(encOp.Data[0]) {
+ case ops.TypeProfile:
+ c.profile = true
+ case ops.TypeTransform:
+ dop, push := ops.DecodeTransform(encOp.Data)
+ if push {
+ c.transStack = append(c.transStack, transEntry{t: state.t, relTrans: state.relTrans})
+ }
+ state.t = state.t.Mul(dop)
+ state.relTrans = state.relTrans.Mul(dop)
+ case ops.TypePopTransform:
+ n := len(c.transStack)
+ st := c.transStack[n-1]
+ c.transStack = c.transStack[:n-1]
+ state.t = st.t
+ state.relTrans = st.relTrans
+ case ops.TypeStroke:
+ strWidth = decodeStrokeOp(encOp.Data)
+ case ops.TypePath:
+ // A path op is followed by an aux op carrying the vertex data.
+ hash := bo.Uint64(encOp.Data[1:])
+ encOp, ok = r.Decode()
+ if !ok {
+ panic("unexpected end of path operation")
+ }
+ pathData.data = encOp.Data[ops.TypeAuxLen:]
+ pathData.key = encOp.Key
+ pathData.hash = hash
+ case ops.TypeClip:
+ var op ops.ClipOp
+ op.Decode(encOp.Data)
+ bounds := layout.FRect(op.Bounds)
+ c.addClip(&state, fview, bounds, pathData.data, pathData.key, pathData.hash, strWidth, true)
+ // Path and stroke state are consumed by the clip.
+ pathData.data = nil
+ strWidth = 0
+ case ops.TypePopClip:
+ state.relTrans = state.clip.relTrans.Mul(state.relTrans)
+ state.clip = state.clip.parent
+ case ops.TypeColor:
+ state.matType = materialColor
+ state.color = decodeColorOp(encOp.Data)
+ case ops.TypeLinearGradient:
+ state.matType = materialLinearGradient
+ op := decodeLinearGradientOp(encOp.Data)
+ state.stop1 = op.stop1
+ state.stop2 = op.stop2
+ state.color1 = op.color1
+ state.color2 = op.color2
+ case ops.TypeImage:
+ state.matType = materialTexture
+ state.image = decodeImageOp(encOp.Data, encOp.Refs)
+ case ops.TypePaint:
+ paintState := state
+ if paintState.matType == materialTexture {
+ // Clip to the bounds of the image, to hide other images in the atlas.
+ sz := state.image.src.Rect.Size()
+ bounds := f32.Rectangle{Max: layout.FPt(sz)}
+ c.addClip(&paintState, fview, bounds, nil, ops.Key{}, 0, 0, false)
+ }
+ intersect := paintState.clip.intersect
+ if intersect.Empty() {
+ // Fully clipped away; skip the paint.
+ break
+ }
+
+ // If the paint is a uniform opaque color that takes up the whole
+ // screen, it covers all previous paints and we can discard all
+ // rendering commands recorded so far.
+ if paintState.clip == nil && paintState.matType == materialColor && paintState.color.A == 255 {
+ c.clearColor = f32color.LinearFromSRGB(paintState.color).Opaque()
+ c.clear = true
+ c.frame.reset()
+ break
+ }
+
+ // Flatten clip stack.
+ p := paintState.clip
+ startIdx := len(c.frame.clipCmds)
+ for p != nil {
+ idx := len(c.frame.paths)
+ c.frame.paths = append(c.frame.paths, make([]byte, len(p.path))...)
+ path := c.frame.paths[idx:]
+ copy(path, p.path)
+ c.frame.clipCmds = append(c.frame.clipCmds, clipCmd{
+ state: p.clipKey,
+ path: path,
+ pathKey: p.pathKey,
+ absBounds: p.absBounds,
+ })
+ p = p.parent
+ }
+ clipStack := c.frame.clipCmds[startIdx:]
+ c.frame.ops = append(c.frame.ops, paintOp{
+ clipStack: clipStack,
+ state: paintState.paintKey,
+ intersect: intersect,
+ })
+ case ops.TypeSave:
+ id := ops.DecodeSave(encOp.Data)
+ c.save(id, state.t)
+ case ops.TypeLoad:
+ reset()
+ id := ops.DecodeLoad(encOp.Data)
+ state.t = c.states[id]
+ state.relTrans = state.t
+ }
+ }
+ // Post-process the collected ops: cull redundant rectangular clips and
+ // split out integer offsets so equal ops hash identically.
+ for i := range c.frame.ops {
+ op := &c.frame.ops[i]
+ // For each clip, cull rectangular clip regions that contain its
+ // (transformed) bounds. addClip already handled the converse case.
+ // TODO: do better than O(n²) to efficiently deal with deep stacks.
+ for j := 0; j < len(op.clipStack)-1; j++ {
+ cl := op.clipStack[j]
+ p := cl.state
+ r := transformBounds(p.relTrans, p.bounds)
+ for k := j + 1; k < len(op.clipStack); k++ {
+ cl2 := op.clipStack[k]
+ p2 := cl2.state
+ if len(cl2.path) == 0 && r.In(cl2.state.bounds) {
+ op.clipStack = append(op.clipStack[:k], op.clipStack[k+1:]...)
+ k--
+ op.clipStack[k].state.relTrans = p2.relTrans.Mul(op.clipStack[k].state.relTrans)
+ }
+ r = transformRect(p2.relTrans, r)
+ }
+ }
+ // Separate the integer offset from the first transform. Two ops that differ
+ // only in integer offsets may share backing storage.
+ if len(op.clipStack) > 0 {
+ c := &op.clipStack[len(op.clipStack)-1]
+ t := c.state.relTrans
+ t, off := separateTransform(t)
+ c.state.relTrans = t
+ op.offset = off
+ op.state.t = op.state.t.Offset(layout.FPt(off.Mul(-1)))
+ }
+ op.hash = c.hashOp(*op)
+ op.texOpIdx = -1
+ switch op.state.matType {
+ case materialTexture:
+ op.texOpIdx = len(*texOps)
+ // Separate integer offset from transformation. TextureOps that have identical transforms
+ // except for their integer offsets can share a transformed image.
+ t := op.state.t.Offset(layout.FPt(op.offset))
+ t, off := separateTransform(t)
+ bounds := boundRectF(op.intersect).Sub(off)
+ *texOps = append(*texOps, textureOp{
+ img: op.state.image,
+ off: off,
+ key: textureKey{
+ bounds: bounds,
+ transform: t,
+ handle: op.state.image.handle,
+ },
+ })
+ }
+ }
+}
+
+// hashOp computes a hash of op's clip stack and paint state by feeding the
+// raw bytes of each clipKey and the paintKey into the collector's hasher.
+//
+// NOTE(review): the raw byte view includes any struct padding — assumes the
+// key structs have no uninitialized padding; confirm against their layout.
+func (c *collector) hashOp(op paintOp) uint64 {
+	c.hasher.Reset()
+	for _, cl := range op.clipStack {
+		k := cl.state
+		// View the key struct as raw bytes. The redundant double
+		// unsafe.Pointer conversion from the original is dropped.
+		keyBytes := (*[unsafe.Sizeof(k)]byte)(unsafe.Pointer(&k))
+		c.hasher.Write(keyBytes[:])
+	}
+	k := op.state
+	keyBytes := (*[unsafe.Sizeof(k)]byte)(unsafe.Pointer(&k))
+	c.hasher.Write(keyBytes[:])
+	return c.hasher.Sum64()
+}
+
+// layer partitions the frame's ops into layers, reusing layers from the
+// previous frame where a run of ops matches, and splitting layers so that
+// each uses at most one materials atlas.
+func (g *compute) layer(viewport image.Point, texOps []textureOp) {
+ // Sort ops from previous frames by hash.
+ c := &g.collector
+ prevOps := c.prevFrame.ops
+ c.order = c.order[:0]
+ for i, op := range prevOps {
+ c.order = append(c.order, hashIndex{
+ index: i,
+ hash: op.hash,
+ })
+ }
+ sort.Slice(c.order, func(i, j int) bool {
+ return c.order[i].hash < c.order[j].hash
+ })
+ // Split layers with different materials atlas; the compute stage has only
+ // one materials slot.
+ splitLayer := func(ops []paintOp, prevLayerIdx int) {
+ for len(ops) > 0 {
+ var materials *textureAtlas
+ idx := 0
+ // Extend the layer while its texture ops share one materials atlas.
+ for idx < len(ops) {
+ if i := ops[idx].texOpIdx; i != -1 {
+ omats := texOps[i].matAlloc.alloc.atlas
+ if materials != nil && omats != nil && omats != materials {
+ break
+ }
+ materials = omats
+ }
+ idx++
+ }
+ l := layer{ops: ops[:idx], materials: materials}
+ if prevLayerIdx != -1 {
+ // Reuse the previous frame's layer allocation when it is still
+ // alive and covers the same number of ops.
+ prev := c.prevFrame.layers[prevLayerIdx]
+ if !prev.alloc.dead && len(prev.ops) == len(l.ops) {
+ l.alloc = prev.alloc
+ l.materials = prev.materials
+ g.touchAlloc(l.alloc)
+ }
+ }
+ for i, op := range l.ops {
+ l.rect = l.rect.Union(boundRectF(op.intersect))
+ l.ops[i].layer = len(c.frame.layers)
+ }
+ c.frame.layers = append(c.frame.layers, l)
+ ops = ops[idx:]
+ }
+ }
+ ops := c.frame.ops
+ idx := 0
+ for idx < len(ops) {
+ op := ops[idx]
+ // Search for longest matching op sequence.
+ // start is the earliest index of a match.
+ start := searchOp(c.order, op.hash)
+ layerOps, prevLayerIdx := longestLayer(prevOps, c.order[start:], ops[idx:])
+ if len(layerOps) == 0 {
+ idx++
+ continue
+ }
+ if unmatched := ops[:idx]; len(unmatched) > 0 {
+ // Flush layer of unmatched ops.
+ splitLayer(unmatched, -1)
+ ops = ops[idx:]
+ idx = 0
+ }
+ splitLayer(layerOps, prevLayerIdx)
+ ops = ops[len(layerOps):]
+ }
+ if len(ops) > 0 {
+ splitLayer(ops, -1)
+ }
+}
+
+// longestLayer finds, among the previous-frame ops listed in order (sorted
+// by hash), the longest run that matches the beginning of ops and lies
+// within a single previous layer. It returns the matching prefix of ops and
+// the previous layer's index, or an empty slice and -1 when nothing matches.
+func longestLayer(prev []paintOp, order []hashIndex, ops []paintOp) ([]paintOp, int) {
+ longest := 0
+ longestIdx := -1
+outer:
+ for len(order) > 0 {
+ first := order[0]
+ order = order[1:]
+ match := prev[first.index:]
+ // Potential match found. Now find longest matching sequence.
+ end := 0
+ layer := match[0].layer
+ off := match[0].offset.Sub(ops[0].offset)
+ for end < len(match) && end < len(ops) {
+ m := match[end]
+ o := ops[end]
+ // End layers on previous match.
+ if m.layer != layer {
+ break
+ }
+ // End layer when the next op doesn't match.
+ if m.hash != o.hash {
+ if end == 0 {
+ // Hashes are sorted so if the first op doesn't match, no
+ // more matches are possible.
+ break outer
+ }
+ break
+ }
+ if !opEqual(off, m, o) {
+ break
+ }
+ end++
+ }
+ if end > longest {
+ longest = end
+ longestIdx = layer
+
+ }
+ }
+ return ops[:longest], longestIdx
+}
+
+// searchOp returns the index of the first entry in order (sorted ascending
+// by hash) whose hash is >= hash, or len(order) when there is none.
+func searchOp(order []hashIndex, hash uint64) int {
+	return sort.Search(len(order), func(i int) bool {
+		return order[i].hash >= hash
+	})
+}
+
+// opEqual reports whether o1 and o2 describe the same paint operation,
+// given that their integer offsets are expected to differ by exactly off.
+func opEqual(off image.Point, o1 paintOp, o2 paintOp) bool {
+ if len(o1.clipStack) != len(o2.clipStack) {
+ return false
+ }
+ if o1.state != o2.state {
+ return false
+ }
+ if o1.offset.Sub(o2.offset) != off {
+ return false
+ }
+ for i, cl1 := range o1.clipStack {
+ cl2 := o2.clipStack[i]
+ if len(cl1.path) != len(cl2.path) {
+ return false
+ }
+ if cl1.state != cl2.state {
+ return false
+ }
+ // Equal path keys imply equal path data; otherwise fall back to a
+ // byte comparison.
+ if cl1.pathKey != cl2.pathKey && !bytes.Equal(cl1.path, cl2.path) {
+ return false
+ }
+ }
+ return true
+}
+
+// encodeLayer encodes all of l's ops into enc, translated so that the
+// layer's rectangle lands at pos, and restores the encoder transform
+// afterwards.
+func encodeLayer(l layer, pos image.Point, viewport image.Point, enc *encoder, texOps []textureOp) {
+ off := pos.Sub(l.rect.Min)
+ offf := layout.FPt(off)
+
+ enc.transform(f32.Affine2D{}.Offset(offf))
+ for _, op := range l.ops {
+ encodeOp(viewport, off, enc, texOps, op)
+ }
+ // Undo the layer offset.
+ enc.transform(f32.Affine2D{}.Offset(offf.Mul(-1)))
+}
+
+// encodeOp encodes a single paint op: the innermost clip shape acts as the
+// fill geometry, outer clips become begin/end-clip pairs, and the paint
+// material determines the fill command. The encoder transform is restored
+// before returning.
+func encodeOp(viewport image.Point, absOff image.Point, enc *encoder, texOps []textureOp, op paintOp) {
+ // Fill in clip bounds, which the shaders expect to be the union
+ // of all affected bounds.
+ var union f32.Rectangle
+ for i, cl := range op.clipStack {
+ union = union.Union(cl.absBounds)
+ op.clipStack[i].union = union
+ }
+
+ absOfff := layout.FPt(absOff)
+ fillMode := scene.FillModeNonzero
+ opOff := layout.FPt(op.offset)
+ inv := f32.Affine2D{}.Offset(opOff)
+ enc.transform(inv)
+ // Walk the clip stack from outermost to innermost.
+ for i := len(op.clipStack) - 1; i >= 0; i-- {
+ cl := op.clipStack[i]
+ if w := cl.state.strokeWidth; w > 0 {
+ enc.fillMode(scene.FillModeStroke)
+ enc.lineWidth(w)
+ fillMode = scene.FillModeStroke
+ } else if fillMode != scene.FillModeNonzero {
+ enc.fillMode(scene.FillModeNonzero)
+ fillMode = scene.FillModeNonzero
+ }
+ enc.transform(cl.state.relTrans)
+ inv = inv.Mul(cl.state.relTrans)
+ if len(cl.path) == 0 {
+ enc.rect(cl.state.bounds)
+ } else {
+ enc.encodePath(cl.path)
+ }
+ if i != 0 {
+ enc.beginClip(cl.union.Add(absOfff))
+ }
+ }
+ if len(op.clipStack) == 0 {
+ // No clipping; fill the entire view.
+ enc.rect(f32.Rectangle{Max: layout.FPt(viewport)})
+ }
+
+ switch op.state.matType {
+ case materialTexture:
+ texOp := texOps[op.texOpIdx]
+ off := texOp.matAlloc.alloc.rect.Min.Add(texOp.matAlloc.offset).Sub(texOp.off).Sub(absOff)
+ enc.fillImage(0, off)
+ case materialColor:
+ enc.fillColor(f32color.NRGBAToRGBA(op.state.color))
+ case materialLinearGradient:
+ // TODO: implement.
+ enc.fillColor(f32color.NRGBAToRGBA(op.state.color1))
+ default:
+ panic("not implemented")
+ }
+ // Undo the accumulated clip transforms.
+ enc.transform(inv.Invert())
+ // Pop the clip stack, except the first entry used for fill.
+ for i := 1; i < len(op.clipStack); i++ {
+ cl := op.clipStack[i]
+ enc.endClip(cl.union.Add(absOfff))
+ }
+ if fillMode != scene.FillModeNonzero {
+ enc.fillMode(scene.FillModeNonzero)
+ }
+}
+
+// save stores a transform under id, growing the state table on demand.
+func (c *collector) save(id int, state f32.Affine2D) {
+	if id >= len(c.states) {
+		c.states = append(c.states, make([]f32.Affine2D, id-len(c.states)+1)...)
+	}
+	c.states[id] = state
+}
+
+// transformBounds returns the four corners of bounds transformed by t, in
+// clockwise order starting at Min.
+func transformBounds(t f32.Affine2D, bounds f32.Rectangle) rectangle {
+	topLeft := t.Transform(bounds.Min)
+	topRight := t.Transform(f32.Pt(bounds.Max.X, bounds.Min.Y))
+	bottomRight := t.Transform(bounds.Max)
+	bottomLeft := t.Transform(f32.Pt(bounds.Min.X, bounds.Max.Y))
+	return rectangle{topLeft, topRight, bottomRight, bottomLeft}
+}
+
+// separateTransform splits t's offset into its integer part (returned as an
+// image.Point) and its fractional part (kept in the returned transform), so
+// that applying the transform and then the integer offset reproduces t.
+// Note that math.Modf keeps the sign: a negative offset yields negative
+// integer and fractional parts.
+func separateTransform(t f32.Affine2D) (f32.Affine2D, image.Point) {
+ sx, hx, ox, hy, sy, oy := t.Elems()
+ intx, fracx := math.Modf(float64(ox))
+ inty, fracy := math.Modf(float64(oy))
+ t = f32.NewAffine2D(sx, hx, float32(fracx), hy, sy, float32(fracy))
+ return t, image.Pt(int(intx), int(inty))
+}
+
+// transformRect transforms each corner of r by t.
+func transformRect(t f32.Affine2D, r rectangle) rectangle {
+	var out rectangle
+	for i := range r {
+		out[i] = t.Transform(r[i])
+	}
+	return out
+}
+
+// In reports whether every corner of r lies inside the axis-aligned
+// rectangle b, borders included.
+func (r rectangle) In(b f32.Rectangle) bool {
+	for _, corner := range r {
+		within := b.Min.X <= corner.X && corner.X <= b.Max.X &&
+			b.Min.Y <= corner.Y && corner.Y <= b.Max.Y
+		if !within {
+			return false
+		}
+	}
+	return true
+}
+
+// Contains reports whether r contains b.
+//
+// NOTE(review): this is a stub that unconditionally returns true regardless
+// of its arguments — confirm it is unused, or implement a real containment
+// test, before relying on it.
+func (r rectangle) Contains(b f32.Rectangle) bool {
+ return true
+}
+
+// Bounds returns the axis-aligned bounding rectangle of r's four corners.
+func (r rectangle) Bounds() f32.Rectangle {
+	// Start from inverted extremes so any real coordinate replaces them.
+	b := f32.Rectangle{
+		Min: f32.Pt(math.MaxFloat32, math.MaxFloat32),
+		Max: f32.Pt(-math.MaxFloat32, -math.MaxFloat32),
+	}
+	for _, corner := range r {
+		b.Min = min(b.Min, corner)
+		b.Max = max(b.Max, corner)
+	}
+	return b
+}
diff --git a/vendor/gioui.org/gpu/cpu.go b/vendor/gioui.org/gpu/cpu.go
new file mode 100644
index 0000000..f2f84ad
--- /dev/null
+++ b/vendor/gioui.org/gpu/cpu.go
@@ -0,0 +1,129 @@
+// SPDX-License-Identifier: Unlicense OR MIT
+
+package gpu
+
+import (
+ "unsafe"
+
+ "gioui.org/cpu"
+)
+
// This file contains code specific to running compute shaders on the CPU.

// dispatcher dispatches CPU compute programs across multiple goroutines.
type dispatcher struct {
	// done is notified when a worker completes its work slice.
	done chan struct{}
	// work receives work slice indices. It is closed when the dispatcher is released.
	work chan work
	// dispatch receives compute jobs, which is then split among workers.
	dispatch chan dispatch
	// sync receives notification when a Sync completes.
	sync chan struct{}
}

// work is one worker-sized slice of a dispatch: the shared context plus
// the index of the slice to execute.
type work struct {
	ctx   *cpu.DispatchContext
	index int
}

// dispatch describes one job sent to the dispatcher goroutine. For
// jobDispatch, program, descSet and the x, y, z workgroup counts are set.
type dispatch struct {
	_type   jobType
	program *cpu.ProgramInfo
	descSet unsafe.Pointer
	x, y, z int
}

// jobType distinguishes the kinds of jobs the dispatcher accepts.
type jobType uint8

const (
	// jobDispatch runs a compute program.
	jobDispatch jobType = iota
	// jobBarrier waits for all outstanding dispatches to complete.
	jobBarrier
	// jobSync notifies the Sync caller.
	jobSync
)
+
// newDispatcher starts workers goroutines plus the dispatcher goroutine
// that splits incoming jobs among them.
func newDispatcher(workers int) *dispatcher {
	d := &dispatcher{
		work: make(chan work, workers),
		done: make(chan struct{}, workers),
		// Leave some room to avoid blocking calls to Dispatch.
		dispatch: make(chan dispatch, 20),
		sync:     make(chan struct{}),
	}
	for i := 0; i < workers; i++ {
		go d.worker()
	}
	go d.dispatcher()
	return d
}

// dispatcher is the job loop: it splits each compute dispatch into
// cap(d.work) slices, recycles DispatchContexts through a free list, and
// returns used contexts to the free list at every barrier. It runs until
// Stop closes the dispatch channel, then closes the work channel so the
// workers exit.
func (d *dispatcher) dispatcher() {
	defer close(d.work)
	var free []*cpu.DispatchContext
	defer func() {
		for _, ctx := range free {
			ctx.Free()
		}
	}()
	var used []*cpu.DispatchContext
	for job := range d.dispatch {
		switch job._type {
		case jobDispatch:
			if len(free) == 0 {
				free = append(free, cpu.NewDispatchContext())
			}
			ctx := free[len(free)-1]
			free = free[:len(free)-1]
			used = append(used, ctx)
			ctx.Prepare(cap(d.work), job.program, job.descSet, job.x, job.y, job.z)
			for i := 0; i < cap(d.work); i++ {
				d.work <- work{
					ctx:   ctx,
					index: i,
				}
			}
		case jobBarrier:
			// Wait for all outstanding dispatches to complete.
			for i := 0; i < len(used)*cap(d.work); i++ {
				<-d.done
			}
			free = append(free, used...)
			used = used[:0]
		case jobSync:
			d.sync <- struct{}{}
		}
	}
}

// worker executes work slices until the work channel is closed, signalling
// the completion of each slice on the done channel.
func (d *dispatcher) worker() {
	thread := cpu.NewThreadContext()
	defer thread.Free()
	for w := range d.work {
		w.ctx.Dispatch(w.index, thread)
		d.done <- struct{}{}
	}
}

// Barrier enqueues a barrier: the dispatcher goroutine will not hand out
// further work until all previously enqueued dispatches have completed.
func (d *dispatcher) Barrier() {
	d.dispatch <- dispatch{_type: jobBarrier}
}

// Sync blocks until the dispatcher has processed every job enqueued
// before it.
func (d *dispatcher) Sync() {
	d.dispatch <- dispatch{_type: jobSync}
	<-d.sync
}

// Dispatch enqueues a compute program invocation with x, y, z workgroups.
func (d *dispatcher) Dispatch(program *cpu.ProgramInfo, descSet unsafe.Pointer, x, y, z int) {
	d.dispatch <- dispatch{
		_type:   jobDispatch,
		program: program,
		descSet: descSet,
		x:       x,
		y:       y,
		z:       z,
	}
}

// Stop closes the dispatch channel, shutting down the dispatcher and,
// transitively, the workers.
func (d *dispatcher) Stop() {
	close(d.dispatch)
}
diff --git a/vendor/gioui.org/gpu/gpu.go b/vendor/gioui.org/gpu/gpu.go
new file mode 100644
index 0000000..c07e3f8
--- /dev/null
+++ b/vendor/gioui.org/gpu/gpu.go
@@ -0,0 +1,1416 @@
+// SPDX-License-Identifier: Unlicense OR MIT
+
+/*
+Package gpu implements the rendering of Gio drawing operations. It
+is used by package app and package app/headless and is otherwise not
+useful except for integrating with external window implementations.
+*/
+package gpu
+
+import (
+ "encoding/binary"
+ "fmt"
+ "image"
+ "image/color"
+ "math"
+ "os"
+ "reflect"
+ "time"
+ "unsafe"
+
+ "gioui.org/f32"
+ "gioui.org/gpu/internal/driver"
+ "gioui.org/internal/byteslice"
+ "gioui.org/internal/f32color"
+ "gioui.org/internal/ops"
+ "gioui.org/internal/scene"
+ "gioui.org/internal/stroke"
+ "gioui.org/layout"
+ "gioui.org/op"
+ "gioui.org/shader"
+ "gioui.org/shader/gio"
+
+ // Register backends.
+ _ "gioui.org/gpu/internal/d3d11"
+ _ "gioui.org/gpu/internal/metal"
+ _ "gioui.org/gpu/internal/opengl"
+ _ "gioui.org/gpu/internal/vulkan"
+)
+
// GPU is the interface for rendering Gio operations into a platform
// render target.
type GPU interface {
	// Release non-Go resources. The GPU is no longer valid after Release.
	Release()
	// Clear sets the clear color for the next Frame.
	Clear(color color.NRGBA)
	// Frame draws the graphics operations from op into a viewport of target.
	Frame(frame *op.Ops, target RenderTarget, viewport image.Point) error
	// Profile returns the last available profiling information. Profiling
	// information is requested when Frame sees an io/profile.Op, and the result
	// is available through Profile at some later time.
	Profile() string
}

// gpu is the stencil-and-cover GPU implementation.
type gpu struct {
	cache *resourceCache

	// profile holds the most recent profiling summary.
	profile    string
	timers     *timers
	frameStart time.Time
	// Per-phase GPU timers, created lazily when profiling is requested.
	stencilTimer, coverTimer, cleanupTimer *timer
	drawOps                                drawOps
	ctx                                    driver.Device
	renderer                               *renderer
}

// renderer issues the draw calls for a frame: clip-path stenciling via
// pather, final drawing via blitter, plus atlas packers for the cover and
// intersection textures.
type renderer struct {
	ctx           driver.Device
	blitter       *blitter
	pather        *pather
	packer        packer
	intersections packer
}

// drawOps is the per-frame state collected from an op list before
// rendering.
type drawOps struct {
	profile    bool
	reader     ops.Reader
	states     []f32.Affine2D
	transStack []f32.Affine2D
	vertCache  []byte
	viewport   image.Point
	// clear reports whether the target should be cleared with clearColor.
	clear      bool
	clearColor f32color.RGBA
	imageOps   []imageOp
	// pathOps are the clip paths that need stenciling this frame.
	pathOps     []*pathOp
	pathOpCache []pathOp
	qs          quadSplitter
	pathCache   *opCache
}

// drawState is the implicit state while walking the op stream: the
// current transform, clip path and paint material.
type drawState struct {
	t     f32.Affine2D
	cpath *pathOp

	matType materialType
	// Current paint.ImageOp
	image imageOpData
	// Current paint.ColorOp, if any.
	color color.NRGBA

	// Current paint.LinearGradientOp.
	stop1  f32.Point
	stop2  f32.Point
	color1 color.NRGBA
	color2 color.NRGBA
}

// pathOp is one node of the clip stack; parent links form the chain of
// enclosing clips.
type pathOp struct {
	off f32.Point
	// rect tracks whether the clip stack can be represented by a
	// pixel-aligned rectangle.
	rect bool
	// clip is the union of all
	// later clip rectangles.
	clip   image.Rectangle
	bounds f32.Rectangle
	// intersect is the intersection of bounds and all
	// previous clip bounds.
	intersect f32.Rectangle
	pathKey   opKey
	path      bool
	pathVerts []byte
	parent    *pathOp
	place     placement
}

// imageOp is one paint operation together with its resolved clip.
type imageOp struct {
	path     *pathOp
	clip     image.Rectangle
	material material
	clipType clipType
	place    placement
}
+
+func decodeStrokeOp(data []byte) float32 {
+ _ = data[4]
+ bo := binary.LittleEndian
+ return math.Float32frombits(bo.Uint32(data[1:]))
+}
+
// quadsOp is a pending clip path: its cache key plus the encoded quad
// data gathered from a preceding path op.
type quadsOp struct {
	key opKey
	aux []byte
}

// opKey identifies cached path vertex data. It extends the encoded op key
// with the outline flag, stroke width and the non-translation part of the
// transform, since the generated vertices depend on all of them.
type opKey struct {
	outline        bool
	strokeWidth    float32
	sx, hx, sy, hy float32
	ops.Key
}

// material describes how an imageOp is shaded; which fields are valid
// depends on the material field.
type material struct {
	material materialType
	opaque   bool
	// For materialTypeColor.
	color f32color.RGBA
	// For materialTypeLinearGradient.
	color1 f32color.RGBA
	color2 f32color.RGBA
	// For materialTypeTexture.
	data    imageOpData
	uvTrans f32.Affine2D
}

// imageOpData is the shadow of paint.ImageOp.
type imageOpData struct {
	src    *image.RGBA
	handle interface{}
}

// linearGradientOpData mirrors the decoded fields of a linear gradient op.
type linearGradientOpData struct {
	stop1  f32.Point
	color1 color.NRGBA
	stop2  f32.Point
	color2 color.NRGBA
}
+
// decodeImageOp decodes an encoded image op. The image and its handle are
// carried in refs (data itself is not read here); a nil handle yields the
// zero imageOpData.
func decodeImageOp(data []byte, refs []interface{}) imageOpData {
	handle := refs[1]
	if handle == nil {
		return imageOpData{}
	}
	return imageOpData{
		src:    refs[0].(*image.RGBA),
		handle: handle,
	}
}
+
+func decodeColorOp(data []byte) color.NRGBA {
+ return color.NRGBA{
+ R: data[1],
+ G: data[2],
+ B: data[3],
+ A: data[4],
+ }
+}
+
// decodeLinearGradientOp decodes an encoded linear gradient op: two
// little-endian float32 stop points starting at bytes 1 and 9, followed
// by two RGBA colors at bytes 17 and 21.
func decodeLinearGradientOp(data []byte) linearGradientOpData {
	bo := binary.LittleEndian
	return linearGradientOpData{
		stop1: f32.Point{
			X: math.Float32frombits(bo.Uint32(data[1:])),
			Y: math.Float32frombits(bo.Uint32(data[5:])),
		},
		stop2: f32.Point{
			X: math.Float32frombits(bo.Uint32(data[9:])),
			Y: math.Float32frombits(bo.Uint32(data[13:])),
		},
		color1: color.NRGBA{
			R: data[17+0],
			G: data[17+1],
			B: data[17+2],
			A: data[17+3],
		},
		color2: color.NRGBA{
			R: data[21+0],
			G: data[21+1],
			B: data[21+2],
			A: data[21+3],
		},
	}
}
+
// clipType describes how an imageOp's clip is realized when drawing.
type clipType uint8

// resource is a cached entry owning GPU objects.
type resource interface {
	release()
}

// texture pairs a source image with its uploaded GPU texture.
type texture struct {
	src *image.RGBA
	tex driver.Texture
}

// blitter draws clip-less paint operations, with one pipeline per
// material type sharing a unit-quad vertex buffer.
type blitter struct {
	ctx                    driver.Device
	viewport               image.Point
	pipelines              [3]*pipeline
	colUniforms            *blitColUniforms
	texUniforms            *blitTexUniforms
	linearGradientUniforms *blitLinearGradientUniforms
	quadVerts              driver.Buffer
}

type blitColUniforms struct {
	blitUniforms
	_ [128 - unsafe.Sizeof(blitUniforms{}) - unsafe.Sizeof(colorUniforms{})]byte // Padding to 128 bytes.
	colorUniforms
}

type blitTexUniforms struct {
	blitUniforms
}

type blitLinearGradientUniforms struct {
	blitUniforms
	_ [128 - unsafe.Sizeof(blitUniforms{}) - unsafe.Sizeof(gradientUniforms{})]byte // Padding to 128 bytes.
	gradientUniforms
}

// uniformBuffer is a GPU uniform buffer mirrored by a CPU byte slice.
type uniformBuffer struct {
	buf driver.Buffer
	ptr []byte
}

// pipeline couples a driver pipeline with its optional uniform buffer.
type pipeline struct {
	pipeline driver.Pipeline
	uniforms *uniformBuffer
}

// blitUniforms is the vertex uniform layout shared by all blit pipelines.
type blitUniforms struct {
	transform     [4]float32
	uvTransformR1 [4]float32
	uvTransformR2 [4]float32
}

type colorUniforms struct {
	color f32color.RGBA
}

type gradientUniforms struct {
	color1 f32color.RGBA
	color2 f32color.RGBA
}

// materialType selects the fragment shading of a draw; it also indexes
// the pipeline and uniform arrays.
type materialType uint8

const (
	clipTypeNone clipType = iota
	clipTypePath
	clipTypeIntersection
)

const (
	materialColor materialType = iota
	materialLinearGradient
	materialTexture
)
+
// New creates a GPU for the given API. It falls back to the compute
// renderer when the device lacks float render targets or sRGB support, or
// when GIORENDERER=forcecompute is set in the environment.
func New(api API) (GPU, error) {
	d, err := driver.NewDevice(api)
	if err != nil {
		return nil, err
	}
	d.BeginFrame(nil, false, image.Point{})
	defer d.EndFrame()
	forceCompute := os.Getenv("GIORENDERER") == "forcecompute"
	feats := d.Caps().Features
	switch {
	case !forceCompute && feats.Has(driver.FeatureFloatRenderTargets) && feats.Has(driver.FeatureSRGB):
		return newGPU(d)
	}
	return newCompute(d)
}

// newGPU creates the stencil-and-cover GPU backed by ctx.
func newGPU(ctx driver.Device) (*gpu, error) {
	g := &gpu{
		cache: newResourceCache(),
	}
	g.drawOps.pathCache = newOpCache()
	if err := g.init(ctx); err != nil {
		return nil, err
	}
	return g, nil
}

// init binds the device and creates the renderer.
func (g *gpu) init(ctx driver.Device) error {
	g.ctx = ctx
	g.renderer = newRenderer(ctx)
	return nil
}

// Clear implements GPU.Clear, recording the clear color in linear space.
func (g *gpu) Clear(col color.NRGBA) {
	g.drawOps.clear = true
	g.drawOps.clearColor = f32color.LinearFromSRGB(col)
}

// Release frees all GPU resources; g must not be used afterwards.
func (g *gpu) Release() {
	g.renderer.release()
	g.drawOps.pathCache.release()
	g.cache.release()
	if g.timers != nil {
		g.timers.Release()
	}
	g.ctx.Release()
}

// Frame collects the frame's operations and renders them into target.
func (g *gpu) Frame(frameOps *op.Ops, target RenderTarget, viewport image.Point) error {
	g.collect(viewport, frameOps)
	return g.frame(target)
}
+
// collect gathers the frame's draw operations and lazily creates the
// profiling timers when profiling was requested and the device supports
// GPU timers.
func (g *gpu) collect(viewport image.Point, frameOps *op.Ops) {
	g.renderer.blitter.viewport = viewport
	g.renderer.pather.viewport = viewport
	g.drawOps.reset(viewport)
	g.drawOps.collect(frameOps, viewport)
	g.frameStart = time.Now()
	if g.drawOps.profile && g.timers == nil && g.ctx.Caps().Features.Has(driver.FeatureTimers) {
		g.timers = newTimers(g.ctx)
		g.stencilTimer = g.timers.newTimer()
		g.coverTimer = g.timers.newTimer()
		g.cleanupTimer = g.timers.newTimer()
	}
}

// frame renders the collected operations: stencil the clip paths and
// their intersections, draw the image ops into the target, then age the
// caches. When profiling timings are ready they are summarized into
// g.profile.
func (g *gpu) frame(target RenderTarget) error {
	viewport := g.renderer.blitter.viewport
	defFBO := g.ctx.BeginFrame(target, g.drawOps.clear, viewport)
	defer g.ctx.EndFrame()
	g.drawOps.buildPaths(g.ctx)
	for _, img := range g.drawOps.imageOps {
		expandPathOp(img.path, img.clip)
	}
	g.stencilTimer.begin()
	g.renderer.packStencils(&g.drawOps.pathOps)
	g.renderer.stencilClips(g.drawOps.pathCache, g.drawOps.pathOps)
	g.renderer.packIntersections(g.drawOps.imageOps)
	g.renderer.prepareIntersections(g.drawOps.imageOps)
	g.renderer.intersect(g.drawOps.imageOps)
	g.stencilTimer.end()
	g.coverTimer.begin()
	g.renderer.uploadImages(g.cache, g.drawOps.imageOps)
	g.renderer.prepareDrawOps(g.cache, g.drawOps.imageOps)
	d := driver.LoadDesc{
		ClearColor: g.drawOps.clearColor,
	}
	if g.drawOps.clear {
		g.drawOps.clear = false
		d.Action = driver.LoadActionClear
	}
	g.ctx.BeginRenderPass(defFBO, d)
	g.ctx.Viewport(0, 0, viewport.X, viewport.Y)
	g.renderer.drawOps(g.cache, g.drawOps.imageOps)
	g.coverTimer.end()
	g.ctx.EndRenderPass()
	g.cleanupTimer.begin()
	g.cache.frame()
	g.drawOps.pathCache.frame()
	g.cleanupTimer.end()
	if g.drawOps.profile && g.timers.ready() {
		st, covt, cleant := g.stencilTimer.Elapsed, g.coverTimer.Elapsed, g.cleanupTimer.Elapsed
		ft := st + covt + cleant
		q := 100 * time.Microsecond
		st, covt = st.Round(q), covt.Round(q)
		frameDur := time.Since(g.frameStart).Round(q)
		ft = ft.Round(q)
		g.profile = fmt.Sprintf("draw:%7s gpu:%7s st:%7s cov:%7s", frameDur, ft, st, covt)
	}
	return nil
}

// Profile returns the latest profiling summary produced by frame.
func (g *gpu) Profile() string {
	return g.profile
}

// texHandle returns the GPU texture for an image, uploading it on first
// use and caching it keyed by the image handle. It panics on driver
// texture-creation errors.
func (r *renderer) texHandle(cache *resourceCache, data imageOpData) driver.Texture {
	var tex *texture
	t, exists := cache.get(data.handle)
	if !exists {
		t = &texture{
			src: data.src,
		}
		cache.put(data.handle, t)
	}
	tex = t.(*texture)
	if tex.tex != nil {
		return tex.tex
	}
	handle, err := r.ctx.NewTexture(driver.TextureFormatSRGBA, data.src.Bounds().Dx(), data.src.Bounds().Dy(), driver.FilterLinear, driver.FilterLinear, driver.BufferBindingTexture)
	if err != nil {
		panic(err)
	}
	driver.UploadImage(handle, image.Pt(0, 0), data.src)
	tex.tex = handle
	return tex.tex
}

// release frees the GPU texture, if one was created.
func (t *texture) release() {
	if t.tex != nil {
		t.tex.Release()
	}
}
+
+func newRenderer(ctx driver.Device) *renderer {
+ r := &renderer{
+ ctx: ctx,
+ blitter: newBlitter(ctx),
+ pather: newPather(ctx),
+ }
+
+ maxDim := ctx.Caps().MaxTextureSize
+ // Large atlas textures cause artifacts due to precision loss in
+ // shaders.
+ if cap := 8192; maxDim > cap {
+ maxDim = cap
+ }
+
+ r.packer.maxDims = image.Pt(maxDim, maxDim)
+ r.intersections.maxDims = image.Pt(maxDim, maxDim)
+ return r
+}
+
// release frees the renderer's pather and blitter resources.
func (r *renderer) release() {
	r.pather.release()
	r.blitter.release()
}

// newBlitter creates the blitter with a shared unit-quad vertex buffer
// and one pipeline per material type. Driver errors are fatal during
// initialization and cause a panic.
func newBlitter(ctx driver.Device) *blitter {
	quadVerts, err := ctx.NewImmutableBuffer(driver.BufferBindingVertices,
		byteslice.Slice([]float32{
			-1, -1, 0, 0,
			+1, -1, 1, 0,
			-1, +1, 0, 1,
			+1, +1, 1, 1,
		}),
	)
	if err != nil {
		panic(err)
	}
	b := &blitter{
		ctx:       ctx,
		quadVerts: quadVerts,
	}
	b.colUniforms = new(blitColUniforms)
	b.texUniforms = new(blitTexUniforms)
	b.linearGradientUniforms = new(blitLinearGradientUniforms)
	pipelines, err := createColorPrograms(ctx, gio.Shader_blit_vert, gio.Shader_blit_frag,
		[3]interface{}{b.colUniforms, b.linearGradientUniforms, b.texUniforms},
	)
	if err != nil {
		panic(err)
	}
	b.pipelines = pipelines
	return b
}

// release frees the quad vertex buffer and the material pipelines.
func (b *blitter) release() {
	b.quadVerts.Release()
	for _, p := range b.pipelines {
		p.Release()
	}
}
+
+func createColorPrograms(b driver.Device, vsSrc shader.Sources, fsSrc [3]shader.Sources, uniforms [3]interface{}) ([3]*pipeline, error) {
+ var pipelines [3]*pipeline
+ blend := driver.BlendDesc{
+ Enable: true,
+ SrcFactor: driver.BlendFactorOne,
+ DstFactor: driver.BlendFactorOneMinusSrcAlpha,
+ }
+ layout := driver.VertexLayout{
+ Inputs: []driver.InputDesc{
+ {Type: shader.DataTypeFloat, Size: 2, Offset: 0},
+ {Type: shader.DataTypeFloat, Size: 2, Offset: 4 * 2},
+ },
+ Stride: 4 * 4,
+ }
+ vsh, err := b.NewVertexShader(vsSrc)
+ if err != nil {
+ return pipelines, err
+ }
+ defer vsh.Release()
+ {
+ fsh, err := b.NewFragmentShader(fsSrc[materialTexture])
+ if err != nil {
+ return pipelines, err
+ }
+ defer fsh.Release()
+ pipe, err := b.NewPipeline(driver.PipelineDesc{
+ VertexShader: vsh,
+ FragmentShader: fsh,
+ BlendDesc: blend,
+ VertexLayout: layout,
+ PixelFormat: driver.TextureFormatOutput,
+ Topology: driver.TopologyTriangleStrip,
+ })
+ if err != nil {
+ return pipelines, err
+ }
+ var vertBuffer *uniformBuffer
+ if u := uniforms[materialTexture]; u != nil {
+ vertBuffer = newUniformBuffer(b, u)
+ }
+ pipelines[materialTexture] = &pipeline{pipe, vertBuffer}
+ }
+ {
+ var vertBuffer *uniformBuffer
+ fsh, err := b.NewFragmentShader(fsSrc[materialColor])
+ if err != nil {
+ pipelines[materialTexture].Release()
+ return pipelines, err
+ }
+ defer fsh.Release()
+ pipe, err := b.NewPipeline(driver.PipelineDesc{
+ VertexShader: vsh,
+ FragmentShader: fsh,
+ BlendDesc: blend,
+ VertexLayout: layout,
+ PixelFormat: driver.TextureFormatOutput,
+ Topology: driver.TopologyTriangleStrip,
+ })
+ if err != nil {
+ pipelines[materialTexture].Release()
+ return pipelines, err
+ }
+ if u := uniforms[materialColor]; u != nil {
+ vertBuffer = newUniformBuffer(b, u)
+ }
+ pipelines[materialColor] = &pipeline{pipe, vertBuffer}
+ }
+ {
+ var vertBuffer *uniformBuffer
+ fsh, err := b.NewFragmentShader(fsSrc[materialLinearGradient])
+ if err != nil {
+ pipelines[materialTexture].Release()
+ pipelines[materialColor].Release()
+ return pipelines, err
+ }
+ defer fsh.Release()
+ pipe, err := b.NewPipeline(driver.PipelineDesc{
+ VertexShader: vsh,
+ FragmentShader: fsh,
+ BlendDesc: blend,
+ VertexLayout: layout,
+ PixelFormat: driver.TextureFormatOutput,
+ Topology: driver.TopologyTriangleStrip,
+ })
+ if err != nil {
+ pipelines[materialTexture].Release()
+ pipelines[materialColor].Release()
+ return pipelines, err
+ }
+ if u := uniforms[materialLinearGradient]; u != nil {
+ vertBuffer = newUniformBuffer(b, u)
+ }
+ pipelines[materialLinearGradient] = &pipeline{pipe, vertBuffer}
+ }
+ if err != nil {
+ for _, p := range pipelines {
+ p.Release()
+ }
+ return pipelines, err
+ }
+ return pipelines, nil
+}
+
// stencilClips renders every collected clip path into the cover textures,
// batching paths by their atlas texture (place.Idx) so each texture is a
// single render pass.
func (r *renderer) stencilClips(pathCache *opCache, ops []*pathOp) {
	if len(r.packer.sizes) == 0 {
		return
	}
	fbo := -1
	r.pather.begin(r.packer.sizes)
	for _, p := range ops {
		if fbo != p.place.Idx {
			if fbo != -1 {
				r.ctx.EndRenderPass()
			}
			fbo = p.place.Idx
			f := r.pather.stenciler.cover(fbo)
			r.ctx.BeginRenderPass(f.tex, driver.LoadDesc{Action: driver.LoadActionClear})
			r.ctx.BindPipeline(r.pather.stenciler.pipeline.pipeline.pipeline)
			r.ctx.BindIndexBuffer(r.pather.stenciler.indexBuf)
		}
		v, _ := pathCache.get(p.pathKey)
		r.pather.stencilPath(p.clip, p.off, p.place.Pos, v.data)
	}
	if fbo != -1 {
		r.ctx.EndRenderPass()
	}
}

// prepareIntersections readies the cover textures referenced by
// intersection clips for sampling.
func (r *renderer) prepareIntersections(ops []imageOp) {
	for _, img := range ops {
		if img.clipType != clipTypeIntersection {
			continue
		}
		fbo := r.pather.stenciler.cover(img.path.place.Idx)
		r.ctx.PrepareTexture(fbo.tex)
	}
}

// intersect renders the intersection textures for image ops whose clip
// stack contains more than one path, batching by atlas texture.
func (r *renderer) intersect(ops []imageOp) {
	if len(r.intersections.sizes) == 0 {
		return
	}
	fbo := -1
	r.pather.stenciler.beginIntersect(r.intersections.sizes)
	for _, img := range ops {
		if img.clipType != clipTypeIntersection {
			continue
		}
		if fbo != img.place.Idx {
			if fbo != -1 {
				r.ctx.EndRenderPass()
			}
			fbo = img.place.Idx
			f := r.pather.stenciler.intersections.fbos[fbo]
			d := driver.LoadDesc{Action: driver.LoadActionClear}
			// Start from full coverage; each path's coverage multiplies in.
			d.ClearColor.R = 1.0
			r.ctx.BeginRenderPass(f.tex, d)
			r.ctx.BindPipeline(r.pather.stenciler.ipipeline.pipeline.pipeline)
			r.ctx.BindVertexBuffer(r.blitter.quadVerts, 0)
		}
		r.ctx.Viewport(img.place.Pos.X, img.place.Pos.Y, img.clip.Dx(), img.clip.Dy())
		r.intersectPath(img.path, img.clip)
	}
	if fbo != -1 {
		r.ctx.EndRenderPass()
	}
}

// intersectPath draws the coverage of p and, recursively, its ancestors
// into the current intersection target, restricted to clip.
func (r *renderer) intersectPath(p *pathOp, clip image.Rectangle) {
	if p.parent != nil {
		r.intersectPath(p.parent, clip)
	}
	if !p.path {
		return
	}
	uv := image.Rectangle{
		Min: p.place.Pos,
		Max: p.place.Pos.Add(p.clip.Size()),
	}
	o := clip.Min.Sub(p.clip.Min)
	sub := image.Rectangle{
		Min: o,
		Max: o.Add(clip.Size()),
	}
	fbo := r.pather.stenciler.cover(p.place.Idx)
	r.ctx.BindTexture(0, fbo.tex)
	coverScale, coverOff := texSpaceTransform(layout.FRect(uv), fbo.size)
	subScale, subOff := texSpaceTransform(layout.FRect(sub), p.clip.Size())
	r.pather.stenciler.ipipeline.uniforms.vert.uvTransform = [4]float32{coverScale.X, coverScale.Y, coverOff.X, coverOff.Y}
	r.pather.stenciler.ipipeline.uniforms.vert.subUVTransform = [4]float32{subScale.X, subScale.Y, subOff.X, subOff.Y}
	r.pather.stenciler.ipipeline.pipeline.UploadUniforms(r.ctx)
	r.ctx.DrawArrays(0, 4)
}

// packIntersections classifies each image op by the number of clip paths
// above it: none (draw unclipped), exactly one (sample that path's cover
// texture directly), or several (allocate intersection atlas space).
func (r *renderer) packIntersections(ops []imageOp) {
	r.intersections.clear()
	for i, img := range ops {
		var npaths int
		var onePath *pathOp
		for p := img.path; p != nil; p = p.parent {
			if p.path {
				onePath = p
				npaths++
			}
		}
		switch npaths {
		case 0:
		case 1:
			place := onePath.place
			place.Pos = place.Pos.Sub(onePath.clip.Min).Add(img.clip.Min)
			ops[i].place = place
			ops[i].clipType = clipTypePath
		default:
			sz := image.Point{X: img.clip.Dx(), Y: img.clip.Dy()}
			place, ok := r.intersections.add(sz)
			if !ok {
				panic("internal error: if the intersection fit, the intersection should fit as well")
			}
			ops[i].clipType = clipTypeIntersection
			ops[i].place = place
		}
	}
}

// packStencils allocates cover atlas space for every clip path, removing
// paths with an empty clip. Removal swaps in the last element, so the
// relative order of ops may change.
func (r *renderer) packStencils(pops *[]*pathOp) {
	r.packer.clear()
	ops := *pops
	// Allocate atlas space for cover textures.
	var i int
	for i < len(ops) {
		p := ops[i]
		if p.clip.Empty() {
			ops[i] = ops[len(ops)-1]
			ops = ops[:len(ops)-1]
			continue
		}
		sz := image.Point{X: p.clip.Dx(), Y: p.clip.Dy()}
		place, ok := r.packer.add(sz)
		if !ok {
			// The clip area is at most the entire screen. Hopefully no
			// screen is larger than GL_MAX_TEXTURE_SIZE.
			panic(fmt.Errorf("clip area %v is larger than maximum texture size %v", p.clip, r.packer.maxDims))
		}
		p.place = place
		i++
	}
	*pops = ops
}
+
+// boundRectF returns a bounding image.Rectangle for a f32.Rectangle.
+func boundRectF(r f32.Rectangle) image.Rectangle {
+ return image.Rectangle{
+ Min: image.Point{
+ X: int(floor(r.Min.X)),
+ Y: int(floor(r.Min.Y)),
+ },
+ Max: image.Point{
+ X: int(ceil(r.Max.X)),
+ Y: int(ceil(r.Max.Y)),
+ },
+ }
+}
+
// ceil rounds v up to the nearest integer.
func ceil(v float32) int {
	rounded := math.Ceil(float64(v))
	return int(rounded)
}

// floor rounds v down to the nearest integer.
func floor(v float32) int {
	rounded := math.Floor(float64(v))
	return int(rounded)
}
+
// reset prepares d for collecting a new frame into viewport, reusing the
// previously allocated slices.
func (d *drawOps) reset(viewport image.Point) {
	d.profile = false
	d.viewport = viewport
	d.imageOps = d.imageOps[:0]
	d.pathOps = d.pathOps[:0]
	d.pathOpCache = d.pathOpCache[:0]
	d.vertCache = d.vertCache[:0]
	d.transStack = d.transStack[:0]
}

// collect walks the op list rooted at root, clipped to viewport. A nil
// root collects nothing.
func (d *drawOps) collect(root *op.Ops, viewport image.Point) {
	viewf := f32.Rectangle{
		Max: f32.Point{X: float32(viewport.X), Y: float32(viewport.Y)},
	}
	var ops *ops.Ops
	if root != nil {
		ops = &root.Internal
	}
	d.reader.Reset(ops)
	d.collectOps(&d.reader, viewf)
}

// buildPaths uploads vertex data for collected clip paths missing from
// the path cache, then drops the CPU-side vertex data.
func (d *drawOps) buildPaths(ctx driver.Device) {
	for _, p := range d.pathOps {
		if v, exists := d.pathCache.get(p.pathKey); !exists || v.data.data == nil {
			data := buildPath(ctx, p.pathVerts)
			d.pathCache.put(p.pathKey, opCacheValue{
				data:   data,
				bounds: p.bounds,
			})
		}
		p.pathVerts = nil
	}
}

// newPathOp allocates a pathOp from the per-frame cache.
func (d *drawOps) newPathOp() *pathOp {
	d.pathOpCache = append(d.pathOpCache, pathOp{})
	return &d.pathOpCache[len(d.pathOpCache)-1]
}

// addClipPath pushes a new clip onto state's clip stack. aux holds the
// encoded path vertices; when aux is empty the clip is a pixel-aligned
// rectangle. NOTE(review): the push parameter is not used in this body —
// confirm whether it is vestigial.
func (d *drawOps) addClipPath(state *drawState, aux []byte, auxKey opKey, bounds f32.Rectangle, off f32.Point, push bool) {
	npath := d.newPathOp()
	*npath = pathOp{
		parent:    state.cpath,
		bounds:    bounds,
		off:       off,
		intersect: bounds.Add(off),
		rect:      true,
	}
	if npath.parent != nil {
		npath.rect = npath.parent.rect
		npath.intersect = npath.parent.intersect.Intersect(npath.intersect)
	}
	if len(aux) > 0 {
		npath.rect = false
		npath.pathKey = auxKey
		npath.path = true
		npath.pathVerts = aux
		d.pathOps = append(d.pathOps, npath)
	}
	state.cpath = npath
}
+
+// split a transform into two parts, one which is pure offset and the
+// other representing the scaling, shearing and rotation part
+func splitTransform(t f32.Affine2D) (srs f32.Affine2D, offset f32.Point) {
+ sx, hx, ox, hy, sy, oy := t.Elems()
+ offset = f32.Point{X: ox, Y: oy}
+ srs = f32.NewAffine2D(sx, hx, 0, hy, sy, 0)
+ return
+}
+
+func (d *drawOps) save(id int, state f32.Affine2D) {
+ if extra := id - len(d.states) + 1; extra > 0 {
+ d.states = append(d.states, make([]f32.Affine2D, extra)...)
+ }
+ d.states[id] = state
+}
+
+func (k opKey) SetTransform(t f32.Affine2D) opKey {
+ sx, hx, _, hy, sy, _ := t.Elems()
+ k.sx = sx
+ k.hx = hx
+ k.hy = hy
+ k.sy = sy
+ return k
+}
+
// collectOps walks the encoded op stream, maintaining the transform, clip
// and material state, and records the resulting clip paths (d.pathOps)
// and paint operations (d.imageOps), clipped to viewport.
func (d *drawOps) collectOps(r *ops.Reader, viewport f32.Rectangle) {
	var (
		quads quadsOp
		state drawState
	)
	reset := func() {
		state = drawState{
			color: color.NRGBA{A: 0xff},
		}
	}
	reset()
loop:
	for encOp, ok := r.Decode(); ok; encOp, ok = r.Decode() {
		switch ops.OpType(encOp.Data[0]) {
		case ops.TypeProfile:
			d.profile = true
		case ops.TypeTransform:
			dop, push := ops.DecodeTransform(encOp.Data)
			if push {
				d.transStack = append(d.transStack, state.t)
			}
			state.t = state.t.Mul(dop)
		case ops.TypePopTransform:
			n := len(d.transStack)
			state.t = d.transStack[n-1]
			d.transStack = d.transStack[:n-1]

		case ops.TypeStroke:
			quads.key.strokeWidth = decodeStrokeOp(encOp.Data)

		case ops.TypePath:
			// A path op is immediately followed by its vertex data.
			encOp, ok = r.Decode()
			if !ok {
				break loop
			}
			quads.aux = encOp.Data[ops.TypeAuxLen:]
			quads.key.Key = encOp.Key

		case ops.TypeClip:
			var op ops.ClipOp
			op.Decode(encOp.Data)
			quads.key.outline = op.Outline
			bounds := layout.FRect(op.Bounds)
			trans, off := splitTransform(state.t)
			if len(quads.aux) > 0 {
				// There is a clipping path, build the gpu data and update the
				// cache key such that it will be equal only if the transform is the
				// same also. Use cached data if we have it.
				quads.key = quads.key.SetTransform(trans)
				if v, ok := d.pathCache.get(quads.key); ok {
					// Since the GPU data exists in the cache aux will not be used.
					// Why is this not used for the offset shapes?
					bounds = v.bounds
				} else {
					var pathData []byte
					pathData, bounds = d.buildVerts(
						quads.aux, trans, quads.key.outline, quads.key.strokeWidth,
					)
					quads.aux = pathData
					// add it to the cache, without GPU data, so the transform can be
					// reused.
					d.pathCache.put(quads.key, opCacheValue{bounds: bounds})
				}
			} else {
				quads.aux, bounds, _ = d.boundsForTransformedRect(bounds, trans)
				quads.key = opKey{Key: encOp.Key}
			}
			d.addClipPath(&state, quads.aux, quads.key, bounds, off, true)
			quads = quadsOp{}
		case ops.TypePopClip:
			state.cpath = state.cpath.parent

		case ops.TypeColor:
			state.matType = materialColor
			state.color = decodeColorOp(encOp.Data)
		case ops.TypeLinearGradient:
			state.matType = materialLinearGradient
			op := decodeLinearGradientOp(encOp.Data)
			state.stop1 = op.stop1
			state.stop2 = op.stop2
			state.color1 = op.color1
			state.color2 = op.color2
		case ops.TypeImage:
			state.matType = materialTexture
			state.image = decodeImageOp(encOp.Data, encOp.Refs)
		case ops.TypePaint:
			// Transform (if needed) the painting rectangle and if so generate a clip path,
			// for those cases also compute a partialTrans that maps texture coordinates between
			// the new bounding rectangle and the transformed original paint rectangle.
			t, off := splitTransform(state.t)
			// Fill the clip area, unless the material is a (bounded) image.
			// TODO: Find a tighter bound.
			inf := float32(1e6)
			dst := f32.Rect(-inf, -inf, inf, inf)
			if state.matType == materialTexture {
				sz := state.image.src.Rect.Size()
				dst = f32.Rectangle{Max: layout.FPt(sz)}
			}
			clipData, bnd, partialTrans := d.boundsForTransformedRect(dst, t)
			cl := viewport.Intersect(bnd.Add(off))
			if state.cpath != nil {
				cl = state.cpath.intersect.Intersect(cl)
			}
			if cl.Empty() {
				continue
			}

			if clipData != nil {
				// The paint operation is sheared or rotated, add a clip path representing
				// this transformed rectangle.
				k := opKey{Key: encOp.Key}
				k.SetTransform(t) // TODO: This call has no effect.
				d.addClipPath(&state, clipData, k, bnd, off, false)
			}

			bounds := boundRectF(cl)
			mat := state.materialFor(bnd, off, partialTrans, bounds)

			rect := state.cpath == nil || state.cpath.rect
			if bounds.Min == (image.Point{}) && bounds.Max == d.viewport && rect && mat.opaque && (mat.material == materialColor) {
				// The image is a uniform opaque color and takes up the whole screen.
				// Scrap images up to and including this image and set clear color.
				d.imageOps = d.imageOps[:0]
				d.clearColor = mat.color.Opaque()
				d.clear = true
				continue
			}
			img := imageOp{
				path:     state.cpath,
				clip:     bounds,
				material: mat,
			}

			d.imageOps = append(d.imageOps, img)
			if clipData != nil {
				// we added a clip path that should not remain
				state.cpath = state.cpath.parent
			}
		case ops.TypeSave:
			id := ops.DecodeSave(encOp.Data)
			d.save(id, state.t)
		case ops.TypeLoad:
			reset()
			id := ops.DecodeLoad(encOp.Data)
			state.t = d.states[id]
		}
	}
}
+
+func expandPathOp(p *pathOp, clip image.Rectangle) {
+ for p != nil {
+ pclip := p.clip
+ if !pclip.Empty() {
+ clip = clip.Union(pclip)
+ }
+ p.clip = clip
+ p = p.parent
+ }
+}
+
// materialFor derives the shading material from the current paint state:
// a linear-space color, a gradient, or a texture together with the UV
// transform that maps the clipped draw rectangle back onto the source.
func (d *drawState) materialFor(rect f32.Rectangle, off f32.Point, partTrans f32.Affine2D, clip image.Rectangle) material {
	var m material
	switch d.matType {
	case materialColor:
		m.material = materialColor
		m.color = f32color.LinearFromSRGB(d.color)
		m.opaque = m.color.A == 1.0
	case materialLinearGradient:
		m.material = materialLinearGradient

		m.color1 = f32color.LinearFromSRGB(d.color1)
		m.color2 = f32color.LinearFromSRGB(d.color2)
		m.opaque = m.color1.A == 1.0 && m.color2.A == 1.0

		m.uvTrans = partTrans.Mul(gradientSpaceTransform(clip, off, d.stop1, d.stop2))
	case materialTexture:
		m.material = materialTexture
		dr := boundRectF(rect.Add(off))
		sz := d.image.src.Bounds().Size()
		sr := f32.Rectangle{
			Max: f32.Point{
				X: float32(sz.X),
				Y: float32(sz.Y),
			},
		}
		// Shrink the source rectangle proportionally to how much the draw
		// rectangle was clipped on each side.
		dx := float32(dr.Dx())
		sdx := sr.Dx()
		sr.Min.X += float32(clip.Min.X-dr.Min.X) * sdx / dx
		sr.Max.X -= float32(dr.Max.X-clip.Max.X) * sdx / dx
		dy := float32(dr.Dy())
		sdy := sr.Dy()
		sr.Min.Y += float32(clip.Min.Y-dr.Min.Y) * sdy / dy
		sr.Max.Y -= float32(dr.Max.Y-clip.Max.Y) * sdy / dy
		uvScale, uvOffset := texSpaceTransform(sr, sz)
		m.uvTrans = partTrans.Mul(f32.Affine2D{}.Scale(f32.Point{}, uvScale).Offset(uvOffset))
		m.data = d.image
	}
	return m
}
+
+func (r *renderer) uploadImages(cache *resourceCache, ops []imageOp) {
+ for _, img := range ops {
+ m := img.material
+ if m.material == materialTexture {
+ r.texHandle(cache, m.data)
+ }
+ }
+}
+
// prepareDrawOps readies every texture that drawOps will sample: the
// material textures and the cover or intersection textures of clipped
// ops.
func (r *renderer) prepareDrawOps(cache *resourceCache, ops []imageOp) {
	for _, img := range ops {
		m := img.material
		switch m.material {
		case materialTexture:
			r.ctx.PrepareTexture(r.texHandle(cache, m.data))
		}

		var fbo stencilFBO
		switch img.clipType {
		case clipTypeNone:
			continue
		case clipTypePath:
			fbo = r.pather.stenciler.cover(img.place.Idx)
		case clipTypeIntersection:
			fbo = r.pather.stenciler.intersections.fbos[img.place.Idx]
		}
		r.ctx.PrepareTexture(fbo.tex)
	}
}
+
// drawOps draws the image ops: unclipped ops are blitted directly, while
// clipped ops are covered using their stencil or intersection texture.
// The cover texture binding is cached across iterations to avoid
// redundant binds.
func (r *renderer) drawOps(cache *resourceCache, ops []imageOp) {
	var coverTex driver.Texture
	for _, img := range ops {
		m := img.material
		switch m.material {
		case materialTexture:
			r.ctx.BindTexture(0, r.texHandle(cache, m.data))
		}
		drc := img.clip

		scale, off := clipSpaceTransform(drc, r.blitter.viewport)
		var fbo stencilFBO
		switch img.clipType {
		case clipTypeNone:
			p := r.blitter.pipelines[m.material]
			r.ctx.BindPipeline(p.pipeline)
			r.ctx.BindVertexBuffer(r.blitter.quadVerts, 0)
			r.blitter.blit(m.material, m.color, m.color1, m.color2, scale, off, m.uvTrans)
			continue
		case clipTypePath:
			fbo = r.pather.stenciler.cover(img.place.Idx)
		case clipTypeIntersection:
			fbo = r.pather.stenciler.intersections.fbos[img.place.Idx]
		}
		if coverTex != fbo.tex {
			coverTex = fbo.tex
			r.ctx.BindTexture(1, coverTex)
		}
		uv := image.Rectangle{
			Min: img.place.Pos,
			Max: img.place.Pos.Add(drc.Size()),
		}
		coverScale, coverOff := texSpaceTransform(layout.FRect(uv), fbo.size)
		p := r.pather.coverer.pipelines[m.material]
		r.ctx.BindPipeline(p.pipeline)
		r.ctx.BindVertexBuffer(r.blitter.quadVerts, 0)
		r.pather.cover(m.material, m.color, m.color1, m.color2, scale, off, m.uvTrans, coverScale, coverOff)
	}
}

// blit draws one full quad with the given material's pipeline, filling in
// the material-specific uniforms before uploading them.
func (b *blitter) blit(mat materialType, col f32color.RGBA, col1, col2 f32color.RGBA, scale, off f32.Point, uvTrans f32.Affine2D) {
	p := b.pipelines[mat]
	b.ctx.BindPipeline(p.pipeline)
	var uniforms *blitUniforms
	switch mat {
	case materialColor:
		b.colUniforms.color = col
		uniforms = &b.colUniforms.blitUniforms
	case materialTexture:
		t1, t2, t3, t4, t5, t6 := uvTrans.Elems()
		b.texUniforms.blitUniforms.uvTransformR1 = [4]float32{t1, t2, t3, 0}
		b.texUniforms.blitUniforms.uvTransformR2 = [4]float32{t4, t5, t6, 0}
		uniforms = &b.texUniforms.blitUniforms
	case materialLinearGradient:
		b.linearGradientUniforms.color1 = col1
		b.linearGradientUniforms.color2 = col2

		t1, t2, t3, t4, t5, t6 := uvTrans.Elems()
		b.linearGradientUniforms.blitUniforms.uvTransformR1 = [4]float32{t1, t2, t3, 0}
		b.linearGradientUniforms.blitUniforms.uvTransformR2 = [4]float32{t4, t5, t6, 0}
		uniforms = &b.linearGradientUniforms.blitUniforms
	}
	uniforms.transform = [4]float32{scale.X, scale.Y, off.X, off.Y}
	p.UploadUniforms(b.ctx)
	b.ctx.DrawArrays(0, 4)
}
+
+// newUniformBuffer creates a new GPU uniform buffer backed by the
+// structure uniformBlock points to.
+func newUniformBuffer(b driver.Device, uniformBlock interface{}) *uniformBuffer {
+ ref := reflect.ValueOf(uniformBlock)
+	// Determine the size of the uniforms structure, *uniformBlock.
+ size := ref.Elem().Type().Size()
+ // Map the uniforms structure as a byte slice.
+ ptr := (*[1 << 30]byte)(unsafe.Pointer(ref.Pointer()))[:size:size]
+ ubuf, err := b.NewBuffer(driver.BufferBindingUniforms, len(ptr))
+ if err != nil {
+ panic(err)
+ }
+ return &uniformBuffer{buf: ubuf, ptr: ptr}
+}
+
+func (u *uniformBuffer) Upload() {
+ u.buf.Upload(u.ptr)
+}
+
+func (u *uniformBuffer) Release() {
+ u.buf.Release()
+ u.buf = nil
+}
+
+func (p *pipeline) UploadUniforms(ctx driver.Device) {
+ if p.uniforms != nil {
+ p.uniforms.Upload()
+ ctx.BindUniforms(p.uniforms.buf)
+ }
+}
+
+func (p *pipeline) Release() {
+ p.pipeline.Release()
+ if p.uniforms != nil {
+ p.uniforms.Release()
+ }
+ *p = pipeline{}
+}
+
+// texSpaceTransform return the scale and offset that transforms the given subimage
+// into quad texture coordinates.
+func texSpaceTransform(r f32.Rectangle, bounds image.Point) (f32.Point, f32.Point) {
+ size := f32.Point{X: float32(bounds.X), Y: float32(bounds.Y)}
+ scale := f32.Point{X: r.Dx() / size.X, Y: r.Dy() / size.Y}
+ offset := f32.Point{X: r.Min.X / size.X, Y: r.Min.Y / size.Y}
+ return scale, offset
+}
+
+// gradientSpaceTransform transforms stop1 and stop2 to [(0,0), (1,1)].
+func gradientSpaceTransform(clip image.Rectangle, off f32.Point, stop1, stop2 f32.Point) f32.Affine2D {
+ d := stop2.Sub(stop1)
+ l := float32(math.Sqrt(float64(d.X*d.X + d.Y*d.Y)))
+ a := float32(math.Atan2(float64(-d.Y), float64(d.X)))
+
+ // TODO: optimize
+ zp := f32.Point{}
+ return f32.Affine2D{}.
+ Scale(zp, layout.FPt(clip.Size())). // scale to pixel space
+ Offset(zp.Sub(off).Add(layout.FPt(clip.Min))). // offset to clip space
+ Offset(zp.Sub(stop1)). // offset to first stop point
+ Rotate(zp, a). // rotate to align gradient
+ Scale(zp, f32.Pt(1/l, 1/l)) // scale gradient to right size
+}
+
+// clipSpaceTransform returns the scale and offset that transforms the given
+// rectangle from a viewport into GPU driver device coordinates.
+func clipSpaceTransform(r image.Rectangle, viewport image.Point) (f32.Point, f32.Point) {
+ // First, transform UI coordinates to device coordinates:
+ //
+ // [(-1, -1) (+1, -1)]
+ // [(-1, +1) (+1, +1)]
+ //
+ x, y := float32(r.Min.X), float32(r.Min.Y)
+ w, h := float32(r.Dx()), float32(r.Dy())
+ vx, vy := 2/float32(viewport.X), 2/float32(viewport.Y)
+ x = x*vx - 1
+ y = y*vy - 1
+ w *= vx
+ h *= vy
+
+ // Then, compute the transformation from the fullscreen quad to
+ // the rectangle at (x, y) and dimensions (w, h).
+ scale := f32.Point{X: w * .5, Y: h * .5}
+ offset := f32.Point{X: x + w*.5, Y: y + h*.5}
+
+ return scale, offset
+}
+
+// Fill in maximal Y coordinates of the NW and NE corners.
+func fillMaxY(verts []byte) {
+ contour := 0
+ bo := binary.LittleEndian
+ for len(verts) > 0 {
+ maxy := float32(math.Inf(-1))
+ i := 0
+ for ; i+vertStride*4 <= len(verts); i += vertStride * 4 {
+ vert := verts[i : i+vertStride]
+ // MaxY contains the integer contour index.
+ pathContour := int(bo.Uint32(vert[int(unsafe.Offsetof(((*vertex)(nil)).MaxY)):]))
+ if contour != pathContour {
+ contour = pathContour
+ break
+ }
+ fromy := math.Float32frombits(bo.Uint32(vert[int(unsafe.Offsetof(((*vertex)(nil)).FromY)):]))
+ ctrly := math.Float32frombits(bo.Uint32(vert[int(unsafe.Offsetof(((*vertex)(nil)).CtrlY)):]))
+ toy := math.Float32frombits(bo.Uint32(vert[int(unsafe.Offsetof(((*vertex)(nil)).ToY)):]))
+ if fromy > maxy {
+ maxy = fromy
+ }
+ if ctrly > maxy {
+ maxy = ctrly
+ }
+ if toy > maxy {
+ maxy = toy
+ }
+ }
+ fillContourMaxY(maxy, verts[:i])
+ verts = verts[i:]
+ }
+}
+
+func fillContourMaxY(maxy float32, verts []byte) {
+ bo := binary.LittleEndian
+ for i := 0; i < len(verts); i += vertStride {
+ off := int(unsafe.Offsetof(((*vertex)(nil)).MaxY))
+ bo.PutUint32(verts[i+off:], math.Float32bits(maxy))
+ }
+}
+
+func (d *drawOps) writeVertCache(n int) []byte {
+ d.vertCache = append(d.vertCache, make([]byte, n)...)
+ return d.vertCache[len(d.vertCache)-n:]
+}
+
+// transform, split paths as needed, calculate maxY, bounds and create GPU vertices.
+func (d *drawOps) buildVerts(pathData []byte, tr f32.Affine2D, outline bool, strWidth float32) (verts []byte, bounds f32.Rectangle) {
+ inf := float32(math.Inf(+1))
+ d.qs.bounds = f32.Rectangle{
+ Min: f32.Point{X: inf, Y: inf},
+ Max: f32.Point{X: -inf, Y: -inf},
+ }
+ d.qs.d = d
+ startLength := len(d.vertCache)
+
+ switch {
+ case strWidth > 0:
+ // Stroke path.
+ ss := stroke.StrokeStyle{
+ Width: strWidth,
+ }
+ quads := stroke.StrokePathCommands(ss, pathData)
+ for _, quad := range quads {
+ d.qs.contour = quad.Contour
+ quad.Quad = quad.Quad.Transform(tr)
+
+ d.qs.splitAndEncode(quad.Quad)
+ }
+
+ case outline:
+ decodeToOutlineQuads(&d.qs, tr, pathData)
+ }
+
+ fillMaxY(d.vertCache[startLength:])
+ return d.vertCache[startLength:], d.qs.bounds
+}
+
+// decodeToOutlineQuads decodes scene commands, splits them into quadratic béziers
+// as needed and feeds them to the supplied splitter.
+func decodeToOutlineQuads(qs *quadSplitter, tr f32.Affine2D, pathData []byte) {
+ for len(pathData) >= scene.CommandSize+4 {
+ qs.contour = bo.Uint32(pathData)
+ cmd := ops.DecodeCommand(pathData[4:])
+ switch cmd.Op() {
+ case scene.OpLine:
+ var q stroke.QuadSegment
+ q.From, q.To = scene.DecodeLine(cmd)
+ q.Ctrl = q.From.Add(q.To).Mul(.5)
+ q = q.Transform(tr)
+ qs.splitAndEncode(q)
+ case scene.OpQuad:
+ var q stroke.QuadSegment
+ q.From, q.Ctrl, q.To = scene.DecodeQuad(cmd)
+ q = q.Transform(tr)
+ qs.splitAndEncode(q)
+ case scene.OpCubic:
+ for _, q := range stroke.SplitCubic(scene.DecodeCubic(cmd)) {
+ q = q.Transform(tr)
+ qs.splitAndEncode(q)
+ }
+ default:
+ panic("unsupported scene command")
+ }
+ pathData = pathData[scene.CommandSize+4:]
+ }
+}
+
+// create GPU vertices for transformed r, find the bounds and establish texture transform.
+func (d *drawOps) boundsForTransformedRect(r f32.Rectangle, tr f32.Affine2D) (aux []byte, bnd f32.Rectangle, ptr f32.Affine2D) {
+ if isPureOffset(tr) {
+ // fast-path to allow blitting of pure rectangles
+ _, _, ox, _, _, oy := tr.Elems()
+ off := f32.Pt(ox, oy)
+ bnd.Min = r.Min.Add(off)
+ bnd.Max = r.Max.Add(off)
+ return
+ }
+
+ // transform all corners, find new bounds
+ corners := [4]f32.Point{
+ tr.Transform(r.Min), tr.Transform(f32.Pt(r.Max.X, r.Min.Y)),
+ tr.Transform(r.Max), tr.Transform(f32.Pt(r.Min.X, r.Max.Y)),
+ }
+ bnd.Min = f32.Pt(math.MaxFloat32, math.MaxFloat32)
+ bnd.Max = f32.Pt(-math.MaxFloat32, -math.MaxFloat32)
+ for _, c := range corners {
+ if c.X < bnd.Min.X {
+ bnd.Min.X = c.X
+ }
+ if c.Y < bnd.Min.Y {
+ bnd.Min.Y = c.Y
+ }
+ if c.X > bnd.Max.X {
+ bnd.Max.X = c.X
+ }
+ if c.Y > bnd.Max.Y {
+ bnd.Max.Y = c.Y
+ }
+ }
+
+ // build the GPU vertices
+ l := len(d.vertCache)
+ d.vertCache = append(d.vertCache, make([]byte, vertStride*4*4)...)
+ aux = d.vertCache[l:]
+ encodeQuadTo(aux, 0, corners[0], corners[0].Add(corners[1]).Mul(0.5), corners[1])
+ encodeQuadTo(aux[vertStride*4:], 0, corners[1], corners[1].Add(corners[2]).Mul(0.5), corners[2])
+ encodeQuadTo(aux[vertStride*4*2:], 0, corners[2], corners[2].Add(corners[3]).Mul(0.5), corners[3])
+ encodeQuadTo(aux[vertStride*4*3:], 0, corners[3], corners[3].Add(corners[0]).Mul(0.5), corners[0])
+ fillMaxY(aux)
+
+ // establish the transform mapping from bounds rectangle to transformed corners
+ var P1, P2, P3 f32.Point
+ P1.X = (corners[1].X - bnd.Min.X) / (bnd.Max.X - bnd.Min.X)
+ P1.Y = (corners[1].Y - bnd.Min.Y) / (bnd.Max.Y - bnd.Min.Y)
+ P2.X = (corners[2].X - bnd.Min.X) / (bnd.Max.X - bnd.Min.X)
+ P2.Y = (corners[2].Y - bnd.Min.Y) / (bnd.Max.Y - bnd.Min.Y)
+ P3.X = (corners[3].X - bnd.Min.X) / (bnd.Max.X - bnd.Min.X)
+ P3.Y = (corners[3].Y - bnd.Min.Y) / (bnd.Max.Y - bnd.Min.Y)
+ sx, sy := P2.X-P3.X, P2.Y-P3.Y
+ ptr = f32.NewAffine2D(sx, P2.X-P1.X, P1.X-sx, sy, P2.Y-P1.Y, P1.Y-sy).Invert()
+
+ return
+}
+
+func isPureOffset(t f32.Affine2D) bool {
+ a, b, _, d, e, _ := t.Elems()
+ return a == 1 && b == 0 && d == 0 && e == 1
+}
diff --git a/vendor/gioui.org/gpu/internal/d3d11/d3d11.go b/vendor/gioui.org/gpu/internal/d3d11/d3d11.go
new file mode 100644
index 0000000..3ddf7c3
--- /dev/null
+++ b/vendor/gioui.org/gpu/internal/d3d11/d3d11.go
@@ -0,0 +1,5 @@
+// SPDX-License-Identifier: Unlicense OR MIT
+
+// This file exists so this package builds on non-Windows platforms.
+
+package d3d11
diff --git a/vendor/gioui.org/gpu/internal/d3d11/d3d11_windows.go b/vendor/gioui.org/gpu/internal/d3d11/d3d11_windows.go
new file mode 100644
index 0000000..08698c3
--- /dev/null
+++ b/vendor/gioui.org/gpu/internal/d3d11/d3d11_windows.go
@@ -0,0 +1,859 @@
+// SPDX-License-Identifier: Unlicense OR MIT
+
+package d3d11
+
+import (
+ "errors"
+ "fmt"
+ "image"
+ "math"
+ "reflect"
+ "unsafe"
+
+ "golang.org/x/sys/windows"
+
+ "gioui.org/gpu/internal/driver"
+ "gioui.org/internal/d3d11"
+ "gioui.org/shader"
+)
+
+type Backend struct {
+ dev *d3d11.Device
+ ctx *d3d11.DeviceContext
+
+ // Temporary storage to avoid garbage.
+ clearColor [4]float32
+ viewport d3d11.VIEWPORT
+
+ pipeline *Pipeline
+ vert struct {
+ buffer *Buffer
+ offset int
+ }
+
+ program *Program
+
+ caps driver.Caps
+
+ floatFormat uint32
+}
+
+type Pipeline struct {
+ vert *d3d11.VertexShader
+ frag *d3d11.PixelShader
+ layout *d3d11.InputLayout
+ blend *d3d11.BlendState
+ stride int
+ topology driver.Topology
+}
+
+type Texture struct {
+ backend *Backend
+ format uint32
+ bindings driver.BufferBinding
+ tex *d3d11.Texture2D
+ sampler *d3d11.SamplerState
+ resView *d3d11.ShaderResourceView
+ uaView *d3d11.UnorderedAccessView
+ renderTarget *d3d11.RenderTargetView
+
+ width int
+ height int
+ foreign bool
+}
+
+type VertexShader struct {
+ backend *Backend
+ shader *d3d11.VertexShader
+ src shader.Sources
+}
+
+type FragmentShader struct {
+ backend *Backend
+ shader *d3d11.PixelShader
+}
+
+type Program struct {
+ backend *Backend
+ shader *d3d11.ComputeShader
+}
+
+type Buffer struct {
+ backend *Backend
+ bind uint32
+ buf *d3d11.Buffer
+ resView *d3d11.ShaderResourceView
+ uaView *d3d11.UnorderedAccessView
+ size int
+ immutable bool
+}
+
+func init() {
+ driver.NewDirect3D11Device = newDirect3D11Device
+}
+
+func detectFloatFormat(dev *d3d11.Device) (uint32, bool) {
+ formats := []uint32{
+ d3d11.DXGI_FORMAT_R16_FLOAT,
+ d3d11.DXGI_FORMAT_R32_FLOAT,
+ d3d11.DXGI_FORMAT_R16G16_FLOAT,
+ d3d11.DXGI_FORMAT_R32G32_FLOAT,
+ // These last two are really wasteful, but c'est la vie.
+ d3d11.DXGI_FORMAT_R16G16B16A16_FLOAT,
+ d3d11.DXGI_FORMAT_R32G32B32A32_FLOAT,
+ }
+ for _, format := range formats {
+ need := uint32(d3d11.FORMAT_SUPPORT_TEXTURE2D | d3d11.FORMAT_SUPPORT_RENDER_TARGET)
+ if support, _ := dev.CheckFormatSupport(format); support&need == need {
+ return format, true
+ }
+ }
+ return 0, false
+}
+
+func newDirect3D11Device(api driver.Direct3D11) (driver.Device, error) {
+ dev := (*d3d11.Device)(api.Device)
+ b := &Backend{
+ dev: dev,
+ ctx: dev.GetImmediateContext(),
+ caps: driver.Caps{
+ MaxTextureSize: 2048, // 9.1 maximum
+ Features: driver.FeatureSRGB,
+ },
+ }
+ featLvl := dev.GetFeatureLevel()
+ switch {
+ case featLvl < d3d11.FEATURE_LEVEL_9_1:
+ d3d11.IUnknownRelease(unsafe.Pointer(dev), dev.Vtbl.Release)
+ d3d11.IUnknownRelease(unsafe.Pointer(b.ctx), b.ctx.Vtbl.Release)
+ return nil, fmt.Errorf("d3d11: feature level too low: %d", featLvl)
+ case featLvl >= d3d11.FEATURE_LEVEL_11_0:
+ b.caps.MaxTextureSize = 16384
+ b.caps.Features |= driver.FeatureCompute
+ case featLvl >= d3d11.FEATURE_LEVEL_9_3:
+ b.caps.MaxTextureSize = 4096
+ }
+ if fmt, ok := detectFloatFormat(dev); ok {
+ b.floatFormat = fmt
+ b.caps.Features |= driver.FeatureFloatRenderTargets
+ }
+ // Disable backface culling to match OpenGL.
+ state, err := dev.CreateRasterizerState(&d3d11.RASTERIZER_DESC{
+ CullMode: d3d11.CULL_NONE,
+ FillMode: d3d11.FILL_SOLID,
+ })
+ if err != nil {
+ return nil, err
+ }
+ defer d3d11.IUnknownRelease(unsafe.Pointer(state), state.Vtbl.Release)
+ b.ctx.RSSetState(state)
+ return b, nil
+}
+
+func (b *Backend) BeginFrame(target driver.RenderTarget, clear bool, viewport image.Point) driver.Texture {
+ var (
+ renderTarget *d3d11.RenderTargetView
+ )
+ if target != nil {
+ switch t := target.(type) {
+ case driver.Direct3D11RenderTarget:
+ renderTarget = (*d3d11.RenderTargetView)(t.RenderTarget)
+ case *Texture:
+ renderTarget = t.renderTarget
+ default:
+ panic(fmt.Errorf("d3d11: invalid render target type: %T", target))
+ }
+ }
+ b.ctx.OMSetRenderTargets(renderTarget, nil)
+ return &Texture{backend: b, renderTarget: renderTarget, foreign: true}
+}
+
+func (b *Backend) CopyTexture(dstTex driver.Texture, dstOrigin image.Point, srcTex driver.Texture, srcRect image.Rectangle) {
+ dst := (*d3d11.Resource)(unsafe.Pointer(dstTex.(*Texture).tex))
+ src := (*d3d11.Resource)(srcTex.(*Texture).tex)
+ b.ctx.CopySubresourceRegion(
+ dst,
+ 0, // Destination subresource.
+ uint32(dstOrigin.X), uint32(dstOrigin.Y), 0, // Destination coordinates (x, y, z).
+ src,
+ 0, // Source subresource.
+ &d3d11.BOX{
+ Left: uint32(srcRect.Min.X),
+ Top: uint32(srcRect.Min.Y),
+ Right: uint32(srcRect.Max.X),
+ Bottom: uint32(srcRect.Max.Y),
+ Front: 0,
+ Back: 1,
+ },
+ )
+}
+
+func (b *Backend) EndFrame() {
+}
+
+func (b *Backend) Caps() driver.Caps {
+ return b.caps
+}
+
+func (b *Backend) NewTimer() driver.Timer {
+ panic("timers not supported")
+}
+
+func (b *Backend) IsTimeContinuous() bool {
+ panic("timers not supported")
+}
+
+func (b *Backend) Release() {
+ d3d11.IUnknownRelease(unsafe.Pointer(b.ctx), b.ctx.Vtbl.Release)
+ *b = Backend{}
+}
+
+func (b *Backend) NewTexture(format driver.TextureFormat, width, height int, minFilter, magFilter driver.TextureFilter, bindings driver.BufferBinding) (driver.Texture, error) {
+ var d3dfmt uint32
+ switch format {
+ case driver.TextureFormatFloat:
+ d3dfmt = b.floatFormat
+ case driver.TextureFormatSRGBA:
+ d3dfmt = d3d11.DXGI_FORMAT_R8G8B8A8_UNORM_SRGB
+ case driver.TextureFormatRGBA8:
+ d3dfmt = d3d11.DXGI_FORMAT_R8G8B8A8_UNORM
+ default:
+ return nil, fmt.Errorf("unsupported texture format %d", format)
+ }
+ tex, err := b.dev.CreateTexture2D(&d3d11.TEXTURE2D_DESC{
+ Width: uint32(width),
+ Height: uint32(height),
+ MipLevels: 1,
+ ArraySize: 1,
+ Format: d3dfmt,
+ SampleDesc: d3d11.DXGI_SAMPLE_DESC{
+ Count: 1,
+ Quality: 0,
+ },
+ BindFlags: convBufferBinding(bindings),
+ })
+ if err != nil {
+ return nil, err
+ }
+ var (
+ sampler *d3d11.SamplerState
+ resView *d3d11.ShaderResourceView
+ uaView *d3d11.UnorderedAccessView
+ fbo *d3d11.RenderTargetView
+ )
+ if bindings&driver.BufferBindingTexture != 0 {
+ var filter uint32
+ switch {
+ case minFilter == driver.FilterNearest && magFilter == driver.FilterNearest:
+ filter = d3d11.FILTER_MIN_MAG_MIP_POINT
+ case minFilter == driver.FilterLinear && magFilter == driver.FilterLinear:
+ filter = d3d11.FILTER_MIN_MAG_LINEAR_MIP_POINT
+ default:
+ d3d11.IUnknownRelease(unsafe.Pointer(tex), tex.Vtbl.Release)
+ return nil, fmt.Errorf("unsupported texture filter combination %d, %d", minFilter, magFilter)
+ }
+ var err error
+ sampler, err = b.dev.CreateSamplerState(&d3d11.SAMPLER_DESC{
+ Filter: filter,
+ AddressU: d3d11.TEXTURE_ADDRESS_CLAMP,
+ AddressV: d3d11.TEXTURE_ADDRESS_CLAMP,
+ AddressW: d3d11.TEXTURE_ADDRESS_CLAMP,
+ MaxAnisotropy: 1,
+ MinLOD: -math.MaxFloat32,
+ MaxLOD: math.MaxFloat32,
+ })
+ if err != nil {
+ d3d11.IUnknownRelease(unsafe.Pointer(tex), tex.Vtbl.Release)
+ return nil, err
+ }
+ resView, err = b.dev.CreateShaderResourceView(
+ (*d3d11.Resource)(unsafe.Pointer(tex)),
+ unsafe.Pointer(&d3d11.SHADER_RESOURCE_VIEW_DESC_TEX2D{
+ SHADER_RESOURCE_VIEW_DESC: d3d11.SHADER_RESOURCE_VIEW_DESC{
+ Format: d3dfmt,
+ ViewDimension: d3d11.SRV_DIMENSION_TEXTURE2D,
+ },
+ Texture2D: d3d11.TEX2D_SRV{
+ MostDetailedMip: 0,
+ MipLevels: ^uint32(0),
+ },
+ }),
+ )
+ if err != nil {
+ d3d11.IUnknownRelease(unsafe.Pointer(tex), tex.Vtbl.Release)
+ d3d11.IUnknownRelease(unsafe.Pointer(sampler), sampler.Vtbl.Release)
+ return nil, err
+ }
+ }
+ if bindings&driver.BufferBindingShaderStorageWrite != 0 {
+ uaView, err = b.dev.CreateUnorderedAccessView(
+ (*d3d11.Resource)(unsafe.Pointer(tex)),
+ unsafe.Pointer(&d3d11.UNORDERED_ACCESS_VIEW_DESC_TEX2D{
+ UNORDERED_ACCESS_VIEW_DESC: d3d11.UNORDERED_ACCESS_VIEW_DESC{
+ Format: d3dfmt,
+ ViewDimension: d3d11.UAV_DIMENSION_TEXTURE2D,
+ },
+ Texture2D: d3d11.TEX2D_UAV{
+ MipSlice: 0,
+ },
+ }),
+ )
+ if err != nil {
+ if sampler != nil {
+ d3d11.IUnknownRelease(unsafe.Pointer(sampler), sampler.Vtbl.Release)
+ }
+ if resView != nil {
+ d3d11.IUnknownRelease(unsafe.Pointer(resView), resView.Vtbl.Release)
+ }
+ d3d11.IUnknownRelease(unsafe.Pointer(tex), tex.Vtbl.Release)
+ return nil, err
+ }
+ }
+ if bindings&driver.BufferBindingFramebuffer != 0 {
+ resource := (*d3d11.Resource)(unsafe.Pointer(tex))
+ fbo, err = b.dev.CreateRenderTargetView(resource)
+ if err != nil {
+ if uaView != nil {
+ d3d11.IUnknownRelease(unsafe.Pointer(uaView), uaView.Vtbl.Release)
+ }
+ if sampler != nil {
+ d3d11.IUnknownRelease(unsafe.Pointer(sampler), sampler.Vtbl.Release)
+ }
+ if resView != nil {
+ d3d11.IUnknownRelease(unsafe.Pointer(resView), resView.Vtbl.Release)
+ }
+ d3d11.IUnknownRelease(unsafe.Pointer(tex), tex.Vtbl.Release)
+ return nil, err
+ }
+ }
+ return &Texture{backend: b, format: d3dfmt, tex: tex, sampler: sampler, resView: resView, uaView: uaView, renderTarget: fbo, bindings: bindings, width: width, height: height}, nil
+}
+
+func (b *Backend) newInputLayout(vertexShader shader.Sources, layout []driver.InputDesc) (*d3d11.InputLayout, error) {
+ if len(vertexShader.Inputs) != len(layout) {
+ return nil, fmt.Errorf("NewInputLayout: got %d inputs, expected %d", len(layout), len(vertexShader.Inputs))
+ }
+ descs := make([]d3d11.INPUT_ELEMENT_DESC, len(layout))
+ for i, l := range layout {
+ inp := vertexShader.Inputs[i]
+ cname, err := windows.BytePtrFromString(inp.Semantic)
+ if err != nil {
+ return nil, err
+ }
+ var format uint32
+ switch l.Type {
+ case shader.DataTypeFloat:
+ switch l.Size {
+ case 1:
+ format = d3d11.DXGI_FORMAT_R32_FLOAT
+ case 2:
+ format = d3d11.DXGI_FORMAT_R32G32_FLOAT
+ case 3:
+ format = d3d11.DXGI_FORMAT_R32G32B32_FLOAT
+ case 4:
+ format = d3d11.DXGI_FORMAT_R32G32B32A32_FLOAT
+ default:
+ panic("unsupported data size")
+ }
+ case shader.DataTypeShort:
+ switch l.Size {
+ case 1:
+ format = d3d11.DXGI_FORMAT_R16_SINT
+ case 2:
+ format = d3d11.DXGI_FORMAT_R16G16_SINT
+ default:
+ panic("unsupported data size")
+ }
+ default:
+ panic("unsupported data type")
+ }
+ descs[i] = d3d11.INPUT_ELEMENT_DESC{
+ SemanticName: cname,
+ SemanticIndex: uint32(inp.SemanticIndex),
+ Format: format,
+ AlignedByteOffset: uint32(l.Offset),
+ }
+ }
+ return b.dev.CreateInputLayout(descs, []byte(vertexShader.DXBC))
+}
+
+func (b *Backend) NewBuffer(typ driver.BufferBinding, size int) (driver.Buffer, error) {
+ return b.newBuffer(typ, size, nil, false)
+}
+
+func (b *Backend) NewImmutableBuffer(typ driver.BufferBinding, data []byte) (driver.Buffer, error) {
+ return b.newBuffer(typ, len(data), data, true)
+}
+
+func (b *Backend) newBuffer(typ driver.BufferBinding, size int, data []byte, immutable bool) (*Buffer, error) {
+ if typ&driver.BufferBindingUniforms != 0 {
+ if typ != driver.BufferBindingUniforms {
+ return nil, errors.New("uniform buffers cannot have other bindings")
+ }
+ if size%16 != 0 {
+ return nil, fmt.Errorf("constant buffer size is %d, expected a multiple of 16", size)
+ }
+ }
+ bind := convBufferBinding(typ)
+ var usage, miscFlags, cpuFlags uint32
+ if immutable {
+ usage = d3d11.USAGE_IMMUTABLE
+ }
+ if typ&driver.BufferBindingShaderStorageWrite != 0 {
+ cpuFlags = d3d11.CPU_ACCESS_READ
+ }
+ if typ&(driver.BufferBindingShaderStorageRead|driver.BufferBindingShaderStorageWrite) != 0 {
+ miscFlags |= d3d11.RESOURCE_MISC_BUFFER_ALLOW_RAW_VIEWS
+ }
+ buf, err := b.dev.CreateBuffer(&d3d11.BUFFER_DESC{
+ ByteWidth: uint32(size),
+ Usage: usage,
+ BindFlags: bind,
+ CPUAccessFlags: cpuFlags,
+ MiscFlags: miscFlags,
+ }, data)
+ if err != nil {
+ return nil, err
+ }
+ var (
+ resView *d3d11.ShaderResourceView
+ uaView *d3d11.UnorderedAccessView
+ )
+ if typ&driver.BufferBindingShaderStorageWrite != 0 {
+ uaView, err = b.dev.CreateUnorderedAccessView(
+ (*d3d11.Resource)(unsafe.Pointer(buf)),
+ unsafe.Pointer(&d3d11.UNORDERED_ACCESS_VIEW_DESC_BUFFER{
+ UNORDERED_ACCESS_VIEW_DESC: d3d11.UNORDERED_ACCESS_VIEW_DESC{
+ Format: d3d11.DXGI_FORMAT_R32_TYPELESS,
+ ViewDimension: d3d11.UAV_DIMENSION_BUFFER,
+ },
+ Buffer: d3d11.BUFFER_UAV{
+ FirstElement: 0,
+ NumElements: uint32(size / 4),
+ Flags: d3d11.BUFFER_UAV_FLAG_RAW,
+ },
+ }),
+ )
+ if err != nil {
+ d3d11.IUnknownRelease(unsafe.Pointer(buf), buf.Vtbl.Release)
+ return nil, err
+ }
+ } else if typ&driver.BufferBindingShaderStorageRead != 0 {
+ resView, err = b.dev.CreateShaderResourceView(
+ (*d3d11.Resource)(unsafe.Pointer(buf)),
+ unsafe.Pointer(&d3d11.SHADER_RESOURCE_VIEW_DESC_BUFFEREX{
+ SHADER_RESOURCE_VIEW_DESC: d3d11.SHADER_RESOURCE_VIEW_DESC{
+ Format: d3d11.DXGI_FORMAT_R32_TYPELESS,
+ ViewDimension: d3d11.SRV_DIMENSION_BUFFEREX,
+ },
+ Buffer: d3d11.BUFFEREX_SRV{
+ FirstElement: 0,
+ NumElements: uint32(size / 4),
+ Flags: d3d11.BUFFEREX_SRV_FLAG_RAW,
+ },
+ }),
+ )
+ if err != nil {
+ d3d11.IUnknownRelease(unsafe.Pointer(buf), buf.Vtbl.Release)
+ return nil, err
+ }
+ }
+ return &Buffer{backend: b, buf: buf, bind: bind, size: size, resView: resView, uaView: uaView, immutable: immutable}, nil
+}
+
+func (b *Backend) NewComputeProgram(shader shader.Sources) (driver.Program, error) {
+ cs, err := b.dev.CreateComputeShader([]byte(shader.DXBC))
+ if err != nil {
+ return nil, err
+ }
+ return &Program{backend: b, shader: cs}, nil
+}
+
+func (b *Backend) NewPipeline(desc driver.PipelineDesc) (driver.Pipeline, error) {
+ vsh := desc.VertexShader.(*VertexShader)
+ fsh := desc.FragmentShader.(*FragmentShader)
+ blend, err := b.newBlendState(desc.BlendDesc)
+ if err != nil {
+ return nil, err
+ }
+ var layout *d3d11.InputLayout
+ if l := desc.VertexLayout; l.Stride > 0 {
+ var err error
+ layout, err = b.newInputLayout(vsh.src, l.Inputs)
+ if err != nil {
+ d3d11.IUnknownRelease(unsafe.Pointer(blend), blend.Vtbl.AddRef)
+ return nil, err
+ }
+ }
+
+ // Retain shaders.
+ vshRef := vsh.shader
+ fshRef := fsh.shader
+ d3d11.IUnknownAddRef(unsafe.Pointer(vshRef), vshRef.Vtbl.AddRef)
+ d3d11.IUnknownAddRef(unsafe.Pointer(fshRef), fshRef.Vtbl.AddRef)
+
+ return &Pipeline{
+ vert: vshRef,
+ frag: fshRef,
+ layout: layout,
+ stride: desc.VertexLayout.Stride,
+ blend: blend,
+ topology: desc.Topology,
+ }, nil
+}
+
+func (b *Backend) newBlendState(desc driver.BlendDesc) (*d3d11.BlendState, error) {
+ var d3ddesc d3d11.BLEND_DESC
+ t0 := &d3ddesc.RenderTarget[0]
+ t0.RenderTargetWriteMask = d3d11.COLOR_WRITE_ENABLE_ALL
+ t0.BlendOp = d3d11.BLEND_OP_ADD
+ t0.BlendOpAlpha = d3d11.BLEND_OP_ADD
+ if desc.Enable {
+ t0.BlendEnable = 1
+ }
+ scol, salpha := toBlendFactor(desc.SrcFactor)
+ dcol, dalpha := toBlendFactor(desc.DstFactor)
+ t0.SrcBlend = scol
+ t0.SrcBlendAlpha = salpha
+ t0.DestBlend = dcol
+ t0.DestBlendAlpha = dalpha
+ return b.dev.CreateBlendState(&d3ddesc)
+}
+
+func (b *Backend) NewVertexShader(src shader.Sources) (driver.VertexShader, error) {
+ vs, err := b.dev.CreateVertexShader([]byte(src.DXBC))
+ if err != nil {
+ return nil, err
+ }
+ return &VertexShader{b, vs, src}, nil
+}
+
+func (b *Backend) NewFragmentShader(src shader.Sources) (driver.FragmentShader, error) {
+ fs, err := b.dev.CreatePixelShader([]byte(src.DXBC))
+ if err != nil {
+ return nil, err
+ }
+ return &FragmentShader{b, fs}, nil
+}
+
+func (b *Backend) Viewport(x, y, width, height int) {
+ b.viewport = d3d11.VIEWPORT{
+ TopLeftX: float32(x),
+ TopLeftY: float32(y),
+ Width: float32(width),
+ Height: float32(height),
+ MinDepth: 0.0,
+ MaxDepth: 1.0,
+ }
+ b.ctx.RSSetViewports(&b.viewport)
+}
+
+func (b *Backend) DrawArrays(off, count int) {
+ b.prepareDraw()
+ b.ctx.Draw(uint32(count), uint32(off))
+}
+
+func (b *Backend) DrawElements(off, count int) {
+ b.prepareDraw()
+ b.ctx.DrawIndexed(uint32(count), uint32(off), 0)
+}
+
+func (b *Backend) prepareDraw() {
+ p := b.pipeline
+ if p == nil {
+ return
+ }
+ b.ctx.VSSetShader(p.vert)
+ b.ctx.PSSetShader(p.frag)
+ b.ctx.IASetInputLayout(p.layout)
+ b.ctx.OMSetBlendState(p.blend, nil, 0xffffffff)
+ if b.vert.buffer != nil {
+ b.ctx.IASetVertexBuffers(b.vert.buffer.buf, uint32(p.stride), uint32(b.vert.offset))
+ }
+ var topology uint32
+ switch p.topology {
+ case driver.TopologyTriangles:
+ topology = d3d11.PRIMITIVE_TOPOLOGY_TRIANGLELIST
+ case driver.TopologyTriangleStrip:
+ topology = d3d11.PRIMITIVE_TOPOLOGY_TRIANGLESTRIP
+ default:
+ panic("unsupported draw mode")
+ }
+ b.ctx.IASetPrimitiveTopology(topology)
+}
+
+func (b *Backend) BindImageTexture(unit int, tex driver.Texture) {
+ t := tex.(*Texture)
+ if t.uaView != nil {
+ b.ctx.CSSetUnorderedAccessViews(uint32(unit), t.uaView)
+ } else {
+ b.ctx.CSSetShaderResources(uint32(unit), t.resView)
+ }
+}
+
+func (b *Backend) DispatchCompute(x, y, z int) {
+ b.ctx.CSSetShader(b.program.shader)
+ b.ctx.Dispatch(uint32(x), uint32(y), uint32(z))
+}
+
+func (t *Texture) Upload(offset, size image.Point, pixels []byte, stride int) {
+ if stride == 0 {
+ stride = size.X * 4
+ }
+ dst := &d3d11.BOX{
+ Left: uint32(offset.X),
+ Top: uint32(offset.Y),
+ Right: uint32(offset.X + size.X),
+ Bottom: uint32(offset.Y + size.Y),
+ Front: 0,
+ Back: 1,
+ }
+ res := (*d3d11.Resource)(unsafe.Pointer(t.tex))
+ t.backend.ctx.UpdateSubresource(res, dst, uint32(stride), uint32(len(pixels)), pixels)
+}
+
+func (t *Texture) Release() {
+ if t.foreign {
+ panic("texture not created by NewTexture")
+ }
+ if t.renderTarget != nil {
+ d3d11.IUnknownRelease(unsafe.Pointer(t.renderTarget), t.renderTarget.Vtbl.Release)
+ }
+ if t.sampler != nil {
+ d3d11.IUnknownRelease(unsafe.Pointer(t.sampler), t.sampler.Vtbl.Release)
+ }
+ if t.resView != nil {
+ d3d11.IUnknownRelease(unsafe.Pointer(t.resView), t.resView.Vtbl.Release)
+ }
+ if t.uaView != nil {
+ d3d11.IUnknownRelease(unsafe.Pointer(t.uaView), t.uaView.Vtbl.Release)
+ }
+ d3d11.IUnknownRelease(unsafe.Pointer(t.tex), t.tex.Vtbl.Release)
+ *t = Texture{}
+}
+
+func (b *Backend) PrepareTexture(tex driver.Texture) {}
+
+func (b *Backend) BindTexture(unit int, tex driver.Texture) {
+ t := tex.(*Texture)
+ b.ctx.PSSetSamplers(uint32(unit), t.sampler)
+ b.ctx.PSSetShaderResources(uint32(unit), t.resView)
+}
+
+func (b *Backend) BindPipeline(pipe driver.Pipeline) {
+ b.pipeline = pipe.(*Pipeline)
+}
+
+func (b *Backend) BindProgram(prog driver.Program) {
+ b.program = prog.(*Program)
+}
+
+func (s *VertexShader) Release() {
+ d3d11.IUnknownRelease(unsafe.Pointer(s.shader), s.shader.Vtbl.Release)
+ *s = VertexShader{}
+}
+
+func (s *FragmentShader) Release() {
+ d3d11.IUnknownRelease(unsafe.Pointer(s.shader), s.shader.Vtbl.Release)
+ *s = FragmentShader{}
+}
+
+func (s *Program) Release() {
+ d3d11.IUnknownRelease(unsafe.Pointer(s.shader), s.shader.Vtbl.Release)
+ *s = Program{}
+}
+
+func (p *Pipeline) Release() {
+ d3d11.IUnknownRelease(unsafe.Pointer(p.vert), p.vert.Vtbl.Release)
+ d3d11.IUnknownRelease(unsafe.Pointer(p.frag), p.frag.Vtbl.Release)
+ d3d11.IUnknownRelease(unsafe.Pointer(p.blend), p.blend.Vtbl.Release)
+ if l := p.layout; l != nil {
+ d3d11.IUnknownRelease(unsafe.Pointer(l), l.Vtbl.Release)
+ }
+ *p = Pipeline{}
+}
+
+func (b *Backend) BindStorageBuffer(binding int, buffer driver.Buffer) {
+ buf := buffer.(*Buffer)
+ if buf.resView != nil {
+ b.ctx.CSSetShaderResources(uint32(binding), buf.resView)
+ } else {
+ b.ctx.CSSetUnorderedAccessViews(uint32(binding), buf.uaView)
+ }
+}
+
+func (b *Backend) BindUniforms(buffer driver.Buffer) {
+ buf := buffer.(*Buffer)
+ b.ctx.VSSetConstantBuffers(buf.buf)
+ b.ctx.PSSetConstantBuffers(buf.buf)
+}
+
+func (b *Backend) BindVertexBuffer(buf driver.Buffer, offset int) {
+ b.vert.buffer = buf.(*Buffer)
+ b.vert.offset = offset
+}
+
+func (b *Backend) BindIndexBuffer(buf driver.Buffer) {
+ b.ctx.IASetIndexBuffer(buf.(*Buffer).buf, d3d11.DXGI_FORMAT_R16_UINT, 0)
+}
+
+func (b *Buffer) Download(dst []byte) error {
+ res := (*d3d11.Resource)(unsafe.Pointer(b.buf))
+ resMap, err := b.backend.ctx.Map(res, 0, d3d11.MAP_READ, 0)
+ if err != nil {
+ return fmt.Errorf("d3d11: %v", err)
+ }
+ defer b.backend.ctx.Unmap(res, 0)
+ data := sliceOf(resMap.PData, len(dst))
+ copy(dst, data)
+ return nil
+}
+
+func (b *Buffer) Upload(data []byte) {
+ var dst *d3d11.BOX
+ if len(data) < b.size {
+ dst = &d3d11.BOX{
+ Left: 0,
+ Right: uint32(len(data)),
+ Top: 0,
+ Bottom: 1,
+ Front: 0,
+ Back: 1,
+ }
+ }
+ b.backend.ctx.UpdateSubresource((*d3d11.Resource)(unsafe.Pointer(b.buf)), dst, 0, 0, data)
+}
+
+func (b *Buffer) Release() {
+ if b.resView != nil {
+ d3d11.IUnknownRelease(unsafe.Pointer(b.resView), b.resView.Vtbl.Release)
+ }
+ if b.uaView != nil {
+ d3d11.IUnknownRelease(unsafe.Pointer(b.uaView), b.uaView.Vtbl.Release)
+ }
+ d3d11.IUnknownRelease(unsafe.Pointer(b.buf), b.buf.Vtbl.Release)
+ *b = Buffer{}
+}
+
+func (t *Texture) ReadPixels(src image.Rectangle, pixels []byte, stride int) error {
+ w, h := src.Dx(), src.Dy()
+ tex, err := t.backend.dev.CreateTexture2D(&d3d11.TEXTURE2D_DESC{
+ Width: uint32(w),
+ Height: uint32(h),
+ MipLevels: 1,
+ ArraySize: 1,
+ Format: t.format,
+ SampleDesc: d3d11.DXGI_SAMPLE_DESC{
+ Count: 1,
+ Quality: 0,
+ },
+ Usage: d3d11.USAGE_STAGING,
+ CPUAccessFlags: d3d11.CPU_ACCESS_READ,
+ })
+ if err != nil {
+ return fmt.Errorf("ReadPixels: %v", err)
+ }
+ defer d3d11.IUnknownRelease(unsafe.Pointer(tex), tex.Vtbl.Release)
+ res := (*d3d11.Resource)(unsafe.Pointer(tex))
+ t.backend.ctx.CopySubresourceRegion(
+ res,
+ 0, // Destination subresource.
+ 0, 0, 0, // Destination coordinates (x, y, z).
+ (*d3d11.Resource)(t.tex),
+ 0, // Source subresource.
+ &d3d11.BOX{
+ Left: uint32(src.Min.X),
+ Top: uint32(src.Min.Y),
+ Right: uint32(src.Max.X),
+ Bottom: uint32(src.Max.Y),
+ Front: 0,
+ Back: 1,
+ },
+ )
+ resMap, err := t.backend.ctx.Map(res, 0, d3d11.MAP_READ, 0)
+ if err != nil {
+ return fmt.Errorf("ReadPixels: %v", err)
+ }
+ defer t.backend.ctx.Unmap(res, 0)
+ srcPitch := stride
+ dstPitch := int(resMap.RowPitch)
+ mapSize := dstPitch * h
+ data := sliceOf(resMap.PData, mapSize)
+ width := w * 4
+ for r := 0; r < h; r++ {
+ pixels := pixels[r*srcPitch:]
+ copy(pixels[:width], data[r*dstPitch:])
+ }
+ return nil
+}
+
+func (b *Backend) BeginCompute() {
+}
+
+func (b *Backend) EndCompute() {
+}
+
+func (b *Backend) BeginRenderPass(tex driver.Texture, d driver.LoadDesc) {
+ t := tex.(*Texture)
+ b.ctx.OMSetRenderTargets(t.renderTarget, nil)
+ if d.Action == driver.LoadActionClear {
+ c := d.ClearColor
+ b.clearColor = [4]float32{c.R, c.G, c.B, c.A}
+ b.ctx.ClearRenderTargetView(t.renderTarget, &b.clearColor)
+ }
+}
+
+func (b *Backend) EndRenderPass() {
+}
+
+func (f *Texture) ImplementsRenderTarget() {}
+
+func convBufferBinding(typ driver.BufferBinding) uint32 {
+ var bindings uint32
+ if typ&driver.BufferBindingVertices != 0 {
+ bindings |= d3d11.BIND_VERTEX_BUFFER
+ }
+ if typ&driver.BufferBindingIndices != 0 {
+ bindings |= d3d11.BIND_INDEX_BUFFER
+ }
+ if typ&driver.BufferBindingUniforms != 0 {
+ bindings |= d3d11.BIND_CONSTANT_BUFFER
+ }
+ if typ&driver.BufferBindingTexture != 0 {
+ bindings |= d3d11.BIND_SHADER_RESOURCE
+ }
+ if typ&driver.BufferBindingFramebuffer != 0 {
+ bindings |= d3d11.BIND_RENDER_TARGET
+ }
+ if typ&driver.BufferBindingShaderStorageWrite != 0 {
+ bindings |= d3d11.BIND_UNORDERED_ACCESS
+ } else if typ&driver.BufferBindingShaderStorageRead != 0 {
+ bindings |= d3d11.BIND_SHADER_RESOURCE
+ }
+ return bindings
+}
+
+func toBlendFactor(f driver.BlendFactor) (uint32, uint32) {
+ switch f {
+ case driver.BlendFactorOne:
+ return d3d11.BLEND_ONE, d3d11.BLEND_ONE
+ case driver.BlendFactorOneMinusSrcAlpha:
+ return d3d11.BLEND_INV_SRC_ALPHA, d3d11.BLEND_INV_SRC_ALPHA
+ case driver.BlendFactorZero:
+ return d3d11.BLEND_ZERO, d3d11.BLEND_ZERO
+ case driver.BlendFactorDstColor:
+ return d3d11.BLEND_DEST_COLOR, d3d11.BLEND_DEST_ALPHA
+ default:
+ panic("unsupported blend source factor")
+ }
+}
+
+// sliceOf returns a slice from a (native) pointer.
+func sliceOf(ptr uintptr, cap int) []byte {
+ var data []byte
+ h := (*reflect.SliceHeader)(unsafe.Pointer(&data))
+ h.Data = ptr
+ h.Cap = cap
+ h.Len = cap
+ return data
+}
diff --git a/vendor/gioui.org/gpu/internal/driver/api.go b/vendor/gioui.org/gpu/internal/driver/api.go
new file mode 100644
index 0000000..9a762a6
--- /dev/null
+++ b/vendor/gioui.org/gpu/internal/driver/api.go
@@ -0,0 +1,127 @@
+// SPDX-License-Identifier: Unlicense OR MIT
+
+package driver
+
+import (
+ "fmt"
+ "unsafe"
+
+ "gioui.org/internal/gl"
+)
+
+// See gpu/api.go for documentation for the API types.
+
+type API interface {
+ implementsAPI()
+}
+
+type RenderTarget interface {
+ ImplementsRenderTarget()
+}
+
+type OpenGLRenderTarget gl.Framebuffer
+
+type Direct3D11RenderTarget struct {
+ // RenderTarget is a *ID3D11RenderTargetView.
+ RenderTarget unsafe.Pointer
+}
+
+type MetalRenderTarget struct {
+ // Texture is a MTLTexture.
+ Texture uintptr
+}
+
+type VulkanRenderTarget struct {
+ // WaitSem is a VkSemaphore that must signaled before accessing Framebuffer.
+ WaitSem uint64
+ // SignalSem is a VkSemaphore that signal access to Framebuffer is complete.
+ SignalSem uint64
+ // Image is the VkImage to render into.
+ Image uint64
+ // Framebuffer is a VkFramebuffer for Image.
+ Framebuffer uint64
+}
+
+type OpenGL struct {
+ // ES forces the use of ANGLE OpenGL ES libraries on macOS. It is
+ // ignored on all other platforms.
+ ES bool
+ // Context contains the WebGL context for WebAssembly platforms. It is
+ // empty for all other platforms; an OpenGL context is assumed current when
+ // calling NewDevice.
+ Context gl.Context
+ // Shared instructs users of the context to restore the GL state after
+ // use.
+ Shared bool
+}
+
+type Direct3D11 struct {
+ // Device contains a *ID3D11Device.
+ Device unsafe.Pointer
+}
+
+type Metal struct {
+ // Device is an MTLDevice.
+ Device uintptr
+ // Queue is a MTLCommandQueue.
+ Queue uintptr
+ // PixelFormat is the MTLPixelFormat of the default framebuffer.
+ PixelFormat int
+}
+
+type Vulkan struct {
+ // PhysDevice is a VkPhysicalDevice.
+ PhysDevice unsafe.Pointer
+ // Device is a VkDevice.
+ Device unsafe.Pointer
+ // QueueFamily is the queue familily index of the queue.
+ QueueFamily int
+ // QueueIndex is the logical queue index of the queue.
+ QueueIndex int
+ // Format is a VkFormat that matches render targets.
+ Format int
+}
+
+// API specific device constructors.
+var (
+ NewOpenGLDevice func(api OpenGL) (Device, error)
+ NewDirect3D11Device func(api Direct3D11) (Device, error)
+ NewMetalDevice func(api Metal) (Device, error)
+ NewVulkanDevice func(api Vulkan) (Device, error)
+)
+
+// NewDevice creates a new Device given the api.
+//
+// Note that the device does not assume ownership of the resources contained in
+// api; the caller must ensure the resources are valid until the device is
+// released.
+func NewDevice(api API) (Device, error) {
+ switch api := api.(type) {
+ case OpenGL:
+ if NewOpenGLDevice != nil {
+ return NewOpenGLDevice(api)
+ }
+ case Direct3D11:
+ if NewDirect3D11Device != nil {
+ return NewDirect3D11Device(api)
+ }
+ case Metal:
+ if NewMetalDevice != nil {
+ return NewMetalDevice(api)
+ }
+ case Vulkan:
+ if NewVulkanDevice != nil {
+ return NewVulkanDevice(api)
+ }
+ }
+ return nil, fmt.Errorf("driver: no driver available for the API %T", api)
+}
+
+func (OpenGL) implementsAPI() {}
+func (Direct3D11) implementsAPI() {}
+func (Metal) implementsAPI() {}
+func (Vulkan) implementsAPI() {}
+func (OpenGLRenderTarget) ImplementsRenderTarget() {}
+func (Direct3D11RenderTarget) ImplementsRenderTarget() {}
+func (MetalRenderTarget) ImplementsRenderTarget() {}
+func (VulkanRenderTarget) ImplementsRenderTarget() {}
diff --git a/vendor/gioui.org/gpu/internal/driver/driver.go b/vendor/gioui.org/gpu/internal/driver/driver.go
new file mode 100644
index 0000000..6979144
--- /dev/null
+++ b/vendor/gioui.org/gpu/internal/driver/driver.go
@@ -0,0 +1,237 @@
+// SPDX-License-Identifier: Unlicense OR MIT
+
+package driver
+
+import (
+ "errors"
+ "image"
+ "time"
+
+ "gioui.org/internal/f32color"
+ "gioui.org/shader"
+)
+
+// Device represents the abstraction of underlying GPU
+// APIs such as OpenGL, Direct3D useful for rendering Gio
+// operations.
+type Device interface {
+ BeginFrame(target RenderTarget, clear bool, viewport image.Point) Texture
+ EndFrame()
+ Caps() Caps
+ NewTimer() Timer
+ // IsContinuousTime reports whether all timer measurements
+ // are valid at the point of call.
+ IsTimeContinuous() bool
+ NewTexture(format TextureFormat, width, height int, minFilter, magFilter TextureFilter, bindings BufferBinding) (Texture, error)
+ NewImmutableBuffer(typ BufferBinding, data []byte) (Buffer, error)
+ NewBuffer(typ BufferBinding, size int) (Buffer, error)
+ NewComputeProgram(shader shader.Sources) (Program, error)
+ NewVertexShader(src shader.Sources) (VertexShader, error)
+ NewFragmentShader(src shader.Sources) (FragmentShader, error)
+ NewPipeline(desc PipelineDesc) (Pipeline, error)
+
+ Viewport(x, y, width, height int)
+ DrawArrays(off, count int)
+ DrawElements(off, count int)
+
+ BeginRenderPass(t Texture, desc LoadDesc)
+ EndRenderPass()
+ PrepareTexture(t Texture)
+ BindProgram(p Program)
+ BindPipeline(p Pipeline)
+ BindTexture(unit int, t Texture)
+ BindVertexBuffer(b Buffer, offset int)
+ BindIndexBuffer(b Buffer)
+ BindImageTexture(unit int, texture Texture)
+ BindUniforms(buf Buffer)
+ BindStorageBuffer(binding int, buf Buffer)
+
+ BeginCompute()
+ EndCompute()
+ CopyTexture(dst Texture, dstOrigin image.Point, src Texture, srcRect image.Rectangle)
+ DispatchCompute(x, y, z int)
+
+ Release()
+}
+
+var ErrDeviceLost = errors.New("GPU device lost")
+
+type LoadDesc struct {
+ Action LoadAction
+ ClearColor f32color.RGBA
+}
+
+type Pipeline interface {
+ Release()
+}
+
+type PipelineDesc struct {
+ VertexShader VertexShader
+ FragmentShader FragmentShader
+ VertexLayout VertexLayout
+ BlendDesc BlendDesc
+ PixelFormat TextureFormat
+ Topology Topology
+}
+
+type VertexLayout struct {
+ Inputs []InputDesc
+ Stride int
+}
+
+// InputDesc describes a vertex attribute as laid out in a Buffer.
+type InputDesc struct {
+ Type shader.DataType
+ Size int
+
+ Offset int
+}
+
+type BlendDesc struct {
+ Enable bool
+ SrcFactor, DstFactor BlendFactor
+}
+
+type BlendFactor uint8
+
+type Topology uint8
+
+type TextureFilter uint8
+type TextureFormat uint8
+
+type BufferBinding uint8
+
+type LoadAction uint8
+
+type Features uint
+
+type Caps struct {
+ // BottomLeftOrigin is true if the driver has the origin in the lower left
+ // corner. The OpenGL driver returns true.
+ BottomLeftOrigin bool
+ Features Features
+ MaxTextureSize int
+}
+
+type VertexShader interface {
+ Release()
+}
+
+type FragmentShader interface {
+ Release()
+}
+
+type Program interface {
+ Release()
+}
+
+type Buffer interface {
+ Release()
+ Upload(data []byte)
+ Download(data []byte) error
+}
+
+type Timer interface {
+ Begin()
+ End()
+ Duration() (time.Duration, bool)
+ Release()
+}
+
+type Texture interface {
+ RenderTarget
+ Upload(offset, size image.Point, pixels []byte, stride int)
+ ReadPixels(src image.Rectangle, pixels []byte, stride int) error
+ Release()
+}
+
+const (
+ BufferBindingIndices BufferBinding = 1 << iota
+ BufferBindingVertices
+ BufferBindingUniforms
+ BufferBindingTexture
+ BufferBindingFramebuffer
+ BufferBindingShaderStorageRead
+ BufferBindingShaderStorageWrite
+)
+
+const (
+ TextureFormatSRGBA TextureFormat = iota
+ TextureFormatFloat
+ TextureFormatRGBA8
+ // TextureFormatOutput denotes the format used by the output framebuffer.
+ TextureFormatOutput
+)
+
+const (
+ FilterNearest TextureFilter = iota
+ FilterLinear
+)
+
+const (
+ FeatureTimers Features = 1 << iota
+ FeatureFloatRenderTargets
+ FeatureCompute
+ FeatureSRGB
+)
+
+const (
+ TopologyTriangleStrip Topology = iota
+ TopologyTriangles
+)
+
+const (
+ BlendFactorOne BlendFactor = iota
+ BlendFactorOneMinusSrcAlpha
+ BlendFactorZero
+ BlendFactorDstColor
+)
+
+const (
+ LoadActionKeep LoadAction = iota
+ LoadActionClear
+ LoadActionInvalidate
+)
+
+var ErrContentLost = errors.New("buffer content lost")
+
+func (f Features) Has(feats Features) bool {
+ return f&feats == feats
+}
+
+func DownloadImage(d Device, t Texture, r image.Rectangle) (*image.RGBA, error) {
+ img := image.NewRGBA(r)
+ if err := t.ReadPixels(r, img.Pix, img.Stride); err != nil {
+ return nil, err
+ }
+ if d.Caps().BottomLeftOrigin {
+ // OpenGL origin is in the lower-left corner. Flip the image to
+ // match.
+ flipImageY(r.Dx()*4, r.Dy(), img.Pix)
+ }
+ return img, nil
+}
+
+func flipImageY(stride, height int, pixels []byte) {
+ // Flip image in y-direction. OpenGL's origin is in the lower
+ // left corner.
+ row := make([]uint8, stride)
+ for y := 0; y < height/2; y++ {
+ y1 := height - y - 1
+ dest := y1 * stride
+ src := y * stride
+ copy(row, pixels[dest:])
+ copy(pixels[dest:], pixels[src:src+len(row)])
+ copy(pixels[src:], row)
+ }
+}
+
+func UploadImage(t Texture, offset image.Point, img *image.RGBA) {
+ var pixels []byte
+ size := img.Bounds().Size()
+ min := img.Rect.Min
+ start := img.PixOffset(min.X, min.Y)
+ end := img.PixOffset(min.X+size.X, min.Y+size.Y-1)
+ pixels = img.Pix[start:end]
+ t.Upload(offset, size, pixels, img.Stride)
+}
diff --git a/vendor/gioui.org/gpu/internal/metal/metal.go b/vendor/gioui.org/gpu/internal/metal/metal.go
new file mode 100644
index 0000000..b9739af
--- /dev/null
+++ b/vendor/gioui.org/gpu/internal/metal/metal.go
@@ -0,0 +1,5 @@
+// SPDX-License-Identifier: Unlicense OR MIT
+
+// This file exists so this package builds on non-Darwin platforms.
+
+package metal
diff --git a/vendor/gioui.org/gpu/internal/metal/metal_darwin.go b/vendor/gioui.org/gpu/internal/metal/metal_darwin.go
new file mode 100644
index 0000000..c180731
--- /dev/null
+++ b/vendor/gioui.org/gpu/internal/metal/metal_darwin.go
@@ -0,0 +1,1141 @@
+// SPDX-License-Identifier: Unlicense OR MIT
+
+package metal
+
+import (
+ "errors"
+ "fmt"
+ "image"
+ "unsafe"
+
+ "gioui.org/gpu/internal/driver"
+ "gioui.org/shader"
+)
+
+/*
+#cgo CFLAGS: -Werror -xobjective-c -fmodules -fobjc-arc
+#cgo LDFLAGS: -framework CoreGraphics
+
+@import Metal;
+
+#include
+#include
+
+typedef struct {
+ void *addr;
+ NSUInteger size;
+} slice;
+
+static CFTypeRef queueNewBuffer(CFTypeRef queueRef) {
+ @autoreleasepool {
+ id queue = (__bridge id)queueRef;
+ return CFBridgingRetain([queue commandBuffer]);
+ }
+}
+
+static void cmdBufferCommit(CFTypeRef cmdBufRef) {
+ @autoreleasepool {
+ id cmdBuf = (__bridge id)cmdBufRef;
+ [cmdBuf commit];
+ }
+}
+
+static void cmdBufferWaitUntilCompleted(CFTypeRef cmdBufRef) {
+ @autoreleasepool {
+ id cmdBuf = (__bridge id)cmdBufRef;
+ [cmdBuf waitUntilCompleted];
+ }
+}
+
+static CFTypeRef cmdBufferRenderEncoder(CFTypeRef cmdBufRef, CFTypeRef textureRef, MTLLoadAction act, float r, float g, float b, float a) {
+ @autoreleasepool {
+ id cmdBuf = (__bridge id)cmdBufRef;
+ MTLRenderPassDescriptor *desc = [MTLRenderPassDescriptor new];
+ desc.colorAttachments[0].texture = (__bridge id)textureRef;
+ desc.colorAttachments[0].loadAction = act;
+ desc.colorAttachments[0].clearColor = MTLClearColorMake(r, g, b, a);
+ return CFBridgingRetain([cmdBuf renderCommandEncoderWithDescriptor:desc]);
+ }
+}
+
+static CFTypeRef cmdBufferComputeEncoder(CFTypeRef cmdBufRef) {
+ @autoreleasepool {
+ id cmdBuf = (__bridge id)cmdBufRef;
+ return CFBridgingRetain([cmdBuf computeCommandEncoder]);
+ }
+}
+
+static CFTypeRef cmdBufferBlitEncoder(CFTypeRef cmdBufRef) {
+ @autoreleasepool {
+ id cmdBuf = (__bridge id)cmdBufRef;
+ return CFBridgingRetain([cmdBuf blitCommandEncoder]);
+ }
+}
+
+static void renderEncEnd(CFTypeRef renderEncRef) {
+ @autoreleasepool {
+ id enc = (__bridge id)renderEncRef;
+ [enc endEncoding];
+ }
+}
+
+static void renderEncViewport(CFTypeRef renderEncRef, MTLViewport viewport) {
+ @autoreleasepool {
+ id enc = (__bridge id)renderEncRef;
+ [enc setViewport:viewport];
+ }
+}
+
+static void renderEncSetFragmentTexture(CFTypeRef renderEncRef, NSUInteger index, CFTypeRef texRef) {
+ @autoreleasepool {
+ id enc = (__bridge id)renderEncRef;
+ id tex = (__bridge id)texRef;
+ [enc setFragmentTexture:tex atIndex:index];
+ }
+}
+
+static void renderEncSetFragmentSamplerState(CFTypeRef renderEncRef, NSUInteger index, CFTypeRef samplerRef) {
+ @autoreleasepool {
+ id enc = (__bridge id)renderEncRef;
+ id sampler = (__bridge id)samplerRef;
+ [enc setFragmentSamplerState:sampler atIndex:index];
+ }
+}
+
+static void renderEncSetVertexBuffer(CFTypeRef renderEncRef, CFTypeRef bufRef, NSUInteger idx, NSUInteger offset) {
+ @autoreleasepool {
+ id enc = (__bridge id)renderEncRef;
+ id buf = (__bridge id)bufRef;
+ [enc setVertexBuffer:buf offset:offset atIndex:idx];
+ }
+}
+
+static void renderEncSetFragmentBuffer(CFTypeRef renderEncRef, CFTypeRef bufRef, NSUInteger idx, NSUInteger offset) {
+ @autoreleasepool {
+ id enc = (__bridge id)renderEncRef;
+ id buf = (__bridge id)bufRef;
+ [enc setFragmentBuffer:buf offset:offset atIndex:idx];
+ }
+}
+
+static void renderEncSetFragmentBytes(CFTypeRef renderEncRef, const void *bytes, NSUInteger length, NSUInteger idx) {
+ @autoreleasepool {
+ id enc = (__bridge id)renderEncRef;
+ [enc setFragmentBytes:bytes length:length atIndex:idx];
+ }
+}
+
+static void renderEncSetVertexBytes(CFTypeRef renderEncRef, const void *bytes, NSUInteger length, NSUInteger idx) {
+ @autoreleasepool {
+ id enc = (__bridge id)renderEncRef;
+ [enc setVertexBytes:bytes length:length atIndex:idx];
+ }
+}
+
+static void renderEncSetRenderPipelineState(CFTypeRef renderEncRef, CFTypeRef pipeRef) {
+ @autoreleasepool {
+ id enc = (__bridge id)renderEncRef;
+ id pipe = (__bridge id)pipeRef;
+ [enc setRenderPipelineState:pipe];
+ }
+}
+
+static void renderEncDrawPrimitives(CFTypeRef renderEncRef, MTLPrimitiveType type, NSUInteger start, NSUInteger count) {
+ @autoreleasepool {
+ id enc = (__bridge id)renderEncRef;
+ [enc drawPrimitives:type vertexStart:start vertexCount:count];
+ }
+}
+
+static void renderEncDrawIndexedPrimitives(CFTypeRef renderEncRef, MTLPrimitiveType type, CFTypeRef bufRef, NSUInteger offset, NSUInteger count) {
+ @autoreleasepool {
+ id enc = (__bridge id)renderEncRef;
+ id buf = (__bridge id)bufRef;
+ [enc drawIndexedPrimitives:type indexCount:count indexType:MTLIndexTypeUInt16 indexBuffer:buf indexBufferOffset:offset];
+ }
+}
+
+static void computeEncSetPipeline(CFTypeRef computeEncRef, CFTypeRef pipeRef) {
+ @autoreleasepool {
+ id enc = (__bridge id)computeEncRef;
+ id pipe = (__bridge id)pipeRef;
+ [enc setComputePipelineState:pipe];
+ }
+}
+
+static void computeEncSetTexture(CFTypeRef computeEncRef, NSUInteger index, CFTypeRef texRef) {
+ @autoreleasepool {
+ id enc = (__bridge id)computeEncRef;
+ id tex = (__bridge id)texRef;
+ [enc setTexture:tex atIndex:index];
+ }
+}
+
+static void computeEncEnd(CFTypeRef computeEncRef) {
+ @autoreleasepool {
+ id enc = (__bridge id)computeEncRef;
+ [enc endEncoding];
+ }
+}
+
+static void computeEncSetBuffer(CFTypeRef computeEncRef, NSUInteger index, CFTypeRef bufRef) {
+ @autoreleasepool {
+ id enc = (__bridge id)computeEncRef;
+ id buf = (__bridge id)bufRef;
+ [enc setBuffer:buf offset:0 atIndex:index];
+ }
+}
+
+static void computeEncDispatch(CFTypeRef computeEncRef, MTLSize threadgroupsPerGrid, MTLSize threadsPerThreadgroup) {
+ @autoreleasepool {
+ id enc = (__bridge id)computeEncRef;
+ [enc dispatchThreadgroups:threadgroupsPerGrid threadsPerThreadgroup:threadsPerThreadgroup];
+ }
+}
+
+static void computeEncSetBytes(CFTypeRef computeEncRef, const void *bytes, NSUInteger length, NSUInteger index) {
+ @autoreleasepool {
+ id enc = (__bridge id)computeEncRef;
+ [enc setBytes:bytes length:length atIndex:index];
+ }
+}
+
+static void blitEncEnd(CFTypeRef blitEncRef) {
+ @autoreleasepool {
+ id enc = (__bridge id)blitEncRef;
+ [enc endEncoding];
+ }
+}
+
+static void blitEncCopyFromTexture(CFTypeRef blitEncRef, CFTypeRef srcRef, MTLOrigin srcOrig, MTLSize srcSize, CFTypeRef dstRef, MTLOrigin dstOrig) {
+ @autoreleasepool {
+ id enc = (__bridge id)blitEncRef;
+ id src = (__bridge id)srcRef;
+ id dst = (__bridge id)dstRef;
+ [enc copyFromTexture:src
+ sourceSlice:0
+ sourceLevel:0
+ sourceOrigin:srcOrig
+ sourceSize:srcSize
+ toTexture:dst
+ destinationSlice:0
+ destinationLevel:0
+ destinationOrigin:dstOrig];
+ }
+}
+
+static void blitEncCopyBufferToTexture(CFTypeRef blitEncRef, CFTypeRef bufRef, CFTypeRef texRef, NSUInteger offset, NSUInteger stride, NSUInteger length, MTLSize dims, MTLOrigin orig) {
+ @autoreleasepool {
+ id enc = (__bridge id)blitEncRef;
+ id src = (__bridge id)bufRef;
+ id dst = (__bridge id)texRef;
+ [enc copyFromBuffer:src
+ sourceOffset:offset
+ sourceBytesPerRow:stride
+ sourceBytesPerImage:length
+ sourceSize:dims
+ toTexture:dst
+ destinationSlice:0
+ destinationLevel:0
+ destinationOrigin:orig];
+ }
+}
+
+static void blitEncCopyTextureToBuffer(CFTypeRef blitEncRef, CFTypeRef texRef, CFTypeRef bufRef, NSUInteger offset, NSUInteger stride, NSUInteger length, MTLSize dims, MTLOrigin orig) {
+ @autoreleasepool {
+ id enc = (__bridge id)blitEncRef;
+ id src = (__bridge id)texRef;
+ id dst = (__bridge id)bufRef;
+ [enc copyFromTexture:src
+ sourceSlice:0
+ sourceLevel:0
+ sourceOrigin:orig
+ sourceSize:dims
+ toBuffer:dst
+ destinationOffset:offset
+ destinationBytesPerRow:stride
+ destinationBytesPerImage:length];
+ }
+}
+
+static void blitEncCopyBufferToBuffer(CFTypeRef blitEncRef, CFTypeRef srcRef, CFTypeRef dstRef, NSUInteger srcOff, NSUInteger dstOff, NSUInteger size) {
+ @autoreleasepool {
+ id enc = (__bridge id)blitEncRef;
+ id src = (__bridge id)srcRef;
+ id dst = (__bridge id)dstRef;
+ [enc copyFromBuffer:src
+ sourceOffset:srcOff
+ toBuffer:dst
+ destinationOffset:dstOff
+ size:size];
+ }
+}
+
+static CFTypeRef newTexture(CFTypeRef devRef, NSUInteger width, NSUInteger height, MTLPixelFormat format, MTLTextureUsage usage) {
+ @autoreleasepool {
+ id dev = (__bridge id)devRef;
+ MTLTextureDescriptor *mtlDesc = [MTLTextureDescriptor texture2DDescriptorWithPixelFormat: format
+ width: width
+ height: height
+ mipmapped: NO];
+ mtlDesc.usage = usage;
+ mtlDesc.storageMode = MTLStorageModePrivate;
+ return CFBridgingRetain([dev newTextureWithDescriptor:mtlDesc]);
+ }
+}
+
+static CFTypeRef newSampler(CFTypeRef devRef, MTLSamplerMinMagFilter minFilter, MTLSamplerMinMagFilter magFilter) {
+ @autoreleasepool {
+ id dev = (__bridge id)devRef;
+ MTLSamplerDescriptor *desc = [MTLSamplerDescriptor new];
+ desc.minFilter = minFilter;
+ desc.magFilter = magFilter;
+ return CFBridgingRetain([dev newSamplerStateWithDescriptor:desc]);
+ }
+}
+
+static CFTypeRef newBuffer(CFTypeRef devRef, NSUInteger size, MTLResourceOptions opts) {
+ @autoreleasepool {
+ id dev = (__bridge id)devRef;
+ id buf = [dev newBufferWithLength:size
+ options:opts];
+ return CFBridgingRetain(buf);
+ }
+}
+
+static slice bufferContents(CFTypeRef bufRef) {
+ @autoreleasepool {
+ id buf = (__bridge id)bufRef;
+ slice s = {.addr = [buf contents], .size = [buf length]};
+ return s;
+ }
+}
+
+static CFTypeRef newLibrary(CFTypeRef devRef, char *name, void *mtllib, size_t size) {
+ @autoreleasepool {
+ id dev = (__bridge id)devRef;
+ dispatch_data_t data = dispatch_data_create(mtllib, size, DISPATCH_TARGET_QUEUE_DEFAULT, DISPATCH_DATA_DESTRUCTOR_DEFAULT);
+ id lib = [dev newLibraryWithData:data error:nil];
+ lib.label = [NSString stringWithUTF8String:name];
+ return CFBridgingRetain(lib);
+ }
+}
+
+static CFTypeRef libraryNewFunction(CFTypeRef libRef, char *funcName) {
+ @autoreleasepool {
+ id lib = (__bridge id)libRef;
+ NSString *name = [NSString stringWithUTF8String:funcName];
+ return CFBridgingRetain([lib newFunctionWithName:name]);
+ }
+}
+
+static CFTypeRef newComputePipeline(CFTypeRef devRef, CFTypeRef funcRef) {
+ @autoreleasepool {
+ id dev = (__bridge id)devRef;
+ id func = (__bridge id)funcRef;
+ return CFBridgingRetain([dev newComputePipelineStateWithFunction:func error:nil]);
+ }
+}
+
+static CFTypeRef newRenderPipeline(CFTypeRef devRef, CFTypeRef vertFunc, CFTypeRef fragFunc, MTLPixelFormat pixelFormat, NSUInteger bufIdx, NSUInteger nverts, MTLVertexFormat *fmts, NSUInteger *offsets, NSUInteger stride, int blend, MTLBlendFactor srcFactor, MTLBlendFactor dstFactor, NSUInteger nvertBufs, NSUInteger nfragBufs) {
+ @autoreleasepool {
+ id dev = (__bridge id)devRef;
+ id vfunc = (__bridge id)vertFunc;
+ id ffunc = (__bridge id)fragFunc;
+ MTLVertexDescriptor *vdesc = [MTLVertexDescriptor vertexDescriptor];
+ vdesc.layouts[bufIdx].stride = stride;
+ for (NSUInteger i = 0; i < nverts; i++) {
+ vdesc.attributes[i].format = fmts[i];
+ vdesc.attributes[i].offset = offsets[i];
+ vdesc.attributes[i].bufferIndex = bufIdx;
+ }
+ MTLRenderPipelineDescriptor *desc = [MTLRenderPipelineDescriptor new];
+ desc.vertexFunction = vfunc;
+ desc.fragmentFunction = ffunc;
+ desc.vertexDescriptor = vdesc;
+ for (NSUInteger i = 0; i < nvertBufs; i++) {
+ if (@available(iOS 11.0, *)) {
+ desc.vertexBuffers[i].mutability = MTLMutabilityImmutable;
+ }
+ }
+ for (NSUInteger i = 0; i < nfragBufs; i++) {
+ if (@available(iOS 11.0, *)) {
+ desc.fragmentBuffers[i].mutability = MTLMutabilityImmutable;
+ }
+ }
+ desc.colorAttachments[0].pixelFormat = pixelFormat;
+ desc.colorAttachments[0].blendingEnabled = blend ? YES : NO;
+ desc.colorAttachments[0].sourceAlphaBlendFactor = srcFactor;
+ desc.colorAttachments[0].sourceRGBBlendFactor = srcFactor;
+ desc.colorAttachments[0].destinationAlphaBlendFactor = dstFactor;
+ desc.colorAttachments[0].destinationRGBBlendFactor = dstFactor;
+ return CFBridgingRetain([dev newRenderPipelineStateWithDescriptor:desc
+ error:nil]);
+ }
+}
+*/
+import "C"
+
+type Backend struct {
+ dev C.CFTypeRef
+ queue C.CFTypeRef
+ pixelFmt C.MTLPixelFormat
+
+ cmdBuffer C.CFTypeRef
+ lastCmdBuffer C.CFTypeRef
+ renderEnc C.CFTypeRef
+ computeEnc C.CFTypeRef
+ blitEnc C.CFTypeRef
+
+ prog *Program
+ topology C.MTLPrimitiveType
+
+ stagingBuf C.CFTypeRef
+ stagingOff int
+
+ indexBuf *Buffer
+
+ // bufSizes is scratch space for filling out the spvBufferSizeConstants
+ // that spirv-cross generates for emulating buffer.length expressions in
+ // shaders.
+ bufSizes []uint32
+}
+
+type Texture struct {
+ backend *Backend
+ texture C.CFTypeRef
+ sampler C.CFTypeRef
+ width int
+ height int
+ foreign bool
+}
+
+type Shader struct {
+ function C.CFTypeRef
+ inputs []shader.InputLocation
+}
+
+type Program struct {
+ pipeline C.CFTypeRef
+ groupSize [3]int
+}
+
+type Pipeline struct {
+ pipeline C.CFTypeRef
+ topology C.MTLPrimitiveType
+}
+
+type Buffer struct {
+ backend *Backend
+ size int
+ buffer C.CFTypeRef
+
+ // store is the buffer contents For buffers not allocated on the GPU.
+ store []byte
+}
+
+const (
+ uniformBufferIndex = 0
+ attributeBufferIndex = 1
+
+ spvBufferSizeConstantsBinding = 25
+)
+
+const (
+ texUnits = 4
+ bufferUnits = 4
+)
+
+func init() {
+ driver.NewMetalDevice = newMetalDevice
+}
+
+func newMetalDevice(api driver.Metal) (driver.Device, error) {
+ dev := C.CFTypeRef(api.Device)
+ C.CFRetain(dev)
+ queue := C.CFTypeRef(api.Queue)
+ C.CFRetain(queue)
+ b := &Backend{
+ dev: dev,
+ queue: queue,
+ pixelFmt: C.MTLPixelFormat(api.PixelFormat),
+ bufSizes: make([]uint32, bufferUnits),
+ }
+ return b, nil
+}
+
+func (b *Backend) BeginFrame(target driver.RenderTarget, clear bool, viewport image.Point) driver.Texture {
+ if b.lastCmdBuffer != 0 {
+ C.cmdBufferWaitUntilCompleted(b.lastCmdBuffer)
+ b.stagingOff = 0
+ }
+ if target == nil {
+ return nil
+ }
+ switch t := target.(type) {
+ case driver.MetalRenderTarget:
+ texture := C.CFTypeRef(t.Texture)
+ return &Texture{texture: texture, foreign: true}
+ case *Texture:
+ return t
+ default:
+ panic(fmt.Sprintf("metal: unsupported render target type: %T", t))
+ }
+}
+
+func (b *Backend) startBlit() C.CFTypeRef {
+ if b.blitEnc != 0 {
+ return b.blitEnc
+ }
+ b.endEncoder()
+ b.ensureCmdBuffer()
+ b.blitEnc = C.cmdBufferBlitEncoder(b.cmdBuffer)
+ if b.blitEnc == 0 {
+ panic("metal: [MTLCommandBuffer blitCommandEncoder:] failed")
+ }
+ return b.blitEnc
+}
+
+func (b *Backend) CopyTexture(dst driver.Texture, dorig image.Point, src driver.Texture, srect image.Rectangle) {
+ enc := b.startBlit()
+ dstTex := dst.(*Texture).texture
+ srcTex := src.(*Texture).texture
+ ssz := srect.Size()
+ C.blitEncCopyFromTexture(
+ enc,
+ srcTex,
+ C.MTLOrigin{
+ x: C.NSUInteger(srect.Min.X),
+ y: C.NSUInteger(srect.Min.Y),
+ },
+ C.MTLSize{
+ width: C.NSUInteger(ssz.X),
+ height: C.NSUInteger(ssz.Y),
+ depth: 1,
+ },
+ dstTex,
+ C.MTLOrigin{
+ x: C.NSUInteger(dorig.X),
+ y: C.NSUInteger(dorig.Y),
+ },
+ )
+}
+
+func (b *Backend) EndFrame() {
+ b.endCmdBuffer(false)
+}
+
+func (b *Backend) endCmdBuffer(wait bool) {
+ b.endEncoder()
+ if b.cmdBuffer == 0 {
+ return
+ }
+ C.cmdBufferCommit(b.cmdBuffer)
+ if wait {
+ C.cmdBufferWaitUntilCompleted(b.cmdBuffer)
+ }
+ if b.lastCmdBuffer != 0 {
+ C.CFRelease(b.lastCmdBuffer)
+ }
+ b.lastCmdBuffer = b.cmdBuffer
+ b.cmdBuffer = 0
+}
+
+func (b *Backend) Caps() driver.Caps {
+ return driver.Caps{
+ MaxTextureSize: 8192,
+ Features: driver.FeatureSRGB | driver.FeatureCompute | driver.FeatureFloatRenderTargets,
+ }
+}
+
+func (b *Backend) NewTimer() driver.Timer {
+ panic("timers not supported")
+}
+
+func (b *Backend) IsTimeContinuous() bool {
+ panic("timers not supported")
+}
+
+func (b *Backend) Release() {
+ if b.cmdBuffer != 0 {
+ C.CFRelease(b.cmdBuffer)
+ }
+ if b.lastCmdBuffer != 0 {
+ C.CFRelease(b.lastCmdBuffer)
+ }
+ if b.stagingBuf != 0 {
+ C.CFRelease(b.stagingBuf)
+ }
+ C.CFRelease(b.queue)
+ C.CFRelease(b.dev)
+ *b = Backend{}
+}
+
+func (b *Backend) NewTexture(format driver.TextureFormat, width, height int, minFilter, magFilter driver.TextureFilter, bindings driver.BufferBinding) (driver.Texture, error) {
+ mformat := pixelFormatFor(format)
+ var usage C.MTLTextureUsage
+ if bindings&(driver.BufferBindingTexture|driver.BufferBindingShaderStorageRead) != 0 {
+ usage |= C.MTLTextureUsageShaderRead
+ }
+ if bindings&driver.BufferBindingFramebuffer != 0 {
+ usage |= C.MTLTextureUsageRenderTarget
+ }
+ if bindings&driver.BufferBindingShaderStorageWrite != 0 {
+ usage |= C.MTLTextureUsageShaderWrite
+ }
+ tex := C.newTexture(b.dev, C.NSUInteger(width), C.NSUInteger(height), mformat, usage)
+ if tex == 0 {
+ return nil, errors.New("metal: [MTLDevice newTextureWithDescriptor:] failed")
+ }
+ min := samplerFilterFor(minFilter)
+ max := samplerFilterFor(magFilter)
+ s := C.newSampler(b.dev, min, max)
+ if s == 0 {
+ C.CFRelease(tex)
+ return nil, errors.New("metal: [MTLDevice newSamplerStateWithDescriptor:] failed")
+ }
+ return &Texture{backend: b, texture: tex, sampler: s, width: width, height: height}, nil
+}
+
+func samplerFilterFor(f driver.TextureFilter) C.MTLSamplerMinMagFilter {
+ switch f {
+ case driver.FilterNearest:
+ return C.MTLSamplerMinMagFilterNearest
+ case driver.FilterLinear:
+ return C.MTLSamplerMinMagFilterLinear
+ default:
+ panic("invalid texture filter")
+ }
+}
+
+func (b *Backend) NewPipeline(desc driver.PipelineDesc) (driver.Pipeline, error) {
+ vsh, fsh := desc.VertexShader.(*Shader), desc.FragmentShader.(*Shader)
+ layout := desc.VertexLayout.Inputs
+ if got, exp := len(layout), len(vsh.inputs); got != exp {
+ return nil, fmt.Errorf("metal: number of input descriptors (%d) doesn't match number of inputs (%d)", got, exp)
+ }
+ formats := make([]C.MTLVertexFormat, len(layout))
+ offsets := make([]C.NSUInteger, len(layout))
+ for i, inp := range layout {
+ index := vsh.inputs[i].Location
+ formats[index] = vertFormatFor(vsh.inputs[i])
+ offsets[index] = C.NSUInteger(inp.Offset)
+ }
+ var (
+ fmtPtr *C.MTLVertexFormat
+ offPtr *C.NSUInteger
+ )
+ if len(layout) > 0 {
+ fmtPtr = &formats[0]
+ offPtr = &offsets[0]
+ }
+ srcFactor := blendFactorFor(desc.BlendDesc.SrcFactor)
+ dstFactor := blendFactorFor(desc.BlendDesc.DstFactor)
+ blend := C.int(0)
+ if desc.BlendDesc.Enable {
+ blend = 1
+ }
+ pf := b.pixelFmt
+ if f := desc.PixelFormat; f != driver.TextureFormatOutput {
+ pf = pixelFormatFor(f)
+ }
+ pipe := C.newRenderPipeline(
+ b.dev,
+ vsh.function,
+ fsh.function,
+ pf,
+ attributeBufferIndex,
+ C.NSUInteger(len(layout)), fmtPtr, offPtr,
+ C.NSUInteger(desc.VertexLayout.Stride),
+ blend, srcFactor, dstFactor,
+ 2, // Number of vertex buffers.
+ 1, // Number of fragment buffers.
+ )
+ if pipe == 0 {
+ return nil, errors.New("metal: pipeline construction failed")
+ }
+ return &Pipeline{pipeline: pipe, topology: primitiveFor(desc.Topology)}, nil
+}
+
+func dataTypeSize(d shader.DataType) int {
+ switch d {
+ case shader.DataTypeFloat:
+ return 4
+ default:
+ panic("unsupported data type")
+ }
+}
+
+func blendFactorFor(f driver.BlendFactor) C.MTLBlendFactor {
+ switch f {
+ case driver.BlendFactorZero:
+ return C.MTLBlendFactorZero
+ case driver.BlendFactorOne:
+ return C.MTLBlendFactorOne
+ case driver.BlendFactorOneMinusSrcAlpha:
+ return C.MTLBlendFactorOneMinusSourceAlpha
+ case driver.BlendFactorDstColor:
+ return C.MTLBlendFactorDestinationColor
+ default:
+ panic("unsupported blend factor")
+ }
+}
+
+func vertFormatFor(f shader.InputLocation) C.MTLVertexFormat {
+ t := f.Type
+ s := f.Size
+ switch {
+ case t == shader.DataTypeFloat && s == 1:
+ return C.MTLVertexFormatFloat
+ case t == shader.DataTypeFloat && s == 2:
+ return C.MTLVertexFormatFloat2
+ case t == shader.DataTypeFloat && s == 3:
+ return C.MTLVertexFormatFloat3
+ case t == shader.DataTypeFloat && s == 4:
+ return C.MTLVertexFormatFloat4
+ default:
+ panic("unsupported data type")
+ }
+}
+
+func pixelFormatFor(f driver.TextureFormat) C.MTLPixelFormat {
+ switch f {
+ case driver.TextureFormatFloat:
+ return C.MTLPixelFormatR16Float
+ case driver.TextureFormatRGBA8:
+ return C.MTLPixelFormatRGBA8Unorm
+ case driver.TextureFormatSRGBA:
+ return C.MTLPixelFormatRGBA8Unorm_sRGB
+ default:
+ panic("unsupported pixel format")
+ }
+}
+
+func (b *Backend) NewBuffer(typ driver.BufferBinding, size int) (driver.Buffer, error) {
+ // Transfer buffer contents in command encoders on every use for
+ // smaller buffers. The advantage is that buffer re-use during a frame
+ // won't occur a GPU wait.
+ // We can't do this for buffers written to by the GPU and read by the client,
+ // and Metal doesn't require a buffer for indexed draws.
+ if size <= 4096 && typ&(driver.BufferBindingShaderStorageWrite|driver.BufferBindingIndices) == 0 {
+ return &Buffer{size: size, store: make([]byte, size)}, nil
+ }
+ buf := C.newBuffer(b.dev, C.NSUInteger(size), C.MTLResourceStorageModePrivate)
+ return &Buffer{backend: b, size: size, buffer: buf}, nil
+}
+
+func (b *Backend) NewImmutableBuffer(typ driver.BufferBinding, data []byte) (driver.Buffer, error) {
+ buf, err := b.NewBuffer(typ, len(data))
+ if err != nil {
+ return nil, err
+ }
+ buf.Upload(data)
+ return buf, nil
+}
+
+func (b *Backend) NewComputeProgram(src shader.Sources) (driver.Program, error) {
+ sh, err := b.newShader(src)
+ if err != nil {
+ return nil, err
+ }
+ defer sh.Release()
+ pipe := C.newComputePipeline(b.dev, sh.function)
+ if pipe == 0 {
+ return nil, fmt.Errorf("metal: compute program %q load failed", src.Name)
+ }
+ return &Program{pipeline: pipe, groupSize: src.WorkgroupSize}, nil
+}
+
+func (b *Backend) NewVertexShader(src shader.Sources) (driver.VertexShader, error) {
+ return b.newShader(src)
+}
+
+func (b *Backend) NewFragmentShader(src shader.Sources) (driver.FragmentShader, error) {
+ return b.newShader(src)
+}
+
+func (b *Backend) newShader(src shader.Sources) (*Shader, error) {
+ vsrc := []byte(src.MetalLib)
+ cname := C.CString(src.Name)
+ defer C.free(unsafe.Pointer(cname))
+ vlib := C.newLibrary(b.dev, cname, unsafe.Pointer(&vsrc[0]), C.size_t(len(vsrc)))
+ if vlib == 0 {
+ return nil, fmt.Errorf("metal: vertex shader %q load failed", src.Name)
+ }
+ defer C.CFRelease(vlib)
+ funcName := C.CString("main0")
+ defer C.free(unsafe.Pointer(funcName))
+ f := C.libraryNewFunction(vlib, funcName)
+ if f == 0 {
+ return nil, fmt.Errorf("metal: main function not found in %q", src.Name)
+ }
+ return &Shader{function: f, inputs: src.Inputs}, nil
+}
+
+func (b *Backend) Viewport(x, y, width, height int) {
+ enc := b.renderEnc
+ if enc == 0 {
+ panic("no active render pass")
+ }
+ C.renderEncViewport(enc, C.MTLViewport{
+ originX: C.double(x),
+ originY: C.double(y),
+ width: C.double(width),
+ height: C.double(height),
+ znear: 0.0,
+ zfar: 1.0,
+ })
+}
+
+func (b *Backend) DrawArrays(off, count int) {
+ enc := b.renderEnc
+ if enc == 0 {
+ panic("no active render pass")
+ }
+ C.renderEncDrawPrimitives(enc, b.topology, C.NSUInteger(off), C.NSUInteger(count))
+}
+
+func (b *Backend) DrawElements(off, count int) {
+ enc := b.renderEnc
+ if enc == 0 {
+ panic("no active render pass")
+ }
+ C.renderEncDrawIndexedPrimitives(enc, b.topology, b.indexBuf.buffer, C.NSUInteger(off), C.NSUInteger(count))
+}
+
+func primitiveFor(mode driver.Topology) C.MTLPrimitiveType {
+ switch mode {
+ case driver.TopologyTriangles:
+ return C.MTLPrimitiveTypeTriangle
+ case driver.TopologyTriangleStrip:
+ return C.MTLPrimitiveTypeTriangleStrip
+ default:
+ panic("metal: unknown draw mode")
+ }
+}
+
+func (b *Backend) BindImageTexture(unit int, tex driver.Texture) {
+ b.BindTexture(unit, tex)
+}
+
+func (b *Backend) BeginCompute() {
+ b.endEncoder()
+ b.ensureCmdBuffer()
+ for i := range b.bufSizes {
+ b.bufSizes[i] = 0
+ }
+ b.computeEnc = C.cmdBufferComputeEncoder(b.cmdBuffer)
+ if b.computeEnc == 0 {
+ panic("metal: [MTLCommandBuffer computeCommandEncoder:] failed")
+ }
+}
+
+func (b *Backend) EndCompute() {
+ if b.computeEnc == 0 {
+ panic("no active compute pass")
+ }
+ C.computeEncEnd(b.computeEnc)
+ C.CFRelease(b.computeEnc)
+ b.computeEnc = 0
+}
+
+func (b *Backend) DispatchCompute(x, y, z int) {
+ enc := b.computeEnc
+ if enc == 0 {
+ panic("no active compute pass")
+ }
+ C.computeEncSetBytes(enc, unsafe.Pointer(&b.bufSizes[0]), C.NSUInteger(len(b.bufSizes)*4), spvBufferSizeConstantsBinding)
+ threadgroupsPerGrid := C.MTLSize{
+ width: C.NSUInteger(x), height: C.NSUInteger(y), depth: C.NSUInteger(z),
+ }
+ sz := b.prog.groupSize
+ threadsPerThreadgroup := C.MTLSize{
+ width: C.NSUInteger(sz[0]), height: C.NSUInteger(sz[1]), depth: C.NSUInteger(sz[2]),
+ }
+ C.computeEncDispatch(enc, threadgroupsPerGrid, threadsPerThreadgroup)
+}
+
+// stagingBuffer returns a CPU-visible MTLBuffer and an offset into it with at
+// least size bytes available. The buffer is a bump allocator: when it runs
+// out, the old buffer is released and a new one of double the required total
+// is allocated. The returned offset is only valid until the next call.
+func (b *Backend) stagingBuffer(size int) (C.CFTypeRef, int) {
+ if b.stagingBuf == 0 || b.stagingOff+size > len(bufferStore(b.stagingBuf)) {
+ if b.stagingBuf != 0 {
+ C.CFRelease(b.stagingBuf)
+ }
+ // Grow to twice the currently needed capacity to amortize reallocation.
+ cap := 2 * (b.stagingOff + size)
+ b.stagingBuf = C.newBuffer(b.dev, C.NSUInteger(cap), C.MTLResourceStorageModeShared|C.MTLResourceCPUCacheModeWriteCombined)
+ if b.stagingBuf == 0 {
+ panic(fmt.Errorf("metal: failed to allocate %d bytes of staging buffer", cap))
+ }
+ b.stagingOff = 0
+ }
+ off := b.stagingOff
+ b.stagingOff += size
+ return b.stagingBuf, off
+}
+
+// Upload copies pixels into the texture region at offset with the given
+// dimensions. stride is the source row stride in bytes; 0 means tightly
+// packed (size.X*4, i.e. 4 bytes per pixel). The data is first packed
+// row-by-row into a shared staging buffer, then blitted to the texture on
+// the GPU.
+func (t *Texture) Upload(offset, size image.Point, pixels []byte, stride int) {
+ if len(pixels) == 0 {
+ return
+ }
+ if stride == 0 {
+ stride = size.X * 4
+ }
+ dstStride := size.X * 4
+ n := size.Y * dstStride
+ buf, off := t.backend.stagingBuffer(n)
+ store := bufferSlice(buf, off, n)
+ // Repack rows tightly into the staging buffer; the source may have a
+ // larger stride than the destination.
+ var srcOff, dstOff int
+ for y := 0; y < size.Y; y++ {
+ srcRow := pixels[srcOff : srcOff+dstStride]
+ dstRow := store[dstOff : dstOff+dstStride]
+ copy(dstRow, srcRow)
+ dstOff += dstStride
+ srcOff += stride
+ }
+ enc := t.backend.startBlit()
+ orig := C.MTLOrigin{
+ x: C.NSUInteger(offset.X),
+ y: C.NSUInteger(offset.Y),
+ }
+ msize := C.MTLSize{
+ width: C.NSUInteger(size.X),
+ height: C.NSUInteger(size.Y),
+ depth: 1,
+ }
+ C.blitEncCopyBufferToTexture(enc, buf, t.texture, C.NSUInteger(off), C.NSUInteger(dstStride), C.NSUInteger(len(store)), msize, orig)
+}
+
+func (t *Texture) Release() {
+ if t.foreign {
+ panic("metal: release of external texture")
+ }
+ C.CFRelease(t.texture)
+ C.CFRelease(t.sampler)
+ *t = Texture{}
+}
+
+func (p *Pipeline) Release() {
+ C.CFRelease(p.pipeline)
+ *p = Pipeline{}
+}
+
+func (b *Backend) PrepareTexture(tex driver.Texture) {}
+
+func (b *Backend) BindTexture(unit int, tex driver.Texture) {
+ t := tex.(*Texture)
+ if enc := b.renderEnc; enc != 0 {
+ C.renderEncSetFragmentTexture(enc, C.NSUInteger(unit), t.texture)
+ C.renderEncSetFragmentSamplerState(enc, C.NSUInteger(unit), t.sampler)
+ } else if enc := b.computeEnc; enc != 0 {
+ C.computeEncSetTexture(enc, C.NSUInteger(unit), t.texture)
+ } else {
+ panic("no active render nor compute pass")
+ }
+}
+
+func (b *Backend) ensureCmdBuffer() {
+ if b.cmdBuffer != 0 {
+ return
+ }
+ b.cmdBuffer = C.queueNewBuffer(b.queue)
+ if b.cmdBuffer == 0 {
+ panic("metal: [MTLCommandQueue cmdBuffer] failed")
+ }
+}
+
+func (b *Backend) BindPipeline(pipe driver.Pipeline) {
+ p := pipe.(*Pipeline)
+ enc := b.renderEnc
+ if enc == 0 {
+ panic("no active render pass")
+ }
+ C.renderEncSetRenderPipelineState(enc, p.pipeline)
+ b.topology = p.topology
+}
+
+func (b *Backend) BindProgram(prog driver.Program) {
+ enc := b.computeEnc
+ if enc == 0 {
+ panic("no active compute pass")
+ }
+ p := prog.(*Program)
+ C.computeEncSetPipeline(enc, p.pipeline)
+ b.prog = p
+}
+
+func (s *Shader) Release() {
+ C.CFRelease(s.function)
+ *s = Shader{}
+}
+
+func (p *Program) Release() {
+ C.CFRelease(p.pipeline)
+ *p = Program{}
+}
+
+func (b *Backend) BindStorageBuffer(binding int, buffer driver.Buffer) {
+ buf := buffer.(*Buffer)
+ b.bufSizes[binding] = uint32(buf.size)
+ enc := b.computeEnc
+ if enc == 0 {
+ panic("no active compute pass")
+ }
+ if buf.buffer != 0 {
+ C.computeEncSetBuffer(enc, C.NSUInteger(binding), buf.buffer)
+ } else if buf.size > 0 {
+ C.computeEncSetBytes(enc, unsafe.Pointer(&buf.store[0]), C.NSUInteger(buf.size), C.NSUInteger(binding))
+ }
+}
+
+func (b *Backend) BindUniforms(buf driver.Buffer) {
+ bf := buf.(*Buffer)
+ enc := b.renderEnc
+ if enc == 0 {
+ panic("no active render pass")
+ }
+ if bf.buffer != 0 {
+ C.renderEncSetVertexBuffer(enc, bf.buffer, uniformBufferIndex, 0)
+ C.renderEncSetFragmentBuffer(enc, bf.buffer, uniformBufferIndex, 0)
+ } else if bf.size > 0 {
+ C.renderEncSetVertexBytes(enc, unsafe.Pointer(&bf.store[0]), C.NSUInteger(bf.size), uniformBufferIndex)
+ C.renderEncSetFragmentBytes(enc, unsafe.Pointer(&bf.store[0]), C.NSUInteger(bf.size), uniformBufferIndex)
+ }
+}
+
+func (b *Backend) BindVertexBuffer(buf driver.Buffer, offset int) {
+ bf := buf.(*Buffer)
+ enc := b.renderEnc
+ if enc == 0 {
+ panic("no active render pass")
+ }
+ if bf.buffer != 0 {
+ C.renderEncSetVertexBuffer(enc, bf.buffer, attributeBufferIndex, C.NSUInteger(offset))
+ } else if n := bf.size - offset; n > 0 {
+ C.renderEncSetVertexBytes(enc, unsafe.Pointer(&bf.store[offset]), C.NSUInteger(n), attributeBufferIndex)
+ }
+}
+
+func (b *Backend) BindIndexBuffer(buf driver.Buffer) {
+ b.indexBuf = buf.(*Buffer)
+}
+
+// Download copies the first len(data) bytes of the GPU buffer into data.
+// It blits into a shared staging buffer and synchronously waits for the
+// command buffer to complete (endCmdBuffer(true)), so it stalls the GPU.
+// Panics if data is larger than the buffer.
+func (b *Buffer) Download(data []byte) error {
+ if len(data) > b.size {
+ panic(fmt.Errorf("len(data) (%d) larger than len(content) (%d)", len(data), b.size))
+ }
+ buf, off := b.backend.stagingBuffer(len(data))
+ enc := b.backend.startBlit()
+ C.blitEncCopyBufferToBuffer(enc, b.buffer, buf, 0, C.NSUInteger(off), C.NSUInteger(len(data)))
+ b.backend.endCmdBuffer(true)
+ store := bufferSlice(buf, off, len(data))
+ copy(data, store)
+ return nil
+}
+
+// Upload copies data into the buffer. Small CPU-backed buffers (buffer == 0,
+// see NewBuffer) are updated with a plain copy; GPU-private buffers are
+// written through a shared staging buffer and a blit command. Panics if
+// data is larger than the buffer.
+func (b *Buffer) Upload(data []byte) {
+ if len(data) > b.size {
+ panic(fmt.Errorf("len(data) (%d) larger than len(content) (%d)", len(data), b.size))
+ }
+ if b.buffer == 0 {
+ // CPU-emulated buffer; no GPU work needed.
+ copy(b.store, data)
+ return
+ }
+ buf, off := b.backend.stagingBuffer(len(data))
+ store := bufferSlice(buf, off, len(data))
+ copy(store, data)
+ enc := b.backend.startBlit()
+ C.blitEncCopyBufferToBuffer(enc, buf, b.buffer, C.NSUInteger(off), 0, C.NSUInteger(len(store)))
+}
+
+// bufferStore returns the CPU-visible contents of a shared MTLBuffer as a
+// byte slice aliasing the Metal-owned memory. The slice is valid only while
+// the buffer is alive; the [1 << 30] cast is the usual cgo idiom for viewing
+// a C pointer as a Go slice.
+func bufferStore(buf C.CFTypeRef) []byte {
+ contents := C.bufferContents(buf)
+ return (*(*[1 << 30]byte)(contents.addr))[:contents.size:contents.size]
+}
+
+// bufferSlice returns the [off, off+len) window of a shared MTLBuffer's
+// CPU-visible contents. Same lifetime caveats as bufferStore.
+func bufferSlice(buf C.CFTypeRef, off, len int) []byte {
+ store := bufferStore(buf)
+ return store[off : off+len]
+}
+
+func (b *Buffer) Release() {
+ if b.buffer != 0 {
+ C.CFRelease(b.buffer)
+ }
+ *b = Buffer{}
+}
+
+// ReadPixels copies the src rectangle of the texture into pixels, whose rows
+// are stride bytes apart. It blits the region into a tightly packed staging
+// buffer and synchronously waits for the GPU before copying out.
+// NOTE(review): srcOff/dstOff are named backwards — srcOff advances through
+// pixels (the destination) and dstOff through store (the source). The
+// behavior is correct; only the names are misleading.
+func (t *Texture) ReadPixels(src image.Rectangle, pixels []byte, stride int) error {
+ if len(pixels) == 0 {
+ return nil
+ }
+ sz := src.Size()
+ orig := C.MTLOrigin{
+ x: C.NSUInteger(src.Min.X),
+ y: C.NSUInteger(src.Min.Y),
+ }
+ msize := C.MTLSize{
+ width: C.NSUInteger(sz.X),
+ height: C.NSUInteger(sz.Y),
+ depth: 1,
+ }
+ // The staging copy is tightly packed at 4 bytes per pixel.
+ stageStride := sz.X * 4
+ n := sz.Y * stageStride
+ buf, off := t.backend.stagingBuffer(n)
+ enc := t.backend.startBlit()
+ C.blitEncCopyTextureToBuffer(enc, t.texture, buf, C.NSUInteger(off), C.NSUInteger(stageStride), C.NSUInteger(n), msize, orig)
+ // Block until the GPU finishes so the staging memory is readable.
+ t.backend.endCmdBuffer(true)
+ store := bufferSlice(buf, off, n)
+ var srcOff, dstOff int
+ for y := 0; y < sz.Y; y++ {
+ dstRow := pixels[srcOff : srcOff+stageStride]
+ srcRow := store[dstOff : dstOff+stageStride]
+ copy(dstRow, srcRow)
+ dstOff += stageStride
+ srcOff += stride
+ }
+ return nil
+}
+
+// BeginRenderPass starts a render command encoder targeting tex, translating
+// the driver load action (keep/clear/invalidate) to the Metal equivalent.
+// Any pending blit encoder is ended first; panics if an encoder creation
+// fails or another render/compute pass is active (via endEncoder).
+func (b *Backend) BeginRenderPass(tex driver.Texture, d driver.LoadDesc) {
+ b.endEncoder()
+ b.ensureCmdBuffer()
+ f := tex.(*Texture)
+ col := d.ClearColor
+ var act C.MTLLoadAction
+ switch d.Action {
+ case driver.LoadActionKeep:
+ act = C.MTLLoadActionLoad
+ case driver.LoadActionClear:
+ act = C.MTLLoadActionClear
+ case driver.LoadActionInvalidate:
+ act = C.MTLLoadActionDontCare
+ }
+ b.renderEnc = C.cmdBufferRenderEncoder(b.cmdBuffer, f.texture, act, C.float(col.R), C.float(col.G), C.float(col.B), C.float(col.A))
+ if b.renderEnc == 0 {
+ panic("metal: [MTLCommandBuffer renderCommandEncoderWithDescriptor:] failed")
+ }
+}
+
+func (b *Backend) EndRenderPass() {
+ if b.renderEnc == 0 {
+ panic("no active render pass")
+ }
+ C.renderEncEnd(b.renderEnc)
+ C.CFRelease(b.renderEnc)
+ b.renderEnc = 0
+}
+
+// endEncoder finishes the pending blit encoder, if any. Render and compute
+// passes must be explicitly ended by the caller (EndRenderPass/EndCompute),
+// so an active one here is a programming error and panics.
+func (b *Backend) endEncoder() {
+ if b.renderEnc != 0 {
+ panic("active render pass")
+ }
+ if b.computeEnc != 0 {
+ panic("active compute pass")
+ }
+ if b.blitEnc != 0 {
+ C.blitEncEnd(b.blitEnc)
+ C.CFRelease(b.blitEnc)
+ b.blitEnc = 0
+ }
+}
+
+func (f *Texture) ImplementsRenderTarget() {}
diff --git a/vendor/gioui.org/gpu/internal/opengl/opengl.go b/vendor/gioui.org/gpu/internal/opengl/opengl.go
new file mode 100644
index 0000000..ef89197
--- /dev/null
+++ b/vendor/gioui.org/gpu/internal/opengl/opengl.go
@@ -0,0 +1,1357 @@
+// SPDX-License-Identifier: Unlicense OR MIT
+
+package opengl
+
+import (
+ "errors"
+ "fmt"
+ "image"
+ "strings"
+ "time"
+ "unsafe"
+
+ "gioui.org/gpu/internal/driver"
+ "gioui.org/internal/gl"
+ "gioui.org/shader"
+)
+
+// Backend implements driver.Device.
+type Backend struct {
+ funcs *gl.Functions
+
+ clear bool
+ glstate glState
+ state state
+ savedState glState
+ sharedCtx bool
+
+ glver [2]int
+ gles bool
+ feats driver.Caps
+ // floatTriple holds the settings for floating point
+ // textures.
+ floatTriple textureTriple
+ // Single channel alpha textures.
+ alphaTriple textureTriple
+ srgbaTriple textureTriple
+ storage [storageBindings]*buffer
+
+ outputFBO gl.Framebuffer
+ sRGBFBO *SRGBFBO
+
+ // vertArray is bound during a frame. We don't need it, but
+ // core desktop OpenGL profile 3.3 requires some array bound.
+ vertArray gl.VertexArray
+}
+
+// State tracking.
+type glState struct {
+ drawFBO gl.Framebuffer
+ readFBO gl.Framebuffer
+ renderBuf gl.Renderbuffer
+ vertAttribs [5]struct {
+ obj gl.Buffer
+ enabled bool
+ size int
+ typ gl.Enum
+ normalized bool
+ stride int
+ offset uintptr
+ }
+ prog gl.Program
+ texUnits struct {
+ active gl.Enum
+ binds [2]gl.Texture
+ }
+ arrayBuf gl.Buffer
+ elemBuf gl.Buffer
+ uniBuf gl.Buffer
+ uniBufs [2]gl.Buffer
+ storeBuf gl.Buffer
+ storeBufs [4]gl.Buffer
+ vertArray gl.VertexArray
+ srgb bool
+ blend struct {
+ enable bool
+ srcRGB, dstRGB gl.Enum
+ srcA, dstA gl.Enum
+ }
+ clearColor [4]float32
+ viewport [4]int
+ unpack_row_length int
+ pack_row_length int
+}
+
+type state struct {
+ pipeline *pipeline
+ buffer bufferBinding
+}
+
+type bufferBinding struct {
+ obj gl.Buffer
+ offset int
+}
+
+type timer struct {
+ funcs *gl.Functions
+ obj gl.Query
+}
+
+type texture struct {
+ backend *Backend
+ obj gl.Texture
+ fbo gl.Framebuffer
+ hasFBO bool
+ triple textureTriple
+ width int
+ height int
+ bindings driver.BufferBinding
+ foreign bool
+}
+
+type pipeline struct {
+ prog *program
+ inputs []shader.InputLocation
+ layout driver.VertexLayout
+ blend driver.BlendDesc
+ topology driver.Topology
+}
+
+type buffer struct {
+ backend *Backend
+ hasBuffer bool
+ obj gl.Buffer
+ typ driver.BufferBinding
+ size int
+ immutable bool
+ // For emulation of uniform buffers.
+ data []byte
+}
+
+type glshader struct {
+ backend *Backend
+ obj gl.Shader
+ src shader.Sources
+}
+
+type program struct {
+ backend *Backend
+ obj gl.Program
+ vertUniforms uniforms
+ fragUniforms uniforms
+}
+
+type uniforms struct {
+ locs []uniformLocation
+ size int
+}
+
+type uniformLocation struct {
+ uniform gl.Uniform
+ offset int
+ typ shader.DataType
+ size int
+}
+
+type inputLayout struct {
+ inputs []shader.InputLocation
+ layout []driver.InputDesc
+}
+
+// textureTriple holds the type settings for
+// a TexImage2D call.
+type textureTriple struct {
+ internalFormat gl.Enum
+ format gl.Enum
+ typ gl.Enum
+}
+
+const (
+ storageBindings = 32
+)
+
+func init() {
+ driver.NewOpenGLDevice = newOpenGLDevice
+}
+
+// Supporting compute programs is theoretically possible with OpenGL ES 3.1. In
+// practice, there are too many driver issues, especially on Android (e.g.
+// Google Pixel, Samsung J2 are both broken in different ways). Disable support
+// and rely on Vulkan for devices that support it, and the CPU fallback for
+// devices that don't.
+const brokenGLES31 = true
+
+// newOpenGLDevice creates the OpenGL driver.Device for the given context.
+// It probes the GL version and extension list to decide the texture format
+// triples and the feature set (sRGB, float render targets, compute, timers).
+// For non-shared contexts the GL state is snapshotted once here instead of
+// at every frame.
+func newOpenGLDevice(api driver.OpenGL) (driver.Device, error) {
+ f, err := gl.NewFunctions(api.Context, api.ES)
+ if err != nil {
+ return nil, err
+ }
+ exts := strings.Split(f.GetString(gl.EXTENSIONS), " ")
+ glVer := f.GetString(gl.VERSION)
+ ver, gles, err := gl.ParseGLVersion(glVer)
+ if err != nil {
+ return nil, err
+ }
+ // Errors here only gate optional features; they are not fatal.
+ floatTriple, ffboErr := floatTripleFor(f, ver, exts)
+ srgbaTriple, srgbErr := srgbaTripleFor(ver, exts)
+ gles31 := gles && (ver[0] > 3 || (ver[0] == 3 && ver[1] >= 1))
+ b := &Backend{
+ glver: ver,
+ gles: gles,
+ funcs: f,
+ floatTriple: floatTriple,
+ alphaTriple: alphaTripleFor(ver),
+ srgbaTriple: srgbaTriple,
+ sharedCtx: api.Shared,
+ }
+ b.feats.BottomLeftOrigin = true
+ if srgbErr == nil {
+ b.feats.Features |= driver.FeatureSRGB
+ }
+ if ffboErr == nil {
+ b.feats.Features |= driver.FeatureFloatRenderTargets
+ }
+ // Compute is permanently disabled on GLES 3.1; see brokenGLES31.
+ if gles31 && !brokenGLES31 {
+ b.feats.Features |= driver.FeatureCompute
+ }
+ if hasExtension(exts, "GL_EXT_disjoint_timer_query_webgl2") || hasExtension(exts, "GL_EXT_disjoint_timer_query") {
+ b.feats.Features |= driver.FeatureTimers
+ }
+ b.feats.MaxTextureSize = f.GetInteger(gl.MAX_TEXTURE_SIZE)
+ if !b.sharedCtx {
+ // We have exclusive access to the context, so query the GL state once
+ // instead of at each frame.
+ b.glstate = b.queryState()
+ }
+ return b, nil
+}
+
+// BeginFrame prepares the GL context for a frame rendered into target.
+// For shared contexts the current GL state is saved for restoration in
+// EndFrame. On GLES, if the output framebuffer is not sRGB-encoded, an
+// intermediate SRGBFBO is (lazily) created and rendering is redirected to
+// it; EndFrame blits it to the real output. Returns a foreign texture
+// wrapping the active framebuffer.
+func (b *Backend) BeginFrame(target driver.RenderTarget, clear bool, viewport image.Point) driver.Texture {
+ b.clear = clear
+ if b.sharedCtx {
+ b.glstate = b.queryState()
+ b.savedState = b.glstate
+ }
+ b.state = state{}
+ var renderFBO gl.Framebuffer
+ if target != nil {
+ switch t := target.(type) {
+ case driver.OpenGLRenderTarget:
+ renderFBO = gl.Framebuffer(t)
+ case *texture:
+ renderFBO = t.ensureFBO()
+ default:
+ panic(fmt.Errorf("opengl: invalid render target type: %T", target))
+ }
+ }
+ b.outputFBO = renderFBO
+ b.glstate.bindFramebuffer(b.funcs, gl.FRAMEBUFFER, renderFBO)
+ if b.gles {
+ // If the output framebuffer is not in the sRGB colorspace already, emulate it.
+ var fbEncoding int
+ if !renderFBO.Valid() {
+ fbEncoding = b.funcs.GetFramebufferAttachmentParameteri(gl.FRAMEBUFFER, gl.BACK, gl.FRAMEBUFFER_ATTACHMENT_COLOR_ENCODING)
+ } else {
+ fbEncoding = b.funcs.GetFramebufferAttachmentParameteri(gl.FRAMEBUFFER, gl.COLOR_ATTACHMENT0, gl.FRAMEBUFFER_ATTACHMENT_COLOR_ENCODING)
+ }
+ if fbEncoding == gl.LINEAR && viewport != (image.Point{}) {
+ if b.sRGBFBO == nil {
+ sfbo, err := NewSRGBFBO(b.funcs, &b.glstate)
+ if err != nil {
+ panic(err)
+ }
+ b.sRGBFBO = sfbo
+ }
+ if err := b.sRGBFBO.Refresh(viewport); err != nil {
+ panic(err)
+ }
+ renderFBO = b.sRGBFBO.Framebuffer()
+ } else if b.sRGBFBO != nil {
+ b.sRGBFBO.Release()
+ b.sRGBFBO = nil
+ }
+ } else {
+ // Desktop GL: enable hardware sRGB conversion and keep a VAO bound,
+ // as required by core profile 3.3.
+ b.glstate.set(b.funcs, gl.FRAMEBUFFER_SRGB, true)
+ if !b.vertArray.Valid() {
+ b.vertArray = b.funcs.CreateVertexArray()
+ }
+ b.glstate.bindVertexArray(b.funcs, b.vertArray)
+ }
+ b.glstate.bindFramebuffer(b.funcs, gl.FRAMEBUFFER, renderFBO)
+ if b.sRGBFBO != nil && !clear {
+ // The emulation FBO starts undefined; clear it when the caller won't.
+ b.clearOutput(0, 0, 0, 0)
+ }
+ return &texture{backend: b, fbo: renderFBO, hasFBO: true, foreign: true}
+}
+
+func (b *Backend) EndFrame() {
+ if b.sRGBFBO != nil {
+ b.glstate.bindFramebuffer(b.funcs, gl.FRAMEBUFFER, b.outputFBO)
+ if b.clear {
+ b.SetBlend(false)
+ } else {
+ b.BlendFunc(driver.BlendFactorOne, driver.BlendFactorOneMinusSrcAlpha)
+ b.SetBlend(true)
+ }
+ b.sRGBFBO.Blit()
+ }
+ if b.sharedCtx {
+ b.restoreState(b.savedState)
+ }
+}
+
+// queryState reads back from the driver every piece of GL state that this
+// backend tracks in glState. It is used to snapshot the foreign state of a
+// shared context at BeginFrame (restored in EndFrame) and once at device
+// creation for exclusive contexts. Queries gated on version/API are only
+// issued where the corresponding bindings exist.
+func (b *Backend) queryState() glState {
+ s := glState{
+ prog: gl.Program(b.funcs.GetBinding(gl.CURRENT_PROGRAM)),
+ arrayBuf: gl.Buffer(b.funcs.GetBinding(gl.ARRAY_BUFFER_BINDING)),
+ elemBuf: gl.Buffer(b.funcs.GetBinding(gl.ELEMENT_ARRAY_BUFFER_BINDING)),
+ drawFBO: gl.Framebuffer(b.funcs.GetBinding(gl.FRAMEBUFFER_BINDING)),
+ clearColor: b.funcs.GetFloat4(gl.COLOR_CLEAR_VALUE),
+ viewport: b.funcs.GetInteger4(gl.VIEWPORT),
+ unpack_row_length: b.funcs.GetInteger(gl.UNPACK_ROW_LENGTH),
+ pack_row_length: b.funcs.GetInteger(gl.PACK_ROW_LENGTH),
+ }
+ s.blend.enable = b.funcs.IsEnabled(gl.BLEND)
+ s.blend.srcRGB = gl.Enum(b.funcs.GetInteger(gl.BLEND_SRC_RGB))
+ s.blend.dstRGB = gl.Enum(b.funcs.GetInteger(gl.BLEND_DST_RGB))
+ s.blend.srcA = gl.Enum(b.funcs.GetInteger(gl.BLEND_SRC_ALPHA))
+ s.blend.dstA = gl.Enum(b.funcs.GetInteger(gl.BLEND_DST_ALPHA))
+ s.texUnits.active = gl.Enum(b.funcs.GetInteger(gl.ACTIVE_TEXTURE))
+ if !b.gles {
+ s.srgb = b.funcs.IsEnabled(gl.FRAMEBUFFER_SRGB)
+ }
+ if !b.gles || b.glver[0] >= 3 {
+ s.vertArray = gl.VertexArray(b.funcs.GetBinding(gl.VERTEX_ARRAY_BINDING))
+ s.readFBO = gl.Framebuffer(b.funcs.GetBinding(gl.READ_FRAMEBUFFER_BINDING))
+ s.uniBuf = gl.Buffer(b.funcs.GetBinding(gl.UNIFORM_BUFFER_BINDING))
+ for i := range s.uniBufs {
+ s.uniBufs[i] = gl.Buffer(b.funcs.GetBindingi(gl.UNIFORM_BUFFER_BINDING, i))
+ }
+ }
+ if b.gles && (b.glver[0] > 3 || (b.glver[0] == 3 && b.glver[1] >= 1)) {
+ s.storeBuf = gl.Buffer(b.funcs.GetBinding(gl.SHADER_STORAGE_BUFFER_BINDING))
+ for i := range s.storeBufs {
+ s.storeBufs[i] = gl.Buffer(b.funcs.GetBindingi(gl.SHADER_STORAGE_BUFFER_BINDING, i))
+ }
+ }
+ for i := range s.texUnits.binds {
+ s.activeTexture(b.funcs, gl.TEXTURE0+gl.Enum(i))
+ s.texUnits.binds[i] = gl.Texture(b.funcs.GetBinding(gl.TEXTURE_BINDING_2D))
+ }
+ for i := range s.vertAttribs {
+ a := &s.vertAttribs[i]
+ a.enabled = b.funcs.GetVertexAttrib(i, gl.VERTEX_ATTRIB_ARRAY_ENABLED) != gl.FALSE
+ // Fixed: the attached buffer must be queried with
+ // VERTEX_ATTRIB_ARRAY_BUFFER_BINDING, not VERTEX_ATTRIB_ARRAY_ENABLED.
+ a.obj = gl.Buffer(b.funcs.GetVertexAttribBinding(i, gl.VERTEX_ATTRIB_ARRAY_BUFFER_BINDING))
+ a.size = b.funcs.GetVertexAttrib(i, gl.VERTEX_ATTRIB_ARRAY_SIZE)
+ a.typ = gl.Enum(b.funcs.GetVertexAttrib(i, gl.VERTEX_ATTRIB_ARRAY_TYPE))
+ a.normalized = b.funcs.GetVertexAttrib(i, gl.VERTEX_ATTRIB_ARRAY_NORMALIZED) != gl.FALSE
+ a.stride = b.funcs.GetVertexAttrib(i, gl.VERTEX_ATTRIB_ARRAY_STRIDE)
+ a.offset = b.funcs.GetVertexAttribPointer(i, gl.VERTEX_ATTRIB_ARRAY_POINTER)
+ }
+ return s
+}
+
+func (b *Backend) restoreState(dst glState) {
+ src := b.glstate
+ f := b.funcs
+ for i, unit := range dst.texUnits.binds {
+ src.bindTexture(f, i, unit)
+ }
+ src.activeTexture(f, dst.texUnits.active)
+ src.bindFramebuffer(f, gl.FRAMEBUFFER, dst.drawFBO)
+ src.bindFramebuffer(f, gl.READ_FRAMEBUFFER, dst.readFBO)
+ src.set(f, gl.BLEND, dst.blend.enable)
+ bf := dst.blend
+ src.setBlendFuncSeparate(f, bf.srcRGB, bf.dstRGB, bf.srcA, bf.dstA)
+ src.set(f, gl.FRAMEBUFFER_SRGB, dst.srgb)
+ src.bindVertexArray(f, dst.vertArray)
+ src.useProgram(f, dst.prog)
+ src.bindBuffer(f, gl.ELEMENT_ARRAY_BUFFER, dst.elemBuf)
+ for i, b := range dst.uniBufs {
+ src.bindBufferBase(f, gl.UNIFORM_BUFFER, i, b)
+ }
+ src.bindBuffer(f, gl.UNIFORM_BUFFER, dst.uniBuf)
+ for i, b := range dst.storeBufs {
+ src.bindBufferBase(f, gl.SHADER_STORAGE_BUFFER, i, b)
+ }
+ src.bindBuffer(f, gl.SHADER_STORAGE_BUFFER, dst.storeBuf)
+ col := dst.clearColor
+ src.setClearColor(f, col[0], col[1], col[2], col[3])
+ for i, attr := range dst.vertAttribs {
+ src.setVertexAttribArray(f, i, attr.enabled)
+ src.vertexAttribPointer(f, attr.obj, i, attr.size, attr.typ, attr.normalized, attr.stride, int(attr.offset))
+ }
+ src.bindBuffer(f, gl.ARRAY_BUFFER, dst.arrayBuf)
+ v := dst.viewport
+ src.setViewport(f, v[0], v[1], v[2], v[3])
+ src.pixelStorei(f, gl.UNPACK_ROW_LENGTH, dst.unpack_row_length)
+ src.pixelStorei(f, gl.PACK_ROW_LENGTH, dst.pack_row_length)
+}
+
+func (s *glState) setVertexAttribArray(f *gl.Functions, idx int, enabled bool) {
+ a := &s.vertAttribs[idx]
+ if enabled != a.enabled {
+ if enabled {
+ f.EnableVertexAttribArray(gl.Attrib(idx))
+ } else {
+ f.DisableVertexAttribArray(gl.Attrib(idx))
+ }
+ a.enabled = enabled
+ }
+}
+
+func (s *glState) vertexAttribPointer(f *gl.Functions, buf gl.Buffer, idx, size int, typ gl.Enum, normalized bool, stride, offset int) {
+ s.bindBuffer(f, gl.ARRAY_BUFFER, buf)
+ a := &s.vertAttribs[idx]
+ a.obj = buf
+ a.size = size
+ a.typ = typ
+ a.normalized = normalized
+ a.stride = stride
+ a.offset = uintptr(offset)
+ f.VertexAttribPointer(gl.Attrib(idx), a.size, a.typ, a.normalized, a.stride, int(a.offset))
+}
+
+func (s *glState) activeTexture(f *gl.Functions, unit gl.Enum) {
+ if unit != s.texUnits.active {
+ f.ActiveTexture(unit)
+ s.texUnits.active = unit
+ }
+}
+
+// bindRenderbuffer binds r, skipping the GL call if it is already bound.
+// NOTE(review): the target parameter is ignored; the call always binds to
+// gl.RENDERBUFFER (the only valid renderbuffer target) — confirm callers
+// never pass anything else.
+func (s *glState) bindRenderbuffer(f *gl.Functions, target gl.Enum, r gl.Renderbuffer) {
+ if !r.Equal(s.renderBuf) {
+ f.BindRenderbuffer(gl.RENDERBUFFER, r)
+ s.renderBuf = r
+ }
+}
+
+func (s *glState) bindTexture(f *gl.Functions, unit int, t gl.Texture) {
+ s.activeTexture(f, gl.TEXTURE0+gl.Enum(unit))
+ if !t.Equal(s.texUnits.binds[unit]) {
+ f.BindTexture(gl.TEXTURE_2D, t)
+ s.texUnits.binds[unit] = t
+ }
+}
+
+func (s *glState) bindVertexArray(f *gl.Functions, a gl.VertexArray) {
+ if !a.Equal(s.vertArray) {
+ f.BindVertexArray(a)
+ s.vertArray = a
+ }
+}
+
+func (s *glState) deleteRenderbuffer(f *gl.Functions, r gl.Renderbuffer) {
+ f.DeleteRenderbuffer(r)
+ if r.Equal(s.renderBuf) {
+ s.renderBuf = gl.Renderbuffer{}
+ }
+}
+
+func (s *glState) deleteFramebuffer(f *gl.Functions, fbo gl.Framebuffer) {
+ f.DeleteFramebuffer(fbo)
+ if fbo.Equal(s.drawFBO) {
+ s.drawFBO = gl.Framebuffer{}
+ }
+ if fbo.Equal(s.readFBO) {
+ s.readFBO = gl.Framebuffer{}
+ }
+}
+
+// deleteBuffer deletes b and clears every cached binding slot that still
+// references it, so later bind calls are not skipped against a stale,
+// deleted buffer.
+func (s *glState) deleteBuffer(f *gl.Functions, b gl.Buffer) {
+ f.DeleteBuffer(b)
+ if b.Equal(s.arrayBuf) {
+ s.arrayBuf = gl.Buffer{}
+ }
+ if b.Equal(s.elemBuf) {
+ s.elemBuf = gl.Buffer{}
+ }
+ if b.Equal(s.uniBuf) {
+ s.uniBuf = gl.Buffer{}
+ }
+ if b.Equal(s.storeBuf) {
+ // Fixed: this branch cleared s.uniBuf, leaving the cached
+ // SHADER_STORAGE_BUFFER binding stale after deletion.
+ s.storeBuf = gl.Buffer{}
+ }
+ for i, b2 := range s.storeBufs {
+ if b.Equal(b2) {
+ s.storeBufs[i] = gl.Buffer{}
+ }
+ }
+ for i, b2 := range s.uniBufs {
+ if b.Equal(b2) {
+ s.uniBufs[i] = gl.Buffer{}
+ }
+ }
+}
+
+func (s *glState) deleteProgram(f *gl.Functions, p gl.Program) {
+ f.DeleteProgram(p)
+ if p.Equal(s.prog) {
+ s.prog = gl.Program{}
+ }
+}
+
+func (s *glState) deleteVertexArray(f *gl.Functions, a gl.VertexArray) {
+ f.DeleteVertexArray(a)
+ if a.Equal(s.vertArray) {
+ s.vertArray = gl.VertexArray{}
+ }
+}
+
+func (s *glState) deleteTexture(f *gl.Functions, t gl.Texture) {
+ f.DeleteTexture(t)
+ binds := &s.texUnits.binds
+ for i, obj := range binds {
+ if t.Equal(obj) {
+ binds[i] = gl.Texture{}
+ }
+ }
+}
+
+func (s *glState) useProgram(f *gl.Functions, p gl.Program) {
+ if !p.Equal(s.prog) {
+ f.UseProgram(p)
+ s.prog = p
+ }
+}
+
+// bindFramebuffer binds fbo to target, skipping the GL call when the cached
+// bindings already match. gl.FRAMEBUFFER aliases both the draw and read
+// targets, so it updates (and must match) both caches.
+func (s *glState) bindFramebuffer(f *gl.Functions, target gl.Enum, fbo gl.Framebuffer) {
+ switch target {
+ case gl.FRAMEBUFFER:
+ if fbo.Equal(s.drawFBO) && fbo.Equal(s.readFBO) {
+ return
+ }
+ s.drawFBO = fbo
+ s.readFBO = fbo
+ case gl.READ_FRAMEBUFFER:
+ if fbo.Equal(s.readFBO) {
+ return
+ }
+ s.readFBO = fbo
+ case gl.DRAW_FRAMEBUFFER:
+ if fbo.Equal(s.drawFBO) {
+ return
+ }
+ s.drawFBO = fbo
+ default:
+ panic("unknown target")
+ }
+ f.BindFramebuffer(target, fbo)
+}
+
+func (s *glState) bindBufferBase(f *gl.Functions, target gl.Enum, idx int, buf gl.Buffer) {
+ switch target {
+ case gl.UNIFORM_BUFFER:
+ if buf.Equal(s.uniBuf) && buf.Equal(s.uniBufs[idx]) {
+ return
+ }
+ s.uniBuf = buf
+ s.uniBufs[idx] = buf
+ case gl.SHADER_STORAGE_BUFFER:
+ if buf.Equal(s.storeBuf) && buf.Equal(s.storeBufs[idx]) {
+ return
+ }
+ s.storeBuf = buf
+ s.storeBufs[idx] = buf
+ default:
+ panic("unknown buffer target")
+ }
+ f.BindBufferBase(target, idx, buf)
+}
+
+func (s *glState) bindBuffer(f *gl.Functions, target gl.Enum, buf gl.Buffer) {
+ switch target {
+ case gl.ARRAY_BUFFER:
+ if buf.Equal(s.arrayBuf) {
+ return
+ }
+ s.arrayBuf = buf
+ case gl.ELEMENT_ARRAY_BUFFER:
+ if buf.Equal(s.elemBuf) {
+ return
+ }
+ s.elemBuf = buf
+ case gl.UNIFORM_BUFFER:
+ if buf.Equal(s.uniBuf) {
+ return
+ }
+ s.uniBuf = buf
+ case gl.SHADER_STORAGE_BUFFER:
+ if buf.Equal(s.storeBuf) {
+ return
+ }
+ s.storeBuf = buf
+ default:
+ panic("unknown buffer target")
+ }
+ f.BindBuffer(target, buf)
+}
+
+func (s *glState) pixelStorei(f *gl.Functions, pname gl.Enum, val int) {
+ switch pname {
+ case gl.UNPACK_ROW_LENGTH:
+ if val == s.unpack_row_length {
+ return
+ }
+ s.unpack_row_length = val
+ case gl.PACK_ROW_LENGTH:
+ if val == s.pack_row_length {
+ return
+ }
+ s.pack_row_length = val
+ default:
+ panic("unsupported PixelStorei pname")
+ }
+ f.PixelStorei(pname, val)
+}
+
+func (s *glState) setClearColor(f *gl.Functions, r, g, b, a float32) {
+ col := [4]float32{r, g, b, a}
+ if col != s.clearColor {
+ f.ClearColor(r, g, b, a)
+ s.clearColor = col
+ }
+}
+
+func (s *glState) setViewport(f *gl.Functions, x, y, width, height int) {
+ view := [4]int{x, y, width, height}
+ if view != s.viewport {
+ f.Viewport(x, y, width, height)
+ s.viewport = view
+ }
+}
+
+// setBlendFuncSeparate sets the RGB and alpha blend factors, skipping the GL
+// call when all four cached factors already match.
+func (s *glState) setBlendFuncSeparate(f *gl.Functions, srcRGB, dstRGB, srcA, dstA gl.Enum) {
+ if srcRGB != s.blend.srcRGB || dstRGB != s.blend.dstRGB || srcA != s.blend.srcA || dstA != s.blend.dstA {
+ s.blend.srcRGB = srcRGB
+ s.blend.dstRGB = dstRGB
+ s.blend.srcA = srcA
+ s.blend.dstA = dstA
+ // Fixed: the RGB factors were passed as srcA/dstA, so the actual GL
+ // blend state never matched the values recorded in s.blend above.
+ f.BlendFuncSeparate(srcRGB, dstRGB, srcA, dstA)
+ }
+}
+
+func (s *glState) set(f *gl.Functions, target gl.Enum, enable bool) {
+ switch target {
+ case gl.FRAMEBUFFER_SRGB:
+ if s.srgb == enable {
+ return
+ }
+ s.srgb = enable
+ case gl.BLEND:
+ if enable == s.blend.enable {
+ return
+ }
+ s.blend.enable = enable
+ default:
+ panic("unknown enable")
+ }
+ if enable {
+ f.Enable(target)
+ } else {
+ f.Disable(target)
+ }
+}
+
+func (b *Backend) Caps() driver.Caps {
+ return b.feats
+}
+
+func (b *Backend) NewTimer() driver.Timer {
+ return &timer{
+ funcs: b.funcs,
+ obj: b.funcs.CreateQuery(),
+ }
+}
+
+func (b *Backend) IsTimeContinuous() bool {
+ return b.funcs.GetInteger(gl.GPU_DISJOINT_EXT) == gl.FALSE
+}
+
+// ensureFBO lazily creates a framebuffer with this texture as its color
+// attachment, caching it for later calls. The previously bound draw FBO is
+// restored on return. Panics on GL errors or an incomplete framebuffer.
+func (t *texture) ensureFBO() gl.Framebuffer {
+ if t.hasFBO {
+ return t.fbo
+ }
+ b := t.backend
+ oldFBO := b.glstate.drawFBO
+ defer func() {
+ b.glstate.bindFramebuffer(b.funcs, gl.FRAMEBUFFER, oldFBO)
+ }()
+ // Drain any pre-existing GL error so the checks below are accurate.
+ glErr(b.funcs)
+ fb := b.funcs.CreateFramebuffer()
+ b.glstate.bindFramebuffer(b.funcs, gl.FRAMEBUFFER, fb)
+ if err := glErr(b.funcs); err != nil {
+ b.funcs.DeleteFramebuffer(fb)
+ panic(err)
+ }
+ b.funcs.FramebufferTexture2D(gl.FRAMEBUFFER, gl.COLOR_ATTACHMENT0, gl.TEXTURE_2D, t.obj, 0)
+ if st := b.funcs.CheckFramebufferStatus(gl.FRAMEBUFFER); st != gl.FRAMEBUFFER_COMPLETE {
+ b.funcs.DeleteFramebuffer(fb)
+ panic(fmt.Errorf("incomplete framebuffer, status = 0x%x, err = %d", st, b.funcs.GetError()))
+ }
+ t.fbo = fb
+ t.hasFBO = true
+ return fb
+}
+
+// NewTexture creates a 2D texture of the given format, size, filtering and
+// intended bindings. The format is mapped to the backend's probed
+// internalFormat/format/type triple. On GLES 3+ storage is allocated
+// immutably (required for BindImageTexture); otherwise via TexImage2D.
+// Returns an error translated from glGetError on failure.
+func (b *Backend) NewTexture(format driver.TextureFormat, width, height int, minFilter, magFilter driver.TextureFilter, binding driver.BufferBinding) (driver.Texture, error) {
+ // Drain any pre-existing GL error so the check below reflects this call.
+ glErr(b.funcs)
+ tex := &texture{backend: b, obj: b.funcs.CreateTexture(), width: width, height: height, bindings: binding}
+ switch format {
+ case driver.TextureFormatFloat:
+ tex.triple = b.floatTriple
+ case driver.TextureFormatSRGBA:
+ tex.triple = b.srgbaTriple
+ case driver.TextureFormatRGBA8:
+ tex.triple = textureTriple{gl.RGBA8, gl.RGBA, gl.UNSIGNED_BYTE}
+ default:
+ return nil, errors.New("unsupported texture format")
+ }
+ b.BindTexture(0, tex)
+ b.funcs.TexParameteri(gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, toTexFilter(magFilter))
+ b.funcs.TexParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, toTexFilter(minFilter))
+ b.funcs.TexParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_S, gl.CLAMP_TO_EDGE)
+ b.funcs.TexParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE)
+ if b.gles && b.glver[0] >= 3 {
+ // Immutable textures are required for BindImageTexture, and can't hurt otherwise.
+ b.funcs.TexStorage2D(gl.TEXTURE_2D, 1, tex.triple.internalFormat, width, height)
+ } else {
+ b.funcs.TexImage2D(gl.TEXTURE_2D, 0, tex.triple.internalFormat, width, height, tex.triple.format, tex.triple.typ)
+ }
+ if err := glErr(b.funcs); err != nil {
+ tex.Release()
+ return nil, err
+ }
+ return tex, nil
+}
+
+// NewBuffer creates a buffer for the given binding types. Uniform buffers
+// are emulated in CPU memory (buf.data) and may not be combined with other
+// bindings; all other binding types get a real GL buffer object with
+// DYNAMIC_DRAW storage.
+func (b *Backend) NewBuffer(typ driver.BufferBinding, size int) (driver.Buffer, error) {
+ // Drain any pre-existing GL error so the check below reflects this call.
+ glErr(b.funcs)
+ buf := &buffer{backend: b, typ: typ, size: size}
+ if typ&driver.BufferBindingUniforms != 0 {
+ if typ != driver.BufferBindingUniforms {
+ return nil, errors.New("uniforms buffers cannot be bound as anything else")
+ }
+ buf.data = make([]byte, size)
+ }
+ if typ&^driver.BufferBindingUniforms != 0 {
+ buf.hasBuffer = true
+ buf.obj = b.funcs.CreateBuffer()
+ if err := glErr(b.funcs); err != nil {
+ buf.Release()
+ return nil, err
+ }
+ firstBinding := firstBufferType(typ)
+ b.glstate.bindBuffer(b.funcs, firstBinding, buf.obj)
+ b.funcs.BufferData(firstBinding, size, gl.DYNAMIC_DRAW, nil)
+ }
+ return buf, nil
+}
+
+func (b *Backend) NewImmutableBuffer(typ driver.BufferBinding, data []byte) (driver.Buffer, error) {
+ glErr(b.funcs)
+ obj := b.funcs.CreateBuffer()
+ buf := &buffer{backend: b, obj: obj, typ: typ, size: len(data), hasBuffer: true}
+ firstBinding := firstBufferType(typ)
+ b.glstate.bindBuffer(b.funcs, firstBinding, buf.obj)
+ b.funcs.BufferData(firstBinding, len(data), gl.STATIC_DRAW, data)
+ buf.immutable = true
+ if err := glErr(b.funcs); err != nil {
+ buf.Release()
+ return nil, err
+ }
+ return buf, nil
+}
+
+func glErr(f *gl.Functions) error {
+ if st := f.GetError(); st != gl.NO_ERROR {
+ return fmt.Errorf("glGetError: %#x", st)
+ }
+ return nil
+}
+
+func (b *Backend) Release() {
+ if b.sRGBFBO != nil {
+ b.sRGBFBO.Release()
+ }
+ if b.vertArray.Valid() {
+ b.glstate.deleteVertexArray(b.funcs, b.vertArray)
+ }
+ *b = Backend{}
+}
+
+// DispatchCompute binds the storage buffers registered via BindStorageBuffer
+// to their SSBO slots and dispatches x*y*z workgroups, followed by a full
+// memory barrier so later passes observe the writes.
+func (b *Backend) DispatchCompute(x, y, z int) {
+ for binding, buf := range b.storage {
+ if buf != nil {
+ b.glstate.bindBufferBase(b.funcs, gl.SHADER_STORAGE_BUFFER, binding, buf.obj)
+ }
+ }
+ b.funcs.DispatchCompute(x, y, z)
+ b.funcs.MemoryBarrier(gl.ALL_BARRIER_BITS)
+}
+
+func (b *Backend) BindImageTexture(unit int, tex driver.Texture) {
+ t := tex.(*texture)
+ var acc gl.Enum
+ switch t.bindings & (driver.BufferBindingShaderStorageRead | driver.BufferBindingShaderStorageWrite) {
+ case driver.BufferBindingShaderStorageRead:
+ acc = gl.READ_ONLY
+ case driver.BufferBindingShaderStorageWrite:
+ acc = gl.WRITE_ONLY
+ case driver.BufferBindingShaderStorageRead | driver.BufferBindingShaderStorageWrite:
+ acc = gl.READ_WRITE
+ default:
+ panic("unsupported access bits")
+ }
+ b.funcs.BindImageTexture(unit, t.obj, 0, false, 0, acc, t.triple.internalFormat)
+}
+
+func (b *Backend) BlendFunc(sfactor, dfactor driver.BlendFactor) {
+ src, dst := toGLBlendFactor(sfactor), toGLBlendFactor(dfactor)
+ b.glstate.setBlendFuncSeparate(b.funcs, src, dst, src, dst)
+}
+
+func toGLBlendFactor(f driver.BlendFactor) gl.Enum {
+ switch f {
+ case driver.BlendFactorOne:
+ return gl.ONE
+ case driver.BlendFactorOneMinusSrcAlpha:
+ return gl.ONE_MINUS_SRC_ALPHA
+ case driver.BlendFactorZero:
+ return gl.ZERO
+ case driver.BlendFactorDstColor:
+ return gl.DST_COLOR
+ default:
+ panic("unsupported blend factor")
+ }
+}
+
+func (b *Backend) SetBlend(enable bool) {
+ b.glstate.set(b.funcs, gl.BLEND, enable)
+}
+
+func (b *Backend) DrawElements(off, count int) {
+ b.prepareDraw()
+ // off is in 16-bit indices, but DrawElements take a byte offset.
+ byteOff := off * 2
+ b.funcs.DrawElements(toGLDrawMode(b.state.pipeline.topology), count, gl.UNSIGNED_SHORT, byteOff)
+}
+
+func (b *Backend) DrawArrays(off, count int) {
+ b.prepareDraw()
+ b.funcs.DrawArrays(toGLDrawMode(b.state.pipeline.topology), off, count)
+}
+
+func (b *Backend) prepareDraw() {
+ p := b.state.pipeline
+ if p == nil {
+ return
+ }
+ b.setupVertexArrays()
+}
+
+func toGLDrawMode(mode driver.Topology) gl.Enum {
+ switch mode {
+ case driver.TopologyTriangleStrip:
+ return gl.TRIANGLE_STRIP
+ case driver.TopologyTriangles:
+ return gl.TRIANGLES
+ default:
+ panic("unsupported draw mode")
+ }
+}
+
+func (b *Backend) Viewport(x, y, width, height int) {
+ b.glstate.setViewport(b.funcs, x, y, width, height)
+}
+
+func (b *Backend) clearOutput(colR, colG, colB, colA float32) {
+ b.glstate.setClearColor(b.funcs, colR, colG, colB, colA)
+ b.funcs.Clear(gl.COLOR_BUFFER_BIT)
+}
+
+func (b *Backend) NewComputeProgram(src shader.Sources) (driver.Program, error) {
+ // We don't support ES 3.1 compute, see brokenGLES31 above.
+ const GLES31Source = ""
+ p, err := gl.CreateComputeProgram(b.funcs, GLES31Source)
+ if err != nil {
+ return nil, fmt.Errorf("%s: %v", src.Name, err)
+ }
+ return &program{
+ backend: b,
+ obj: p,
+ }, nil
+}
+
+func (b *Backend) NewVertexShader(src shader.Sources) (driver.VertexShader, error) {
+ glslSrc := b.glslFor(src)
+ sh, err := gl.CreateShader(b.funcs, gl.VERTEX_SHADER, glslSrc)
+ return &glshader{backend: b, obj: sh, src: src}, err
+}
+
+func (b *Backend) NewFragmentShader(src shader.Sources) (driver.FragmentShader, error) {
+ glslSrc := b.glslFor(src)
+ sh, err := gl.CreateShader(b.funcs, gl.FRAGMENT_SHADER, glslSrc)
+ return &glshader{backend: b, obj: sh, src: src}, err
+}
+
+func (b *Backend) glslFor(src shader.Sources) string {
+ if b.gles {
+ return src.GLSL100ES
+ } else {
+ return src.GLSL150
+ }
+}
+
+func (b *Backend) NewPipeline(desc driver.PipelineDesc) (driver.Pipeline, error) {
+ p, err := b.newProgram(desc)
+ if err != nil {
+ return nil, err
+ }
+ layout := desc.VertexLayout
+ vsrc := desc.VertexShader.(*glshader).src
+ if len(vsrc.Inputs) != len(layout.Inputs) {
+ return nil, fmt.Errorf("opengl: got %d inputs, expected %d", len(layout.Inputs), len(vsrc.Inputs))
+ }
+ for i, inp := range vsrc.Inputs {
+ if exp, got := inp.Size, layout.Inputs[i].Size; exp != got {
+ return nil, fmt.Errorf("opengl: data size mismatch for %q: got %d expected %d", inp.Name, got, exp)
+ }
+ }
+ return &pipeline{
+ prog: p,
+ inputs: vsrc.Inputs,
+ layout: layout,
+ blend: desc.BlendDesc,
+ topology: desc.Topology,
+ }, nil
+}
+
+// newProgram links the vertex and fragment shaders of desc into a GL
+// program, binds vertex attribute locations before linking, assigns
+// sampler uniforms to their texture units and resolves uniform
+// locations for both shader stages.
+func (b *Backend) newProgram(desc driver.PipelineDesc) (*program, error) {
+ p := b.funcs.CreateProgram()
+ if !p.Valid() {
+ return nil, errors.New("opengl: glCreateProgram failed")
+ }
+ vsh, fsh := desc.VertexShader.(*glshader), desc.FragmentShader.(*glshader)
+ b.funcs.AttachShader(p, vsh.obj)
+ b.funcs.AttachShader(p, fsh.obj)
+ // Attribute locations must be bound before LinkProgram takes effect.
+ for _, inp := range vsh.src.Inputs {
+ b.funcs.BindAttribLocation(p, gl.Attrib(inp.Location), inp.Name)
+ }
+ b.funcs.LinkProgram(p)
+ if b.funcs.GetProgrami(p, gl.LINK_STATUS) == 0 {
+ log := b.funcs.GetProgramInfoLog(p)
+ b.funcs.DeleteProgram(p)
+ return nil, fmt.Errorf("opengl: program link failed: %s", strings.TrimSpace(log))
+ }
+ prog := &program{
+ backend: b,
+ obj: p,
+ }
+ // Uniform1i requires the program to be current.
+ b.glstate.useProgram(b.funcs, p)
+ // Bind texture uniforms.
+ for _, tex := range vsh.src.Textures {
+ u := b.funcs.GetUniformLocation(p, tex.Name)
+ if u.Valid() {
+ b.funcs.Uniform1i(u, tex.Binding)
+ }
+ }
+ for _, tex := range fsh.src.Textures {
+ u := b.funcs.GetUniformLocation(p, tex.Name)
+ if u.Valid() {
+ b.funcs.Uniform1i(u, tex.Binding)
+ }
+ }
+ prog.vertUniforms.setup(b.funcs, p, vsh.src.Uniforms.Size, vsh.src.Uniforms.Locations)
+ prog.fragUniforms.setup(b.funcs, p, fsh.src.Uniforms.Size, fsh.src.Uniforms.Locations)
+ return prog, nil
+}
+
+func (b *Backend) BindStorageBuffer(binding int, buf driver.Buffer) {
+ bf := buf.(*buffer)
+ if bf.typ&(driver.BufferBindingShaderStorageRead|driver.BufferBindingShaderStorageWrite) == 0 {
+ panic("not a shader storage buffer")
+ }
+ b.storage[binding] = bf
+}
+
+func (b *Backend) BindUniforms(buf driver.Buffer) {
+ bf := buf.(*buffer)
+ if bf.typ&driver.BufferBindingUniforms == 0 {
+ panic("not a uniform buffer")
+ }
+ b.state.pipeline.prog.vertUniforms.update(b.funcs, bf)
+ b.state.pipeline.prog.fragUniforms.update(b.funcs, bf)
+}
+
+func (b *Backend) BindProgram(prog driver.Program) {
+ p := prog.(*program)
+ b.glstate.useProgram(b.funcs, p.obj)
+}
+
+func (s *glshader) Release() {
+ s.backend.funcs.DeleteShader(s.obj)
+}
+
+func (p *program) Release() {
+ p.backend.glstate.deleteProgram(p.backend.funcs, p.obj)
+}
+
+func (u *uniforms) setup(funcs *gl.Functions, p gl.Program, uniformSize int, uniforms []shader.UniformLocation) {
+ u.locs = make([]uniformLocation, len(uniforms))
+ for i, uniform := range uniforms {
+ loc := funcs.GetUniformLocation(p, uniform.Name)
+ u.locs[i] = uniformLocation{uniform: loc, offset: uniform.Offset, typ: uniform.Type, size: uniform.Size}
+ }
+ u.size = uniformSize
+}
+
+// update pushes uniform values from buf's CPU byte store to the
+// program's resolved uniform locations. Only float scalars and vec2/3/4
+// are supported; the bytes at each uniform's offset are reinterpreted
+// in place as float32 arrays via unsafe.Pointer.
+func (p *uniforms) update(funcs *gl.Functions, buf *buffer) {
+ if buf.size < p.size {
+ panic(fmt.Errorf("uniform buffer too small, got %d need %d", buf.size, p.size))
+ }
+ data := buf.data
+ for _, u := range p.locs {
+ if !u.uniform.Valid() {
+ // Uniform was optimized away by the GL compiler; skip it.
+ continue
+ }
+ data := data[u.offset:]
+ switch {
+ case u.typ == shader.DataTypeFloat && u.size == 1:
+ data := data[:4]
+ v := *(*[1]float32)(unsafe.Pointer(&data[0]))
+ funcs.Uniform1f(u.uniform, v[0])
+ case u.typ == shader.DataTypeFloat && u.size == 2:
+ data := data[:8]
+ v := *(*[2]float32)(unsafe.Pointer(&data[0]))
+ funcs.Uniform2f(u.uniform, v[0], v[1])
+ case u.typ == shader.DataTypeFloat && u.size == 3:
+ data := data[:12]
+ v := *(*[3]float32)(unsafe.Pointer(&data[0]))
+ funcs.Uniform3f(u.uniform, v[0], v[1], v[2])
+ case u.typ == shader.DataTypeFloat && u.size == 4:
+ data := data[:16]
+ v := *(*[4]float32)(unsafe.Pointer(&data[0]))
+ funcs.Uniform4f(u.uniform, v[0], v[1], v[2], v[3])
+ default:
+ panic("unsupported uniform data type or size")
+ }
+ }
+}
+
+// Upload copies data into the buffer's CPU shadow store and, when a GL
+// buffer object backs it, into GPU storage as well. Panics for
+// immutable buffers or when data exceeds the buffer size.
+func (b *buffer) Upload(data []byte) {
+ if b.immutable {
+ panic("immutable buffer")
+ }
+ if len(data) > b.size {
+ panic("buffer size overflow")
+ }
+ copy(b.data, data)
+ if b.hasBuffer {
+ firstBinding := firstBufferType(b.typ)
+ b.backend.glstate.bindBuffer(b.backend.funcs, firstBinding, b.obj)
+ if len(data) == b.size {
+ // the iOS GL implementation doesn't recognize when BufferSubData
+ // clears the entire buffer. Tell it and avoid GPU stalls.
+ // See also https://github.com/godotengine/godot/issues/23956.
+ b.backend.funcs.BufferData(firstBinding, b.size, gl.DYNAMIC_DRAW, data)
+ } else {
+ b.backend.funcs.BufferSubData(firstBinding, 0, data)
+ }
+ }
+}
+
+// Download reads the buffer contents into data. CPU-only buffers copy
+// from the shadow store; GPU-backed buffers are mapped read-only via
+// MapBufferRange. Returns driver.ErrContentLost when UnmapBuffer
+// reports the mapped range was invalidated while mapped.
+func (b *buffer) Download(data []byte) error {
+ if len(data) > b.size {
+ panic("buffer size overflow")
+ }
+ if !b.hasBuffer {
+ copy(data, b.data)
+ return nil
+ }
+ firstBinding := firstBufferType(b.typ)
+ b.backend.glstate.bindBuffer(b.backend.funcs, firstBinding, b.obj)
+ bufferMap := b.backend.funcs.MapBufferRange(firstBinding, 0, len(data), gl.MAP_READ_BIT)
+ if bufferMap == nil {
+ return fmt.Errorf("MapBufferRange: error %#x", b.backend.funcs.GetError())
+ }
+ copy(data, bufferMap)
+ if !b.backend.funcs.UnmapBuffer(firstBinding) {
+ return driver.ErrContentLost
+ }
+ return nil
+}
+
+func (b *buffer) Release() {
+ if b.hasBuffer {
+ b.backend.glstate.deleteBuffer(b.backend.funcs, b.obj)
+ b.hasBuffer = false
+ }
+}
+
+func (b *Backend) BindVertexBuffer(buf driver.Buffer, offset int) {
+ gbuf := buf.(*buffer)
+ if gbuf.typ&driver.BufferBindingVertices == 0 {
+ panic("not a vertex buffer")
+ }
+ b.state.buffer = bufferBinding{obj: gbuf.obj, offset: offset}
+}
+
+// setupVertexArrays configures vertex attribute pointers for the
+// current pipeline's inputs against the bound vertex buffer, then
+// enables exactly the attribute arrays that are in use and disables
+// the rest.
+func (b *Backend) setupVertexArrays() {
+ p := b.state.pipeline
+ inputs := p.inputs
+ if len(inputs) == 0 {
+ return
+ }
+ layout := p.layout
+ const max = len(b.glstate.vertAttribs)
+ var enabled [max]bool
+ buf := b.state.buffer
+ for i, inp := range inputs {
+ l := layout.Inputs[i]
+ var gltyp gl.Enum
+ switch l.Type {
+ case shader.DataTypeFloat:
+ gltyp = gl.FLOAT
+ case shader.DataTypeShort:
+ gltyp = gl.SHORT
+ default:
+ panic("unsupported data type")
+ }
+ enabled[inp.Location] = true
+ // The buffer-level offset is added to each input's layout offset.
+ b.glstate.vertexAttribPointer(b.funcs, buf.obj, inp.Location, l.Size, gltyp, false, p.layout.Stride, buf.offset+l.Offset)
+ }
+ for i := 0; i < max; i++ {
+ b.glstate.setVertexAttribArray(b.funcs, i, enabled[i])
+ }
+}
+
+func (b *Backend) BindIndexBuffer(buf driver.Buffer) {
+ gbuf := buf.(*buffer)
+ if gbuf.typ&driver.BufferBindingIndices == 0 {
+ panic("not an index buffer")
+ }
+ b.glstate.bindBuffer(b.funcs, gl.ELEMENT_ARRAY_BUFFER, gbuf.obj)
+}
+
+func (b *Backend) CopyTexture(dst driver.Texture, dstOrigin image.Point, src driver.Texture, srcRect image.Rectangle) {
+ const unit = 0
+ oldTex := b.glstate.texUnits.binds[unit]
+ defer func() {
+ b.glstate.bindTexture(b.funcs, unit, oldTex)
+ }()
+ b.glstate.bindTexture(b.funcs, unit, dst.(*texture).obj)
+ b.glstate.bindFramebuffer(b.funcs, gl.FRAMEBUFFER, src.(*texture).ensureFBO())
+ sz := srcRect.Size()
+ b.funcs.CopyTexSubImage2D(gl.TEXTURE_2D, 0, dstOrigin.X, dstOrigin.Y, srcRect.Min.X, srcRect.Min.Y, sz.X, sz.Y)
+}
+
+func (t *texture) ReadPixels(src image.Rectangle, pixels []byte, stride int) error {
+ glErr(t.backend.funcs)
+ t.backend.glstate.bindFramebuffer(t.backend.funcs, gl.FRAMEBUFFER, t.ensureFBO())
+ if len(pixels) < src.Dx()*src.Dy()*4 {
+ return errors.New("unexpected RGBA size")
+ }
+ w, h := src.Dx(), src.Dy()
+ // WebGL 1 doesn't support PACK_ROW_LENGTH != 0. Avoid it if possible.
+ rowLen := 0
+ if n := stride / 4; n != w {
+ rowLen = n
+ }
+ t.backend.glstate.pixelStorei(t.backend.funcs, gl.PACK_ROW_LENGTH, rowLen)
+ t.backend.funcs.ReadPixels(src.Min.X, src.Min.Y, w, h, gl.RGBA, gl.UNSIGNED_BYTE, pixels)
+ return glErr(t.backend.funcs)
+}
+
+func (b *Backend) BindPipeline(pl driver.Pipeline) {
+ p := pl.(*pipeline)
+ b.state.pipeline = p
+ b.glstate.useProgram(b.funcs, p.prog.obj)
+ b.SetBlend(p.blend.Enable)
+ b.BlendFunc(p.blend.SrcFactor, p.blend.DstFactor)
+}
+
+func (b *Backend) BeginCompute() {
+ b.funcs.MemoryBarrier(gl.ALL_BARRIER_BITS)
+}
+
+func (b *Backend) EndCompute() {
+}
+
+func (b *Backend) BeginRenderPass(tex driver.Texture, desc driver.LoadDesc) {
+ fbo := tex.(*texture).ensureFBO()
+ b.glstate.bindFramebuffer(b.funcs, gl.FRAMEBUFFER, fbo)
+ switch desc.Action {
+ case driver.LoadActionClear:
+ c := desc.ClearColor
+ b.clearOutput(c.R, c.G, c.B, c.A)
+ case driver.LoadActionInvalidate:
+ b.funcs.InvalidateFramebuffer(gl.FRAMEBUFFER, gl.COLOR_ATTACHMENT0)
+ }
+}
+
+func (b *Backend) EndRenderPass() {
+}
+
+func (f *texture) ImplementsRenderTarget() {}
+
+func (p *pipeline) Release() {
+ p.prog.Release()
+ *p = pipeline{}
+}
+
+func toTexFilter(f driver.TextureFilter) int {
+ switch f {
+ case driver.FilterNearest:
+ return gl.NEAREST
+ case driver.FilterLinear:
+ return gl.LINEAR
+ default:
+ panic("unsupported texture filter")
+ }
+}
+
+func (b *Backend) PrepareTexture(tex driver.Texture) {}
+
+func (b *Backend) BindTexture(unit int, t driver.Texture) {
+ b.glstate.bindTexture(b.funcs, unit, t.(*texture).obj)
+}
+
+func (t *texture) Release() {
+ if t.foreign {
+ panic("texture not created by NewTexture")
+ }
+ if t.hasFBO {
+ t.backend.glstate.deleteFramebuffer(t.backend.funcs, t.fbo)
+ }
+ t.backend.glstate.deleteTexture(t.backend.funcs, t.obj)
+}
+
+func (t *texture) Upload(offset, size image.Point, pixels []byte, stride int) {
+ if min := size.X * size.Y * 4; min > len(pixels) {
+ panic(fmt.Errorf("size %d larger than data %d", min, len(pixels)))
+ }
+ t.backend.BindTexture(0, t)
+ // WebGL 1 doesn't support UNPACK_ROW_LENGTH != 0. Avoid it if possible.
+ rowLen := 0
+ if n := stride / 4; n != size.X {
+ rowLen = n
+ }
+ t.backend.glstate.pixelStorei(t.backend.funcs, gl.UNPACK_ROW_LENGTH, rowLen)
+ t.backend.funcs.TexSubImage2D(gl.TEXTURE_2D, 0, offset.X, offset.Y, size.X, size.Y, t.triple.format, t.triple.typ, pixels)
+}
+
+func (t *timer) Begin() {
+ t.funcs.BeginQuery(gl.TIME_ELAPSED_EXT, t.obj)
+}
+
+func (t *timer) End() {
+ t.funcs.EndQuery(gl.TIME_ELAPSED_EXT)
+}
+
+func (t *timer) ready() bool {
+ return t.funcs.GetQueryObjectuiv(t.obj, gl.QUERY_RESULT_AVAILABLE) == gl.TRUE
+}
+
+func (t *timer) Release() {
+ t.funcs.DeleteQuery(t.obj)
+}
+
+func (t *timer) Duration() (time.Duration, bool) {
+ if !t.ready() {
+ return 0, false
+ }
+ nanos := t.funcs.GetQueryObjectuiv(t.obj, gl.QUERY_RESULT)
+ return time.Duration(nanos), true
+}
+
+// floatTripleFor determines the best texture triple for floating point FBOs.
+// Candidate (internalFormat, format, type) triples are collected based on
+// the GL version and extensions, then each is verified by creating a
+// probe texture and checking framebuffer completeness; the first
+// complete one wins. Texture and FBO bindings are restored on return.
+func floatTripleFor(f *gl.Functions, ver [2]int, exts []string) (textureTriple, error) {
+ var triples []textureTriple
+ if ver[0] >= 3 {
+ triples = append(triples, textureTriple{gl.R16F, gl.Enum(gl.RED), gl.Enum(gl.HALF_FLOAT)})
+ }
+ // According to the OES_texture_half_float specification, EXT_color_buffer_half_float is needed to
+ // render to FBOs. However, the Safari WebGL1 implementation does support half-float FBOs but does not
+ // report EXT_color_buffer_half_float support. The triples are verified below, so it doesn't matter if we're
+ // wrong.
+ if hasExtension(exts, "GL_OES_texture_half_float") || hasExtension(exts, "GL_EXT_color_buffer_half_float") {
+ // Try single channel.
+ triples = append(triples, textureTriple{gl.LUMINANCE, gl.Enum(gl.LUMINANCE), gl.Enum(gl.HALF_FLOAT_OES)})
+ // Fallback to 4 channels.
+ triples = append(triples, textureTriple{gl.RGBA, gl.Enum(gl.RGBA), gl.Enum(gl.HALF_FLOAT_OES)})
+ }
+ if hasExtension(exts, "GL_OES_texture_float") || hasExtension(exts, "GL_EXT_color_buffer_float") {
+ triples = append(triples, textureTriple{gl.RGBA, gl.Enum(gl.RGBA), gl.Enum(gl.FLOAT)})
+ }
+ // Probe texture; previous bindings are saved and restored via defers.
+ tex := f.CreateTexture()
+ defer f.DeleteTexture(tex)
+ defTex := gl.Texture(f.GetBinding(gl.TEXTURE_BINDING_2D))
+ defer f.BindTexture(gl.TEXTURE_2D, defTex)
+ f.BindTexture(gl.TEXTURE_2D, tex)
+ f.TexParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_S, gl.CLAMP_TO_EDGE)
+ f.TexParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE)
+ f.TexParameteri(gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, gl.NEAREST)
+ f.TexParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.NEAREST)
+ fbo := f.CreateFramebuffer()
+ defer f.DeleteFramebuffer(fbo)
+ defFBO := gl.Framebuffer(f.GetBinding(gl.FRAMEBUFFER_BINDING))
+ f.BindFramebuffer(gl.FRAMEBUFFER, fbo)
+ defer f.BindFramebuffer(gl.FRAMEBUFFER, defFBO)
+ var attempts []string
+ for _, tt := range triples {
+ const size = 256
+ f.TexImage2D(gl.TEXTURE_2D, 0, tt.internalFormat, size, size, tt.format, tt.typ)
+ f.FramebufferTexture2D(gl.FRAMEBUFFER, gl.COLOR_ATTACHMENT0, gl.TEXTURE_2D, tex, 0)
+ st := f.CheckFramebufferStatus(gl.FRAMEBUFFER)
+ if st == gl.FRAMEBUFFER_COMPLETE {
+ return tt, nil
+ }
+ // Record the failed triple and its status for the error message.
+ attempts = append(attempts, fmt.Sprintf("(0x%x, 0x%x, 0x%x): 0x%x", tt.internalFormat, tt.format, tt.typ, st))
+ }
+ return textureTriple{}, fmt.Errorf("floating point fbos not supported (attempted %s)", attempts)
+}
+
+func srgbaTripleFor(ver [2]int, exts []string) (textureTriple, error) {
+ switch {
+ case ver[0] >= 3:
+ return textureTriple{gl.SRGB8_ALPHA8, gl.Enum(gl.RGBA), gl.Enum(gl.UNSIGNED_BYTE)}, nil
+ case hasExtension(exts, "GL_EXT_sRGB"):
+ return textureTriple{gl.SRGB_ALPHA_EXT, gl.Enum(gl.SRGB_ALPHA_EXT), gl.Enum(gl.UNSIGNED_BYTE)}, nil
+ default:
+ return textureTriple{}, errors.New("no sRGB texture formats found")
+ }
+}
+
+func alphaTripleFor(ver [2]int) textureTriple {
+ intf, f := gl.Enum(gl.R8), gl.Enum(gl.RED)
+ if ver[0] < 3 {
+ // R8, RED not supported on OpenGL ES 2.0.
+ intf, f = gl.LUMINANCE, gl.Enum(gl.LUMINANCE)
+ }
+ return textureTriple{intf, f, gl.UNSIGNED_BYTE}
+}
+
+func hasExtension(exts []string, ext string) bool {
+ for _, e := range exts {
+ if ext == e {
+ return true
+ }
+ }
+ return false
+}
+
+// firstBufferType maps a buffer binding bitmask to the GL target used
+// for binding and data uploads, picking the highest-priority binding
+// bit present in typ. Panics when no known binding bit is set.
+func firstBufferType(typ driver.BufferBinding) gl.Enum {
+ switch {
+ case typ&driver.BufferBindingIndices != 0:
+ return gl.ELEMENT_ARRAY_BUFFER
+ case typ&driver.BufferBindingVertices != 0:
+ return gl.ARRAY_BUFFER
+ case typ&driver.BufferBindingUniforms != 0:
+ return gl.UNIFORM_BUFFER
+ case typ&(driver.BufferBindingShaderStorageRead|driver.BufferBindingShaderStorageWrite) != 0:
+ return gl.SHADER_STORAGE_BUFFER
+ default:
+ panic("unsupported buffer type")
+ }
+}
diff --git a/vendor/gioui.org/gpu/internal/opengl/srgb.go b/vendor/gioui.org/gpu/internal/opengl/srgb.go
new file mode 100644
index 0000000..4871d94
--- /dev/null
+++ b/vendor/gioui.org/gpu/internal/opengl/srgb.go
@@ -0,0 +1,176 @@
+// SPDX-License-Identifier: Unlicense OR MIT
+
+package opengl
+
+import (
+ "errors"
+ "fmt"
+ "image"
+ "runtime"
+ "strings"
+
+ "gioui.org/internal/byteslice"
+ "gioui.org/internal/gl"
+)
+
+// SRGBFBO implements an intermediate sRGB FBO
+// for gamma-correct rendering on platforms without
+// sRGB enabled native framebuffers.
+type SRGBFBO struct {
+ c *gl.Functions
+ state *glState
+ viewport image.Point
+ fbo gl.Framebuffer
+ tex gl.Texture
+ blitted bool
+ quad gl.Buffer
+ prog gl.Program
+ format textureTriple
+}
+
+func NewSRGBFBO(f *gl.Functions, state *glState) (*SRGBFBO, error) {
+ glVer := f.GetString(gl.VERSION)
+ ver, _, err := gl.ParseGLVersion(glVer)
+ if err != nil {
+ return nil, err
+ }
+ exts := strings.Split(f.GetString(gl.EXTENSIONS), " ")
+ srgbTriple, err := srgbaTripleFor(ver, exts)
+ if err != nil {
+ // Fall back to the linear RGB colorspace, at the cost of color precision loss.
+ srgbTriple = textureTriple{gl.RGBA, gl.Enum(gl.RGBA), gl.Enum(gl.UNSIGNED_BYTE)}
+ }
+ s := &SRGBFBO{
+ c: f,
+ state: state,
+ format: srgbTriple,
+ fbo: f.CreateFramebuffer(),
+ tex: f.CreateTexture(),
+ }
+ state.bindTexture(f, 0, s.tex)
+ f.TexParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_S, gl.CLAMP_TO_EDGE)
+ f.TexParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE)
+ f.TexParameteri(gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, gl.NEAREST)
+ f.TexParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.NEAREST)
+ return s, nil
+}
+
+// Blit draws the sRGB FBO texture as a fullscreen quad into the
+// currently bound framebuffer, applying gamma correction in the blit
+// shader. The program and quad buffer are created lazily on first use;
+// afterwards the FBO content is invalidated since it has been consumed.
+func (s *SRGBFBO) Blit() {
+ if !s.blitted {
+ prog, err := gl.CreateProgram(s.c, blitVSrc, blitFSrc, []string{"pos", "uv"})
+ if err != nil {
+ panic(err)
+ }
+ s.prog = prog
+ s.state.useProgram(s.c, prog)
+ s.c.Uniform1i(s.c.GetUniformLocation(prog, "tex"), 0)
+ s.quad = s.c.CreateBuffer()
+ s.state.bindBuffer(s.c, gl.ARRAY_BUFFER, s.quad)
+ // Fullscreen triangle strip: clip-space position plus UV per vertex.
+ coords := byteslice.Slice([]float32{
+ -1, +1, 0, 1,
+ +1, +1, 1, 1,
+ -1, -1, 0, 0,
+ +1, -1, 1, 0,
+ })
+ s.c.BufferData(gl.ARRAY_BUFFER, len(coords), gl.STATIC_DRAW, coords)
+ s.blitted = true
+ }
+ s.state.useProgram(s.c, s.prog)
+ s.state.bindTexture(s.c, 0, s.tex)
+ s.state.vertexAttribPointer(s.c, s.quad, 0 /* pos */, 2, gl.FLOAT, false, 4*4, 0)
+ s.state.vertexAttribPointer(s.c, s.quad, 1 /* uv */, 2, gl.FLOAT, false, 4*4, 4*2)
+ s.state.setVertexAttribArray(s.c, 0, true)
+ s.state.setVertexAttribArray(s.c, 1, true)
+ s.c.DrawArrays(gl.TRIANGLE_STRIP, 0, 4)
+ s.state.bindFramebuffer(s.c, gl.FRAMEBUFFER, s.fbo)
+ s.c.InvalidateFramebuffer(gl.FRAMEBUFFER, gl.COLOR_ATTACHMENT0)
+}
+
+func (s *SRGBFBO) Framebuffer() gl.Framebuffer {
+ return s.fbo
+}
+
+// Refresh resizes the sRGB FBO texture to viewport, reattaches it to
+// the framebuffer and verifies completeness. A no-op when the size is
+// unchanged. On js it additionally probes for Safari's double gamma
+// correction and falls back to a plain RGBA texture when detected.
+func (s *SRGBFBO) Refresh(viewport image.Point) error {
+ if viewport.X == 0 || viewport.Y == 0 {
+ return errors.New("srgb: zero-sized framebuffer")
+ }
+ if s.viewport == viewport {
+ return nil
+ }
+ s.viewport = viewport
+ s.state.bindTexture(s.c, 0, s.tex)
+ s.c.TexImage2D(gl.TEXTURE_2D, 0, s.format.internalFormat, viewport.X, viewport.Y, s.format.format, s.format.typ)
+ s.state.bindFramebuffer(s.c, gl.FRAMEBUFFER, s.fbo)
+ s.c.FramebufferTexture2D(gl.FRAMEBUFFER, gl.COLOR_ATTACHMENT0, gl.TEXTURE_2D, s.tex, 0)
+ if st := s.c.CheckFramebufferStatus(gl.FRAMEBUFFER); st != gl.FRAMEBUFFER_COMPLETE {
+ return fmt.Errorf("sRGB framebuffer incomplete (%dx%d), status: %#x error: %x", viewport.X, viewport.Y, st, s.c.GetError())
+ }
+
+ if runtime.GOOS == "js" {
+ // With macOS Safari, rendering to and then reading from a SRGB8_ALPHA8
+ // texture result in twice gamma corrected colors. Using a plain RGBA
+ // texture seems to work.
+ s.state.setClearColor(s.c, .5, .5, .5, 1.0)
+ s.c.Clear(gl.COLOR_BUFFER_BIT)
+ var pixel [4]byte
+ s.c.ReadPixels(0, 0, 1, 1, gl.RGBA, gl.UNSIGNED_BYTE, pixel[:])
+ if pixel[0] == 128 { // Correct sRGB color value is ~188
+ s.c.TexImage2D(gl.TEXTURE_2D, 0, gl.RGBA, viewport.X, viewport.Y, gl.RGBA, gl.UNSIGNED_BYTE)
+ if st := s.c.CheckFramebufferStatus(gl.FRAMEBUFFER); st != gl.FRAMEBUFFER_COMPLETE {
+ return fmt.Errorf("fallback RGBA framebuffer incomplete (%dx%d), status: %#x error: %x", viewport.X, viewport.Y, st, s.c.GetError())
+ }
+ }
+ }
+
+ return nil
+}
+
+func (s *SRGBFBO) Release() {
+ s.state.deleteFramebuffer(s.c, s.fbo)
+ s.state.deleteTexture(s.c, s.tex)
+ if s.blitted {
+ s.state.deleteBuffer(s.c, s.quad)
+ s.state.deleteProgram(s.c, s.prog)
+ }
+ s.c = nil
+}
+
+const (
+ blitVSrc = `
+#version 100
+
+precision highp float;
+
+attribute vec2 pos;
+attribute vec2 uv;
+
+varying vec2 vUV;
+
+void main() {
+ gl_Position = vec4(pos, 0, 1);
+ vUV = uv;
+}
+`
+ blitFSrc = `
+#version 100
+
+precision mediump float;
+
+uniform sampler2D tex;
+varying vec2 vUV;
+
+vec3 gamma(vec3 rgb) {
+ vec3 exp = vec3(1.055)*pow(rgb, vec3(0.41666)) - vec3(0.055);
+ vec3 lin = rgb * vec3(12.92);
+ bvec3 cut = lessThan(rgb, vec3(0.0031308));
+ return vec3(cut.r ? lin.r : exp.r, cut.g ? lin.g : exp.g, cut.b ? lin.b : exp.b);
+}
+
+void main() {
+ vec4 col = texture2D(tex, vUV);
+ vec3 rgb = col.rgb;
+ rgb = gamma(rgb);
+ gl_FragColor = vec4(rgb, col.a);
+}
+`
+)
diff --git a/vendor/gioui.org/gpu/internal/vulkan/vulkan.go b/vendor/gioui.org/gpu/internal/vulkan/vulkan.go
new file mode 100644
index 0000000..f69c232
--- /dev/null
+++ b/vendor/gioui.org/gpu/internal/vulkan/vulkan.go
@@ -0,0 +1,1120 @@
+// SPDX-License-Identifier: Unlicense OR MIT
+
+//go:build (linux || freebsd) && !novulkan
+// +build linux freebsd
+// +build !novulkan
+
+package vulkan
+
+import (
+ "errors"
+ "fmt"
+ "image"
+
+ "gioui.org/gpu/internal/driver"
+ "gioui.org/internal/vk"
+ "gioui.org/shader"
+)
+
+type Backend struct {
+ physDev vk.PhysicalDevice
+ dev vk.Device
+ queue vk.Queue
+ cmdPool struct {
+ current vk.CommandBuffer
+ pool vk.CommandPool
+ used int
+ buffers []vk.CommandBuffer
+ }
+ outFormat vk.Format
+ staging struct {
+ buf *Buffer
+ mem []byte
+ size int
+ cap int
+ }
+ defers []func(d vk.Device)
+ frameSig vk.Semaphore
+ waitSems []vk.Semaphore
+ waitStages []vk.PipelineStageFlags
+ sigSems []vk.Semaphore
+ fence vk.Fence
+
+ allPipes []*Pipeline
+
+ pipe *Pipeline
+
+ passes map[passKey]vk.RenderPass
+
+ // bindings and offset are temporary storage for BindVertexBuffer.
+ bindings []vk.Buffer
+ offsets []vk.DeviceSize
+
+ desc struct {
+ dirty bool
+ texBinds [texUnits]*Texture
+ bufBinds [storageUnits]*Buffer
+ }
+
+ caps driver.Features
+}
+
+type passKey struct {
+ fmt vk.Format
+ loadAct vk.AttachmentLoadOp
+ initLayout vk.ImageLayout
+ finalLayout vk.ImageLayout
+}
+
+type Texture struct {
+ backend *Backend
+ img vk.Image
+ mem vk.DeviceMemory
+ view vk.ImageView
+ sampler vk.Sampler
+ fbo vk.Framebuffer
+ format vk.Format
+ layout vk.ImageLayout
+ passLayout vk.ImageLayout
+ width int
+ height int
+ acquire vk.Semaphore
+ foreign bool
+
+ scope struct {
+ stage vk.PipelineStageFlags
+ access vk.AccessFlags
+ }
+}
+
+type Shader struct {
+ dev vk.Device
+ module vk.ShaderModule
+ pushRange vk.PushConstantRange
+ src shader.Sources
+}
+
+type Pipeline struct {
+ backend *Backend
+ pipe vk.Pipeline
+ pushRanges []vk.PushConstantRange
+ ninputs int
+ desc *descPool
+}
+
+type descPool struct {
+ layout vk.PipelineLayout
+ descLayout vk.DescriptorSetLayout
+ pool vk.DescriptorPool
+ size int
+ cap int
+ texBinds []int
+ imgBinds []int
+ bufBinds []int
+}
+
+type Buffer struct {
+ backend *Backend
+ buf vk.Buffer
+ store []byte
+ mem vk.DeviceMemory
+ usage vk.BufferUsageFlags
+
+ scope struct {
+ stage vk.PipelineStageFlags
+ access vk.AccessFlags
+ }
+}
+
+const (
+ texUnits = 4
+ storageUnits = 4
+)
+
+func init() {
+ driver.NewVulkanDevice = newVulkanDevice
+}
+
+// newVulkanDevice wraps an existing Vulkan device handle pair from the
+// window system into a driver.Device backend. It creates the command
+// pool and frame fence, and detects optional capabilities (float render
+// targets, sRGB) from the physical device's format properties.
+func newVulkanDevice(api driver.Vulkan) (driver.Device, error) {
+ b := &Backend{
+ physDev: vk.PhysicalDevice(api.PhysDevice),
+ dev: vk.Device(api.Device),
+ outFormat: vk.Format(api.Format),
+ caps: driver.FeatureCompute,
+ passes: make(map[passKey]vk.RenderPass),
+ }
+ b.queue = vk.GetDeviceQueue(b.dev, api.QueueFamily, api.QueueIndex)
+ cmdPool, err := vk.CreateCommandPool(b.dev, api.QueueFamily)
+ if err != nil {
+ return nil, err
+ }
+ b.cmdPool.pool = cmdPool
+ // Float render targets require R16F to be both renderable and samplable.
+ props := vk.GetPhysicalDeviceFormatProperties(b.physDev, vk.FORMAT_R16_SFLOAT)
+ reqs := vk.FORMAT_FEATURE_COLOR_ATTACHMENT_BIT | vk.FORMAT_FEATURE_SAMPLED_IMAGE_BIT
+ if props&reqs == reqs {
+ b.caps |= driver.FeatureFloatRenderTargets
+ }
+ reqs = vk.FORMAT_FEATURE_COLOR_ATTACHMENT_BLEND_BIT | vk.FORMAT_FEATURE_SAMPLED_IMAGE_BIT
+ props = vk.GetPhysicalDeviceFormatProperties(b.physDev, vk.FORMAT_R8G8B8A8_SRGB)
+ if props&reqs == reqs {
+ b.caps |= driver.FeatureSRGB
+ }
+ fence, err := vk.CreateFence(b.dev)
+ if err != nil {
+ return nil, mapErr(err)
+ }
+ b.fence = fence
+ return b, nil
+}
+
+// BeginFrame starts a new frame: it waits for the queue to go idle,
+// recycles staging memory, command buffers, deferred destructors and
+// descriptor pools, then wraps the render target into a Texture. A
+// VulkanRenderTarget from the window system becomes a foreign Texture
+// carrying its wait/signal semaphores; a *Texture passes through as-is.
+func (b *Backend) BeginFrame(target driver.RenderTarget, clear bool, viewport image.Point) driver.Texture {
+ vk.QueueWaitIdle(b.queue)
+ b.staging.size = 0
+ b.cmdPool.used = 0
+ b.runDefers()
+ b.resetPipes()
+
+ if target == nil {
+ return nil
+ }
+ switch t := target.(type) {
+ case driver.VulkanRenderTarget:
+ // UNDEFINED lets the first pass discard old content when clearing.
+ layout := vk.IMAGE_LAYOUT_UNDEFINED
+ if !clear {
+ layout = vk.IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL
+ }
+ b.frameSig = vk.Semaphore(t.SignalSem)
+ tex := &Texture{
+ img: vk.Image(t.Image),
+ fbo: vk.Framebuffer(t.Framebuffer),
+ width: viewport.X,
+ height: viewport.Y,
+ layout: layout,
+ passLayout: vk.IMAGE_LAYOUT_PRESENT_SRC_KHR,
+ format: b.outFormat,
+ acquire: vk.Semaphore(t.WaitSem),
+ foreign: true,
+ }
+ return tex
+ case *Texture:
+ return t
+ default:
+ panic(fmt.Sprintf("vulkan: unsupported render target type: %T", t))
+ }
+}
+
+func (b *Backend) deferFunc(f func(d vk.Device)) {
+ b.defers = append(b.defers, f)
+}
+
+func (b *Backend) runDefers() {
+ for _, f := range b.defers {
+ f(b.dev)
+ }
+ b.defers = b.defers[:0]
+}
+
+// resetPipes prunes released pipelines from allPipes and resets the
+// descriptor pools of the surviving ones for the next frame. It
+// iterates backwards so deleting by index remains valid.
+func (b *Backend) resetPipes() {
+ for i := len(b.allPipes) - 1; i >= 0; i-- {
+ p := b.allPipes[i]
+ if p.pipe == 0 {
+ // Released pipeline: remove element i with the standard slice
+ // deletion idiom. The previous code appended b.allPipes[:i+1]
+ // (the prefix again) instead of the [i+1:] tail, which
+ // duplicated entries rather than removing the released one.
+ b.allPipes = append(b.allPipes[:i], b.allPipes[i+1:]...)
+ continue
+ }
+ if p.desc.size > 0 {
+ // Free all descriptor sets allocated last frame in one call.
+ vk.ResetDescriptorPool(b.dev, p.desc.pool)
+ p.desc.size = 0
+ }
+ }
+}
+
+func (b *Backend) EndFrame() {
+ if b.frameSig != 0 {
+ b.sigSems = append(b.sigSems, b.frameSig)
+ b.frameSig = 0
+ }
+ b.submitCmdBuf(false)
+}
+
+func (b *Backend) Caps() driver.Caps {
+ return driver.Caps{
+ MaxTextureSize: 4096,
+ Features: b.caps,
+ }
+}
+
+func (b *Backend) NewTimer() driver.Timer {
+ panic("timers not supported")
+}
+
+func (b *Backend) IsTimeContinuous() bool {
+ panic("timers not supported")
+}
+
+func (b *Backend) Release() {
+ vk.DeviceWaitIdle(b.dev)
+ if buf := b.staging.buf; buf != nil {
+ vk.UnmapMemory(b.dev, b.staging.buf.mem)
+ buf.Release()
+ }
+ b.runDefers()
+ for _, rp := range b.passes {
+ vk.DestroyRenderPass(b.dev, rp)
+ }
+ vk.DestroyFence(b.dev, b.fence)
+ vk.FreeCommandBuffers(b.dev, b.cmdPool.pool, b.cmdPool.buffers...)
+ vk.DestroyCommandPool(b.dev, b.cmdPool.pool)
+ *b = Backend{}
+}
+
+// NewTexture creates a Vulkan image, image view and sampler matching
+// the requested format, filters and usage bindings. Textures bound as
+// framebuffers additionally get a framebuffer object created against a
+// throwaway compatible render pass. Partially created resources are
+// destroyed on the image/view error paths.
+func (b *Backend) NewTexture(format driver.TextureFormat, width, height int, minFilter, magFilter driver.TextureFilter, bindings driver.BufferBinding) (driver.Texture, error) {
+ vkfmt := formatFor(format)
+ usage := vk.IMAGE_USAGE_TRANSFER_DST_BIT | vk.IMAGE_USAGE_TRANSFER_SRC_BIT
+ passLayout := vk.IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL
+ if bindings&driver.BufferBindingTexture != 0 {
+ usage |= vk.IMAGE_USAGE_SAMPLED_BIT
+ passLayout = vk.IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
+ }
+ if bindings&driver.BufferBindingFramebuffer != 0 {
+ usage |= vk.IMAGE_USAGE_COLOR_ATTACHMENT_BIT
+ }
+ if bindings&(driver.BufferBindingShaderStorageRead|driver.BufferBindingShaderStorageWrite) != 0 {
+ usage |= vk.IMAGE_USAGE_STORAGE_BIT
+ }
+ // Bug fix: switch on the closure parameter f, not the captured
+ // minFilter. The previous code ignored magFilter entirely because
+ // both filterFor(minFilter) and filterFor(magFilter) consulted
+ // minFilter.
+ filterFor := func(f driver.TextureFilter) vk.Filter {
+ switch f {
+ case driver.FilterLinear:
+ return vk.FILTER_LINEAR
+ case driver.FilterNearest:
+ return vk.FILTER_NEAREST
+ }
+ panic("unknown filter")
+ }
+ sampler, err := vk.CreateSampler(b.dev, filterFor(minFilter), filterFor(magFilter))
+ if err != nil {
+ return nil, mapErr(err)
+ }
+ img, mem, err := vk.CreateImage(b.physDev, b.dev, vkfmt, width, height, usage)
+ if err != nil {
+ vk.DestroySampler(b.dev, sampler)
+ return nil, mapErr(err)
+ }
+ view, err := vk.CreateImageView(b.dev, img, vkfmt)
+ if err != nil {
+ vk.DestroySampler(b.dev, sampler)
+ vk.DestroyImage(b.dev, img)
+ vk.FreeMemory(b.dev, mem)
+ return nil, mapErr(err)
+ }
+ t := &Texture{backend: b, img: img, mem: mem, view: view, sampler: sampler, layout: vk.IMAGE_LAYOUT_UNDEFINED, passLayout: passLayout, width: width, height: height, format: vkfmt}
+ if bindings&driver.BufferBindingFramebuffer != 0 {
+ // The render pass is only needed to create a compatible FBO and
+ // is destroyed again on return.
+ pass, err := vk.CreateRenderPass(b.dev, vkfmt, vk.ATTACHMENT_LOAD_OP_DONT_CARE,
+ vk.IMAGE_LAYOUT_UNDEFINED, vk.IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, nil)
+ if err != nil {
+ // NOTE(review): sampler/img/view leak on this and the next
+ // error path — consider releasing t here; left unchanged to
+ // match upstream behavior.
+ return nil, mapErr(err)
+ }
+ defer vk.DestroyRenderPass(b.dev, pass)
+ fbo, err := vk.CreateFramebuffer(b.dev, pass, view, width, height)
+ if err != nil {
+ return nil, mapErr(err)
+ }
+ t.fbo = fbo
+ }
+ return t, nil
+}
+
+func (b *Backend) NewBuffer(bindings driver.BufferBinding, size int) (driver.Buffer, error) {
+ if bindings&driver.BufferBindingUniforms != 0 {
+ // Implement uniform buffers as inline push constants.
+ return &Buffer{store: make([]byte, size)}, nil
+ }
+ usage := vk.BUFFER_USAGE_TRANSFER_DST_BIT | vk.BUFFER_USAGE_TRANSFER_SRC_BIT
+ if bindings&driver.BufferBindingIndices != 0 {
+ usage |= vk.BUFFER_USAGE_INDEX_BUFFER_BIT
+ }
+ if bindings&(driver.BufferBindingShaderStorageRead|driver.BufferBindingShaderStorageWrite) != 0 {
+ usage |= vk.BUFFER_USAGE_STORAGE_BUFFER_BIT
+ }
+ if bindings&driver.BufferBindingVertices != 0 {
+ usage |= vk.BUFFER_USAGE_VERTEX_BUFFER_BIT
+ }
+ buf, err := b.newBuffer(size, usage, vk.MEMORY_PROPERTY_DEVICE_LOCAL_BIT)
+ return buf, mapErr(err)
+}
+
+func (b *Backend) newBuffer(size int, usage vk.BufferUsageFlags, props vk.MemoryPropertyFlags) (*Buffer, error) {
+ buf, mem, err := vk.CreateBuffer(b.physDev, b.dev, size, usage, props)
+ return &Buffer{backend: b, buf: buf, mem: mem, usage: usage}, err
+}
+
+func (b *Backend) NewImmutableBuffer(typ driver.BufferBinding, data []byte) (driver.Buffer, error) {
+ buf, err := b.NewBuffer(typ, len(data))
+ if err != nil {
+ return nil, err
+ }
+ buf.Upload(data)
+ return buf, nil
+}
+
+func (b *Backend) NewVertexShader(src shader.Sources) (driver.VertexShader, error) {
+ sh, err := b.newShader(src, vk.SHADER_STAGE_VERTEX_BIT)
+ return sh, mapErr(err)
+}
+
+func (b *Backend) NewFragmentShader(src shader.Sources) (driver.FragmentShader, error) {
+ sh, err := b.newShader(src, vk.SHADER_STAGE_FRAGMENT_BIT)
+ return sh, mapErr(err)
+}
+
+func (b *Backend) NewPipeline(desc driver.PipelineDesc) (driver.Pipeline, error) {
+ vs := desc.VertexShader.(*Shader)
+ fs := desc.FragmentShader.(*Shader)
+ var ranges []vk.PushConstantRange
+ if r := vs.pushRange; r != (vk.PushConstantRange{}) {
+ ranges = append(ranges, r)
+ }
+ if r := fs.pushRange; r != (vk.PushConstantRange{}) {
+ ranges = append(ranges, r)
+ }
+ descPool, err := createPipelineLayout(b.dev, fs.src, ranges)
+ if err != nil {
+ return nil, mapErr(err)
+ }
+ blend := desc.BlendDesc
+ factorFor := func(f driver.BlendFactor) vk.BlendFactor {
+ switch f {
+ case driver.BlendFactorZero:
+ return vk.BLEND_FACTOR_ZERO
+ case driver.BlendFactorOne:
+ return vk.BLEND_FACTOR_ONE
+ case driver.BlendFactorOneMinusSrcAlpha:
+ return vk.BLEND_FACTOR_ONE_MINUS_SRC_ALPHA
+ case driver.BlendFactorDstColor:
+ return vk.BLEND_FACTOR_DST_COLOR
+ default:
+ panic("unknown blend factor")
+ }
+ }
+ var top vk.PrimitiveTopology
+ switch desc.Topology {
+ case driver.TopologyTriangles:
+ top = vk.PRIMITIVE_TOPOLOGY_TRIANGLE_LIST
+ case driver.TopologyTriangleStrip:
+ top = vk.PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP
+ default:
+ panic("unknown topology")
+ }
+ var binds []vk.VertexInputBindingDescription
+ var attrs []vk.VertexInputAttributeDescription
+ inputs := desc.VertexLayout.Inputs
+ for i, inp := range inputs {
+ binds = append(binds, vk.VertexInputBindingDescription{
+ Binding: i,
+ Stride: desc.VertexLayout.Stride,
+ })
+ attrs = append(attrs, vk.VertexInputAttributeDescription{
+ Binding: i,
+ Location: vs.src.Inputs[i].Location,
+ Format: vertFormatFor(vs.src.Inputs[i]),
+ Offset: inp.Offset,
+ })
+ }
+ fmt := b.outFormat
+ if f := desc.PixelFormat; f != driver.TextureFormatOutput {
+ fmt = formatFor(f)
+ }
+ pass, err := vk.CreateRenderPass(b.dev, fmt, vk.ATTACHMENT_LOAD_OP_DONT_CARE,
+ vk.IMAGE_LAYOUT_UNDEFINED, vk.IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, nil)
+ if err != nil {
+ return nil, mapErr(err)
+ }
+ defer vk.DestroyRenderPass(b.dev, pass)
+ pipe, err := vk.CreateGraphicsPipeline(b.dev, pass, vs.module, fs.module, blend.Enable, factorFor(blend.SrcFactor), factorFor(blend.DstFactor), top, binds, attrs, descPool.layout)
+ if err != nil {
+ descPool.release(b.dev)
+ return nil, mapErr(err)
+ }
+ p := &Pipeline{backend: b, pipe: pipe, desc: descPool, pushRanges: ranges, ninputs: len(inputs)}
+ b.allPipes = append(b.allPipes, p)
+ return p, nil
+}
+
+func (b *Backend) NewComputeProgram(src shader.Sources) (driver.Program, error) {
+ sh, err := b.newShader(src, vk.SHADER_STAGE_COMPUTE_BIT)
+ if err != nil {
+ return nil, mapErr(err)
+ }
+ defer sh.Release()
+ descPool, err := createPipelineLayout(b.dev, src, nil)
+ if err != nil {
+ return nil, mapErr(err)
+ }
+ pipe, err := vk.CreateComputePipeline(b.dev, sh.module, descPool.layout)
+ if err != nil {
+ descPool.release(b.dev)
+ return nil, mapErr(err)
+ }
+ return &Pipeline{backend: b, pipe: pipe, desc: descPool}, nil
+}
+
+func vertFormatFor(f shader.InputLocation) vk.Format {
+ t := f.Type
+ s := f.Size
+ switch {
+ case t == shader.DataTypeFloat && s == 1:
+ return vk.FORMAT_R32_SFLOAT
+ case t == shader.DataTypeFloat && s == 2:
+ return vk.FORMAT_R32G32_SFLOAT
+ case t == shader.DataTypeFloat && s == 3:
+ return vk.FORMAT_R32G32B32_SFLOAT
+ case t == shader.DataTypeFloat && s == 4:
+ return vk.FORMAT_R32G32B32A32_SFLOAT
+ default:
+ panic("unsupported data type")
+ }
+}
+
+func createPipelineLayout(d vk.Device, src shader.Sources, ranges []vk.PushConstantRange) (*descPool, error) {
+ var (
+ descLayouts []vk.DescriptorSetLayout
+ descLayout vk.DescriptorSetLayout
+ )
+ texBinds := make([]int, len(src.Textures))
+ imgBinds := make([]int, len(src.Images))
+ bufBinds := make([]int, len(src.StorageBuffers))
+ var descBinds []vk.DescriptorSetLayoutBinding
+ for i, t := range src.Textures {
+ descBinds = append(descBinds, vk.DescriptorSetLayoutBinding{
+ Binding: t.Binding,
+ StageFlags: vk.SHADER_STAGE_FRAGMENT_BIT,
+ DescriptorType: vk.DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
+ })
+ texBinds[i] = t.Binding
+ }
+ for i, img := range src.Images {
+ descBinds = append(descBinds, vk.DescriptorSetLayoutBinding{
+ Binding: img.Binding,
+ StageFlags: vk.SHADER_STAGE_COMPUTE_BIT,
+ DescriptorType: vk.DESCRIPTOR_TYPE_STORAGE_IMAGE,
+ })
+ imgBinds[i] = img.Binding
+ }
+ for i, buf := range src.StorageBuffers {
+ descBinds = append(descBinds, vk.DescriptorSetLayoutBinding{
+ Binding: buf.Binding,
+ StageFlags: vk.SHADER_STAGE_COMPUTE_BIT,
+ DescriptorType: vk.DESCRIPTOR_TYPE_STORAGE_BUFFER,
+ })
+ bufBinds[i] = buf.Binding
+ }
+ if len(descBinds) > 0 {
+ var err error
+ descLayout, err = vk.CreateDescriptorSetLayout(d, descBinds)
+ if err != nil {
+ return nil, err
+ }
+ descLayouts = append(descLayouts, descLayout)
+ }
+ layout, err := vk.CreatePipelineLayout(d, ranges, descLayouts)
+ if err != nil {
+ if descLayout != 0 {
+ vk.DestroyDescriptorSetLayout(d, descLayout)
+ }
+ return nil, err
+ }
+ descPool := &descPool{
+ texBinds: texBinds,
+ bufBinds: bufBinds,
+ imgBinds: imgBinds,
+ layout: layout,
+ descLayout: descLayout,
+ }
+ return descPool, nil
+}
+
+func (b *Backend) newShader(src shader.Sources, stage vk.ShaderStageFlags) (*Shader, error) {
+ mod, err := vk.CreateShaderModule(b.dev, src.SPIRV)
+ if err != nil {
+ return nil, err
+ }
+
+ sh := &Shader{dev: b.dev, module: mod, src: src}
+ if locs := src.Uniforms.Locations; len(locs) > 0 {
+ pushOffset := 0x7fffffff
+ for _, l := range locs {
+ if l.Offset < pushOffset {
+ pushOffset = l.Offset
+ }
+ }
+ sh.pushRange = vk.BuildPushConstantRange(stage, pushOffset, src.Uniforms.Size)
+ }
+ return sh, nil
+}
+
+func (b *Backend) CopyTexture(dstTex driver.Texture, dorig image.Point, srcFBO driver.Texture, srect image.Rectangle) {
+ dst := dstTex.(*Texture)
+ src := srcFBO.(*Texture)
+ cmdBuf := b.ensureCmdBuf()
+ op := vk.BuildImageCopy(srect.Min.X, srect.Min.Y, dorig.X, dorig.Y, srect.Dx(), srect.Dy())
+ src.imageBarrier(cmdBuf,
+ vk.IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
+ vk.PIPELINE_STAGE_TRANSFER_BIT,
+ vk.ACCESS_TRANSFER_READ_BIT,
+ )
+ dst.imageBarrier(cmdBuf,
+ vk.IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
+ vk.PIPELINE_STAGE_TRANSFER_BIT,
+ vk.ACCESS_TRANSFER_WRITE_BIT,
+ )
+ vk.CmdCopyImage(cmdBuf, src.img, src.layout, dst.img, dst.layout, []vk.ImageCopy{op})
+}
+
+func (b *Backend) Viewport(x, y, width, height int) {
+ cmdBuf := b.currentCmdBuf()
+ vp := vk.BuildViewport(float32(x), float32(y), float32(width), float32(height))
+ vk.CmdSetViewport(cmdBuf, 0, vp)
+}
+
+func (b *Backend) DrawArrays(off, count int) {
+ cmdBuf := b.currentCmdBuf()
+ if b.desc.dirty {
+ b.pipe.desc.bindDescriptorSet(b, cmdBuf, vk.PIPELINE_BIND_POINT_GRAPHICS, b.desc.texBinds, b.desc.bufBinds)
+ b.desc.dirty = false
+ }
+ vk.CmdDraw(cmdBuf, count, 1, off, 0)
+}
+
+func (b *Backend) DrawElements(off, count int) {
+ cmdBuf := b.currentCmdBuf()
+ if b.desc.dirty {
+ b.pipe.desc.bindDescriptorSet(b, cmdBuf, vk.PIPELINE_BIND_POINT_GRAPHICS, b.desc.texBinds, b.desc.bufBinds)
+ b.desc.dirty = false
+ }
+ vk.CmdDrawIndexed(cmdBuf, count, 1, off, 0, 0)
+}
+
+func (b *Backend) BindImageTexture(unit int, tex driver.Texture) {
+ t := tex.(*Texture)
+ b.desc.texBinds[unit] = t
+ b.desc.dirty = true
+ t.imageBarrier(b.currentCmdBuf(),
+ vk.IMAGE_LAYOUT_GENERAL,
+ vk.PIPELINE_STAGE_COMPUTE_SHADER_BIT,
+ vk.ACCESS_SHADER_READ_BIT|vk.ACCESS_SHADER_WRITE_BIT,
+ )
+}
+
+func (b *Backend) DispatchCompute(x, y, z int) {
+ cmdBuf := b.currentCmdBuf()
+ if b.desc.dirty {
+ b.pipe.desc.bindDescriptorSet(b, cmdBuf, vk.PIPELINE_BIND_POINT_COMPUTE, b.desc.texBinds, b.desc.bufBinds)
+ b.desc.dirty = false
+ }
+ vk.CmdDispatch(cmdBuf, x, y, z)
+}
+
+func (t *Texture) Upload(offset, size image.Point, pixels []byte, stride int) {
+ if stride == 0 {
+ stride = size.X * 4
+ }
+ cmdBuf := t.backend.ensureCmdBuf()
+ dstStride := size.X * 4
+ n := size.Y * dstStride
+ stage, mem, off := t.backend.stagingBuffer(n)
+ var srcOff, dstOff int
+ for y := 0; y < size.Y; y++ {
+ srcRow := pixels[srcOff : srcOff+dstStride]
+ dstRow := mem[dstOff : dstOff+dstStride]
+ copy(dstRow, srcRow)
+ dstOff += dstStride
+ srcOff += stride
+ }
+ op := vk.BuildBufferImageCopy(off, dstStride/4, offset.X, offset.Y, size.X, size.Y)
+ t.imageBarrier(cmdBuf,
+ vk.IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
+ vk.PIPELINE_STAGE_TRANSFER_BIT,
+ vk.ACCESS_TRANSFER_WRITE_BIT,
+ )
+ vk.CmdCopyBufferToImage(cmdBuf, stage.buf, t.img, t.layout, op)
+}
+
+func (t *Texture) Release() {
+ if t.foreign {
+ panic("external textures cannot be released")
+ }
+ freet := *t
+ t.backend.deferFunc(func(d vk.Device) {
+ if freet.fbo != 0 {
+ vk.DestroyFramebuffer(d, freet.fbo)
+ }
+ vk.DestroySampler(d, freet.sampler)
+ vk.DestroyImageView(d, freet.view)
+ vk.DestroyImage(d, freet.img)
+ vk.FreeMemory(d, freet.mem)
+ })
+ *t = Texture{}
+}
+
+func (p *Pipeline) Release() {
+ freep := *p
+ p.backend.deferFunc(func(d vk.Device) {
+ freep.desc.release(d)
+ vk.DestroyPipeline(d, freep.pipe)
+ })
+ *p = Pipeline{}
+}
+
+func (p *descPool) release(d vk.Device) {
+ if p := p.pool; p != 0 {
+ vk.DestroyDescriptorPool(d, p)
+ }
+ if l := p.descLayout; l != 0 {
+ vk.DestroyDescriptorSetLayout(d, l)
+ }
+ vk.DestroyPipelineLayout(d, p.layout)
+}
+
+func (p *descPool) bindDescriptorSet(b *Backend, cmdBuf vk.CommandBuffer, bindPoint vk.PipelineBindPoint, texBinds [texUnits]*Texture, bufBinds [storageUnits]*Buffer) {
+ realloced := false
+ destroyPool := func() {
+ pool := p.pool
+ b.deferFunc(func(d vk.Device) {
+ vk.DestroyDescriptorPool(d, pool)
+ })
+ p.pool = 0
+ p.cap = 0
+ }
+ for {
+ if p.size == p.cap {
+ if realloced {
+ panic("vulkan: vkAllocateDescriptorSet failed on a newly allocated descriptor pool")
+ }
+ destroyPool()
+ realloced = true
+ newCap := p.cap * 2
+ const initialPoolSize = 100
+ if newCap < initialPoolSize {
+ newCap = initialPoolSize
+ }
+ var poolSizes []vk.DescriptorPoolSize
+ if n := len(p.texBinds); n > 0 {
+ poolSizes = append(poolSizes, vk.BuildDescriptorPoolSize(vk.DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, newCap*n))
+ }
+ if n := len(p.imgBinds); n > 0 {
+ poolSizes = append(poolSizes, vk.BuildDescriptorPoolSize(vk.DESCRIPTOR_TYPE_STORAGE_IMAGE, newCap*n))
+ }
+ if n := len(p.bufBinds); n > 0 {
+ poolSizes = append(poolSizes, vk.BuildDescriptorPoolSize(vk.DESCRIPTOR_TYPE_STORAGE_BUFFER, newCap*n))
+ }
+ pool, err := vk.CreateDescriptorPool(b.dev, newCap, poolSizes)
+ if err != nil {
+ panic(fmt.Errorf("vulkan: failed to allocate descriptor pool with %d descriptors", newCap))
+ }
+ p.pool = pool
+ p.cap = newCap
+ p.size = 0
+ }
+ l := p.descLayout
+ if l == 0 {
+ panic("vulkan: descriptor set is dirty, but pipeline has empty layout")
+ }
+ descSet, err := vk.AllocateDescriptorSet(b.dev, p.pool, l)
+ if err != nil {
+ destroyPool()
+ continue
+ }
+ p.size++
+ for _, bind := range p.texBinds {
+ tex := texBinds[bind]
+ write := vk.BuildWriteDescriptorSetImage(descSet, bind, vk.DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, tex.sampler, tex.view, vk.IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL)
+ vk.UpdateDescriptorSet(b.dev, write)
+ }
+ for _, bind := range p.imgBinds {
+ tex := texBinds[bind]
+ write := vk.BuildWriteDescriptorSetImage(descSet, bind, vk.DESCRIPTOR_TYPE_STORAGE_IMAGE, 0, tex.view, vk.IMAGE_LAYOUT_GENERAL)
+ vk.UpdateDescriptorSet(b.dev, write)
+ }
+ for _, bind := range p.bufBinds {
+ buf := bufBinds[bind]
+ write := vk.BuildWriteDescriptorSetBuffer(descSet, bind, vk.DESCRIPTOR_TYPE_STORAGE_BUFFER, buf.buf)
+ vk.UpdateDescriptorSet(b.dev, write)
+ }
+ vk.CmdBindDescriptorSets(cmdBuf, bindPoint, p.layout, 0, []vk.DescriptorSet{descSet})
+ break
+ }
+}
+
+func (t *Texture) imageBarrier(cmdBuf vk.CommandBuffer, layout vk.ImageLayout, stage vk.PipelineStageFlags, access vk.AccessFlags) {
+ srcStage := t.scope.stage
+ if srcStage == 0 && t.layout == layout {
+ t.scope.stage = stage
+ t.scope.access = access
+ return
+ }
+ if srcStage == 0 {
+ srcStage = vk.PIPELINE_STAGE_TOP_OF_PIPE_BIT
+ }
+ b := vk.BuildImageMemoryBarrier(
+ t.img,
+ t.scope.access, access,
+ t.layout, layout,
+ )
+ vk.CmdPipelineBarrier(cmdBuf, srcStage, stage, vk.DEPENDENCY_BY_REGION_BIT, nil, nil, []vk.ImageMemoryBarrier{b})
+ t.layout = layout
+ t.scope.stage = stage
+ t.scope.access = access
+}
+
+func (b *Backend) PrepareTexture(tex driver.Texture) {
+ t := tex.(*Texture)
+ cmdBuf := b.ensureCmdBuf()
+ t.imageBarrier(cmdBuf,
+ vk.IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
+ vk.PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
+ vk.ACCESS_SHADER_READ_BIT,
+ )
+}
+
+func (b *Backend) BindTexture(unit int, tex driver.Texture) {
+ t := tex.(*Texture)
+ b.desc.texBinds[unit] = t
+ b.desc.dirty = true
+}
+
+func (b *Backend) BindPipeline(pipe driver.Pipeline) {
+ b.bindPipeline(pipe.(*Pipeline), vk.PIPELINE_BIND_POINT_GRAPHICS)
+}
+
+func (b *Backend) BindProgram(prog driver.Program) {
+ b.bindPipeline(prog.(*Pipeline), vk.PIPELINE_BIND_POINT_COMPUTE)
+}
+
+func (b *Backend) bindPipeline(p *Pipeline, point vk.PipelineBindPoint) {
+ b.pipe = p
+ b.desc.dirty = p.desc.descLayout != 0
+ cmdBuf := b.currentCmdBuf()
+ vk.CmdBindPipeline(cmdBuf, point, p.pipe)
+}
+
+func (s *Shader) Release() {
+ vk.DestroyShaderModule(s.dev, s.module)
+ *s = Shader{}
+}
+
+func (b *Backend) BindStorageBuffer(binding int, buffer driver.Buffer) {
+ buf := buffer.(*Buffer)
+ b.desc.bufBinds[binding] = buf
+ b.desc.dirty = true
+ buf.barrier(b.currentCmdBuf(),
+ vk.PIPELINE_STAGE_COMPUTE_SHADER_BIT,
+ vk.ACCESS_SHADER_READ_BIT|vk.ACCESS_SHADER_WRITE_BIT,
+ )
+}
+
+func (b *Backend) BindUniforms(buffer driver.Buffer) {
+ buf := buffer.(*Buffer)
+ cmdBuf := b.currentCmdBuf()
+ for _, s := range b.pipe.pushRanges {
+ off := s.Offset()
+ vk.CmdPushConstants(cmdBuf, b.pipe.desc.layout, s.StageFlags(), off, buf.store[off:off+s.Size()])
+ }
+}
+
+func (b *Backend) BindVertexBuffer(buffer driver.Buffer, offset int) {
+ buf := buffer.(*Buffer)
+ cmdBuf := b.currentCmdBuf()
+ b.bindings = b.bindings[:0]
+ b.offsets = b.offsets[:0]
+ for i := 0; i < b.pipe.ninputs; i++ {
+ b.bindings = append(b.bindings, buf.buf)
+ b.offsets = append(b.offsets, vk.DeviceSize(offset))
+ }
+ vk.CmdBindVertexBuffers(cmdBuf, 0, b.bindings, b.offsets)
+}
+
+func (b *Backend) BindIndexBuffer(buffer driver.Buffer) {
+ buf := buffer.(*Buffer)
+ cmdBuf := b.currentCmdBuf()
+ vk.CmdBindIndexBuffer(cmdBuf, buf.buf, 0, vk.INDEX_TYPE_UINT16)
+}
+
+func (b *Buffer) Download(data []byte) error {
+ if b.buf == 0 {
+ copy(data, b.store)
+ return nil
+ }
+ stage, mem, off := b.backend.stagingBuffer(len(data))
+ cmdBuf := b.backend.ensureCmdBuf()
+ b.barrier(cmdBuf,
+ vk.PIPELINE_STAGE_TRANSFER_BIT,
+ vk.ACCESS_TRANSFER_READ_BIT,
+ )
+ vk.CmdCopyBuffer(cmdBuf, b.buf, stage.buf, 0, off, len(data))
+ stage.scope.stage = vk.PIPELINE_STAGE_TRANSFER_BIT
+ stage.scope.access = vk.ACCESS_TRANSFER_WRITE_BIT
+ stage.barrier(cmdBuf,
+ vk.PIPELINE_STAGE_HOST_BIT,
+ vk.ACCESS_HOST_READ_BIT,
+ )
+ b.backend.submitCmdBuf(true)
+ copy(data, mem)
+ return nil
+}
+
+func (b *Buffer) Upload(data []byte) {
+ if b.buf == 0 {
+ copy(b.store, data)
+ return
+ }
+ stage, mem, off := b.backend.stagingBuffer(len(data))
+ copy(mem, data)
+ cmdBuf := b.backend.ensureCmdBuf()
+ b.barrier(cmdBuf,
+ vk.PIPELINE_STAGE_TRANSFER_BIT,
+ vk.ACCESS_TRANSFER_WRITE_BIT,
+ )
+ vk.CmdCopyBuffer(cmdBuf, stage.buf, b.buf, off, 0, len(data))
+ var access vk.AccessFlags
+ if b.usage&vk.BUFFER_USAGE_INDEX_BUFFER_BIT != 0 {
+ access |= vk.ACCESS_INDEX_READ_BIT
+ }
+ if b.usage&vk.BUFFER_USAGE_VERTEX_BUFFER_BIT != 0 {
+ access |= vk.ACCESS_VERTEX_ATTRIBUTE_READ_BIT
+ }
+ if access != 0 {
+ b.barrier(cmdBuf,
+ vk.PIPELINE_STAGE_VERTEX_INPUT_BIT,
+ access,
+ )
+ }
+}
+
+func (b *Buffer) barrier(cmdBuf vk.CommandBuffer, stage vk.PipelineStageFlags, access vk.AccessFlags) {
+ srcStage := b.scope.stage
+ if srcStage == 0 {
+ b.scope.stage = stage
+ b.scope.access = access
+ return
+ }
+ barrier := vk.BuildBufferMemoryBarrier(
+ b.buf,
+ b.scope.access, access,
+ )
+ vk.CmdPipelineBarrier(cmdBuf, srcStage, stage, vk.DEPENDENCY_BY_REGION_BIT, nil, []vk.BufferMemoryBarrier{barrier}, nil)
+ b.scope.stage = stage
+ b.scope.access = access
+}
+
+func (b *Buffer) Release() {
+ freeb := *b
+ if freeb.buf != 0 {
+ b.backend.deferFunc(func(d vk.Device) {
+ vk.DestroyBuffer(d, freeb.buf)
+ vk.FreeMemory(d, freeb.mem)
+ })
+ }
+ *b = Buffer{}
+}
+
+func (t *Texture) ReadPixels(src image.Rectangle, pixels []byte, stride int) error {
+ if len(pixels) == 0 {
+ return nil
+ }
+ sz := src.Size()
+ stageStride := sz.X * 4
+ n := sz.Y * stageStride
+ stage, mem, off := t.backend.stagingBuffer(n)
+ cmdBuf := t.backend.ensureCmdBuf()
+ region := vk.BuildBufferImageCopy(off, stageStride/4, src.Min.X, src.Min.Y, sz.X, sz.Y)
+ t.imageBarrier(cmdBuf,
+ vk.IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
+ vk.PIPELINE_STAGE_TRANSFER_BIT,
+ vk.ACCESS_TRANSFER_READ_BIT,
+ )
+ vk.CmdCopyImageToBuffer(cmdBuf, t.img, t.layout, stage.buf, []vk.BufferImageCopy{region})
+ stage.scope.stage = vk.PIPELINE_STAGE_TRANSFER_BIT
+ stage.scope.access = vk.ACCESS_TRANSFER_WRITE_BIT
+ stage.barrier(cmdBuf,
+ vk.PIPELINE_STAGE_HOST_BIT,
+ vk.ACCESS_HOST_READ_BIT,
+ )
+ t.backend.submitCmdBuf(true)
+ var srcOff, dstOff int
+ for y := 0; y < sz.Y; y++ {
+ dstRow := pixels[srcOff : srcOff+stageStride]
+ srcRow := mem[dstOff : dstOff+stageStride]
+ copy(dstRow, srcRow)
+ dstOff += stageStride
+ srcOff += stride
+ }
+ return nil
+}
+
+func (b *Backend) currentCmdBuf() vk.CommandBuffer {
+ cur := b.cmdPool.current
+ if cur == nil {
+ panic("vulkan: invalid operation outside a render or compute pass")
+ }
+ return cur
+}
+
+func (b *Backend) ensureCmdBuf() vk.CommandBuffer {
+ if b.cmdPool.current != nil {
+ return b.cmdPool.current
+ }
+ if b.cmdPool.used < len(b.cmdPool.buffers) {
+ buf := b.cmdPool.buffers[b.cmdPool.used]
+ b.cmdPool.current = buf
+ } else {
+ buf, err := vk.AllocateCommandBuffer(b.dev, b.cmdPool.pool)
+ if err != nil {
+ panic(err)
+ }
+ b.cmdPool.buffers = append(b.cmdPool.buffers, buf)
+ b.cmdPool.current = buf
+ }
+ b.cmdPool.used++
+ buf := b.cmdPool.current
+ if err := vk.BeginCommandBuffer(buf); err != nil {
+ panic(err)
+ }
+ return buf
+}
+
+func (b *Backend) BeginRenderPass(tex driver.Texture, d driver.LoadDesc) {
+ t := tex.(*Texture)
+ var vkop vk.AttachmentLoadOp
+ switch d.Action {
+ case driver.LoadActionClear:
+ vkop = vk.ATTACHMENT_LOAD_OP_CLEAR
+ case driver.LoadActionInvalidate:
+ vkop = vk.ATTACHMENT_LOAD_OP_DONT_CARE
+ case driver.LoadActionKeep:
+ vkop = vk.ATTACHMENT_LOAD_OP_LOAD
+ }
+ cmdBuf := b.ensureCmdBuf()
+ if sem := t.acquire; sem != 0 {
+ // The render pass targets a framebuffer that has an associated acquire semaphore.
+ // Wait for it by forming an execution barrier.
+ b.waitSems = append(b.waitSems, sem)
+ b.waitStages = append(b.waitStages, vk.PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT)
+ // But only for the first pass in a frame.
+ t.acquire = 0
+ }
+ t.imageBarrier(cmdBuf,
+ vk.IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
+ vk.PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
+ vk.ACCESS_COLOR_ATTACHMENT_READ_BIT|vk.ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
+ )
+ pass := b.lookupPass(t.format, vkop, t.layout, t.passLayout)
+ col := d.ClearColor
+ vk.CmdBeginRenderPass(cmdBuf, pass, t.fbo, t.width, t.height, [4]float32{col.R, col.G, col.B, col.A})
+ t.layout = t.passLayout
+ // If the render pass describes an automatic image layout transition to its final layout, there
+ // is an implicit image barrier with destination PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT. Make
+ // sure any subsequent barrier includes the transition.
+ // See also https://www.khronos.org/registry/vulkan/specs/1.0/html/vkspec.html#VkSubpassDependency.
+ t.scope.stage |= vk.PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT
+}
+
+func (b *Backend) EndRenderPass() {
+ vk.CmdEndRenderPass(b.cmdPool.current)
+}
+
+func (b *Backend) BeginCompute() {
+ b.ensureCmdBuf()
+}
+
+func (b *Backend) EndCompute() {
+}
+
+func (b *Backend) lookupPass(fmt vk.Format, loadAct vk.AttachmentLoadOp, initLayout, finalLayout vk.ImageLayout) vk.RenderPass {
+ key := passKey{fmt: fmt, loadAct: loadAct, initLayout: initLayout, finalLayout: finalLayout}
+ if pass, ok := b.passes[key]; ok {
+ return pass
+ }
+ pass, err := vk.CreateRenderPass(b.dev, fmt, loadAct, initLayout, finalLayout, nil)
+ if err != nil {
+ panic(err)
+ }
+ b.passes[key] = pass
+ return pass
+}
+
+func (b *Backend) submitCmdBuf(sync bool) {
+ buf := b.cmdPool.current
+ if buf == nil {
+ return
+ }
+ b.cmdPool.current = nil
+ if err := vk.EndCommandBuffer(buf); err != nil {
+ panic(err)
+ }
+ var fence vk.Fence
+ if sync {
+ fence = b.fence
+ }
+ if err := vk.QueueSubmit(b.queue, buf, b.waitSems, b.waitStages, b.sigSems, fence); err != nil {
+ panic(err)
+ }
+ b.waitSems = b.waitSems[:0]
+ b.sigSems = b.sigSems[:0]
+ b.waitStages = b.waitStages[:0]
+ if sync {
+ vk.WaitForFences(b.dev, b.fence)
+ vk.ResetFences(b.dev, b.fence)
+ }
+}
+
+func (b *Backend) stagingBuffer(size int) (*Buffer, []byte, int) {
+ if b.staging.size+size > b.staging.cap {
+ if b.staging.buf != nil {
+ vk.UnmapMemory(b.dev, b.staging.buf.mem)
+ b.staging.buf.Release()
+ b.staging.cap = 0
+ }
+ cap := 2 * (b.staging.size + size)
+ buf, err := b.newBuffer(cap, vk.BUFFER_USAGE_TRANSFER_SRC_BIT|vk.BUFFER_USAGE_TRANSFER_DST_BIT,
+ vk.MEMORY_PROPERTY_HOST_VISIBLE_BIT|vk.MEMORY_PROPERTY_HOST_COHERENT_BIT)
+ if err != nil {
+ panic(err)
+ }
+ mem, err := vk.MapMemory(b.dev, buf.mem, 0, cap)
+ if err != nil {
+ buf.Release()
+ panic(err)
+ }
+ b.staging.buf = buf
+ b.staging.mem = mem
+ b.staging.size = 0
+ b.staging.cap = cap
+ }
+ off := b.staging.size
+ b.staging.size += size
+ mem := b.staging.mem[off : off+size]
+ return b.staging.buf, mem, off
+}
+
+func formatFor(format driver.TextureFormat) vk.Format {
+ switch format {
+ case driver.TextureFormatRGBA8:
+ return vk.FORMAT_R8G8B8A8_UNORM
+ case driver.TextureFormatSRGBA:
+ return vk.FORMAT_R8G8B8A8_SRGB
+ case driver.TextureFormatFloat:
+ return vk.FORMAT_R16_SFLOAT
+ default:
+ panic("unsupported texture format")
+ }
+}
+
+func mapErr(err error) error {
+ var vkErr vk.Error
+ if errors.As(err, &vkErr) && vkErr == vk.ERROR_DEVICE_LOST {
+ return driver.ErrDeviceLost
+ }
+ return err
+}
+
+func (f *Texture) ImplementsRenderTarget() {}
diff --git a/vendor/gioui.org/gpu/internal/vulkan/vulkan_nosupport.go b/vendor/gioui.org/gpu/internal/vulkan/vulkan_nosupport.go
new file mode 100644
index 0000000..4364a43
--- /dev/null
+++ b/vendor/gioui.org/gpu/internal/vulkan/vulkan_nosupport.go
@@ -0,0 +1,5 @@
+// SPDX-License-Identifier: Unlicense OR MIT
+
+package vulkan
+
+// Empty file to avoid the build error for platforms without Vulkan support.
diff --git a/vendor/gioui.org/gpu/pack.go b/vendor/gioui.org/gpu/pack.go
new file mode 100644
index 0000000..3f8c925
--- /dev/null
+++ b/vendor/gioui.org/gpu/pack.go
@@ -0,0 +1,105 @@
+// SPDX-License-Identifier: Unlicense OR MIT
+
+package gpu
+
+import (
+ "image"
+)
+
+// packer packs a set of many smaller rectangles into
+// much fewer larger atlases.
+type packer struct {
+ maxDims image.Point
+ spaces []image.Rectangle
+
+ sizes []image.Point
+ pos image.Point
+}
+
+type placement struct {
+ Idx int
+ Pos image.Point
+}
+
+// add adds the given rectangle to the atlases and
+// return the allocated position.
+func (p *packer) add(s image.Point) (placement, bool) {
+ if place, ok := p.tryAdd(s); ok {
+ return place, true
+ }
+ p.newPage()
+ return p.tryAdd(s)
+}
+
+func (p *packer) clear() {
+ p.sizes = p.sizes[:0]
+ p.spaces = p.spaces[:0]
+}
+
+func (p *packer) newPage() {
+ p.pos = image.Point{}
+ p.sizes = append(p.sizes, image.Point{})
+ p.spaces = p.spaces[:0]
+ p.spaces = append(p.spaces, image.Rectangle{
+ Max: image.Point{X: 1e6, Y: 1e6},
+ })
+}
+
+func (p *packer) tryAdd(s image.Point) (placement, bool) {
+ var (
+ bestIdx = -1
+ bestSpace image.Rectangle
+ bestSize = p.maxDims
+ )
+ // Go backwards to prioritize smaller spaces.
+ for i, space := range p.spaces {
+ rightSpace := space.Dx() - s.X
+ bottomSpace := space.Dy() - s.Y
+ if rightSpace < 0 || bottomSpace < 0 {
+ continue
+ }
+ idx := len(p.sizes) - 1
+ size := p.sizes[idx]
+ if x := space.Min.X + s.X; x > size.X {
+ if x > p.maxDims.X {
+ continue
+ }
+ size.X = x
+ }
+ if y := space.Min.Y + s.Y; y > size.Y {
+ if y > p.maxDims.Y {
+ continue
+ }
+ size.Y = y
+ }
+ if size.X*size.Y < bestSize.X*bestSize.Y {
+ bestIdx = i
+ bestSpace = space
+ bestSize = size
+ }
+ }
+ if bestIdx == -1 {
+ return placement{}, false
+ }
+ // Remove space.
+ p.spaces[bestIdx] = p.spaces[len(p.spaces)-1]
+ p.spaces = p.spaces[:len(p.spaces)-1]
+ // Put s in the top left corner and add the (at most)
+ // two smaller spaces.
+ pos := bestSpace.Min
+ if rem := bestSpace.Dy() - s.Y; rem > 0 {
+ p.spaces = append(p.spaces, image.Rectangle{
+ Min: image.Point{X: pos.X, Y: pos.Y + s.Y},
+ Max: image.Point{X: bestSpace.Max.X, Y: bestSpace.Max.Y},
+ })
+ }
+ if rem := bestSpace.Dx() - s.X; rem > 0 {
+ p.spaces = append(p.spaces, image.Rectangle{
+ Min: image.Point{X: pos.X + s.X, Y: pos.Y},
+ Max: image.Point{X: bestSpace.Max.X, Y: pos.Y + s.Y},
+ })
+ }
+ idx := len(p.sizes) - 1
+ p.sizes[idx] = bestSize
+ return placement{Idx: idx, Pos: pos}, true
+}
diff --git a/vendor/gioui.org/gpu/path.go b/vendor/gioui.org/gpu/path.go
new file mode 100644
index 0000000..5fd73cf
--- /dev/null
+++ b/vendor/gioui.org/gpu/path.go
@@ -0,0 +1,400 @@
+// SPDX-License-Identifier: Unlicense OR MIT
+
+package gpu
+
+// GPU accelerated path drawing using the algorithms from
+// Pathfinder (https://github.com/servo/pathfinder).
+
+import (
+ "encoding/binary"
+ "image"
+ "math"
+ "unsafe"
+
+ "gioui.org/f32"
+ "gioui.org/gpu/internal/driver"
+ "gioui.org/internal/byteslice"
+ "gioui.org/internal/f32color"
+ "gioui.org/shader"
+ "gioui.org/shader/gio"
+)
+
+type pather struct {
+ ctx driver.Device
+
+ viewport image.Point
+
+ stenciler *stenciler
+ coverer *coverer
+}
+
+type coverer struct {
+ ctx driver.Device
+ pipelines [3]*pipeline
+ texUniforms *coverTexUniforms
+ colUniforms *coverColUniforms
+ linearGradientUniforms *coverLinearGradientUniforms
+}
+
+type coverTexUniforms struct {
+ coverUniforms
+ _ [12]byte // Padding to multiple of 16.
+}
+
+type coverColUniforms struct {
+ coverUniforms
+ _ [128 - unsafe.Sizeof(coverUniforms{}) - unsafe.Sizeof(colorUniforms{})]byte // Padding to 128 bytes.
+ colorUniforms
+}
+
+type coverLinearGradientUniforms struct {
+ coverUniforms
+ _ [128 - unsafe.Sizeof(coverUniforms{}) - unsafe.Sizeof(gradientUniforms{})]byte // Padding to 128.
+ gradientUniforms
+}
+
+type coverUniforms struct {
+ transform [4]float32
+ uvCoverTransform [4]float32
+ uvTransformR1 [4]float32
+ uvTransformR2 [4]float32
+ z float32
+}
+
+type stenciler struct {
+ ctx driver.Device
+ pipeline struct {
+ pipeline *pipeline
+ uniforms *stencilUniforms
+ }
+ ipipeline struct {
+ pipeline *pipeline
+ uniforms *intersectUniforms
+ }
+ fbos fboSet
+ intersections fboSet
+ indexBuf driver.Buffer
+}
+
+type stencilUniforms struct {
+ transform [4]float32
+ pathOffset [2]float32
+ _ [8]byte // Padding to multiple of 16.
+}
+
+type intersectUniforms struct {
+ vert struct {
+ uvTransform [4]float32
+ subUVTransform [4]float32
+ }
+}
+
+type fboSet struct {
+ fbos []stencilFBO
+}
+
+type stencilFBO struct {
+ size image.Point
+ tex driver.Texture
+}
+
+type pathData struct {
+ ncurves int
+ data driver.Buffer
+}
+
+// vertex data suitable for passing to vertex programs.
+type vertex struct {
+ // Corner encodes the corner: +0.5 for south, +.25 for east.
+ Corner float32
+ MaxY float32
+ FromX, FromY float32
+ CtrlX, CtrlY float32
+ ToX, ToY float32
+}
+
+func (v vertex) encode(d []byte, maxy uint32) {
+ bo := binary.LittleEndian
+ bo.PutUint32(d[0:], math.Float32bits(v.Corner))
+ bo.PutUint32(d[4:], maxy)
+ bo.PutUint32(d[8:], math.Float32bits(v.FromX))
+ bo.PutUint32(d[12:], math.Float32bits(v.FromY))
+ bo.PutUint32(d[16:], math.Float32bits(v.CtrlX))
+ bo.PutUint32(d[20:], math.Float32bits(v.CtrlY))
+ bo.PutUint32(d[24:], math.Float32bits(v.ToX))
+ bo.PutUint32(d[28:], math.Float32bits(v.ToY))
+}
+
+const (
+ // Number of path quads per draw batch.
+ pathBatchSize = 10000
+ // Size of a vertex as sent to gpu
+ vertStride = 8 * 4
+)
+
+func newPather(ctx driver.Device) *pather {
+ return &pather{
+ ctx: ctx,
+ stenciler: newStenciler(ctx),
+ coverer: newCoverer(ctx),
+ }
+}
+
+func newCoverer(ctx driver.Device) *coverer {
+ c := &coverer{
+ ctx: ctx,
+ }
+ c.colUniforms = new(coverColUniforms)
+ c.texUniforms = new(coverTexUniforms)
+ c.linearGradientUniforms = new(coverLinearGradientUniforms)
+ pipelines, err := createColorPrograms(ctx, gio.Shader_cover_vert, gio.Shader_cover_frag,
+ [3]interface{}{c.colUniforms, c.linearGradientUniforms, c.texUniforms},
+ )
+ if err != nil {
+ panic(err)
+ }
+ c.pipelines = pipelines
+ return c
+}
+
+func newStenciler(ctx driver.Device) *stenciler {
+ // Allocate a suitably large index buffer for drawing paths.
+ indices := make([]uint16, pathBatchSize*6)
+ for i := 0; i < pathBatchSize; i++ {
+ i := uint16(i)
+ indices[i*6+0] = i*4 + 0
+ indices[i*6+1] = i*4 + 1
+ indices[i*6+2] = i*4 + 2
+ indices[i*6+3] = i*4 + 2
+ indices[i*6+4] = i*4 + 1
+ indices[i*6+5] = i*4 + 3
+ }
+ indexBuf, err := ctx.NewImmutableBuffer(driver.BufferBindingIndices, byteslice.Slice(indices))
+ if err != nil {
+ panic(err)
+ }
+ progLayout := driver.VertexLayout{
+ Inputs: []driver.InputDesc{
+ {Type: shader.DataTypeFloat, Size: 1, Offset: int(unsafe.Offsetof((*(*vertex)(nil)).Corner))},
+ {Type: shader.DataTypeFloat, Size: 1, Offset: int(unsafe.Offsetof((*(*vertex)(nil)).MaxY))},
+ {Type: shader.DataTypeFloat, Size: 2, Offset: int(unsafe.Offsetof((*(*vertex)(nil)).FromX))},
+ {Type: shader.DataTypeFloat, Size: 2, Offset: int(unsafe.Offsetof((*(*vertex)(nil)).CtrlX))},
+ {Type: shader.DataTypeFloat, Size: 2, Offset: int(unsafe.Offsetof((*(*vertex)(nil)).ToX))},
+ },
+ Stride: vertStride,
+ }
+ iprogLayout := driver.VertexLayout{
+ Inputs: []driver.InputDesc{
+ {Type: shader.DataTypeFloat, Size: 2, Offset: 0},
+ {Type: shader.DataTypeFloat, Size: 2, Offset: 4 * 2},
+ },
+ Stride: 4 * 4,
+ }
+ st := &stenciler{
+ ctx: ctx,
+ indexBuf: indexBuf,
+ }
+ vsh, fsh, err := newShaders(ctx, gio.Shader_stencil_vert, gio.Shader_stencil_frag)
+ if err != nil {
+ panic(err)
+ }
+ defer vsh.Release()
+ defer fsh.Release()
+ st.pipeline.uniforms = new(stencilUniforms)
+ vertUniforms := newUniformBuffer(ctx, st.pipeline.uniforms)
+ pipe, err := st.ctx.NewPipeline(driver.PipelineDesc{
+ VertexShader: vsh,
+ FragmentShader: fsh,
+ VertexLayout: progLayout,
+ BlendDesc: driver.BlendDesc{
+ Enable: true,
+ SrcFactor: driver.BlendFactorOne,
+ DstFactor: driver.BlendFactorOne,
+ },
+ PixelFormat: driver.TextureFormatFloat,
+ Topology: driver.TopologyTriangles,
+ })
+ st.pipeline.pipeline = &pipeline{pipe, vertUniforms}
+ if err != nil {
+ panic(err)
+ }
+ vsh, fsh, err = newShaders(ctx, gio.Shader_intersect_vert, gio.Shader_intersect_frag)
+ if err != nil {
+ panic(err)
+ }
+ defer vsh.Release()
+ defer fsh.Release()
+ st.ipipeline.uniforms = new(intersectUniforms)
+ vertUniforms = newUniformBuffer(ctx, &st.ipipeline.uniforms.vert)
+ ipipe, err := st.ctx.NewPipeline(driver.PipelineDesc{
+ VertexShader: vsh,
+ FragmentShader: fsh,
+ VertexLayout: iprogLayout,
+ BlendDesc: driver.BlendDesc{
+ Enable: true,
+ SrcFactor: driver.BlendFactorDstColor,
+ DstFactor: driver.BlendFactorZero,
+ },
+ PixelFormat: driver.TextureFormatFloat,
+ Topology: driver.TopologyTriangleStrip,
+ })
+ st.ipipeline.pipeline = &pipeline{ipipe, vertUniforms}
+ return st
+}
+
+func (s *fboSet) resize(ctx driver.Device, sizes []image.Point) {
+ // Add fbos.
+ for i := len(s.fbos); i < len(sizes); i++ {
+ s.fbos = append(s.fbos, stencilFBO{})
+ }
+ // Resize fbos.
+ for i, sz := range sizes {
+ f := &s.fbos[i]
+ // Resizing or recreating FBOs can introduce rendering stalls.
+ // Avoid if the space waste is not too high.
+ resize := sz.X > f.size.X || sz.Y > f.size.Y
+ waste := float32(sz.X*sz.Y) / float32(f.size.X*f.size.Y)
+ resize = resize || waste > 1.2
+ if resize {
+ if f.tex != nil {
+ f.tex.Release()
+ }
+ tex, err := ctx.NewTexture(driver.TextureFormatFloat, sz.X, sz.Y, driver.FilterNearest, driver.FilterNearest,
+ driver.BufferBindingTexture|driver.BufferBindingFramebuffer)
+ if err != nil {
+ panic(err)
+ }
+ f.size = sz
+ f.tex = tex
+ }
+ }
+ // Delete extra fbos.
+ s.delete(ctx, len(sizes))
+}
+
+// delete releases the textures of s.fbos[idx:] and truncates the slice.
+// ctx is unused here — presumably kept for symmetry with resize.
+// NOTE(review): assumes every retained FBO has a non-nil tex; a
+// never-resized placeholder would make tex.Release() panic — confirm
+// callers always resize before delete.
+func (s *fboSet) delete(ctx driver.Device, idx int) {
+ for i := idx; i < len(s.fbos); i++ {
+ f := s.fbos[i]
+ f.tex.Release()
+ }
+ s.fbos = s.fbos[:idx]
+}
+
+// release frees all GPU resources owned by the stenciler: both FBO sets,
+// the stencil and intersect pipelines, and the shared index buffer.
+func (s *stenciler) release() {
+ s.fbos.delete(s.ctx, 0)
+ s.intersections.delete(s.ctx, 0)
+ s.pipeline.pipeline.Release()
+ s.ipipeline.pipeline.Release()
+ s.indexBuf.Release()
+}
+
+// release frees the GPU resources of both sub-renderers.
+func (p *pather) release() {
+ p.stenciler.release()
+ p.coverer.release()
+}
+
+// release frees the coverer's per-material pipelines.
+func (c *coverer) release() {
+ for _, p := range c.pipelines {
+ p.Release()
+ }
+}
+
+// buildPath uploads encoded path vertex data p into an immutable GPU
+// vertex buffer and returns it together with the curve count
+// (len(p)/vertStride). Panics on allocation failure.
+func buildPath(ctx driver.Device, p []byte) pathData {
+ buf, err := ctx.NewImmutableBuffer(driver.BufferBindingVertices, p)
+ if err != nil {
+ panic(err)
+ }
+ return pathData{
+ ncurves: len(p) / vertStride,
+ data: buf,
+ }
+}
+
+// release frees the GPU buffer backing this path.
+func (p pathData) release() {
+ p.data.Release()
+}
+
+// begin prepares the stencil FBO set for the given frame sizes.
+func (p *pather) begin(sizes []image.Point) {
+ p.stenciler.begin(sizes)
+}
+
+// stencilPath forwards to the stenciler; see stenciler.stencilPath.
+func (p *pather) stencilPath(bounds image.Rectangle, offset f32.Point, uv image.Point, data pathData) {
+ p.stenciler.stencilPath(bounds, offset, uv, data)
+}
+
+// beginIntersect prepares the intersection FBO set for the given sizes.
+func (s *stenciler) beginIntersect(sizes []image.Point) {
+ // 8 bit coverage is enough, but OpenGL ES only supports single channel
+ // floating point formats. Replace with GL_RGB+GL_UNSIGNED_BYTE if
+ // no floating point support is available.
+ s.intersections.resize(s.ctx, sizes)
+}
+
+// cover returns the coverage FBO at index idx.
+func (s *stenciler) cover(idx int) stencilFBO {
+ return s.fbos.fbos[idx]
+}
+
+// begin resizes the stencil FBO set to match sizes for this frame.
+func (s *stenciler) begin(sizes []image.Point) {
+ s.fbos.resize(s.ctx, sizes)
+}
+
+// stencilPath renders the coverage of a path into the current stencil
+// target: it sets the viewport at uv, uploads a UI-to-clip-space
+// transform plus the path offset, then draws the curve quads in batches
+// small enough for 16-bit element indices.
+func (s *stenciler) stencilPath(bounds image.Rectangle, offset f32.Point, uv image.Point, data pathData) {
+ s.ctx.Viewport(uv.X, uv.Y, bounds.Dx(), bounds.Dy())
+ // Transform UI coordinates to OpenGL coordinates.
+ texSize := f32.Point{X: float32(bounds.Dx()), Y: float32(bounds.Dy())}
+ scale := f32.Point{X: 2 / texSize.X, Y: 2 / texSize.Y}
+ orig := f32.Point{X: -1 - float32(bounds.Min.X)*2/texSize.X, Y: -1 - float32(bounds.Min.Y)*2/texSize.Y}
+ s.pipeline.uniforms.transform = [4]float32{scale.X, scale.Y, orig.X, orig.Y}
+ s.pipeline.uniforms.pathOffset = [2]float32{offset.X, offset.Y}
+ s.pipeline.pipeline.UploadUniforms(s.ctx)
+ // Draw in batches that fit in uint16 indices.
+ start := 0
+ nquads := data.ncurves / 4
+ for start < nquads {
+ batch := nquads - start
+ if max := pathBatchSize; batch > max {
+ batch = max
+ }
+ // Rebind the vertex buffer at the batch offset; 6 indices per quad.
+ off := vertStride * start * 4
+ s.ctx.BindVertexBuffer(data.data, off)
+ s.ctx.DrawElements(0, batch*6)
+ start += batch
+ }
+}
+
+// cover forwards to the coverer; see coverer.cover.
+func (p *pather) cover(mat materialType, col f32color.RGBA, col1, col2 f32color.RGBA, scale, off f32.Point, uvTrans f32.Affine2D, coverScale, coverOff f32.Point) {
+ p.coverer.cover(mat, col, col1, col2, scale, off, uvTrans, coverScale, coverOff)
+}
+
+// cover fills the material-specific uniforms for mat (solid color,
+// linear gradient, or texture), uploads the shared transform and
+// coverage-UV transform, and draws a 4-vertex quad with that
+// material's pipeline.
+// NOTE(review): the switch has no default; an unknown materialType
+// would leave uniforms nil and panic below — confirm mat is always one
+// of the three handled values.
+func (c *coverer) cover(mat materialType, col f32color.RGBA, col1, col2 f32color.RGBA, scale, off f32.Point, uvTrans f32.Affine2D, coverScale, coverOff f32.Point) {
+ var uniforms *coverUniforms
+ switch mat {
+ case materialColor:
+ c.colUniforms.color = col
+ uniforms = &c.colUniforms.coverUniforms
+ case materialLinearGradient:
+ c.linearGradientUniforms.color1 = col1
+ c.linearGradientUniforms.color2 = col2
+
+ // Pack the 3x2 affine UV transform into two vec4 rows.
+ t1, t2, t3, t4, t5, t6 := uvTrans.Elems()
+ c.linearGradientUniforms.uvTransformR1 = [4]float32{t1, t2, t3, 0}
+ c.linearGradientUniforms.uvTransformR2 = [4]float32{t4, t5, t6, 0}
+ uniforms = &c.linearGradientUniforms.coverUniforms
+ case materialTexture:
+ t1, t2, t3, t4, t5, t6 := uvTrans.Elems()
+ c.texUniforms.uvTransformR1 = [4]float32{t1, t2, t3, 0}
+ c.texUniforms.uvTransformR2 = [4]float32{t4, t5, t6, 0}
+ uniforms = &c.texUniforms.coverUniforms
+ }
+ uniforms.transform = [4]float32{scale.X, scale.Y, off.X, off.Y}
+ uniforms.uvCoverTransform = [4]float32{coverScale.X, coverScale.Y, coverOff.X, coverOff.Y}
+ c.pipelines[mat].UploadUniforms(c.ctx)
+ c.ctx.DrawArrays(0, 4)
+}
+
+// init validates at program start that the vertex struct layout matches
+// the stride the GPU vertex data is encoded with.
+func init() {
+ // Check that struct vertex has the expected size and
+ // that it contains no padding.
+ if unsafe.Sizeof(*(*vertex)(nil)) != vertStride {
+ panic("unexpected struct size")
+ }
+}
diff --git a/vendor/gioui.org/gpu/timer.go b/vendor/gioui.org/gpu/timer.go
new file mode 100644
index 0000000..c1e3227
--- /dev/null
+++ b/vendor/gioui.org/gpu/timer.go
@@ -0,0 +1,94 @@
+// SPDX-License-Identifier: Unlicense OR MIT
+
+package gpu
+
+import (
+ "time"
+
+ "gioui.org/gpu/internal/driver"
+)
+
+// timers tracks a set of GPU timers created from one backend device.
+type timers struct {
+ backend driver.Device
+ timers []*timer
+}
+
+// timer measures the GPU duration of one begin/end span; the last
+// completed measurement is published in Elapsed.
+type timer struct {
+ Elapsed time.Duration
+ backend driver.Device
+ timer driver.Timer
+ state timerState
+}
+
+// timerState tracks a timer through its idle -> running -> waiting
+// (for the GPU result) lifecycle.
+type timerState uint8
+
+const (
+ timerIdle timerState = iota
+ timerRunning
+ timerWaiting
+)
+
+// newTimers returns an empty timer collection bound to backend b.
+func newTimers(b driver.Device) *timers {
+ return &timers{
+ backend: b,
+ }
+}
+
+// newTimer creates a backend timer, registers it with the collection
+// and returns it. A nil receiver (timing disabled) yields nil, which
+// the timer methods below accept as a no-op handle.
+func (t *timers) newTimer() *timer {
+ if t == nil {
+ return nil
+ }
+ tt := &timer{
+ backend: t.backend,
+ timer: t.backend.NewTimer(),
+ }
+ t.timers = append(t.timers, tt)
+ return tt
+}
+
+// begin starts the timer; no-op on a nil timer or when a measurement
+// is already in flight.
+func (t *timer) begin() {
+ if t == nil || t.state != timerIdle {
+ return
+ }
+ t.timer.Begin()
+ t.state = timerRunning
+}
+
+// end stops a running timer and marks it waiting for its GPU result;
+// no-op on a nil or non-running timer.
+func (t *timer) end() {
+ if t == nil || t.state != timerRunning {
+ return
+ }
+ t.timer.End()
+ t.state = timerWaiting
+}
+
+// ready reports whether every pending timer has a result. It collects
+// each finished duration into timer.Elapsed and resets the timer to
+// idle; it returns false as soon as any timer is still running or its
+// GPU query is not yet available. Even when all results are in, the
+// readings only count when the backend's clock is continuous.
+func (t *timers) ready() bool {
+ if t == nil {
+ return false
+ }
+ for _, tt := range t.timers {
+ switch tt.state {
+ case timerIdle:
+ continue
+ case timerRunning:
+ return false
+ }
+ // timerWaiting: poll the backend for the measured duration.
+ d, ok := tt.timer.Duration()
+ if !ok {
+ return false
+ }
+ tt.state = timerIdle
+ tt.Elapsed = d
+ }
+ return t.backend.IsTimeContinuous()
+}
+
+// Release frees all backend timers and empties the collection;
+// safe to call on a nil receiver.
+func (t *timers) Release() {
+ if t == nil {
+ return
+ }
+ for _, tt := range t.timers {
+ tt.timer.Release()
+ }
+ t.timers = nil
+}
diff --git a/vendor/gioui.org/internal/byteslice/byteslice.go b/vendor/gioui.org/internal/byteslice/byteslice.go
new file mode 100644
index 0000000..26ebdb2
--- /dev/null
+++ b/vendor/gioui.org/internal/byteslice/byteslice.go
@@ -0,0 +1,45 @@
+// SPDX-License-Identifier: Unlicense OR MIT
+
+// Package byteslice provides byte slice views of other Go values such as
+// slices and structs.
+package byteslice
+
+import (
+ "reflect"
+ "unsafe"
+)
+
+// Struct returns a byte slice view of a struct.
+func Struct(s interface{}) []byte {
+ v := reflect.ValueOf(s).Elem()
+ sz := int(v.Type().Size())
+ var res []byte
+ h := (*reflect.SliceHeader)(unsafe.Pointer(&res))
+ h.Data = uintptr(unsafe.Pointer(v.UnsafeAddr()))
+ h.Cap = sz
+ h.Len = sz
+ return res
+}
+
+// Uint32 returns a byte slice view of a uint32 slice.
+func Uint32(s []uint32) []byte {
+ n := len(s)
+ if n == 0 {
+ return nil
+ }
+ blen := n * int(unsafe.Sizeof(s[0]))
+ return (*[1 << 30]byte)(unsafe.Pointer(&s[0]))[:blen:blen]
+}
+
+// Slice returns a byte slice view of a slice.
+func Slice(s interface{}) []byte {
+ v := reflect.ValueOf(s)
+ first := v.Index(0)
+ sz := int(first.Type().Size())
+ var res []byte
+ h := (*reflect.SliceHeader)(unsafe.Pointer(&res))
+ h.Data = first.UnsafeAddr()
+ h.Cap = v.Cap() * sz
+ h.Len = v.Len() * sz
+ return res
+}
diff --git a/vendor/gioui.org/internal/cocoainit/cocoa_darwin.go b/vendor/gioui.org/internal/cocoainit/cocoa_darwin.go
new file mode 100644
index 0000000..2a34e57
--- /dev/null
+++ b/vendor/gioui.org/internal/cocoainit/cocoa_darwin.go
@@ -0,0 +1,20 @@
+// SPDX-License-Identifier: Unlicense OR MIT
+
+// Package cocoainit initializes support for multithreaded
+// programs in Cocoa.
+package cocoainit
+
+/*
+#cgo CFLAGS: -xobjective-c -fmodules -fobjc-arc
+#import <Foundation/Foundation.h>
+
+static inline void activate_cocoa_multithreading() {
+ [[NSThread new] start];
+}
+#pragma GCC visibility push(hidden)
+*/
+import "C"
+
+// init starts a throwaway NSThread at load time, which switches Cocoa
+// into multithreaded mode before any other threads are created.
+func init() {
+ C.activate_cocoa_multithreading()
+}
diff --git a/vendor/gioui.org/internal/d3d11/d3d11_windows.go b/vendor/gioui.org/internal/d3d11/d3d11_windows.go
new file mode 100644
index 0000000..450fde1
--- /dev/null
+++ b/vendor/gioui.org/internal/d3d11/d3d11_windows.go
@@ -0,0 +1,1682 @@
+// SPDX-License-Identifier: Unlicense OR MIT
+
+package d3d11
+
+import (
+ "fmt"
+ "math"
+ "syscall"
+ "unsafe"
+
+ "gioui.org/internal/f32color"
+
+ "golang.org/x/sys/windows"
+)
+
+type DXGI_SWAP_CHAIN_DESC struct {
+ BufferDesc DXGI_MODE_DESC
+ SampleDesc DXGI_SAMPLE_DESC
+ BufferUsage uint32
+ BufferCount uint32
+ OutputWindow windows.Handle
+ Windowed uint32
+ SwapEffect uint32
+ Flags uint32
+}
+
+type DXGI_SAMPLE_DESC struct {
+ Count uint32
+ Quality uint32
+}
+
+type DXGI_MODE_DESC struct {
+ Width uint32
+ Height uint32
+ RefreshRate DXGI_RATIONAL
+ Format uint32
+ ScanlineOrdering uint32
+ Scaling uint32
+}
+
+type DXGI_RATIONAL struct {
+ Numerator uint32
+ Denominator uint32
+}
+
+type TEXTURE2D_DESC struct {
+ Width uint32
+ Height uint32
+ MipLevels uint32
+ ArraySize uint32
+ Format uint32
+ SampleDesc DXGI_SAMPLE_DESC
+ Usage uint32
+ BindFlags uint32
+ CPUAccessFlags uint32
+ MiscFlags uint32
+}
+
+type SAMPLER_DESC struct {
+ Filter uint32
+ AddressU uint32
+ AddressV uint32
+ AddressW uint32
+ MipLODBias float32
+ MaxAnisotropy uint32
+ ComparisonFunc uint32
+ BorderColor [4]float32
+ MinLOD float32
+ MaxLOD float32
+}
+
+type SHADER_RESOURCE_VIEW_DESC_TEX2D struct {
+ SHADER_RESOURCE_VIEW_DESC
+ Texture2D TEX2D_SRV
+}
+
+type SHADER_RESOURCE_VIEW_DESC_BUFFEREX struct {
+ SHADER_RESOURCE_VIEW_DESC
+ Buffer BUFFEREX_SRV
+}
+
+type UNORDERED_ACCESS_VIEW_DESC_TEX2D struct {
+ UNORDERED_ACCESS_VIEW_DESC
+ Texture2D TEX2D_UAV
+}
+
+type UNORDERED_ACCESS_VIEW_DESC_BUFFER struct {
+ UNORDERED_ACCESS_VIEW_DESC
+ Buffer BUFFER_UAV
+}
+
+type SHADER_RESOURCE_VIEW_DESC struct {
+ Format uint32
+ ViewDimension uint32
+}
+
+type UNORDERED_ACCESS_VIEW_DESC struct {
+ Format uint32
+ ViewDimension uint32
+}
+
+type TEX2D_SRV struct {
+ MostDetailedMip uint32
+ MipLevels uint32
+}
+
+type BUFFEREX_SRV struct {
+ FirstElement uint32
+ NumElements uint32
+ Flags uint32
+}
+
+type TEX2D_UAV struct {
+ MipSlice uint32
+}
+
+type BUFFER_UAV struct {
+ FirstElement uint32
+ NumElements uint32
+ Flags uint32
+}
+
+type INPUT_ELEMENT_DESC struct {
+ SemanticName *byte
+ SemanticIndex uint32
+ Format uint32
+ InputSlot uint32
+ AlignedByteOffset uint32
+ InputSlotClass uint32
+ InstanceDataStepRate uint32
+}
+
+type IDXGISwapChain struct {
+ Vtbl *struct {
+ _IUnknownVTbl
+ SetPrivateData uintptr
+ SetPrivateDataInterface uintptr
+ GetPrivateData uintptr
+ GetParent uintptr
+ GetDevice uintptr
+ Present uintptr
+ GetBuffer uintptr
+ SetFullscreenState uintptr
+ GetFullscreenState uintptr
+ GetDesc uintptr
+ ResizeBuffers uintptr
+ ResizeTarget uintptr
+ GetContainingOutput uintptr
+ GetFrameStatistics uintptr
+ GetLastPresentCount uintptr
+ }
+}
+
+type Debug struct {
+ Vtbl *struct {
+ _IUnknownVTbl
+ SetFeatureMask uintptr
+ GetFeatureMask uintptr
+ SetPresentPerRenderOpDelay uintptr
+ GetPresentPerRenderOpDelay uintptr
+ SetSwapChain uintptr
+ GetSwapChain uintptr
+ ValidateContext uintptr
+ ReportLiveDeviceObjects uintptr
+ ValidateContextForDispatch uintptr
+ }
+}
+
+type Device struct {
+ Vtbl *struct {
+ _IUnknownVTbl
+ CreateBuffer uintptr
+ CreateTexture1D uintptr
+ CreateTexture2D uintptr
+ CreateTexture3D uintptr
+ CreateShaderResourceView uintptr
+ CreateUnorderedAccessView uintptr
+ CreateRenderTargetView uintptr
+ CreateDepthStencilView uintptr
+ CreateInputLayout uintptr
+ CreateVertexShader uintptr
+ CreateGeometryShader uintptr
+ CreateGeometryShaderWithStreamOutput uintptr
+ CreatePixelShader uintptr
+ CreateHullShader uintptr
+ CreateDomainShader uintptr
+ CreateComputeShader uintptr
+ CreateClassLinkage uintptr
+ CreateBlendState uintptr
+ CreateDepthStencilState uintptr
+ CreateRasterizerState uintptr
+ CreateSamplerState uintptr
+ CreateQuery uintptr
+ CreatePredicate uintptr
+ CreateCounter uintptr
+ CreateDeferredContext uintptr
+ OpenSharedResource uintptr
+ CheckFormatSupport uintptr
+ CheckMultisampleQualityLevels uintptr
+ CheckCounterInfo uintptr
+ CheckCounter uintptr
+ CheckFeatureSupport uintptr
+ GetPrivateData uintptr
+ SetPrivateData uintptr
+ SetPrivateDataInterface uintptr
+ GetFeatureLevel uintptr
+ GetCreationFlags uintptr
+ GetDeviceRemovedReason uintptr
+ GetImmediateContext uintptr
+ SetExceptionMode uintptr
+ GetExceptionMode uintptr
+ }
+}
+
+type DeviceContext struct {
+ Vtbl *struct {
+ _IUnknownVTbl
+ GetDevice uintptr
+ GetPrivateData uintptr
+ SetPrivateData uintptr
+ SetPrivateDataInterface uintptr
+ VSSetConstantBuffers uintptr
+ PSSetShaderResources uintptr
+ PSSetShader uintptr
+ PSSetSamplers uintptr
+ VSSetShader uintptr
+ DrawIndexed uintptr
+ Draw uintptr
+ Map uintptr
+ Unmap uintptr
+ PSSetConstantBuffers uintptr
+ IASetInputLayout uintptr
+ IASetVertexBuffers uintptr
+ IASetIndexBuffer uintptr
+ DrawIndexedInstanced uintptr
+ DrawInstanced uintptr
+ GSSetConstantBuffers uintptr
+ GSSetShader uintptr
+ IASetPrimitiveTopology uintptr
+ VSSetShaderResources uintptr
+ VSSetSamplers uintptr
+ Begin uintptr
+ End uintptr
+ GetData uintptr
+ SetPredication uintptr
+ GSSetShaderResources uintptr
+ GSSetSamplers uintptr
+ OMSetRenderTargets uintptr
+ OMSetRenderTargetsAndUnorderedAccessViews uintptr
+ OMSetBlendState uintptr
+ OMSetDepthStencilState uintptr
+ SOSetTargets uintptr
+ DrawAuto uintptr
+ DrawIndexedInstancedIndirect uintptr
+ DrawInstancedIndirect uintptr
+ Dispatch uintptr
+ DispatchIndirect uintptr
+ RSSetState uintptr
+ RSSetViewports uintptr
+ RSSetScissorRects uintptr
+ CopySubresourceRegion uintptr
+ CopyResource uintptr
+ UpdateSubresource uintptr
+ CopyStructureCount uintptr
+ ClearRenderTargetView uintptr
+ ClearUnorderedAccessViewUint uintptr
+ ClearUnorderedAccessViewFloat uintptr
+ ClearDepthStencilView uintptr
+ GenerateMips uintptr
+ SetResourceMinLOD uintptr
+ GetResourceMinLOD uintptr
+ ResolveSubresource uintptr
+ ExecuteCommandList uintptr
+ HSSetShaderResources uintptr
+ HSSetShader uintptr
+ HSSetSamplers uintptr
+ HSSetConstantBuffers uintptr
+ DSSetShaderResources uintptr
+ DSSetShader uintptr
+ DSSetSamplers uintptr
+ DSSetConstantBuffers uintptr
+ CSSetShaderResources uintptr
+ CSSetUnorderedAccessViews uintptr
+ CSSetShader uintptr
+ CSSetSamplers uintptr
+ CSSetConstantBuffers uintptr
+ VSGetConstantBuffers uintptr
+ PSGetShaderResources uintptr
+ PSGetShader uintptr
+ PSGetSamplers uintptr
+ VSGetShader uintptr
+ PSGetConstantBuffers uintptr
+ IAGetInputLayout uintptr
+ IAGetVertexBuffers uintptr
+ IAGetIndexBuffer uintptr
+ GSGetConstantBuffers uintptr
+ GSGetShader uintptr
+ IAGetPrimitiveTopology uintptr
+ VSGetShaderResources uintptr
+ VSGetSamplers uintptr
+ GetPredication uintptr
+ GSGetShaderResources uintptr
+ GSGetSamplers uintptr
+ OMGetRenderTargets uintptr
+ OMGetRenderTargetsAndUnorderedAccessViews uintptr
+ OMGetBlendState uintptr
+ OMGetDepthStencilState uintptr
+ SOGetTargets uintptr
+ RSGetState uintptr
+ RSGetViewports uintptr
+ RSGetScissorRects uintptr
+ HSGetShaderResources uintptr
+ HSGetShader uintptr
+ HSGetSamplers uintptr
+ HSGetConstantBuffers uintptr
+ DSGetShaderResources uintptr
+ DSGetShader uintptr
+ DSGetSamplers uintptr
+ DSGetConstantBuffers uintptr
+ CSGetShaderResources uintptr
+ CSGetUnorderedAccessViews uintptr
+ CSGetShader uintptr
+ CSGetSamplers uintptr
+ CSGetConstantBuffers uintptr
+ ClearState uintptr
+ Flush uintptr
+ GetType uintptr
+ GetContextFlags uintptr
+ FinishCommandList uintptr
+ }
+}
+
+type RenderTargetView struct {
+ Vtbl *struct {
+ _IUnknownVTbl
+ }
+}
+
+type Resource struct {
+ Vtbl *struct {
+ _IUnknownVTbl
+ }
+}
+
+type Texture2D struct {
+ Vtbl *struct {
+ _IUnknownVTbl
+ }
+}
+
+type Buffer struct {
+ Vtbl *struct {
+ _IUnknownVTbl
+ }
+}
+
+type SamplerState struct {
+ Vtbl *struct {
+ _IUnknownVTbl
+ }
+}
+
+type PixelShader struct {
+ Vtbl *struct {
+ _IUnknownVTbl
+ }
+}
+
+type ShaderResourceView struct {
+ Vtbl *struct {
+ _IUnknownVTbl
+ }
+}
+
+type UnorderedAccessView struct {
+ Vtbl *struct {
+ _IUnknownVTbl
+ }
+}
+
+type DepthStencilView struct {
+ Vtbl *struct {
+ _IUnknownVTbl
+ }
+}
+
+type BlendState struct {
+ Vtbl *struct {
+ _IUnknownVTbl
+ }
+}
+
+type DepthStencilState struct {
+ Vtbl *struct {
+ _IUnknownVTbl
+ }
+}
+
+type VertexShader struct {
+ Vtbl *struct {
+ _IUnknownVTbl
+ }
+}
+
+type ComputeShader struct {
+ Vtbl *struct {
+ _IUnknownVTbl
+ }
+}
+
+type RasterizerState struct {
+ Vtbl *struct {
+ _IUnknownVTbl
+ }
+}
+
+type InputLayout struct {
+ Vtbl *struct {
+ _IUnknownVTbl
+ GetBufferPointer uintptr
+ GetBufferSize uintptr
+ }
+}
+
+type DEPTH_STENCIL_DESC struct {
+ DepthEnable uint32
+ DepthWriteMask uint32
+ DepthFunc uint32
+ StencilEnable uint32
+ StencilReadMask uint8
+ StencilWriteMask uint8
+ FrontFace DEPTH_STENCILOP_DESC
+ BackFace DEPTH_STENCILOP_DESC
+}
+
+type DEPTH_STENCILOP_DESC struct {
+ StencilFailOp uint32
+ StencilDepthFailOp uint32
+ StencilPassOp uint32
+ StencilFunc uint32
+}
+
+type DEPTH_STENCIL_VIEW_DESC_TEX2D struct {
+ Format uint32
+ ViewDimension uint32
+ Flags uint32
+ Texture2D TEX2D_DSV
+}
+
+type TEX2D_DSV struct {
+ MipSlice uint32
+}
+
+type BLEND_DESC struct {
+ AlphaToCoverageEnable uint32
+ IndependentBlendEnable uint32
+ RenderTarget [8]RENDER_TARGET_BLEND_DESC
+}
+
+type RENDER_TARGET_BLEND_DESC struct {
+ BlendEnable uint32
+ SrcBlend uint32
+ DestBlend uint32
+ BlendOp uint32
+ SrcBlendAlpha uint32
+ DestBlendAlpha uint32
+ BlendOpAlpha uint32
+ RenderTargetWriteMask uint8
+}
+
+type IDXGIObject struct {
+ Vtbl *struct {
+ _IUnknownVTbl
+ SetPrivateData uintptr
+ SetPrivateDataInterface uintptr
+ GetPrivateData uintptr
+ GetParent uintptr
+ }
+}
+
+type IDXGIAdapter struct {
+ Vtbl *struct {
+ _IUnknownVTbl
+ SetPrivateData uintptr
+ SetPrivateDataInterface uintptr
+ GetPrivateData uintptr
+ GetParent uintptr
+ EnumOutputs uintptr
+ GetDesc uintptr
+ CheckInterfaceSupport uintptr
+ GetDesc1 uintptr
+ }
+}
+
+type IDXGIFactory struct {
+ Vtbl *struct {
+ _IUnknownVTbl
+ SetPrivateData uintptr
+ SetPrivateDataInterface uintptr
+ GetPrivateData uintptr
+ GetParent uintptr
+ EnumAdapters uintptr
+ MakeWindowAssociation uintptr
+ GetWindowAssociation uintptr
+ CreateSwapChain uintptr
+ CreateSoftwareAdapter uintptr
+ }
+}
+
+type IDXGIDebug struct {
+ Vtbl *struct {
+ _IUnknownVTbl
+ ReportLiveObjects uintptr
+ }
+}
+
+type IDXGIDevice struct {
+ Vtbl *struct {
+ _IUnknownVTbl
+ SetPrivateData uintptr
+ SetPrivateDataInterface uintptr
+ GetPrivateData uintptr
+ GetParent uintptr
+ GetAdapter uintptr
+ CreateSurface uintptr
+ QueryResourceResidency uintptr
+ SetGPUThreadPriority uintptr
+ GetGPUThreadPriority uintptr
+ }
+}
+
+type IUnknown struct {
+ Vtbl *struct {
+ _IUnknownVTbl
+ }
+}
+
+type _IUnknownVTbl struct {
+ QueryInterface uintptr
+ AddRef uintptr
+ Release uintptr
+}
+
+type BUFFER_DESC struct {
+ ByteWidth uint32
+ Usage uint32
+ BindFlags uint32
+ CPUAccessFlags uint32
+ MiscFlags uint32
+ StructureByteStride uint32
+}
+
+type GUID struct {
+ Data1 uint32
+ Data2 uint16
+ Data3 uint16
+ Data4_0 uint8
+ Data4_1 uint8
+ Data4_2 uint8
+ Data4_3 uint8
+ Data4_4 uint8
+ Data4_5 uint8
+ Data4_6 uint8
+ Data4_7 uint8
+}
+
+type VIEWPORT struct {
+ TopLeftX float32
+ TopLeftY float32
+ Width float32
+ Height float32
+ MinDepth float32
+ MaxDepth float32
+}
+
+type SUBRESOURCE_DATA struct {
+ pSysMem *byte
+}
+
+type BOX struct {
+ Left uint32
+ Top uint32
+ Front uint32
+ Right uint32
+ Bottom uint32
+ Back uint32
+}
+
+type MAPPED_SUBRESOURCE struct {
+ PData uintptr
+ RowPitch uint32
+ DepthPitch uint32
+}
+
+type ErrorCode struct {
+ Name string
+ Code uint32
+}
+
+type RASTERIZER_DESC struct {
+ FillMode uint32
+ CullMode uint32
+ FrontCounterClockwise uint32
+ DepthBias int32
+ DepthBiasClamp float32
+ SlopeScaledDepthBias float32
+ DepthClipEnable uint32
+ ScissorEnable uint32
+ MultisampleEnable uint32
+ AntialiasedLineEnable uint32
+}
+
+var (
+ IID_Texture2D = GUID{0x6f15aaf2, 0xd208, 0x4e89, 0x9a, 0xb4, 0x48, 0x95, 0x35, 0xd3, 0x4f, 0x9c}
+ IID_IDXGIDebug = GUID{0x119E7452, 0xDE9E, 0x40fe, 0x88, 0x06, 0x88, 0xF9, 0x0C, 0x12, 0xB4, 0x41}
+ IID_IDXGIDevice = GUID{0x54ec77fa, 0x1377, 0x44e6, 0x8c, 0x32, 0x88, 0xfd, 0x5f, 0x44, 0xc8, 0x4c}
+ IID_IDXGIFactory = GUID{0x7b7166ec, 0x21c7, 0x44ae, 0xb2, 0x1a, 0xc9, 0xae, 0x32, 0x1a, 0xe3, 0x69}
+ IID_ID3D11Debug = GUID{0x79cf2233, 0x7536, 0x4948, 0x9d, 0x36, 0x1e, 0x46, 0x92, 0xdc, 0x57, 0x60}
+
+ DXGI_DEBUG_ALL = GUID{0xe48ae283, 0xda80, 0x490b, 0x87, 0xe6, 0x43, 0xe9, 0xa9, 0xcf, 0xda, 0x8}
+)
+
+var (
+ d3d11 = windows.NewLazySystemDLL("d3d11.dll")
+
+ _D3D11CreateDevice = d3d11.NewProc("D3D11CreateDevice")
+ _D3D11CreateDeviceAndSwapChain = d3d11.NewProc("D3D11CreateDeviceAndSwapChain")
+
+ dxgi = windows.NewLazySystemDLL("dxgi.dll")
+
+ _DXGIGetDebugInterface1 = dxgi.NewProc("DXGIGetDebugInterface1")
+)
+
+const (
+ SDK_VERSION = 7
+ DRIVER_TYPE_HARDWARE = 1
+
+ DXGI_FORMAT_UNKNOWN = 0
+ DXGI_FORMAT_R16_FLOAT = 54
+ DXGI_FORMAT_R32_FLOAT = 41
+ DXGI_FORMAT_R32_TYPELESS = 39
+ DXGI_FORMAT_R32G32_FLOAT = 16
+ DXGI_FORMAT_R32G32B32_FLOAT = 6
+ DXGI_FORMAT_R32G32B32A32_FLOAT = 2
+ DXGI_FORMAT_R8G8B8A8_UNORM = 28
+ DXGI_FORMAT_R8G8B8A8_UNORM_SRGB = 29
+ DXGI_FORMAT_R16_SINT = 59
+ DXGI_FORMAT_R16G16_SINT = 38
+ DXGI_FORMAT_R16_UINT = 57
+ DXGI_FORMAT_D24_UNORM_S8_UINT = 45
+ DXGI_FORMAT_R16G16_FLOAT = 34
+ DXGI_FORMAT_R16G16B16A16_FLOAT = 10
+
+ DXGI_DEBUG_RLO_SUMMARY = 0x1
+ DXGI_DEBUG_RLO_DETAIL = 0x2
+ DXGI_DEBUG_RLO_IGNORE_INTERNAL = 0x4
+
+ FORMAT_SUPPORT_TEXTURE2D = 0x20
+ FORMAT_SUPPORT_RENDER_TARGET = 0x4000
+
+ DXGI_USAGE_RENDER_TARGET_OUTPUT = 1 << (1 + 4)
+
+ CPU_ACCESS_READ = 0x20000
+
+ MAP_READ = 1
+
+ DXGI_SWAP_EFFECT_DISCARD = 0
+
+ FEATURE_LEVEL_9_1 = 0x9100
+ FEATURE_LEVEL_9_3 = 0x9300
+ FEATURE_LEVEL_11_0 = 0xb000
+
+ USAGE_IMMUTABLE = 1
+ USAGE_STAGING = 3
+
+ BIND_VERTEX_BUFFER = 0x1
+ BIND_INDEX_BUFFER = 0x2
+ BIND_CONSTANT_BUFFER = 0x4
+ BIND_SHADER_RESOURCE = 0x8
+ BIND_RENDER_TARGET = 0x20
+ BIND_DEPTH_STENCIL = 0x40
+ BIND_UNORDERED_ACCESS = 0x80
+
+ PRIMITIVE_TOPOLOGY_TRIANGLELIST = 4
+ PRIMITIVE_TOPOLOGY_TRIANGLESTRIP = 5
+
+ FILTER_MIN_MAG_LINEAR_MIP_POINT = 0x14
+ FILTER_MIN_MAG_MIP_POINT = 0
+
+ TEXTURE_ADDRESS_MIRROR = 2
+ TEXTURE_ADDRESS_CLAMP = 3
+ TEXTURE_ADDRESS_WRAP = 1
+
+ SRV_DIMENSION_BUFFER = 1
+ UAV_DIMENSION_BUFFER = 1
+ SRV_DIMENSION_BUFFEREX = 11
+ SRV_DIMENSION_TEXTURE2D = 4
+ UAV_DIMENSION_TEXTURE2D = 4
+
+ BUFFER_UAV_FLAG_RAW = 0x1
+ BUFFEREX_SRV_FLAG_RAW = 0x1
+
+ RESOURCE_MISC_BUFFER_ALLOW_RAW_VIEWS = 0x20
+
+ CREATE_DEVICE_DEBUG = 0x2
+
+ FILL_SOLID = 3
+
+ CULL_NONE = 1
+
+ CLEAR_DEPTH = 0x1
+ CLEAR_STENCIL = 0x2
+
+ DSV_DIMENSION_TEXTURE2D = 3
+
+ DEPTH_WRITE_MASK_ALL = 1
+
+ COMPARISON_GREATER = 5
+ COMPARISON_GREATER_EQUAL = 7
+
+ BLEND_OP_ADD = 1
+ BLEND_ONE = 2
+ BLEND_INV_SRC_ALPHA = 6
+ BLEND_ZERO = 1
+ BLEND_DEST_COLOR = 9
+ BLEND_DEST_ALPHA = 7
+
+ COLOR_WRITE_ENABLE_ALL = 1 | 2 | 4 | 8
+
+ DXGI_STATUS_OCCLUDED = 0x087A0001
+ DXGI_ERROR_DEVICE_RESET = 0x887A0007
+ DXGI_ERROR_DEVICE_REMOVED = 0x887A0005
+ D3DDDIERR_DEVICEREMOVED = 1<<31 | 0x876<<16 | 2160
+
+ RLDO_SUMMARY = 1
+ RLDO_DETAIL = 2
+ RLDO_IGNORE_INTERNAL = 4
+)
+
+// CreateDevice wraps D3D11CreateDevice with the default adapter and no
+// explicit feature-level list, returning the device, its immediate
+// context and the feature level the runtime selected.
+func CreateDevice(driverType uint32, flags uint32) (*Device, *DeviceContext, uint32, error) {
+ var (
+ dev *Device
+ ctx *DeviceContext
+ featLvl uint32
+ )
+ r, _, _ := _D3D11CreateDevice.Call(
+ 0, // pAdapter
+ uintptr(driverType), // driverType
+ 0, // Software
+ uintptr(flags), // Flags
+ 0, // pFeatureLevels
+ 0, // FeatureLevels
+ SDK_VERSION, // SDKVersion
+ uintptr(unsafe.Pointer(&dev)), // ppDevice
+ uintptr(unsafe.Pointer(&featLvl)), // pFeatureLevel
+ uintptr(unsafe.Pointer(&ctx)), // ppImmediateContext
+ )
+ if r != 0 {
+ return nil, nil, 0, ErrorCode{Name: "D3D11CreateDevice", Code: uint32(r)}
+ }
+ return dev, ctx, featLvl, nil
+}
+
+// CreateDeviceAndSwapChain wraps D3D11CreateDeviceAndSwapChain,
+// creating device, immediate context and a swap chain described by
+// swapDesc in one call.
+func CreateDeviceAndSwapChain(driverType uint32, flags uint32, swapDesc *DXGI_SWAP_CHAIN_DESC) (*Device, *DeviceContext, *IDXGISwapChain, uint32, error) {
+ var (
+ dev *Device
+ ctx *DeviceContext
+ swchain *IDXGISwapChain
+ featLvl uint32
+ )
+ r, _, _ := _D3D11CreateDeviceAndSwapChain.Call(
+ 0, // pAdapter
+ uintptr(driverType), // driverType
+ 0, // Software
+ uintptr(flags), // Flags
+ 0, // pFeatureLevels
+ 0, // FeatureLevels
+ SDK_VERSION, // SDKVersion
+ uintptr(unsafe.Pointer(swapDesc)), // pSwapChainDesc
+ uintptr(unsafe.Pointer(&swchain)), // ppSwapChain
+ uintptr(unsafe.Pointer(&dev)), // ppDevice
+ uintptr(unsafe.Pointer(&featLvl)), // pFeatureLevel
+ uintptr(unsafe.Pointer(&ctx)), // ppImmediateContext
+ )
+ if r != 0 {
+ return nil, nil, nil, 0, ErrorCode{Name: "D3D11CreateDeviceAndSwapChain", Code: uint32(r)}
+ }
+ return dev, ctx, swchain, featLvl, nil
+}
+
+// DXGIGetDebugInterface1 fetches the IDXGIDebug interface used for
+// live-object leak reporting.
+func DXGIGetDebugInterface1() (*IDXGIDebug, error) {
+ var dbg *IDXGIDebug
+ r, _, _ := _DXGIGetDebugInterface1.Call(
+ 0, // Flags
+ uintptr(unsafe.Pointer(&IID_IDXGIDebug)),
+ uintptr(unsafe.Pointer(&dbg)),
+ )
+ if r != 0 {
+ return nil, ErrorCode{Name: "DXGIGetDebugInterface1", Code: uint32(r)}
+ }
+ return dbg, nil
+}
+
+// ReportLiveObjects dumps all live DXGI objects (debug builds) and
+// releases the debug interface afterwards.
+func ReportLiveObjects() error {
+ dxgi, err := DXGIGetDebugInterface1()
+ if err != nil {
+ return err
+ }
+ defer IUnknownRelease(unsafe.Pointer(dxgi), dxgi.Vtbl.Release)
+ dxgi.ReportLiveObjects(&DXGI_DEBUG_ALL, DXGI_DEBUG_RLO_DETAIL|DXGI_DEBUG_RLO_IGNORE_INTERNAL)
+ return nil
+}
+
+// ReportLiveObjects invokes IDXGIDebug::ReportLiveObjects for the
+// producers selected by guid with the given RLO flags.
+func (d *IDXGIDebug) ReportLiveObjects(guid *GUID, flags uint32) {
+ syscall.Syscall6(
+ d.Vtbl.ReportLiveObjects,
+ 3,
+ uintptr(unsafe.Pointer(d)),
+ uintptr(unsafe.Pointer(guid)),
+ uintptr(flags),
+ 0,
+ 0,
+ 0,
+ )
+}
+
+// CheckFormatSupport returns the FORMAT_SUPPORT_* bit mask describing
+// what the device can do with the given DXGI format.
+func (d *Device) CheckFormatSupport(format uint32) (uint32, error) {
+ var support uint32
+ r, _, _ := syscall.Syscall(
+ d.Vtbl.CheckFormatSupport,
+ 3,
+ uintptr(unsafe.Pointer(d)),
+ uintptr(format),
+ uintptr(unsafe.Pointer(&support)),
+ )
+ if r != 0 {
+ return 0, ErrorCode{Name: "DeviceCheckFormatSupport", Code: uint32(r)}
+ }
+ return support, nil
+}
+
+// CreateBuffer creates a GPU buffer per desc, optionally initialized
+// with data (nil initial data when data is empty).
+func (d *Device) CreateBuffer(desc *BUFFER_DESC, data []byte) (*Buffer, error) {
+ var dataDesc *SUBRESOURCE_DATA
+ if len(data) > 0 {
+ dataDesc = &SUBRESOURCE_DATA{
+ pSysMem: &data[0],
+ }
+ }
+ var buf *Buffer
+ r, _, _ := syscall.Syscall6(
+ d.Vtbl.CreateBuffer,
+ 4,
+ uintptr(unsafe.Pointer(d)),
+ uintptr(unsafe.Pointer(desc)),
+ uintptr(unsafe.Pointer(dataDesc)),
+ uintptr(unsafe.Pointer(&buf)),
+ 0, 0,
+ )
+ if r != 0 {
+ return nil, ErrorCode{Name: "DeviceCreateBuffer", Code: uint32(r)}
+ }
+ return buf, nil
+}
+
+// CreateDepthStencilViewTEX2D creates a depth/stencil view of a 2D
+// texture resource using the given view description.
+func (d *Device) CreateDepthStencilViewTEX2D(res *Resource, desc *DEPTH_STENCIL_VIEW_DESC_TEX2D) (*DepthStencilView, error) {
+ var view *DepthStencilView
+ r, _, _ := syscall.Syscall6(
+ d.Vtbl.CreateDepthStencilView,
+ 4,
+ uintptr(unsafe.Pointer(d)),
+ uintptr(unsafe.Pointer(res)),
+ uintptr(unsafe.Pointer(desc)),
+ uintptr(unsafe.Pointer(&view)),
+ 0, 0,
+ )
+ if r != 0 {
+ return nil, ErrorCode{Name: "DeviceCreateDepthStencilView", Code: uint32(r)}
+ }
+ return view, nil
+}
+
+// CreatePixelShader creates a pixel shader from compiled HLSL bytecode.
+// NOTE(review): &bytecode[0] assumes non-empty bytecode — empty input
+// would panic before the call.
+func (d *Device) CreatePixelShader(bytecode []byte) (*PixelShader, error) {
+ var shader *PixelShader
+ r, _, _ := syscall.Syscall6(
+ d.Vtbl.CreatePixelShader,
+ 5,
+ uintptr(unsafe.Pointer(d)),
+ uintptr(unsafe.Pointer(&bytecode[0])),
+ uintptr(len(bytecode)),
+ 0, // pClassLinkage
+ uintptr(unsafe.Pointer(&shader)),
+ 0,
+ )
+ if r != 0 {
+ return nil, ErrorCode{Name: "DeviceCreatePixelShader", Code: uint32(r)}
+ }
+ return shader, nil
+}
+
+// CreateVertexShader creates a vertex shader from compiled HLSL bytecode.
+func (d *Device) CreateVertexShader(bytecode []byte) (*VertexShader, error) {
+ var shader *VertexShader
+ r, _, _ := syscall.Syscall6(
+ d.Vtbl.CreateVertexShader,
+ 5,
+ uintptr(unsafe.Pointer(d)),
+ uintptr(unsafe.Pointer(&bytecode[0])),
+ uintptr(len(bytecode)),
+ 0, // pClassLinkage
+ uintptr(unsafe.Pointer(&shader)),
+ 0,
+ )
+ if r != 0 {
+ return nil, ErrorCode{Name: "DeviceCreateVertexShader", Code: uint32(r)}
+ }
+ return shader, nil
+}
+
+// CreateComputeShader creates a compute shader from compiled HLSL bytecode.
+func (d *Device) CreateComputeShader(bytecode []byte) (*ComputeShader, error) {
+ var shader *ComputeShader
+ r, _, _ := syscall.Syscall6(
+ d.Vtbl.CreateComputeShader,
+ 5,
+ uintptr(unsafe.Pointer(d)),
+ uintptr(unsafe.Pointer(&bytecode[0])),
+ uintptr(len(bytecode)),
+ 0, // pClassLinkage
+ uintptr(unsafe.Pointer(&shader)),
+ 0,
+ )
+ if r != 0 {
+ return nil, ErrorCode{Name: "DeviceCreateComputeShader", Code: uint32(r)}
+ }
+ return shader, nil
+}
+
+// CreateShaderResourceView creates an SRV over res. desc points to one
+// of the SHADER_RESOURCE_VIEW_DESC_* variants (or is nil for defaults),
+// hence the untyped unsafe.Pointer.
+func (d *Device) CreateShaderResourceView(res *Resource, desc unsafe.Pointer) (*ShaderResourceView, error) {
+ var resView *ShaderResourceView
+ r, _, _ := syscall.Syscall6(
+ d.Vtbl.CreateShaderResourceView,
+ 4,
+ uintptr(unsafe.Pointer(d)),
+ uintptr(unsafe.Pointer(res)),
+ uintptr(desc),
+ uintptr(unsafe.Pointer(&resView)),
+ 0, 0,
+ )
+ if r != 0 {
+ return nil, ErrorCode{Name: "DeviceCreateShaderResourceView", Code: uint32(r)}
+ }
+ return resView, nil
+}
+
+// CreateUnorderedAccessView creates a UAV over res. desc points to one
+// of the UNORDERED_ACCESS_VIEW_DESC_* variants.
+func (d *Device) CreateUnorderedAccessView(res *Resource, desc unsafe.Pointer) (*UnorderedAccessView, error) {
+ var uaView *UnorderedAccessView
+ r, _, _ := syscall.Syscall6(
+ d.Vtbl.CreateUnorderedAccessView,
+ 4,
+ uintptr(unsafe.Pointer(d)),
+ uintptr(unsafe.Pointer(res)),
+ uintptr(desc),
+ uintptr(unsafe.Pointer(&uaView)),
+ 0, 0,
+ )
+ if r != 0 {
+ return nil, ErrorCode{Name: "DeviceCreateUnorderedAccessView", Code: uint32(r)}
+ }
+ return uaView, nil
+}
+
+// CreateRasterizerState creates a rasterizer state object from desc.
+func (d *Device) CreateRasterizerState(desc *RASTERIZER_DESC) (*RasterizerState, error) {
+ var state *RasterizerState
+ r, _, _ := syscall.Syscall(
+ d.Vtbl.CreateRasterizerState,
+ 3,
+ uintptr(unsafe.Pointer(d)),
+ uintptr(unsafe.Pointer(desc)),
+ uintptr(unsafe.Pointer(&state)),
+ )
+ if r != 0 {
+ return nil, ErrorCode{Name: "DeviceCreateRasterizerState", Code: uint32(r)}
+ }
+ return state, nil
+}
+
+// CreateInputLayout creates a vertex input layout from descs, validated
+// against the given vertex shader bytecode.
+func (d *Device) CreateInputLayout(descs []INPUT_ELEMENT_DESC, bytecode []byte) (*InputLayout, error) {
+ var pdesc *INPUT_ELEMENT_DESC
+ if len(descs) > 0 {
+ pdesc = &descs[0]
+ }
+ var layout *InputLayout
+ r, _, _ := syscall.Syscall6(
+ d.Vtbl.CreateInputLayout,
+ 6,
+ uintptr(unsafe.Pointer(d)),
+ uintptr(unsafe.Pointer(pdesc)),
+ uintptr(len(descs)),
+ uintptr(unsafe.Pointer(&bytecode[0])),
+ uintptr(len(bytecode)),
+ uintptr(unsafe.Pointer(&layout)),
+ )
+ if r != 0 {
+ return nil, ErrorCode{Name: "DeviceCreateInputLayout", Code: uint32(r)}
+ }
+ return layout, nil
+}
+
+// CreateSamplerState creates a sampler state object from desc.
+func (d *Device) CreateSamplerState(desc *SAMPLER_DESC) (*SamplerState, error) {
+ var sampler *SamplerState
+ r, _, _ := syscall.Syscall(
+ d.Vtbl.CreateSamplerState,
+ 3,
+ uintptr(unsafe.Pointer(d)),
+ uintptr(unsafe.Pointer(desc)),
+ uintptr(unsafe.Pointer(&sampler)),
+ )
+ if r != 0 {
+ return nil, ErrorCode{Name: "DeviceCreateSamplerState", Code: uint32(r)}
+ }
+ return sampler, nil
+}
+
+// CreateTexture2D creates an uninitialized 2D texture from desc
+// (no initial data is passed).
+func (d *Device) CreateTexture2D(desc *TEXTURE2D_DESC) (*Texture2D, error) {
+ var tex *Texture2D
+ r, _, _ := syscall.Syscall6(
+ d.Vtbl.CreateTexture2D,
+ 4,
+ uintptr(unsafe.Pointer(d)),
+ uintptr(unsafe.Pointer(desc)),
+ 0, // pInitialData
+ uintptr(unsafe.Pointer(&tex)),
+ 0, 0,
+ )
+ if r != 0 {
+ return nil, ErrorCode{Name: "CreateTexture2D", Code: uint32(r)}
+ }
+ return tex, nil
+}
+
+// CreateRenderTargetView creates an RTV over res using the resource's
+// default view description (pDesc is NULL).
+func (d *Device) CreateRenderTargetView(res *Resource) (*RenderTargetView, error) {
+ var target *RenderTargetView
+ r, _, _ := syscall.Syscall6(
+ d.Vtbl.CreateRenderTargetView,
+ 4,
+ uintptr(unsafe.Pointer(d)),
+ uintptr(unsafe.Pointer(res)),
+ 0, // pDesc
+ uintptr(unsafe.Pointer(&target)),
+ 0, 0,
+ )
+ if r != 0 {
+ return nil, ErrorCode{Name: "DeviceCreateRenderTargetView", Code: uint32(r)}
+ }
+ return target, nil
+}
+
+// CreateBlendState creates a blend state object from desc.
+func (d *Device) CreateBlendState(desc *BLEND_DESC) (*BlendState, error) {
+ var state *BlendState
+ r, _, _ := syscall.Syscall(
+ d.Vtbl.CreateBlendState,
+ 3,
+ uintptr(unsafe.Pointer(d)),
+ uintptr(unsafe.Pointer(desc)),
+ uintptr(unsafe.Pointer(&state)),
+ )
+ if r != 0 {
+ return nil, ErrorCode{Name: "DeviceCreateBlendState", Code: uint32(r)}
+ }
+ return state, nil
+}
+
+// CreateDepthStencilState creates a depth/stencil state object from desc.
+func (d *Device) CreateDepthStencilState(desc *DEPTH_STENCIL_DESC) (*DepthStencilState, error) {
+ var state *DepthStencilState
+ r, _, _ := syscall.Syscall(
+ d.Vtbl.CreateDepthStencilState,
+ 3,
+ uintptr(unsafe.Pointer(d)),
+ uintptr(unsafe.Pointer(desc)),
+ uintptr(unsafe.Pointer(&state)),
+ )
+ if r != 0 {
+ return nil, ErrorCode{Name: "DeviceCreateDepthStencilState", Code: uint32(r)}
+ }
+ return state, nil
+}
+
+// GetFeatureLevel returns the device's feature level
+// (a FEATURE_LEVEL_* value).
+func (d *Device) GetFeatureLevel() int {
+ lvl, _, _ := syscall.Syscall(
+ d.Vtbl.GetFeatureLevel,
+ 1,
+ uintptr(unsafe.Pointer(d)),
+ 0, 0,
+ )
+ return int(lvl)
+}
+
+func (d *Device) GetImmediateContext() *DeviceContext {
+ var ctx *DeviceContext
+ syscall.Syscall(
+ d.Vtbl.GetImmediateContext,
+ 2,
+ uintptr(unsafe.Pointer(d)),
+ uintptr(unsafe.Pointer(&ctx)),
+ 0,
+ )
+ return ctx
+}
+
+func (d *Device) ReportLiveDeviceObjects() error {
+ intf, err := IUnknownQueryInterface(unsafe.Pointer(d), d.Vtbl.QueryInterface, &IID_ID3D11Debug)
+ if err != nil {
+ return fmt.Errorf("ReportLiveObjects: failed to query ID3D11Debug interface: %v", err)
+ }
+ defer IUnknownRelease(unsafe.Pointer(intf), intf.Vtbl.Release)
+ dbg := (*Debug)(unsafe.Pointer(intf))
+ dbg.ReportLiveDeviceObjects(RLDO_DETAIL | RLDO_IGNORE_INTERNAL)
+ return nil
+}
+
+func (d *Debug) ReportLiveDeviceObjects(flags uint32) {
+ syscall.Syscall(
+ d.Vtbl.ReportLiveDeviceObjects,
+ 2,
+ uintptr(unsafe.Pointer(d)),
+ uintptr(flags),
+ 0,
+ )
+}
+
+func (s *IDXGISwapChain) GetDesc() (DXGI_SWAP_CHAIN_DESC, error) {
+ var desc DXGI_SWAP_CHAIN_DESC
+ r, _, _ := syscall.Syscall(
+ s.Vtbl.GetDesc,
+ 2,
+ uintptr(unsafe.Pointer(s)),
+ uintptr(unsafe.Pointer(&desc)),
+ 0,
+ )
+ if r != 0 {
+ return DXGI_SWAP_CHAIN_DESC{}, ErrorCode{Name: "IDXGISwapChainGetDesc", Code: uint32(r)}
+ }
+ return desc, nil
+}
+
+func (s *IDXGISwapChain) ResizeBuffers(buffers, width, height, newFormat, flags uint32) error {
+ r, _, _ := syscall.Syscall6(
+ s.Vtbl.ResizeBuffers,
+ 6,
+ uintptr(unsafe.Pointer(s)),
+ uintptr(buffers),
+ uintptr(width),
+ uintptr(height),
+ uintptr(newFormat),
+ uintptr(flags),
+ )
+ if r != 0 {
+ return ErrorCode{Name: "IDXGISwapChainResizeBuffers", Code: uint32(r)}
+ }
+ return nil
+}
+
+func (s *IDXGISwapChain) Present(SyncInterval int, Flags uint32) error {
+ r, _, _ := syscall.Syscall(
+ s.Vtbl.Present,
+ 3,
+ uintptr(unsafe.Pointer(s)),
+ uintptr(SyncInterval),
+ uintptr(Flags),
+ )
+ if r != 0 {
+ return ErrorCode{Name: "IDXGISwapChainPresent", Code: uint32(r)}
+ }
+ return nil
+}
+
+func (s *IDXGISwapChain) GetBuffer(index int, riid *GUID) (*IUnknown, error) {
+ var buf *IUnknown
+ r, _, _ := syscall.Syscall6(
+ s.Vtbl.GetBuffer,
+ 4,
+ uintptr(unsafe.Pointer(s)),
+ uintptr(index),
+ uintptr(unsafe.Pointer(riid)),
+ uintptr(unsafe.Pointer(&buf)),
+ 0,
+ 0,
+ )
+ if r != 0 {
+ return nil, ErrorCode{Name: "IDXGISwapChainGetBuffer", Code: uint32(r)}
+ }
+ return buf, nil
+}
+
+func (c *DeviceContext) Unmap(resource *Resource, subResource uint32) {
+ syscall.Syscall(
+ c.Vtbl.Unmap,
+ 3,
+ uintptr(unsafe.Pointer(c)),
+ uintptr(unsafe.Pointer(resource)),
+ uintptr(subResource),
+ )
+}
+
+func (c *DeviceContext) Map(resource *Resource, subResource, mapType, mapFlags uint32) (MAPPED_SUBRESOURCE, error) {
+ var resMap MAPPED_SUBRESOURCE
+ r, _, _ := syscall.Syscall6(
+ c.Vtbl.Map,
+ 6,
+ uintptr(unsafe.Pointer(c)),
+ uintptr(unsafe.Pointer(resource)),
+ uintptr(subResource),
+ uintptr(mapType),
+ uintptr(mapFlags),
+ uintptr(unsafe.Pointer(&resMap)),
+ )
+ if r != 0 {
+ return resMap, ErrorCode{Name: "DeviceContextMap", Code: uint32(r)}
+ }
+ return resMap, nil
+}
+
+func (c *DeviceContext) CopySubresourceRegion(dst *Resource, dstSubresource, dstX, dstY, dstZ uint32, src *Resource, srcSubresource uint32, srcBox *BOX) {
+ syscall.Syscall9(
+ c.Vtbl.CopySubresourceRegion,
+ 9,
+ uintptr(unsafe.Pointer(c)),
+ uintptr(unsafe.Pointer(dst)),
+ uintptr(dstSubresource),
+ uintptr(dstX),
+ uintptr(dstY),
+ uintptr(dstZ),
+ uintptr(unsafe.Pointer(src)),
+ uintptr(srcSubresource),
+ uintptr(unsafe.Pointer(srcBox)),
+ )
+}
+
+func (c *DeviceContext) ClearDepthStencilView(target *DepthStencilView, flags uint32, depth float32, stencil uint8) {
+ syscall.Syscall6(
+ c.Vtbl.ClearDepthStencilView,
+ 5,
+ uintptr(unsafe.Pointer(c)),
+ uintptr(unsafe.Pointer(target)),
+ uintptr(flags),
+ uintptr(math.Float32bits(depth)),
+ uintptr(stencil),
+ 0,
+ )
+}
+
+func (c *DeviceContext) ClearRenderTargetView(target *RenderTargetView, color *[4]float32) {
+ syscall.Syscall(
+ c.Vtbl.ClearRenderTargetView,
+ 3,
+ uintptr(unsafe.Pointer(c)),
+ uintptr(unsafe.Pointer(target)),
+ uintptr(unsafe.Pointer(color)),
+ )
+}
+
+func (c *DeviceContext) CSSetShaderResources(startSlot uint32, s *ShaderResourceView) {
+ syscall.Syscall6(
+ c.Vtbl.CSSetShaderResources,
+ 4,
+ uintptr(unsafe.Pointer(c)),
+ uintptr(startSlot),
+ 1, // NumViews
+ uintptr(unsafe.Pointer(&s)),
+ 0, 0,
+ )
+}
+
+func (c *DeviceContext) CSSetUnorderedAccessViews(startSlot uint32, v *UnorderedAccessView) {
+ syscall.Syscall6(
+ c.Vtbl.CSSetUnorderedAccessViews,
+ 4,
+ uintptr(unsafe.Pointer(c)),
+ uintptr(startSlot),
+ 1, // NumViews
+ uintptr(unsafe.Pointer(&v)),
+ 0, 0,
+ )
+}
+
+func (c *DeviceContext) CSSetShader(s *ComputeShader) {
+ syscall.Syscall6(
+ c.Vtbl.CSSetShader,
+ 4,
+ uintptr(unsafe.Pointer(c)),
+ uintptr(unsafe.Pointer(s)),
+ 0, // ppClassInstances
+ 0, // NumClassInstances
+ 0, 0,
+ )
+}
+
+func (c *DeviceContext) RSSetViewports(viewport *VIEWPORT) {
+ syscall.Syscall(
+ c.Vtbl.RSSetViewports,
+ 3,
+ uintptr(unsafe.Pointer(c)),
+ 1, // NumViewports
+ uintptr(unsafe.Pointer(viewport)),
+ )
+}
+
+func (c *DeviceContext) VSSetShader(s *VertexShader) {
+ syscall.Syscall6(
+ c.Vtbl.VSSetShader,
+ 4,
+ uintptr(unsafe.Pointer(c)),
+ uintptr(unsafe.Pointer(s)),
+ 0, // ppClassInstances
+ 0, // NumClassInstances
+ 0, 0,
+ )
+}
+
+func (c *DeviceContext) VSSetConstantBuffers(b *Buffer) {
+ syscall.Syscall6(
+ c.Vtbl.VSSetConstantBuffers,
+ 4,
+ uintptr(unsafe.Pointer(c)),
+ 0, // StartSlot
+ 1, // NumBuffers
+ uintptr(unsafe.Pointer(&b)),
+ 0, 0,
+ )
+}
+
+func (c *DeviceContext) PSSetConstantBuffers(b *Buffer) {
+ syscall.Syscall6(
+ c.Vtbl.PSSetConstantBuffers,
+ 4,
+ uintptr(unsafe.Pointer(c)),
+ 0, // StartSlot
+ 1, // NumBuffers
+ uintptr(unsafe.Pointer(&b)),
+ 0, 0,
+ )
+}
+
+func (c *DeviceContext) PSSetShaderResources(startSlot uint32, s *ShaderResourceView) {
+ syscall.Syscall6(
+ c.Vtbl.PSSetShaderResources,
+ 4,
+ uintptr(unsafe.Pointer(c)),
+ uintptr(startSlot),
+ 1, // NumViews
+ uintptr(unsafe.Pointer(&s)),
+ 0, 0,
+ )
+}
+
+func (c *DeviceContext) PSSetSamplers(startSlot uint32, s *SamplerState) {
+ syscall.Syscall6(
+ c.Vtbl.PSSetSamplers,
+ 4,
+ uintptr(unsafe.Pointer(c)),
+ uintptr(startSlot),
+ 1, // NumSamplers
+ uintptr(unsafe.Pointer(&s)),
+ 0, 0,
+ )
+}
+
+func (c *DeviceContext) PSSetShader(s *PixelShader) {
+ syscall.Syscall6(
+ c.Vtbl.PSSetShader,
+ 4,
+ uintptr(unsafe.Pointer(c)),
+ uintptr(unsafe.Pointer(s)),
+ 0, // ppClassInstances
+ 0, // NumClassInstances
+ 0, 0,
+ )
+}
+
+func (c *DeviceContext) UpdateSubresource(res *Resource, dstBox *BOX, rowPitch, depthPitch uint32, data []byte) {
+ syscall.Syscall9(
+ c.Vtbl.UpdateSubresource,
+ 7,
+ uintptr(unsafe.Pointer(c)),
+ uintptr(unsafe.Pointer(res)),
+ 0, // DstSubresource
+ uintptr(unsafe.Pointer(dstBox)),
+ uintptr(unsafe.Pointer(&data[0])),
+ uintptr(rowPitch),
+ uintptr(depthPitch),
+ 0, 0,
+ )
+}
+
+func (c *DeviceContext) RSSetState(state *RasterizerState) {
+ syscall.Syscall(
+ c.Vtbl.RSSetState,
+ 2,
+ uintptr(unsafe.Pointer(c)),
+ uintptr(unsafe.Pointer(state)),
+ 0,
+ )
+}
+
+func (c *DeviceContext) IASetInputLayout(layout *InputLayout) {
+ syscall.Syscall(
+ c.Vtbl.IASetInputLayout,
+ 2,
+ uintptr(unsafe.Pointer(c)),
+ uintptr(unsafe.Pointer(layout)),
+ 0,
+ )
+}
+
+func (c *DeviceContext) IASetIndexBuffer(buf *Buffer, format, offset uint32) {
+ syscall.Syscall6(
+ c.Vtbl.IASetIndexBuffer,
+ 4,
+ uintptr(unsafe.Pointer(c)),
+ uintptr(unsafe.Pointer(buf)),
+ uintptr(format),
+ uintptr(offset),
+ 0, 0,
+ )
+}
+
+func (c *DeviceContext) IASetVertexBuffers(buf *Buffer, stride, offset uint32) {
+ syscall.Syscall6(
+ c.Vtbl.IASetVertexBuffers,
+ 6,
+ uintptr(unsafe.Pointer(c)),
+ 0, // StartSlot
+ 1, // NumBuffers,
+ uintptr(unsafe.Pointer(&buf)),
+ uintptr(unsafe.Pointer(&stride)),
+ uintptr(unsafe.Pointer(&offset)),
+ )
+}
+
+func (c *DeviceContext) IASetPrimitiveTopology(mode uint32) {
+ syscall.Syscall(
+ c.Vtbl.IASetPrimitiveTopology,
+ 2,
+ uintptr(unsafe.Pointer(c)),
+ uintptr(mode),
+ 0,
+ )
+}
+
+func (c *DeviceContext) OMGetRenderTargets() (*RenderTargetView, *DepthStencilView) {
+ var (
+ target *RenderTargetView
+ depthStencilView *DepthStencilView
+ )
+ syscall.Syscall6(
+ c.Vtbl.OMGetRenderTargets,
+ 4,
+ uintptr(unsafe.Pointer(c)),
+ 1, // NumViews
+ uintptr(unsafe.Pointer(&target)),
+ uintptr(unsafe.Pointer(&depthStencilView)),
+ 0, 0,
+ )
+ return target, depthStencilView
+}
+
+func (c *DeviceContext) OMSetRenderTargets(target *RenderTargetView, depthStencil *DepthStencilView) {
+ syscall.Syscall6(
+ c.Vtbl.OMSetRenderTargets,
+ 4,
+ uintptr(unsafe.Pointer(c)),
+ 1, // NumViews
+ uintptr(unsafe.Pointer(&target)),
+ uintptr(unsafe.Pointer(depthStencil)),
+ 0, 0,
+ )
+}
+
+func (c *DeviceContext) Draw(count, start uint32) {
+ syscall.Syscall(
+ c.Vtbl.Draw,
+ 3,
+ uintptr(unsafe.Pointer(c)),
+ uintptr(count),
+ uintptr(start),
+ )
+}
+
+func (c *DeviceContext) DrawIndexed(count, start uint32, base int32) {
+ syscall.Syscall6(
+ c.Vtbl.DrawIndexed,
+ 4,
+ uintptr(unsafe.Pointer(c)),
+ uintptr(count),
+ uintptr(start),
+ uintptr(base),
+ 0, 0,
+ )
+}
+
+func (c *DeviceContext) Dispatch(x, y, z uint32) {
+ syscall.Syscall6(
+ c.Vtbl.Dispatch,
+ 4,
+ uintptr(unsafe.Pointer(c)),
+ uintptr(x),
+ uintptr(y),
+ uintptr(z),
+ 0, 0,
+ )
+}
+
+func (c *DeviceContext) OMSetBlendState(state *BlendState, factor *f32color.RGBA, sampleMask uint32) {
+ syscall.Syscall6(
+ c.Vtbl.OMSetBlendState,
+ 4,
+ uintptr(unsafe.Pointer(c)),
+ uintptr(unsafe.Pointer(state)),
+ uintptr(unsafe.Pointer(factor)),
+ uintptr(sampleMask),
+ 0, 0,
+ )
+}
+
+func (c *DeviceContext) OMSetDepthStencilState(state *DepthStencilState, stencilRef uint32) {
+ syscall.Syscall(
+ c.Vtbl.OMSetDepthStencilState,
+ 3,
+ uintptr(unsafe.Pointer(c)),
+ uintptr(unsafe.Pointer(state)),
+ uintptr(stencilRef),
+ )
+}
+
+func (d *IDXGIObject) GetParent(guid *GUID) (*IDXGIObject, error) {
+ var parent *IDXGIObject
+ r, _, _ := syscall.Syscall(
+ d.Vtbl.GetParent,
+ 3,
+ uintptr(unsafe.Pointer(d)),
+ uintptr(unsafe.Pointer(guid)),
+ uintptr(unsafe.Pointer(&parent)),
+ )
+ if r != 0 {
+ return nil, ErrorCode{Name: "IDXGIObjectGetParent", Code: uint32(r)}
+ }
+ return parent, nil
+}
+
+func (d *IDXGIFactory) CreateSwapChain(device *IUnknown, desc *DXGI_SWAP_CHAIN_DESC) (*IDXGISwapChain, error) {
+ var swchain *IDXGISwapChain
+ r, _, _ := syscall.Syscall6(
+ d.Vtbl.CreateSwapChain,
+ 4,
+ uintptr(unsafe.Pointer(d)),
+ uintptr(unsafe.Pointer(device)),
+ uintptr(unsafe.Pointer(desc)),
+ uintptr(unsafe.Pointer(&swchain)),
+ 0, 0,
+ )
+ if r != 0 {
+ return nil, ErrorCode{Name: "IDXGIFactory", Code: uint32(r)}
+ }
+ return swchain, nil
+}
+
+func (d *IDXGIDevice) GetAdapter() (*IDXGIAdapter, error) {
+ var adapter *IDXGIAdapter
+ r, _, _ := syscall.Syscall(
+ d.Vtbl.GetAdapter,
+ 2,
+ uintptr(unsafe.Pointer(d)),
+ uintptr(unsafe.Pointer(&adapter)),
+ 0,
+ )
+ if r != 0 {
+ return nil, ErrorCode{Name: "IDXGIDeviceGetAdapter", Code: uint32(r)}
+ }
+ return adapter, nil
+}
+
+func IUnknownQueryInterface(obj unsafe.Pointer, queryInterfaceMethod uintptr, guid *GUID) (*IUnknown, error) {
+ var ref *IUnknown
+ r, _, _ := syscall.Syscall(
+ queryInterfaceMethod,
+ 3,
+ uintptr(obj),
+ uintptr(unsafe.Pointer(guid)),
+ uintptr(unsafe.Pointer(&ref)),
+ )
+ if r != 0 {
+ return nil, ErrorCode{Name: "IUnknownQueryInterface", Code: uint32(r)}
+ }
+ return ref, nil
+}
+
+func IUnknownAddRef(obj unsafe.Pointer, addRefMethod uintptr) {
+ syscall.Syscall(
+ addRefMethod,
+ 1,
+ uintptr(obj),
+ 0,
+ 0,
+ )
+}
+
+func IUnknownRelease(obj unsafe.Pointer, releaseMethod uintptr) {
+ syscall.Syscall(
+ releaseMethod,
+ 1,
+ uintptr(obj),
+ 0,
+ 0,
+ )
+}
+
+func (e ErrorCode) Error() string {
+ return fmt.Sprintf("%s: %#x", e.Name, e.Code)
+}
+
+func CreateSwapChain(dev *Device, hwnd windows.Handle) (*IDXGISwapChain, error) {
+ dxgiDev, err := IUnknownQueryInterface(unsafe.Pointer(dev), dev.Vtbl.QueryInterface, &IID_IDXGIDevice)
+ if err != nil {
+ return nil, fmt.Errorf("NewContext: %v", err)
+ }
+ adapter, err := (*IDXGIDevice)(unsafe.Pointer(dxgiDev)).GetAdapter()
+ IUnknownRelease(unsafe.Pointer(dxgiDev), dxgiDev.Vtbl.Release)
+ if err != nil {
+ return nil, fmt.Errorf("NewContext: %v", err)
+ }
+ dxgiFactory, err := (*IDXGIObject)(unsafe.Pointer(adapter)).GetParent(&IID_IDXGIFactory)
+ IUnknownRelease(unsafe.Pointer(adapter), adapter.Vtbl.Release)
+ if err != nil {
+ return nil, fmt.Errorf("NewContext: %v", err)
+ }
+ swchain, err := (*IDXGIFactory)(unsafe.Pointer(dxgiFactory)).CreateSwapChain(
+ (*IUnknown)(unsafe.Pointer(dev)),
+ &DXGI_SWAP_CHAIN_DESC{
+ BufferDesc: DXGI_MODE_DESC{
+ Format: DXGI_FORMAT_R8G8B8A8_UNORM_SRGB,
+ },
+ SampleDesc: DXGI_SAMPLE_DESC{
+ Count: 1,
+ },
+ BufferUsage: DXGI_USAGE_RENDER_TARGET_OUTPUT,
+ BufferCount: 1,
+ OutputWindow: hwnd,
+ Windowed: 1,
+ SwapEffect: DXGI_SWAP_EFFECT_DISCARD,
+ },
+ )
+ IUnknownRelease(unsafe.Pointer(dxgiFactory), dxgiFactory.Vtbl.Release)
+ if err != nil {
+ return nil, fmt.Errorf("NewContext: %v", err)
+ }
+ return swchain, nil
+}
+
+func CreateDepthView(d *Device, width, height, depthBits int) (*DepthStencilView, error) {
+ depthTex, err := d.CreateTexture2D(&TEXTURE2D_DESC{
+ Width: uint32(width),
+ Height: uint32(height),
+ MipLevels: 1,
+ ArraySize: 1,
+ Format: DXGI_FORMAT_D24_UNORM_S8_UINT,
+ SampleDesc: DXGI_SAMPLE_DESC{
+ Count: 1,
+ Quality: 0,
+ },
+ BindFlags: BIND_DEPTH_STENCIL,
+ })
+ if err != nil {
+ return nil, err
+ }
+ depthView, err := d.CreateDepthStencilViewTEX2D(
+ (*Resource)(unsafe.Pointer(depthTex)),
+ &DEPTH_STENCIL_VIEW_DESC_TEX2D{
+ Format: DXGI_FORMAT_D24_UNORM_S8_UINT,
+ ViewDimension: DSV_DIMENSION_TEXTURE2D,
+ },
+ )
+ IUnknownRelease(unsafe.Pointer(depthTex), depthTex.Vtbl.Release)
+ return depthView, err
+}
diff --git a/vendor/gioui.org/internal/egl/egl.go b/vendor/gioui.org/internal/egl/egl.go
new file mode 100644
index 0000000..7605d46
--- /dev/null
+++ b/vendor/gioui.org/internal/egl/egl.go
@@ -0,0 +1,250 @@
+// SPDX-License-Identifier: Unlicense OR MIT
+
+//go:build linux || windows || freebsd || openbsd
+// +build linux windows freebsd openbsd
+
+package egl
+
+import (
+ "errors"
+ "fmt"
+ "runtime"
+ "strings"
+
+ "gioui.org/gpu"
+)
+
+type Context struct {
+ disp _EGLDisplay
+ eglCtx *eglContext
+ eglSurf _EGLSurface
+ width, height int
+}
+
+type eglContext struct {
+ config _EGLConfig
+ ctx _EGLContext
+ visualID int
+ srgb bool
+ surfaceless bool
+}
+
+var (
+ nilEGLDisplay _EGLDisplay
+ nilEGLSurface _EGLSurface
+ nilEGLContext _EGLContext
+ nilEGLConfig _EGLConfig
+ EGL_DEFAULT_DISPLAY NativeDisplayType
+)
+
+const (
+ _EGL_ALPHA_SIZE = 0x3021
+ _EGL_BLUE_SIZE = 0x3022
+ _EGL_CONFIG_CAVEAT = 0x3027
+ _EGL_CONTEXT_CLIENT_VERSION = 0x3098
+ _EGL_DEPTH_SIZE = 0x3025
+ _EGL_GL_COLORSPACE_KHR = 0x309d
+ _EGL_GL_COLORSPACE_SRGB_KHR = 0x3089
+ _EGL_GREEN_SIZE = 0x3023
+ _EGL_EXTENSIONS = 0x3055
+ _EGL_NATIVE_VISUAL_ID = 0x302e
+ _EGL_NONE = 0x3038
+ _EGL_OPENGL_ES2_BIT = 0x4
+ _EGL_RED_SIZE = 0x3024
+ _EGL_RENDERABLE_TYPE = 0x3040
+ _EGL_SURFACE_TYPE = 0x3033
+ _EGL_WINDOW_BIT = 0x4
+)
+
+func (c *Context) Release() {
+ c.ReleaseSurface()
+ if c.eglCtx != nil {
+ eglDestroyContext(c.disp, c.eglCtx.ctx)
+ c.eglCtx = nil
+ }
+ c.disp = nilEGLDisplay
+}
+
+func (c *Context) Present() error {
+ if !eglSwapBuffers(c.disp, c.eglSurf) {
+ return fmt.Errorf("eglSwapBuffers failed (%x)", eglGetError())
+ }
+ return nil
+}
+
+func NewContext(disp NativeDisplayType) (*Context, error) {
+ if err := loadEGL(); err != nil {
+ return nil, err
+ }
+ eglDisp := eglGetDisplay(disp)
+ // eglGetDisplay can return EGL_NO_DISPLAY yet no error
+ // (EGL_SUCCESS), in which case a default EGL display might be
+ // available.
+ if eglDisp == nilEGLDisplay {
+ eglDisp = eglGetDisplay(EGL_DEFAULT_DISPLAY)
+ }
+ if eglDisp == nilEGLDisplay {
+ return nil, fmt.Errorf("eglGetDisplay failed: 0x%x", eglGetError())
+ }
+ eglCtx, err := createContext(eglDisp)
+ if err != nil {
+ return nil, err
+ }
+ c := &Context{
+ disp: eglDisp,
+ eglCtx: eglCtx,
+ }
+ return c, nil
+}
+
+func (c *Context) RenderTarget() (gpu.RenderTarget, error) {
+ return gpu.OpenGLRenderTarget{}, nil
+}
+
+func (c *Context) API() gpu.API {
+ return gpu.OpenGL{}
+}
+
+func (c *Context) ReleaseSurface() {
+ if c.eglSurf == nilEGLSurface {
+ return
+ }
+ // Make sure any in-flight GL commands are complete.
+ eglWaitClient()
+ c.ReleaseCurrent()
+ eglDestroySurface(c.disp, c.eglSurf)
+ c.eglSurf = nilEGLSurface
+}
+
+func (c *Context) VisualID() int {
+ return c.eglCtx.visualID
+}
+
+func (c *Context) CreateSurface(win NativeWindowType, width, height int) error {
+ eglSurf, err := createSurface(c.disp, c.eglCtx, win)
+ c.eglSurf = eglSurf
+ c.width = width
+ c.height = height
+ return err
+}
+
+func (c *Context) ReleaseCurrent() {
+ if c.disp != nilEGLDisplay {
+ eglMakeCurrent(c.disp, nilEGLSurface, nilEGLSurface, nilEGLContext)
+ }
+}
+
+func (c *Context) MakeCurrent() error {
+ if c.eglSurf == nilEGLSurface && !c.eglCtx.surfaceless {
+ return errors.New("no surface created yet EGL_KHR_surfaceless_context is not supported")
+ }
+ if !eglMakeCurrent(c.disp, c.eglSurf, c.eglSurf, c.eglCtx.ctx) {
+ return fmt.Errorf("eglMakeCurrent error 0x%x", eglGetError())
+ }
+ return nil
+}
+
+func (c *Context) EnableVSync(enable bool) {
+ if enable {
+ eglSwapInterval(c.disp, 1)
+ } else {
+ eglSwapInterval(c.disp, 0)
+ }
+}
+
+func hasExtension(exts []string, ext string) bool {
+ for _, e := range exts {
+ if ext == e {
+ return true
+ }
+ }
+ return false
+}
+
+func createContext(disp _EGLDisplay) (*eglContext, error) {
+ major, minor, ret := eglInitialize(disp)
+ if !ret {
+ return nil, fmt.Errorf("eglInitialize failed: 0x%x", eglGetError())
+ }
+ // sRGB framebuffer support on EGL 1.5 or if EGL_KHR_gl_colorspace is supported.
+ exts := strings.Split(eglQueryString(disp, _EGL_EXTENSIONS), " ")
+ srgb := major > 1 || minor >= 5 || hasExtension(exts, "EGL_KHR_gl_colorspace")
+ attribs := []_EGLint{
+ _EGL_RENDERABLE_TYPE, _EGL_OPENGL_ES2_BIT,
+ _EGL_SURFACE_TYPE, _EGL_WINDOW_BIT,
+ _EGL_BLUE_SIZE, 8,
+ _EGL_GREEN_SIZE, 8,
+ _EGL_RED_SIZE, 8,
+ _EGL_CONFIG_CAVEAT, _EGL_NONE,
+ }
+ if srgb {
+ if runtime.GOOS == "linux" || runtime.GOOS == "android" {
+ // Some Mesa drivers crash if an sRGB framebuffer is requested without alpha.
+ // https://bugs.freedesktop.org/show_bug.cgi?id=107782.
+ //
+ // Also, some Android devices (Samsung S9) need alpha for sRGB to work.
+ attribs = append(attribs, _EGL_ALPHA_SIZE, 8)
+ }
+ }
+ attribs = append(attribs, _EGL_NONE)
+ eglCfg, ret := eglChooseConfig(disp, attribs)
+ if !ret {
+ return nil, fmt.Errorf("eglChooseConfig failed: 0x%x", eglGetError())
+ }
+ if eglCfg == nilEGLConfig {
+ supportsNoCfg := hasExtension(exts, "EGL_KHR_no_config_context")
+ if !supportsNoCfg {
+ return nil, errors.New("eglChooseConfig returned no configs")
+ }
+ }
+ var visID _EGLint
+ if eglCfg != nilEGLConfig {
+ var ok bool
+ visID, ok = eglGetConfigAttrib(disp, eglCfg, _EGL_NATIVE_VISUAL_ID)
+ if !ok {
+ return nil, errors.New("newContext: eglGetConfigAttrib for _EGL_NATIVE_VISUAL_ID failed")
+ }
+ }
+ ctxAttribs := []_EGLint{
+ _EGL_CONTEXT_CLIENT_VERSION, 3,
+ _EGL_NONE,
+ }
+ eglCtx := eglCreateContext(disp, eglCfg, nilEGLContext, ctxAttribs)
+ if eglCtx == nilEGLContext {
+ // Fall back to OpenGL ES 2 and rely on extensions.
+ ctxAttribs := []_EGLint{
+ _EGL_CONTEXT_CLIENT_VERSION, 2,
+ _EGL_NONE,
+ }
+ eglCtx = eglCreateContext(disp, eglCfg, nilEGLContext, ctxAttribs)
+ if eglCtx == nilEGLContext {
+ return nil, fmt.Errorf("eglCreateContext failed: 0x%x", eglGetError())
+ }
+ }
+ return &eglContext{
+ config: _EGLConfig(eglCfg),
+ ctx: _EGLContext(eglCtx),
+ visualID: int(visID),
+ srgb: srgb,
+ surfaceless: hasExtension(exts, "EGL_KHR_surfaceless_context"),
+ }, nil
+}
+
+func createSurface(disp _EGLDisplay, eglCtx *eglContext, win NativeWindowType) (_EGLSurface, error) {
+ var surfAttribs []_EGLint
+ if eglCtx.srgb {
+ surfAttribs = append(surfAttribs, _EGL_GL_COLORSPACE_KHR, _EGL_GL_COLORSPACE_SRGB_KHR)
+ }
+ surfAttribs = append(surfAttribs, _EGL_NONE)
+ eglSurf := eglCreateWindowSurface(disp, eglCtx.config, win, surfAttribs)
+ if eglSurf == nilEGLSurface && eglCtx.srgb {
+ // Try again without sRGB.
+ eglCtx.srgb = false
+ surfAttribs = []_EGLint{_EGL_NONE}
+ eglSurf = eglCreateWindowSurface(disp, eglCtx.config, win, surfAttribs)
+ }
+ if eglSurf == nilEGLSurface {
+ return nilEGLSurface, fmt.Errorf("newContext: eglCreateWindowSurface failed 0x%x (sRGB=%v)", eglGetError(), eglCtx.srgb)
+ }
+ return eglSurf, nil
+}
diff --git a/vendor/gioui.org/internal/egl/egl_unix.go b/vendor/gioui.org/internal/egl/egl_unix.go
new file mode 100644
index 0000000..bd3efa5
--- /dev/null
+++ b/vendor/gioui.org/internal/egl/egl_unix.go
@@ -0,0 +1,109 @@
+// SPDX-License-Identifier: Unlicense OR MIT
+
+//go:build linux || freebsd || openbsd
+// +build linux freebsd openbsd
+
+package egl
+
+/*
+#cgo linux,!android pkg-config: egl
+#cgo freebsd openbsd android LDFLAGS: -lEGL
+#cgo freebsd CFLAGS: -I/usr/local/include
+#cgo freebsd LDFLAGS: -L/usr/local/lib
+#cgo openbsd CFLAGS: -I/usr/X11R6/include
+#cgo openbsd LDFLAGS: -L/usr/X11R6/lib
+#cgo CFLAGS: -DEGL_NO_X11
+
+#include <EGL/egl.h>
+#include <EGL/eglext.h>
+*/
+import "C"
+
+type (
+ _EGLint = C.EGLint
+ _EGLDisplay = C.EGLDisplay
+ _EGLConfig = C.EGLConfig
+ _EGLContext = C.EGLContext
+ _EGLSurface = C.EGLSurface
+ NativeDisplayType = C.EGLNativeDisplayType
+ NativeWindowType = C.EGLNativeWindowType
+)
+
+func loadEGL() error {
+ return nil
+}
+
+func eglChooseConfig(disp _EGLDisplay, attribs []_EGLint) (_EGLConfig, bool) {
+ var cfg C.EGLConfig
+ var ncfg C.EGLint
+ if C.eglChooseConfig(disp, &attribs[0], &cfg, 1, &ncfg) != C.EGL_TRUE {
+ return nilEGLConfig, false
+ }
+ return _EGLConfig(cfg), true
+}
+
+func eglCreateContext(disp _EGLDisplay, cfg _EGLConfig, shareCtx _EGLContext, attribs []_EGLint) _EGLContext {
+ ctx := C.eglCreateContext(disp, cfg, shareCtx, &attribs[0])
+ return _EGLContext(ctx)
+}
+
+func eglDestroySurface(disp _EGLDisplay, surf _EGLSurface) bool {
+ return C.eglDestroySurface(disp, surf) == C.EGL_TRUE
+}
+
+func eglDestroyContext(disp _EGLDisplay, ctx _EGLContext) bool {
+ return C.eglDestroyContext(disp, ctx) == C.EGL_TRUE
+}
+
+func eglGetConfigAttrib(disp _EGLDisplay, cfg _EGLConfig, attr _EGLint) (_EGLint, bool) {
+ var val _EGLint
+ ret := C.eglGetConfigAttrib(disp, cfg, attr, &val)
+ return val, ret == C.EGL_TRUE
+}
+
+func eglGetError() _EGLint {
+ return C.eglGetError()
+}
+
+func eglInitialize(disp _EGLDisplay) (_EGLint, _EGLint, bool) {
+ var maj, min _EGLint
+ ret := C.eglInitialize(disp, &maj, &min)
+ return maj, min, ret == C.EGL_TRUE
+}
+
+func eglMakeCurrent(disp _EGLDisplay, draw, read _EGLSurface, ctx _EGLContext) bool {
+ return C.eglMakeCurrent(disp, draw, read, ctx) == C.EGL_TRUE
+}
+
+func eglReleaseThread() bool {
+ return C.eglReleaseThread() == C.EGL_TRUE
+}
+
+func eglSwapBuffers(disp _EGLDisplay, surf _EGLSurface) bool {
+ return C.eglSwapBuffers(disp, surf) == C.EGL_TRUE
+}
+
+func eglSwapInterval(disp _EGLDisplay, interval _EGLint) bool {
+ return C.eglSwapInterval(disp, interval) == C.EGL_TRUE
+}
+
+func eglTerminate(disp _EGLDisplay) bool {
+ return C.eglTerminate(disp) == C.EGL_TRUE
+}
+
+func eglQueryString(disp _EGLDisplay, name _EGLint) string {
+ return C.GoString(C.eglQueryString(disp, name))
+}
+
+func eglGetDisplay(disp NativeDisplayType) _EGLDisplay {
+ return C.eglGetDisplay(disp)
+}
+
+func eglCreateWindowSurface(disp _EGLDisplay, conf _EGLConfig, win NativeWindowType, attribs []_EGLint) _EGLSurface {
+ eglSurf := C.eglCreateWindowSurface(disp, conf, win, &attribs[0])
+ return eglSurf
+}
+
+func eglWaitClient() bool {
+ return C.eglWaitClient() == C.EGL_TRUE
+}
diff --git a/vendor/gioui.org/internal/egl/egl_windows.go b/vendor/gioui.org/internal/egl/egl_windows.go
new file mode 100644
index 0000000..4433dd7
--- /dev/null
+++ b/vendor/gioui.org/internal/egl/egl_windows.go
@@ -0,0 +1,169 @@
+// SPDX-License-Identifier: Unlicense OR MIT
+
+package egl
+
+import (
+ "fmt"
+ "runtime"
+ "sync"
+ "unsafe"
+
+ syscall "golang.org/x/sys/windows"
+
+ "gioui.org/internal/gl"
+)
+
+type (
+ _EGLint int32
+ _EGLDisplay uintptr
+ _EGLConfig uintptr
+ _EGLContext uintptr
+ _EGLSurface uintptr
+ NativeDisplayType uintptr
+ NativeWindowType uintptr
+)
+
+var (
+ libEGL = syscall.NewLazyDLL("libEGL.dll")
+ _eglChooseConfig = libEGL.NewProc("eglChooseConfig")
+ _eglCreateContext = libEGL.NewProc("eglCreateContext")
+ _eglCreateWindowSurface = libEGL.NewProc("eglCreateWindowSurface")
+ _eglDestroyContext = libEGL.NewProc("eglDestroyContext")
+ _eglDestroySurface = libEGL.NewProc("eglDestroySurface")
+ _eglGetConfigAttrib = libEGL.NewProc("eglGetConfigAttrib")
+ _eglGetDisplay = libEGL.NewProc("eglGetDisplay")
+ _eglGetError = libEGL.NewProc("eglGetError")
+ _eglInitialize = libEGL.NewProc("eglInitialize")
+ _eglMakeCurrent = libEGL.NewProc("eglMakeCurrent")
+ _eglReleaseThread = libEGL.NewProc("eglReleaseThread")
+ _eglSwapInterval = libEGL.NewProc("eglSwapInterval")
+ _eglSwapBuffers = libEGL.NewProc("eglSwapBuffers")
+ _eglTerminate = libEGL.NewProc("eglTerminate")
+ _eglQueryString = libEGL.NewProc("eglQueryString")
+ _eglWaitClient = libEGL.NewProc("eglWaitClient")
+)
+
+var loadOnce sync.Once
+
+func loadEGL() error {
+ var err error
+ loadOnce.Do(func() {
+ err = loadDLLs()
+ })
+ return err
+}
+
+func loadDLLs() error {
+ if err := loadDLL(libEGL, "libEGL.dll"); err != nil {
+ return err
+ }
+ if err := loadDLL(gl.LibGLESv2, "libGLESv2.dll"); err != nil {
+ return err
+ }
+ // d3dcompiler_47.dll is needed internally for shader compilation to function.
+ return loadDLL(syscall.NewLazyDLL("d3dcompiler_47.dll"), "d3dcompiler_47.dll")
+}
+
+func loadDLL(dll *syscall.LazyDLL, name string) error {
+ err := dll.Load()
+ if err != nil {
+ return fmt.Errorf("egl: failed to load %s: %v", name, err)
+ }
+ return nil
+}
+
+func eglChooseConfig(disp _EGLDisplay, attribs []_EGLint) (_EGLConfig, bool) {
+ var cfg _EGLConfig
+ var ncfg _EGLint
+ a := &attribs[0]
+ r, _, _ := _eglChooseConfig.Call(uintptr(disp), uintptr(unsafe.Pointer(a)), uintptr(unsafe.Pointer(&cfg)), 1, uintptr(unsafe.Pointer(&ncfg)))
+ issue34474KeepAlive(a)
+ return cfg, r != 0
+}
+
+func eglCreateContext(disp _EGLDisplay, cfg _EGLConfig, shareCtx _EGLContext, attribs []_EGLint) _EGLContext {
+ a := &attribs[0]
+ c, _, _ := _eglCreateContext.Call(uintptr(disp), uintptr(cfg), uintptr(shareCtx), uintptr(unsafe.Pointer(a)))
+ issue34474KeepAlive(a)
+ return _EGLContext(c)
+}
+
+func eglCreateWindowSurface(disp _EGLDisplay, cfg _EGLConfig, win NativeWindowType, attribs []_EGLint) _EGLSurface {
+ a := &attribs[0]
+ s, _, _ := _eglCreateWindowSurface.Call(uintptr(disp), uintptr(cfg), uintptr(win), uintptr(unsafe.Pointer(a)))
+ issue34474KeepAlive(a)
+ return _EGLSurface(s)
+}
+
+func eglDestroySurface(disp _EGLDisplay, surf _EGLSurface) bool {
+ r, _, _ := _eglDestroySurface.Call(uintptr(disp), uintptr(surf))
+ return r != 0
+}
+
+func eglDestroyContext(disp _EGLDisplay, ctx _EGLContext) bool {
+ r, _, _ := _eglDestroyContext.Call(uintptr(disp), uintptr(ctx))
+ return r != 0
+}
+
+func eglGetConfigAttrib(disp _EGLDisplay, cfg _EGLConfig, attr _EGLint) (_EGLint, bool) {
+ var val uintptr
+ r, _, _ := _eglGetConfigAttrib.Call(uintptr(disp), uintptr(cfg), uintptr(attr), uintptr(unsafe.Pointer(&val)))
+ return _EGLint(val), r != 0
+}
+
+func eglGetDisplay(disp NativeDisplayType) _EGLDisplay {
+ d, _, _ := _eglGetDisplay.Call(uintptr(disp))
+ return _EGLDisplay(d)
+}
+
+func eglGetError() _EGLint {
+ e, _, _ := _eglGetError.Call()
+ return _EGLint(e)
+}
+
+func eglInitialize(disp _EGLDisplay) (_EGLint, _EGLint, bool) {
+ var maj, min uintptr
+ r, _, _ := _eglInitialize.Call(uintptr(disp), uintptr(unsafe.Pointer(&maj)), uintptr(unsafe.Pointer(&min)))
+ return _EGLint(maj), _EGLint(min), r != 0
+}
+
+func eglMakeCurrent(disp _EGLDisplay, draw, read _EGLSurface, ctx _EGLContext) bool {
+ r, _, _ := _eglMakeCurrent.Call(uintptr(disp), uintptr(draw), uintptr(read), uintptr(ctx))
+ return r != 0
+}
+
+func eglReleaseThread() bool {
+ r, _, _ := _eglReleaseThread.Call()
+ return r != 0
+}
+
+func eglSwapInterval(disp _EGLDisplay, interval _EGLint) bool {
+ r, _, _ := _eglSwapInterval.Call(uintptr(disp), uintptr(interval))
+ return r != 0
+}
+
+func eglSwapBuffers(disp _EGLDisplay, surf _EGLSurface) bool {
+ r, _, _ := _eglSwapBuffers.Call(uintptr(disp), uintptr(surf))
+ return r != 0
+}
+
+func eglTerminate(disp _EGLDisplay) bool {
+ r, _, _ := _eglTerminate.Call(uintptr(disp))
+ return r != 0
+}
+
+func eglQueryString(disp _EGLDisplay, name _EGLint) string {
+ r, _, _ := _eglQueryString.Call(uintptr(disp), uintptr(name))
+ return syscall.BytePtrToString((*byte)(unsafe.Pointer(r)))
+}
+
+func eglWaitClient() bool {
+ r, _, _ := _eglWaitClient.Call()
+ return r != 0
+}
+
+// issue34474KeepAlive calls runtime.KeepAlive as a
+// workaround for golang.org/issue/34474.
+func issue34474KeepAlive(v interface{}) {
+ runtime.KeepAlive(v)
+}
diff --git a/vendor/gioui.org/internal/f32color/rgba.go b/vendor/gioui.org/internal/f32color/rgba.go
new file mode 100644
index 0000000..eecf018
--- /dev/null
+++ b/vendor/gioui.org/internal/f32color/rgba.go
@@ -0,0 +1,177 @@
+// SPDX-License-Identifier: Unlicense OR MIT
+
+package f32color
+
+import (
+ "image/color"
+ "math"
+)
+
+// RGBA is a 32 bit floating point linear premultiplied color space.
+type RGBA struct {
+ R, G, B, A float32
+}
+
+// Array returns rgba values in a [4]float32 array.
+func (rgba RGBA) Array() [4]float32 {
+ return [4]float32{rgba.R, rgba.G, rgba.B, rgba.A}
+}
+
+// Float32 returns r, g, b, a values.
+func (col RGBA) Float32() (r, g, b, a float32) {
+ return col.R, col.G, col.B, col.A
+}
+
+// SRGBA converts from linear to sRGB color space.
+func (col RGBA) SRGB() color.NRGBA {
+ if col.A == 0 {
+ return color.NRGBA{}
+ }
+ return color.NRGBA{
+ R: uint8(linearTosRGB(col.R/col.A)*255 + .5),
+ G: uint8(linearTosRGB(col.G/col.A)*255 + .5),
+ B: uint8(linearTosRGB(col.B/col.A)*255 + .5),
+ A: uint8(col.A*255 + .5),
+ }
+}
+
+// Luminance calculates the relative luminance of a linear RGBA color.
+// Normalized to 0 for black and 1 for white.
+//
+// See https://www.w3.org/TR/WCAG20/#relativeluminancedef for more details
+func (col RGBA) Luminance() float32 {
+ return 0.2126*col.R + 0.7152*col.G + 0.0722*col.B
+}
+
+// Opaque returns the color without alpha component.
+func (col RGBA) Opaque() RGBA {
+ col.A = 1.0
+ return col
+}
+
+// LinearFromSRGB converts from col in the sRGB colorspace to RGBA.
+func LinearFromSRGB(col color.NRGBA) RGBA {
+ af := float32(col.A) / 0xFF
+ return RGBA{
+ R: sRGBToLinear(float32(col.R)/0xff) * af,
+ G: sRGBToLinear(float32(col.G)/0xff) * af,
+ B: sRGBToLinear(float32(col.B)/0xff) * af,
+ A: af,
+ }
+}
+
+// NRGBAToRGBA converts from non-premultiplied sRGB color to premultiplied sRGB color.
+//
+// Each component in the result is `sRGBToLinear(c * alpha)`, where `c`
+// is the linear color.
+func NRGBAToRGBA(col color.NRGBA) color.RGBA {
+ if col.A == 0xFF {
+ return color.RGBA(col)
+ }
+ c := LinearFromSRGB(col)
+ return color.RGBA{
+ R: uint8(linearTosRGB(c.R)*255 + .5),
+ G: uint8(linearTosRGB(c.G)*255 + .5),
+ B: uint8(linearTosRGB(c.B)*255 + .5),
+ A: col.A,
+ }
+}
+
+// NRGBAToLinearRGBA converts from non-premultiplied sRGB color to premultiplied linear RGBA color.
+//
+// Each component in the result is `c * alpha`, where `c` is the linear color.
+func NRGBAToLinearRGBA(col color.NRGBA) color.RGBA {
+ if col.A == 0xFF {
+ return color.RGBA(col)
+ }
+ c := LinearFromSRGB(col)
+ return color.RGBA{
+ R: uint8(c.R*255 + .5),
+ G: uint8(c.G*255 + .5),
+ B: uint8(c.B*255 + .5),
+ A: col.A,
+ }
+}
+
+// RGBAToNRGBA converts from premultiplied sRGB color to non-premultiplied sRGB color.
+func RGBAToNRGBA(col color.RGBA) color.NRGBA {
+ if col.A == 0xFF {
+ return color.NRGBA(col)
+ }
+
+ linear := RGBA{
+ R: sRGBToLinear(float32(col.R) / 0xff),
+ G: sRGBToLinear(float32(col.G) / 0xff),
+ B: sRGBToLinear(float32(col.B) / 0xff),
+ A: float32(col.A) / 0xff,
+ }
+
+ return linear.SRGB()
+}
+
+// linearTosRGB transforms color value from linear to sRGB.
+func linearTosRGB(c float32) float32 {
+ // Formula from EXT_sRGB.
+ switch {
+ case c <= 0:
+ return 0
+ case 0 < c && c < 0.0031308:
+ return 12.92 * c
+ case 0.0031308 <= c && c < 1:
+ return 1.055*float32(math.Pow(float64(c), 0.41666)) - 0.055
+ }
+
+ return 1
+}
+
+// sRGBToLinear transforms color value from sRGB to linear.
+func sRGBToLinear(c float32) float32 {
+ // Formula from EXT_sRGB.
+ if c <= 0.04045 {
+ return c / 12.92
+ } else {
+ return float32(math.Pow(float64((c+0.055)/1.055), 2.4))
+ }
+}
+
+// MulAlpha applies the alpha to the color.
+func MulAlpha(c color.NRGBA, alpha uint8) color.NRGBA {
+ c.A = uint8(uint32(c.A) * uint32(alpha) / 0xFF)
+ return c
+}
+
+// Disabled blends color towards the luminance and multiplies alpha.
+// Blending towards luminance will desaturate the color.
+// Multiplying alpha blends the color together more with the background.
+func Disabled(c color.NRGBA) (d color.NRGBA) {
+ const r = 80 // blend ratio
+ lum := approxLuminance(c)
+ return color.NRGBA{
+ R: byte((int(c.R)*r + int(lum)*(256-r)) / 256),
+ G: byte((int(c.G)*r + int(lum)*(256-r)) / 256),
+ B: byte((int(c.B)*r + int(lum)*(256-r)) / 256),
+ A: byte(int(c.A) * (128 + 32) / 256),
+ }
+}
+
+// Hovered blends color towards a brighter color.
+func Hovered(c color.NRGBA) (d color.NRGBA) {
+ const r = 0x20 // lighten ratio
+ return color.NRGBA{
+ R: byte(255 - int(255-c.R)*(255-r)/256),
+ G: byte(255 - int(255-c.G)*(255-r)/256),
+ B: byte(255 - int(255-c.B)*(255-r)/256),
+ A: c.A,
+ }
+}
+
+// approxLuminance is a fast approximate version of RGBA.Luminance.
+func approxLuminance(c color.NRGBA) byte {
+ const (
+ r = 13933 // 0.2126 * 256 * 256
+ g = 46871 // 0.7152 * 256 * 256
+ b = 4732 // 0.0722 * 256 * 256
+ t = r + g + b
+ )
+ return byte((r*int(c.R) + g*int(c.G) + b*int(c.B)) / t)
+}
diff --git a/vendor/gioui.org/internal/fling/animation.go b/vendor/gioui.org/internal/fling/animation.go
new file mode 100644
index 0000000..bb6f8a2
--- /dev/null
+++ b/vendor/gioui.org/internal/fling/animation.go
@@ -0,0 +1,98 @@
+// SPDX-License-Identifier: Unlicense OR MIT
+
+package fling
+
+import (
+ "math"
+ "runtime"
+ "time"
+
+ "gioui.org/unit"
+)
+
+type Animation struct {
+ // Current offset in pixels.
+ x float32
+ // Initial time.
+ t0 time.Time
+ // Initial velocity in pixels pr second.
+ v0 float32
+}
+
+var (
+ // Pixels/second.
+ minFlingVelocity = unit.Dp(50)
+ maxFlingVelocity = unit.Dp(8000)
+)
+
+const (
+ thresholdVelocity = 1
+)
+
+// Start a fling given a starting velocity. Returns whether a
+// fling was started.
+func (f *Animation) Start(c unit.Metric, now time.Time, velocity float32) bool {
+ min := float32(c.Px(minFlingVelocity))
+ v := velocity
+ if -min <= v && v <= min {
+ return false
+ }
+ max := float32(c.Px(maxFlingVelocity))
+ if v > max {
+ v = max
+ } else if v < -max {
+ v = -max
+ }
+ f.init(now, v)
+ return true
+}
+
+func (f *Animation) init(now time.Time, v0 float32) {
+ f.t0 = now
+ f.v0 = v0
+ f.x = 0
+}
+
+func (f *Animation) Active() bool {
+ return f.v0 != 0
+}
+
+// Tick computes and returns a fling distance since
+// the last time Tick was called.
+func (f *Animation) Tick(now time.Time) int {
+ if !f.Active() {
+ return 0
+ }
+ var k float32
+ if runtime.GOOS == "darwin" {
+ k = -2 // iOS
+ } else {
+ k = -4.2 // Android and default
+ }
+ t := now.Sub(f.t0)
+ // The acceleration x''(t) of a point mass with a drag
+ // force, f, proportional with velocity, x'(t), is
+ // governed by the equation
+ //
+ // x''(t) = kx'(t)
+ //
+ // Given the starting position x(0) = 0, the starting
+ // velocity x'(0) = v0, the position is then
+ // given by
+ //
+ // x(t) = v0*e^(k*t)/k - v0/k
+ //
+ ekt := float32(math.Exp(float64(k) * t.Seconds()))
+ x := f.v0*ekt/k - f.v0/k
+ dist := x - f.x
+ idist := int(dist)
+ f.x += float32(idist)
+ // Solving for the velocity x'(t) gives us
+ //
+ // x'(t) = v0*e^(k*t)
+ v := f.v0 * ekt
+ if -thresholdVelocity < v && v < thresholdVelocity {
+ f.v0 = 0
+ }
+ return idist
+}
diff --git a/vendor/gioui.org/internal/fling/extrapolation.go b/vendor/gioui.org/internal/fling/extrapolation.go
new file mode 100644
index 0000000..655ef84
--- /dev/null
+++ b/vendor/gioui.org/internal/fling/extrapolation.go
@@ -0,0 +1,332 @@
+// SPDX-License-Identifier: Unlicense OR MIT
+
+package fling
+
+import (
+ "math"
+ "strconv"
+ "strings"
+ "time"
+)
+
+// Extrapolation computes a 1-dimensional velocity estimate
+// for a set of timestamped points using the least squares
+// fit of a 2nd order polynomial. The same method is used
+// by Android.
+type Extrapolation struct {
+ // Index into points.
+ idx int
+ // Circular buffer of samples.
+ samples []sample
+ lastValue float32
+ // Pre-allocated cache for samples.
+ cache [historySize]sample
+
+ // Filtered values and times
+ values [historySize]float32
+ times [historySize]float32
+}
+
+type sample struct {
+ t time.Duration
+ v float32
+}
+
+type matrix struct {
+ rows, cols int
+ data []float32
+}
+
+type Estimate struct {
+ Velocity float32
+ Distance float32
+}
+
+type coefficients [degree + 1]float32
+
+const (
+ degree = 2
+ historySize = 20
+ maxAge = 100 * time.Millisecond
+ maxSampleGap = 40 * time.Millisecond
+)
+
+// SampleDelta adds a relative sample to the estimation.
+func (e *Extrapolation) SampleDelta(t time.Duration, delta float32) {
+ val := delta + e.lastValue
+ e.Sample(t, val)
+}
+
+// Sample adds an absolute sample to the estimation.
+func (e *Extrapolation) Sample(t time.Duration, val float32) {
+ e.lastValue = val
+ if e.samples == nil {
+ e.samples = e.cache[:0]
+ }
+ s := sample{
+ t: t,
+ v: val,
+ }
+ if e.idx == len(e.samples) && e.idx < cap(e.samples) {
+ e.samples = append(e.samples, s)
+ } else {
+ e.samples[e.idx] = s
+ }
+ e.idx++
+ if e.idx == cap(e.samples) {
+ e.idx = 0
+ }
+}
+
+// Estimate returns an estimate of the implied velocity and
+// distance for the points sampled, or zero if the estimation method
+// failed.
+func (e *Extrapolation) Estimate() Estimate {
+ if len(e.samples) == 0 {
+ return Estimate{}
+ }
+ values := e.values[:0]
+ times := e.times[:0]
+ first := e.get(0)
+ t := first.t
+ // Walk backwards collecting samples.
+ for i := 0; i < len(e.samples); i++ {
+ p := e.get(-i)
+ age := first.t - p.t
+ if age >= maxAge || t-p.t >= maxSampleGap {
+ // If the samples are too old or
+ // too much time passed between samples
+ // assume they're not part of the fling.
+ break
+ }
+ t = p.t
+ values = append(values, first.v-p.v)
+ times = append(times, float32((-age).Seconds()))
+ }
+ coef, ok := polyFit(times, values)
+ if !ok {
+ return Estimate{}
+ }
+ dist := values[len(values)-1] - values[0]
+ return Estimate{
+ Velocity: coef[1],
+ Distance: dist,
+ }
+}
+
+func (e *Extrapolation) get(i int) sample {
+ idx := (e.idx + i - 1 + len(e.samples)) % len(e.samples)
+ return e.samples[idx]
+}
+
+// polyFit computes the least squares polynomial fit for
+// the set of points in X, Y. If the fitting fails
+// because of contradicting or insufficient data,
+// polyFit returns false.
+func polyFit(X, Y []float32) (coefficients, bool) {
+ if len(X) != len(Y) {
+ panic("X and Y lengths differ")
+ }
+ if len(X) <= degree {
+ // Not enough points to fit a curve.
+ return coefficients{}, false
+ }
+
+ // Use a method similar to Android's VelocityTracker.cpp:
+ // https://android.googlesource.com/platform/frameworks/base/+/56a2301/libs/androidfw/VelocityTracker.cpp
+ // where all weights are 1.
+
+ // First, expand the X vector to the matrix A in column-major order.
+ A := newMatrix(degree+1, len(X))
+ for i, x := range X {
+ A.set(0, i, 1)
+ for j := 1; j < A.rows; j++ {
+ A.set(j, i, A.get(j-1, i)*x)
+ }
+ }
+
+ Q, Rt, ok := decomposeQR(A)
+ if !ok {
+ return coefficients{}, false
+ }
+ // Solve R*B = Qt*Y for B, which is then the polynomial coefficients.
+ // Since R is upper triangular, we can proceed from bottom right to
+ // upper left.
+ // https://en.wikipedia.org/wiki/Non-linear_least_squares
+ var B coefficients
+ for i := Q.rows - 1; i >= 0; i-- {
+ B[i] = dot(Q.col(i), Y)
+ for j := Q.rows - 1; j > i; j-- {
+ B[i] -= Rt.get(i, j) * B[j]
+ }
+ B[i] /= Rt.get(i, i)
+ }
+ return B, true
+}
+
+// decomposeQR computes and returns Q, Rt where Q*transpose(Rt) = A, if
+// possible. R is guaranteed to be upper triangular and only the square
+// part of Rt is returned.
+func decomposeQR(A *matrix) (*matrix, *matrix, bool) {
+ // Gram-Schmidt QR decompose A where Q*R = A.
+ // https://en.wikipedia.org/wiki/Gram%E2%80%93Schmidt_process
+ Q := newMatrix(A.rows, A.cols) // Column-major.
+ Rt := newMatrix(A.rows, A.rows) // R transposed, row-major.
+ for i := 0; i < Q.rows; i++ {
+ // Copy A column.
+ for j := 0; j < Q.cols; j++ {
+ Q.set(i, j, A.get(i, j))
+ }
+ // Subtract projections. Note that in the projection
+ //
+ // proj_u a = <a, u>/<u, u> u
+ //
+ // the normalized column e replaces u, where <e, e> = 1:
+ //
+ // proj_e a = <a, e>/<e, e> e = <a, e> e
+ for j := 0; j < i; j++ {
+ d := dot(Q.col(j), Q.col(i))
+ for k := 0; k < Q.cols; k++ {
+ Q.set(i, k, Q.get(i, k)-d*Q.get(j, k))
+ }
+ }
+ // Normalize Q columns.
+ n := norm(Q.col(i))
+ if n < 0.000001 {
+ // Degenerate data, no solution.
+ return nil, nil, false
+ }
+ invNorm := 1 / n
+ for j := 0; j < Q.cols; j++ {
+ Q.set(i, j, Q.get(i, j)*invNorm)
+ }
+ // Update Rt.
+ for j := i; j < Rt.cols; j++ {
+ Rt.set(i, j, dot(Q.col(i), A.col(j)))
+ }
+ }
+ return Q, Rt, true
+}
+
+func norm(V []float32) float32 {
+ var n float32
+ for _, v := range V {
+ n += v * v
+ }
+ return float32(math.Sqrt(float64(n)))
+}
+
+func dot(V1, V2 []float32) float32 {
+ var d float32
+ for i, v1 := range V1 {
+ d += v1 * V2[i]
+ }
+ return d
+}
+
+func newMatrix(rows, cols int) *matrix {
+ return &matrix{
+ rows: rows,
+ cols: cols,
+ data: make([]float32, rows*cols),
+ }
+}
+
+func (m *matrix) set(row, col int, v float32) {
+ if row < 0 || row >= m.rows {
+ panic("row out of range")
+ }
+ if col < 0 || col >= m.cols {
+ panic("col out of range")
+ }
+ m.data[row*m.cols+col] = v
+}
+
+func (m *matrix) get(row, col int) float32 {
+ if row < 0 || row >= m.rows {
+ panic("row out of range")
+ }
+ if col < 0 || col >= m.cols {
+ panic("col out of range")
+ }
+ return m.data[row*m.cols+col]
+}
+
+func (m *matrix) col(c int) []float32 {
+ return m.data[c*m.cols : (c+1)*m.cols]
+}
+
+func (m *matrix) approxEqual(m2 *matrix) bool {
+ if m.rows != m2.rows || m.cols != m2.cols {
+ return false
+ }
+ const epsilon = 0.00001
+ for row := 0; row < m.rows; row++ {
+ for col := 0; col < m.cols; col++ {
+ d := m2.get(row, col) - m.get(row, col)
+ if d < -epsilon || d > epsilon {
+ return false
+ }
+ }
+ }
+ return true
+}
+
+func (m *matrix) transpose() *matrix {
+ t := &matrix{
+ rows: m.cols,
+ cols: m.rows,
+ data: make([]float32, len(m.data)),
+ }
+ for i := 0; i < m.rows; i++ {
+ for j := 0; j < m.cols; j++ {
+ t.set(j, i, m.get(i, j))
+ }
+ }
+ return t
+}
+
+func (m *matrix) mul(m2 *matrix) *matrix {
+ if m.rows != m2.cols {
+ panic("mismatched matrices")
+ }
+ mm := &matrix{
+ rows: m.rows,
+ cols: m2.cols,
+ data: make([]float32, m.rows*m2.cols),
+ }
+ for i := 0; i < mm.rows; i++ {
+ for j := 0; j < mm.cols; j++ {
+ var v float32
+ for k := 0; k < m.rows; k++ {
+ v += m.get(k, j) * m2.get(i, k)
+ }
+ mm.set(i, j, v)
+ }
+ }
+ return mm
+}
+
+func (m *matrix) String() string {
+ var b strings.Builder
+ for i := 0; i < m.rows; i++ {
+ for j := 0; j < m.cols; j++ {
+ v := m.get(i, j)
+ b.WriteString(strconv.FormatFloat(float64(v), 'g', -1, 32))
+ b.WriteString(", ")
+ }
+ b.WriteString("\n")
+ }
+ return b.String()
+}
+
+func (c coefficients) approxEqual(c2 coefficients) bool {
+ const epsilon = 0.00001
+ for i, v := range c {
+ d := v - c2[i]
+ if d < -epsilon || d > epsilon {
+ return false
+ }
+ }
+ return true
+}
diff --git a/vendor/gioui.org/internal/gl/gl.go b/vendor/gioui.org/internal/gl/gl.go
new file mode 100644
index 0000000..a9e378a
--- /dev/null
+++ b/vendor/gioui.org/internal/gl/gl.go
@@ -0,0 +1,130 @@
+// SPDX-License-Identifier: Unlicense OR MIT
+
+package gl
+
+type (
+ Attrib uint
+ Enum uint
+)
+
+const (
+ ACTIVE_TEXTURE = 0x84E0
+ ALL_BARRIER_BITS = 0xffffffff
+ ARRAY_BUFFER = 0x8892
+ ARRAY_BUFFER_BINDING = 0x8894
+ BACK = 0x0405
+ BLEND = 0xbe2
+ BLEND_DST_RGB = 0x80C8
+ BLEND_SRC_RGB = 0x80C9
+ BLEND_DST_ALPHA = 0x80CA
+ BLEND_SRC_ALPHA = 0x80CB
+ CLAMP_TO_EDGE = 0x812f
+ COLOR_ATTACHMENT0 = 0x8ce0
+ COLOR_BUFFER_BIT = 0x4000
+ COLOR_CLEAR_VALUE = 0x0C22
+ COMPILE_STATUS = 0x8b81
+ COMPUTE_SHADER = 0x91B9
+ CURRENT_PROGRAM = 0x8B8D
+ DEPTH_ATTACHMENT = 0x8d00
+ DEPTH_BUFFER_BIT = 0x100
+ DEPTH_CLEAR_VALUE = 0x0B73
+ DEPTH_COMPONENT16 = 0x81a5
+ DEPTH_COMPONENT24 = 0x81A6
+ DEPTH_COMPONENT32F = 0x8CAC
+ DEPTH_FUNC = 0x0B74
+ DEPTH_TEST = 0xb71
+ DEPTH_WRITEMASK = 0x0B72
+ DRAW_FRAMEBUFFER = 0x8CA9
+ DST_COLOR = 0x306
+ DYNAMIC_DRAW = 0x88E8
+ DYNAMIC_READ = 0x88E9
+ ELEMENT_ARRAY_BUFFER = 0x8893
+ ELEMENT_ARRAY_BUFFER_BINDING = 0x8895
+ EXTENSIONS = 0x1f03
+ FALSE = 0
+ FLOAT = 0x1406
+ FRAGMENT_SHADER = 0x8b30
+ FRAMEBUFFER = 0x8d40
+ FRAMEBUFFER_ATTACHMENT_COLOR_ENCODING = 0x8210
+ FRAMEBUFFER_BINDING = 0x8ca6
+ FRAMEBUFFER_COMPLETE = 0x8cd5
+ FRAMEBUFFER_SRGB = 0x8db9
+ HALF_FLOAT = 0x140b
+ HALF_FLOAT_OES = 0x8d61
+ INFO_LOG_LENGTH = 0x8B84
+ INVALID_INDEX = ^uint(0)
+ GREATER = 0x204
+ GEQUAL = 0x206
+ LINEAR = 0x2601
+ LINK_STATUS = 0x8b82
+ LUMINANCE = 0x1909
+ MAP_READ_BIT = 0x0001
+ MAX_TEXTURE_SIZE = 0xd33
+ NEAREST = 0x2600
+ NO_ERROR = 0x0
+ NUM_EXTENSIONS = 0x821D
+ ONE = 0x1
+ ONE_MINUS_SRC_ALPHA = 0x303
+ PACK_ROW_LENGTH = 0x0D02
+ PROGRAM_BINARY_LENGTH = 0x8741
+ QUERY_RESULT = 0x8866
+ QUERY_RESULT_AVAILABLE = 0x8867
+ R16F = 0x822d
+ R8 = 0x8229
+ READ_FRAMEBUFFER = 0x8ca8
+ READ_FRAMEBUFFER_BINDING = 0x8CAA
+ READ_ONLY = 0x88B8
+ READ_WRITE = 0x88BA
+ RED = 0x1903
+ RENDERER = 0x1F01
+ RENDERBUFFER = 0x8d41
+ RENDERBUFFER_BINDING = 0x8ca7
+ RENDERBUFFER_HEIGHT = 0x8d43
+ RENDERBUFFER_WIDTH = 0x8d42
+ RGB = 0x1907
+ RGBA = 0x1908
+ RGBA8 = 0x8058
+ SHADER_STORAGE_BUFFER = 0x90D2
+ SHADER_STORAGE_BUFFER_BINDING = 0x90D3
+ SHORT = 0x1402
+ SRGB = 0x8c40
+ SRGB_ALPHA_EXT = 0x8c42
+ SRGB8 = 0x8c41
+ SRGB8_ALPHA8 = 0x8c43
+ STATIC_DRAW = 0x88e4
+ STENCIL_BUFFER_BIT = 0x00000400
+ TEXTURE_2D = 0xde1
+ TEXTURE_BINDING_2D = 0x8069
+ TEXTURE_MAG_FILTER = 0x2800
+ TEXTURE_MIN_FILTER = 0x2801
+ TEXTURE_WRAP_S = 0x2802
+ TEXTURE_WRAP_T = 0x2803
+ TEXTURE0 = 0x84c0
+ TEXTURE1 = 0x84c1
+ TRIANGLE_STRIP = 0x5
+ TRIANGLES = 0x4
+ TRUE = 1
+ UNIFORM_BUFFER = 0x8A11
+ UNIFORM_BUFFER_BINDING = 0x8A28
+ UNPACK_ALIGNMENT = 0xcf5
+ UNPACK_ROW_LENGTH = 0x0CF2
+ UNSIGNED_BYTE = 0x1401
+ UNSIGNED_SHORT = 0x1403
+ VIEWPORT = 0x0BA2
+ VERSION = 0x1f02
+ VERTEX_ARRAY_BINDING = 0x85B5
+ VERTEX_SHADER = 0x8b31
+ VERTEX_ATTRIB_ARRAY_BUFFER_BINDING = 0x889F
+ VERTEX_ATTRIB_ARRAY_ENABLED = 0x8622
+ VERTEX_ATTRIB_ARRAY_POINTER = 0x8645
+ VERTEX_ATTRIB_ARRAY_NORMALIZED = 0x886A
+ VERTEX_ATTRIB_ARRAY_SIZE = 0x8623
+ VERTEX_ATTRIB_ARRAY_STRIDE = 0x8624
+ VERTEX_ATTRIB_ARRAY_TYPE = 0x8625
+ WRITE_ONLY = 0x88B9
+ ZERO = 0x0
+
+ // EXT_disjoint_timer_query
+ TIME_ELAPSED_EXT = 0x88BF
+ GPU_DISJOINT_EXT = 0x8FBB
+)
diff --git a/vendor/gioui.org/internal/gl/gl_js.go b/vendor/gioui.org/internal/gl/gl_js.go
new file mode 100644
index 0000000..2c7976e
--- /dev/null
+++ b/vendor/gioui.org/internal/gl/gl_js.go
@@ -0,0 +1,458 @@
+// SPDX-License-Identifier: Unlicense OR MIT
+
+package gl
+
+import (
+ "errors"
+ "strings"
+ "syscall/js"
+)
+
+type Functions struct {
+ Ctx js.Value
+ EXT_disjoint_timer_query js.Value
+ EXT_disjoint_timer_query_webgl2 js.Value
+
+ // Cached reference to the Uint8Array JS type.
+ uint8Array js.Value
+
+ // Cached JS arrays.
+ arrayBuf js.Value
+ int32Buf js.Value
+
+ isWebGL2 bool
+}
+
+type Context js.Value
+
+func NewFunctions(ctx Context, forceES bool) (*Functions, error) {
+ f := &Functions{
+ Ctx: js.Value(ctx),
+ uint8Array: js.Global().Get("Uint8Array"),
+ }
+ if err := f.Init(); err != nil {
+ return nil, err
+ }
+ return f, nil
+}
+
+func (f *Functions) Init() error {
+ webgl2Class := js.Global().Get("WebGL2RenderingContext")
+ f.isWebGL2 = !webgl2Class.IsUndefined() && f.Ctx.InstanceOf(webgl2Class)
+ if !f.isWebGL2 {
+ f.EXT_disjoint_timer_query = f.getExtension("EXT_disjoint_timer_query")
+ if f.getExtension("OES_texture_half_float").IsNull() && f.getExtension("OES_texture_float").IsNull() {
+ return errors.New("gl: no support for neither OES_texture_half_float nor OES_texture_float")
+ }
+ if f.getExtension("EXT_sRGB").IsNull() {
+ return errors.New("gl: EXT_sRGB not supported")
+ }
+ } else {
+ // WebGL2 extensions.
+ f.EXT_disjoint_timer_query_webgl2 = f.getExtension("EXT_disjoint_timer_query_webgl2")
+ if f.getExtension("EXT_color_buffer_half_float").IsNull() && f.getExtension("EXT_color_buffer_float").IsNull() {
+ return errors.New("gl: no support for neither EXT_color_buffer_half_float nor EXT_color_buffer_float")
+ }
+ }
+ return nil
+}
+
+func (f *Functions) getExtension(name string) js.Value {
+ return f.Ctx.Call("getExtension", name)
+}
+
+func (f *Functions) ActiveTexture(t Enum) {
+ f.Ctx.Call("activeTexture", int(t))
+}
+func (f *Functions) AttachShader(p Program, s Shader) {
+ f.Ctx.Call("attachShader", js.Value(p), js.Value(s))
+}
+func (f *Functions) BeginQuery(target Enum, query Query) {
+ if !f.EXT_disjoint_timer_query_webgl2.IsNull() {
+ f.Ctx.Call("beginQuery", int(target), js.Value(query))
+ } else {
+ f.EXT_disjoint_timer_query.Call("beginQueryEXT", int(target), js.Value(query))
+ }
+}
+func (f *Functions) BindAttribLocation(p Program, a Attrib, name string) {
+ f.Ctx.Call("bindAttribLocation", js.Value(p), int(a), name)
+}
+func (f *Functions) BindBuffer(target Enum, b Buffer) {
+ f.Ctx.Call("bindBuffer", int(target), js.Value(b))
+}
+func (f *Functions) BindBufferBase(target Enum, index int, b Buffer) {
+ f.Ctx.Call("bindBufferBase", int(target), index, js.Value(b))
+}
+func (f *Functions) BindFramebuffer(target Enum, fb Framebuffer) {
+ f.Ctx.Call("bindFramebuffer", int(target), js.Value(fb))
+}
+func (f *Functions) BindRenderbuffer(target Enum, rb Renderbuffer) {
+ f.Ctx.Call("bindRenderbuffer", int(target), js.Value(rb))
+}
+func (f *Functions) BindTexture(target Enum, t Texture) {
+ f.Ctx.Call("bindTexture", int(target), js.Value(t))
+}
+func (f *Functions) BindImageTexture(unit int, t Texture, level int, layered bool, layer int, access, format Enum) {
+ panic("not implemented")
+}
+func (f *Functions) BindVertexArray(a VertexArray) {
+ panic("not supported")
+}
+func (f *Functions) BlendEquation(mode Enum) {
+ f.Ctx.Call("blendEquation", int(mode))
+}
+func (f *Functions) BlendFuncSeparate(srcRGB, dstRGB, srcA, dstA Enum) {
+ f.Ctx.Call("blendFunc", int(srcRGB), int(dstRGB), int(srcA), int(dstA))
+}
+func (f *Functions) BufferData(target Enum, size int, usage Enum, data []byte) {
+ if data == nil {
+ f.Ctx.Call("bufferData", int(target), size, int(usage))
+ } else {
+ if len(data) != size {
+ panic("size mismatch")
+ }
+ f.Ctx.Call("bufferData", int(target), f.byteArrayOf(data), int(usage))
+ }
+}
+func (f *Functions) BufferSubData(target Enum, offset int, src []byte) {
+ f.Ctx.Call("bufferSubData", int(target), offset, f.byteArrayOf(src))
+}
+func (f *Functions) CheckFramebufferStatus(target Enum) Enum {
+ return Enum(f.Ctx.Call("checkFramebufferStatus", int(target)).Int())
+}
+func (f *Functions) Clear(mask Enum) {
+ f.Ctx.Call("clear", int(mask))
+}
+func (f *Functions) ClearColor(red, green, blue, alpha float32) {
+ f.Ctx.Call("clearColor", red, green, blue, alpha)
+}
+func (f *Functions) ClearDepthf(d float32) {
+ f.Ctx.Call("clearDepth", d)
+}
+func (f *Functions) CompileShader(s Shader) {
+ f.Ctx.Call("compileShader", js.Value(s))
+}
+func (f *Functions) CopyTexSubImage2D(target Enum, level, xoffset, yoffset, x, y, width, height int) {
+ f.Ctx.Call("copyTexSubImage2D", int(target), level, xoffset, yoffset, x, y, width, height)
+}
+func (f *Functions) CreateBuffer() Buffer {
+ return Buffer(f.Ctx.Call("createBuffer"))
+}
+func (f *Functions) CreateFramebuffer() Framebuffer {
+ return Framebuffer(f.Ctx.Call("createFramebuffer"))
+}
+func (f *Functions) CreateProgram() Program {
+ return Program(f.Ctx.Call("createProgram"))
+}
+func (f *Functions) CreateQuery() Query {
+ return Query(f.Ctx.Call("createQuery"))
+}
+func (f *Functions) CreateRenderbuffer() Renderbuffer {
+ return Renderbuffer(f.Ctx.Call("createRenderbuffer"))
+}
+func (f *Functions) CreateShader(ty Enum) Shader {
+ return Shader(f.Ctx.Call("createShader", int(ty)))
+}
+func (f *Functions) CreateTexture() Texture {
+ return Texture(f.Ctx.Call("createTexture"))
+}
+func (f *Functions) CreateVertexArray() VertexArray {
+ panic("not supported")
+}
+func (f *Functions) DeleteBuffer(v Buffer) {
+ f.Ctx.Call("deleteBuffer", js.Value(v))
+}
+func (f *Functions) DeleteFramebuffer(v Framebuffer) {
+ f.Ctx.Call("deleteFramebuffer", js.Value(v))
+}
+func (f *Functions) DeleteProgram(p Program) {
+ f.Ctx.Call("deleteProgram", js.Value(p))
+}
+func (f *Functions) DeleteQuery(query Query) {
+ if !f.EXT_disjoint_timer_query_webgl2.IsNull() {
+ f.Ctx.Call("deleteQuery", js.Value(query))
+ } else {
+ f.EXT_disjoint_timer_query.Call("deleteQueryEXT", js.Value(query))
+ }
+}
+func (f *Functions) DeleteShader(s Shader) {
+ f.Ctx.Call("deleteShader", js.Value(s))
+}
+func (f *Functions) DeleteRenderbuffer(v Renderbuffer) {
+ f.Ctx.Call("deleteRenderbuffer", js.Value(v))
+}
+func (f *Functions) DeleteTexture(v Texture) {
+ f.Ctx.Call("deleteTexture", js.Value(v))
+}
+func (f *Functions) DeleteVertexArray(a VertexArray) {
+ panic("not implemented")
+}
+func (f *Functions) DepthFunc(fn Enum) {
+ f.Ctx.Call("depthFunc", int(fn))
+}
+func (f *Functions) DepthMask(mask bool) {
+ f.Ctx.Call("depthMask", mask)
+}
+func (f *Functions) DisableVertexAttribArray(a Attrib) {
+ f.Ctx.Call("disableVertexAttribArray", int(a))
+}
+func (f *Functions) Disable(cap Enum) {
+ f.Ctx.Call("disable", int(cap))
+}
+func (f *Functions) DrawArrays(mode Enum, first, count int) {
+ f.Ctx.Call("drawArrays", int(mode), first, count)
+}
+func (f *Functions) DrawElements(mode Enum, count int, ty Enum, offset int) {
+ f.Ctx.Call("drawElements", int(mode), count, int(ty), offset)
+}
+func (f *Functions) DispatchCompute(x, y, z int) {
+ panic("not implemented")
+}
+func (f *Functions) Enable(cap Enum) {
+ f.Ctx.Call("enable", int(cap))
+}
+func (f *Functions) EnableVertexAttribArray(a Attrib) {
+ f.Ctx.Call("enableVertexAttribArray", int(a))
+}
+func (f *Functions) EndQuery(target Enum) {
+ if !f.EXT_disjoint_timer_query_webgl2.IsNull() {
+ f.Ctx.Call("endQuery", int(target))
+ } else {
+ f.EXT_disjoint_timer_query.Call("endQueryEXT", int(target))
+ }
+}
+func (f *Functions) Finish() {
+ f.Ctx.Call("finish")
+}
+func (f *Functions) Flush() {
+ f.Ctx.Call("flush")
+}
+func (f *Functions) FramebufferRenderbuffer(target, attachment, renderbuffertarget Enum, renderbuffer Renderbuffer) {
+ f.Ctx.Call("framebufferRenderbuffer", int(target), int(attachment), int(renderbuffertarget), js.Value(renderbuffer))
+}
+func (f *Functions) FramebufferTexture2D(target, attachment, texTarget Enum, t Texture, level int) {
+ f.Ctx.Call("framebufferTexture2D", int(target), int(attachment), int(texTarget), js.Value(t), level)
+}
+func (f *Functions) GetError() Enum {
+ // Avoid slow getError calls. See gio#179.
+ return 0
+}
+func (f *Functions) GetRenderbufferParameteri(target, pname Enum) int {
+ return paramVal(f.Ctx.Call("getRenderbufferParameteri", int(pname)))
+}
+func (f *Functions) GetFramebufferAttachmentParameteri(target, attachment, pname Enum) int {
+ if !f.isWebGL2 && pname == FRAMEBUFFER_ATTACHMENT_COLOR_ENCODING {
+ // FRAMEBUFFER_ATTACHMENT_COLOR_ENCODING is only available on WebGL 2
+ return LINEAR
+ }
+ return paramVal(f.Ctx.Call("getFramebufferAttachmentParameter", int(target), int(attachment), int(pname)))
+}
+func (f *Functions) GetBinding(pname Enum) Object {
+ obj := f.Ctx.Call("getParameter", int(pname))
+ if !obj.Truthy() {
+ return Object{}
+ }
+ return Object(obj)
+}
+func (f *Functions) GetBindingi(pname Enum, idx int) Object {
+ obj := f.Ctx.Call("getIndexedParameter", int(pname), idx)
+ if !obj.Truthy() {
+ return Object{}
+ }
+ return Object(obj)
+}
+func (f *Functions) GetInteger(pname Enum) int {
+ if !f.isWebGL2 {
+ switch pname {
+ case PACK_ROW_LENGTH, UNPACK_ROW_LENGTH:
+ return 0 // PACK_ROW_LENGTH and UNPACK_ROW_LENGTH are only available on WebGL 2
+ }
+ }
+ return paramVal(f.Ctx.Call("getParameter", int(pname)))
+}
+func (f *Functions) GetFloat(pname Enum) float32 {
+ return float32(f.Ctx.Call("getParameter", int(pname)).Float())
+}
+func (f *Functions) GetInteger4(pname Enum) [4]int {
+ arr := f.Ctx.Call("getParameter", int(pname))
+ var res [4]int
+ for i := range res {
+ res[i] = arr.Index(i).Int()
+ }
+ return res
+}
+func (f *Functions) GetFloat4(pname Enum) [4]float32 {
+ arr := f.Ctx.Call("getParameter", int(pname))
+ var res [4]float32
+ for i := range res {
+ res[i] = float32(arr.Index(i).Float())
+ }
+ return res
+}
+func (f *Functions) GetProgrami(p Program, pname Enum) int {
+ return paramVal(f.Ctx.Call("getProgramParameter", js.Value(p), int(pname)))
+}
+func (f *Functions) GetProgramInfoLog(p Program) string {
+ return f.Ctx.Call("getProgramInfoLog", js.Value(p)).String()
+}
+func (f *Functions) GetQueryObjectuiv(query Query, pname Enum) uint {
+ if !f.EXT_disjoint_timer_query_webgl2.IsNull() {
+ return uint(paramVal(f.Ctx.Call("getQueryParameter", js.Value(query), int(pname))))
+ } else {
+ return uint(paramVal(f.EXT_disjoint_timer_query.Call("getQueryObjectEXT", js.Value(query), int(pname))))
+ }
+}
+func (f *Functions) GetShaderi(s Shader, pname Enum) int {
+ return paramVal(f.Ctx.Call("getShaderParameter", js.Value(s), int(pname)))
+}
+func (f *Functions) GetShaderInfoLog(s Shader) string {
+ return f.Ctx.Call("getShaderInfoLog", js.Value(s)).String()
+}
+func (f *Functions) GetString(pname Enum) string {
+ switch pname {
+ case EXTENSIONS:
+ extsjs := f.Ctx.Call("getSupportedExtensions")
+ var exts []string
+ for i := 0; i < extsjs.Length(); i++ {
+ exts = append(exts, "GL_"+extsjs.Index(i).String())
+ }
+ return strings.Join(exts, " ")
+ default:
+ return f.Ctx.Call("getParameter", int(pname)).String()
+ }
+}
+func (f *Functions) GetUniformBlockIndex(p Program, name string) uint {
+ return uint(paramVal(f.Ctx.Call("getUniformBlockIndex", js.Value(p), name)))
+}
+func (f *Functions) GetUniformLocation(p Program, name string) Uniform {
+ return Uniform(f.Ctx.Call("getUniformLocation", js.Value(p), name))
+}
+func (f *Functions) GetVertexAttrib(index int, pname Enum) int {
+ return paramVal(f.Ctx.Call("getVertexAttrib", index, int(pname)))
+}
+func (f *Functions) GetVertexAttribBinding(index int, pname Enum) Object {
+ obj := f.Ctx.Call("getVertexAttrib", index, int(pname))
+ if !obj.Truthy() {
+ return Object{}
+ }
+ return Object(obj)
+}
+func (f *Functions) GetVertexAttribPointer(index int, pname Enum) uintptr {
+ return uintptr(f.Ctx.Call("getVertexAttribOffset", index, int(pname)).Int())
+}
+func (f *Functions) InvalidateFramebuffer(target, attachment Enum) {
+ fn := f.Ctx.Get("invalidateFramebuffer")
+ if !fn.IsUndefined() {
+ if f.int32Buf.IsUndefined() {
+ f.int32Buf = js.Global().Get("Int32Array").New(1)
+ }
+ f.int32Buf.SetIndex(0, int32(attachment))
+ f.Ctx.Call("invalidateFramebuffer", int(target), f.int32Buf)
+ }
+}
+func (f *Functions) IsEnabled(cap Enum) bool {
+ return f.Ctx.Call("isEnabled", int(cap)).Truthy()
+}
+func (f *Functions) LinkProgram(p Program) {
+ f.Ctx.Call("linkProgram", js.Value(p))
+}
+func (f *Functions) PixelStorei(pname Enum, param int) {
+ f.Ctx.Call("pixelStorei", int(pname), param)
+}
+func (f *Functions) MemoryBarrier(barriers Enum) {
+ panic("not implemented")
+}
+func (f *Functions) MapBufferRange(target Enum, offset, length int, access Enum) []byte {
+ panic("not implemented")
+}
+func (f *Functions) RenderbufferStorage(target, internalformat Enum, width, height int) {
+ f.Ctx.Call("renderbufferStorage", int(target), int(internalformat), width, height)
+}
+func (f *Functions) ReadPixels(x, y, width, height int, format, ty Enum, data []byte) {
+ ba := f.byteArrayOf(data)
+ f.Ctx.Call("readPixels", x, y, width, height, int(format), int(ty), ba)
+ js.CopyBytesToGo(data, ba)
+}
+func (f *Functions) Scissor(x, y, width, height int32) {
+ f.Ctx.Call("scissor", x, y, width, height)
+}
+func (f *Functions) ShaderSource(s Shader, src string) {
+ f.Ctx.Call("shaderSource", js.Value(s), src)
+}
+func (f *Functions) TexImage2D(target Enum, level int, internalFormat Enum, width, height int, format, ty Enum) {
+ f.Ctx.Call("texImage2D", int(target), int(level), int(internalFormat), int(width), int(height), 0, int(format), int(ty), nil)
+}
+func (f *Functions) TexStorage2D(target Enum, levels int, internalFormat Enum, width, height int) {
+ f.Ctx.Call("texStorage2D", int(target), levels, int(internalFormat), width, height)
+}
+func (f *Functions) TexSubImage2D(target Enum, level int, x, y, width, height int, format, ty Enum, data []byte) {
+ f.Ctx.Call("texSubImage2D", int(target), level, x, y, width, height, int(format), int(ty), f.byteArrayOf(data))
+}
+func (f *Functions) TexParameteri(target, pname Enum, param int) {
+ f.Ctx.Call("texParameteri", int(target), int(pname), int(param))
+}
+func (f *Functions) UniformBlockBinding(p Program, uniformBlockIndex uint, uniformBlockBinding uint) {
+ f.Ctx.Call("uniformBlockBinding", js.Value(p), int(uniformBlockIndex), int(uniformBlockBinding))
+}
+func (f *Functions) Uniform1f(dst Uniform, v float32) {
+ f.Ctx.Call("uniform1f", js.Value(dst), v)
+}
+func (f *Functions) Uniform1i(dst Uniform, v int) {
+ f.Ctx.Call("uniform1i", js.Value(dst), v)
+}
+func (f *Functions) Uniform2f(dst Uniform, v0, v1 float32) {
+ f.Ctx.Call("uniform2f", js.Value(dst), v0, v1)
+}
+func (f *Functions) Uniform3f(dst Uniform, v0, v1, v2 float32) {
+ f.Ctx.Call("uniform3f", js.Value(dst), v0, v1, v2)
+}
+func (f *Functions) Uniform4f(dst Uniform, v0, v1, v2, v3 float32) {
+ f.Ctx.Call("uniform4f", js.Value(dst), v0, v1, v2, v3)
+}
+func (f *Functions) UseProgram(p Program) {
+ f.Ctx.Call("useProgram", js.Value(p))
+}
+func (f *Functions) UnmapBuffer(target Enum) bool {
+ panic("not implemented")
+}
+func (f *Functions) VertexAttribPointer(dst Attrib, size int, ty Enum, normalized bool, stride, offset int) {
+ f.Ctx.Call("vertexAttribPointer", int(dst), size, int(ty), normalized, stride, offset)
+}
+func (f *Functions) Viewport(x, y, width, height int) {
+ f.Ctx.Call("viewport", x, y, width, height)
+}
+
+func (f *Functions) byteArrayOf(data []byte) js.Value {
+ if len(data) == 0 {
+ return js.Null()
+ }
+ f.resizeByteBuffer(len(data))
+ ba := f.uint8Array.New(f.arrayBuf, int(0), int(len(data)))
+ js.CopyBytesToJS(ba, data)
+ return ba
+}
+
+func (f *Functions) resizeByteBuffer(n int) {
+ if n == 0 {
+ return
+ }
+ if !f.arrayBuf.IsUndefined() && f.arrayBuf.Length() >= n {
+ return
+ }
+ f.arrayBuf = js.Global().Get("ArrayBuffer").New(n)
+}
+
+func paramVal(v js.Value) int {
+ switch v.Type() {
+ case js.TypeBoolean:
+ if b := v.Bool(); b {
+ return 1
+ } else {
+ return 0
+ }
+ case js.TypeNumber:
+ return v.Int()
+ default:
+ panic("unknown parameter type")
+ }
+}
diff --git a/vendor/gioui.org/internal/gl/gl_unix.go b/vendor/gioui.org/internal/gl/gl_unix.go
new file mode 100644
index 0000000..f223ced
--- /dev/null
+++ b/vendor/gioui.org/internal/gl/gl_unix.go
@@ -0,0 +1,1222 @@
+// SPDX-License-Identifier: Unlicense OR MIT
+
+//go:build darwin || linux || freebsd || openbsd
+// +build darwin linux freebsd openbsd
+
+package gl
+
+import (
+ "fmt"
+ "runtime"
+ "strings"
+ "unsafe"
+)
+
+/*
+#cgo CFLAGS: -Werror
+#cgo linux freebsd LDFLAGS: -ldl
+
+#include <stdlib.h>
+#include <sys/types.h>
+#include <stdint.h>
+#define __USE_GNU
+#include <dlfcn.h>
+
+typedef unsigned int GLenum;
+typedef unsigned int GLuint;
+typedef char GLchar;
+typedef float GLfloat;
+typedef ssize_t GLsizeiptr;
+typedef intptr_t GLintptr;
+typedef unsigned int GLbitfield;
+typedef int GLint;
+typedef unsigned char GLboolean;
+typedef int GLsizei;
+typedef uint8_t GLubyte;
+
+typedef struct {
+ void (*glActiveTexture)(GLenum texture);
+ void (*glAttachShader)(GLuint program, GLuint shader);
+ void (*glBindAttribLocation)(GLuint program, GLuint index, const GLchar *name);
+ void (*glBindBuffer)(GLenum target, GLuint buffer);
+ void (*glBindFramebuffer)(GLenum target, GLuint framebuffer);
+ void (*glBindRenderbuffer)(GLenum target, GLuint renderbuffer);
+ void (*glBindTexture)(GLenum target, GLuint texture);
+ void (*glBlendEquation)(GLenum mode);
+ void (*glBlendFuncSeparate)(GLenum srcRGB, GLenum dstRGB, GLenum srcA, GLenum dstA);
+ void (*glBufferData)(GLenum target, GLsizeiptr size, const void *data, GLenum usage);
+ void (*glBufferSubData)(GLenum target, GLintptr offset, GLsizeiptr size, const void *data);
+ GLenum (*glCheckFramebufferStatus)(GLenum target);
+ void (*glClear)(GLbitfield mask);
+ void (*glClearColor)(GLfloat red, GLfloat green, GLfloat blue, GLfloat alpha);
+ void (*glClearDepthf)(GLfloat d);
+ void (*glCompileShader)(GLuint shader);
+ void (*glCopyTexSubImage2D)(GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint x, GLint y, GLsizei width, GLsizei height);
+ GLuint (*glCreateProgram)(void);
+ GLuint (*glCreateShader)(GLenum type);
+ void (*glDeleteBuffers)(GLsizei n, const GLuint *buffers);
+ void (*glDeleteFramebuffers)(GLsizei n, const GLuint *framebuffers);
+ void (*glDeleteProgram)(GLuint program);
+ void (*glDeleteRenderbuffers)(GLsizei n, const GLuint *renderbuffers);
+ void (*glDeleteShader)(GLuint shader);
+ void (*glDeleteTextures)(GLsizei n, const GLuint *textures);
+ void (*glDepthFunc)(GLenum func);
+ void (*glDepthMask)(GLboolean flag);
+ void (*glDisable)(GLenum cap);
+ void (*glDisableVertexAttribArray)(GLuint index);
+ void (*glDrawArrays)(GLenum mode, GLint first, GLsizei count);
+ void (*glDrawElements)(GLenum mode, GLsizei count, GLenum type, const void *indices);
+ void (*glEnable)(GLenum cap);
+ void (*glEnableVertexAttribArray)(GLuint index);
+ void (*glFinish)(void);
+ void (*glFlush)(void);
+ void (*glFramebufferRenderbuffer)(GLenum target, GLenum attachment, GLenum renderbuffertarget, GLuint renderbuffer);
+ void (*glFramebufferTexture2D)(GLenum target, GLenum attachment, GLenum textarget, GLuint texture, GLint level);
+ void (*glGenBuffers)(GLsizei n, GLuint *buffers);
+ void (*glGenFramebuffers)(GLsizei n, GLuint *framebuffers);
+ void (*glGenRenderbuffers)(GLsizei n, GLuint *renderbuffers);
+ void (*glGenTextures)(GLsizei n, GLuint *textures);
+ GLenum (*glGetError)(void);
+ void (*glGetFramebufferAttachmentParameteriv)(GLenum target, GLenum attachment, GLenum pname, GLint *params);
+ void (*glGetFloatv)(GLenum pname, GLfloat *data);
+ void (*glGetIntegerv)(GLenum pname, GLint *data);
+ void (*glGetIntegeri_v)(GLenum pname, GLuint idx, GLint *data);
+ void (*glGetProgramiv)(GLuint program, GLenum pname, GLint *params);
+ void (*glGetProgramInfoLog)(GLuint program, GLsizei bufSize, GLsizei *length, GLchar *infoLog);
+ void (*glGetRenderbufferParameteriv)(GLenum target, GLenum pname, GLint *params);
+ void (*glGetShaderiv)(GLuint shader, GLenum pname, GLint *params);
+ void (*glGetShaderInfoLog)(GLuint shader, GLsizei bufSize, GLsizei *length, GLchar *infoLog);
+ const GLubyte *(*glGetString)(GLenum name);
+ GLint (*glGetUniformLocation)(GLuint program, const GLchar *name);
+ void (*glGetVertexAttribiv)(GLuint index, GLenum pname, GLint *params);
+ void (*glGetVertexAttribPointerv)(GLuint index, GLenum pname, void **params);
+ GLboolean (*glIsEnabled)(GLenum cap);
+ void (*glLinkProgram)(GLuint program);
+ void (*glPixelStorei)(GLenum pname, GLint param);
+ void (*glReadPixels)(GLint x, GLint y, GLsizei width, GLsizei height, GLenum format, GLenum type, void *pixels);
+ void (*glRenderbufferStorage)(GLenum target, GLenum internalformat, GLsizei width, GLsizei height);
+ void (*glScissor)(GLint x, GLint y, GLsizei width, GLsizei height);
+ void (*glShaderSource)(GLuint shader, GLsizei count, const GLchar *const*string, const GLint *length);
+ void (*glTexImage2D)(GLenum target, GLint level, GLint internalformat, GLsizei width, GLsizei height, GLint border, GLenum format, GLenum type, const void *pixels);
+ void (*glTexParameteri)(GLenum target, GLenum pname, GLint param);
+ void (*glTexSubImage2D)(GLenum target, GLint level, GLint xoffset, GLint yoffset, GLsizei width, GLsizei height, GLenum format, GLenum type, const void *pixels);
+ void (*glUniform1f)(GLint location, GLfloat v0);
+ void (*glUniform1i)(GLint location, GLint v0);
+ void (*glUniform2f)(GLint location, GLfloat v0, GLfloat v1);
+ void (*glUniform3f)(GLint location, GLfloat v0, GLfloat v1, GLfloat v2);
+ void (*glUniform4f)(GLint location, GLfloat v0, GLfloat v1, GLfloat v2, GLfloat v3);
+ void (*glUseProgram)(GLuint program);
+ void (*glVertexAttribPointer)(GLuint index, GLint size, GLenum type, GLboolean normalized, GLsizei stride, const void *pointer);
+ void (*glViewport)(GLint x, GLint y, GLsizei width, GLsizei height);
+
+ void (*glBindVertexArray)(GLuint array);
+ void (*glBindBufferBase)(GLenum target, GLuint index, GLuint buffer);
+ GLuint (*glGetUniformBlockIndex)(GLuint program, const GLchar *uniformBlockName);
+ void (*glUniformBlockBinding)(GLuint program, GLuint uniformBlockIndex, GLuint uniformBlockBinding);
+ void (*glInvalidateFramebuffer)(GLenum target, GLsizei numAttachments, const GLenum *attachments);
+ void (*glBeginQuery)(GLenum target, GLuint id);
+ void (*glDeleteQueries)(GLsizei n, const GLuint *ids);
+ void (*glDeleteVertexArrays)(GLsizei n, const GLuint *ids);
+ void (*glEndQuery)(GLenum target);
+ void (*glGenQueries)(GLsizei n, GLuint *ids);
+ void (*glGenVertexArrays)(GLsizei n, GLuint *ids);
+ void (*glGetProgramBinary)(GLuint program, GLsizei bufsize, GLsizei *length, GLenum *binaryFormat, void *binary);
+ void (*glGetQueryObjectuiv)(GLuint id, GLenum pname, GLuint *params);
+ const GLubyte* (*glGetStringi)(GLenum name, GLuint index);
+ void (*glDispatchCompute)(GLuint x, GLuint y, GLuint z);
+ void (*glMemoryBarrier)(GLbitfield barriers);
+ void* (*glMapBufferRange)(GLenum target, GLintptr offset, GLsizeiptr length, GLbitfield access);
+ GLboolean (*glUnmapBuffer)(GLenum target);
+ void (*glBindImageTexture)(GLuint unit, GLuint texture, GLint level, GLboolean layered, GLint layer, GLenum access, GLenum format);
+ void (*glTexStorage2D)(GLenum target, GLsizei levels, GLenum internalformat, GLsizei width, GLsizei height);
+ void (*glBlitFramebuffer)(GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1, GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1, GLbitfield mask, GLenum filter);
+} glFunctions;
+
+static void glActiveTexture(glFunctions *f, GLenum texture) {
+ f->glActiveTexture(texture);
+}
+
+static void glAttachShader(glFunctions *f, GLuint program, GLuint shader) {
+ f->glAttachShader(program, shader);
+}
+
+static void glBindAttribLocation(glFunctions *f, GLuint program, GLuint index, const GLchar *name) {
+ f->glBindAttribLocation(program, index, name);
+}
+
+static void glBindBuffer(glFunctions *f, GLenum target, GLuint buffer) {
+ f->glBindBuffer(target, buffer);
+}
+
+static void glBindFramebuffer(glFunctions *f, GLenum target, GLuint framebuffer) {
+ f->glBindFramebuffer(target, framebuffer);
+}
+
+static void glBindRenderbuffer(glFunctions *f, GLenum target, GLuint renderbuffer) {
+ f->glBindRenderbuffer(target, renderbuffer);
+}
+
+static void glBindTexture(glFunctions *f, GLenum target, GLuint texture) {
+ f->glBindTexture(target, texture);
+}
+
+static void glBindVertexArray(glFunctions *f, GLuint array) {
+ f->glBindVertexArray(array);
+}
+
+static void glBlendEquation(glFunctions *f, GLenum mode) {
+ f->glBlendEquation(mode);
+}
+
+static void glBlendFuncSeparate(glFunctions *f, GLenum srcRGB, GLenum dstRGB, GLenum srcA, GLenum dstA) {
+ f->glBlendFuncSeparate(srcRGB, dstRGB, srcA, dstA);
+}
+
+static void glBufferData(glFunctions *f, GLenum target, GLsizeiptr size, const void *data, GLenum usage) {
+ f->glBufferData(target, size, data, usage);
+}
+
+static void glBufferSubData(glFunctions *f, GLenum target, GLintptr offset, GLsizeiptr size, const void *data) {
+ f->glBufferSubData(target, offset, size, data);
+}
+
+static GLenum glCheckFramebufferStatus(glFunctions *f, GLenum target) {
+ return f->glCheckFramebufferStatus(target);
+}
+
+static void glClear(glFunctions *f, GLbitfield mask) {
+ f->glClear(mask);
+}
+
+static void glClearColor(glFunctions *f, GLfloat red, GLfloat green, GLfloat blue, GLfloat alpha) {
+ f->glClearColor(red, green, blue, alpha);
+}
+
+static void glClearDepthf(glFunctions *f, GLfloat d) {
+ f->glClearDepthf(d);
+}
+
+static void glCompileShader(glFunctions *f, GLuint shader) {
+ f->glCompileShader(shader);
+}
+
+static void glCopyTexSubImage2D(glFunctions *f, GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint x, GLint y, GLsizei width, GLsizei height) {
+ f->glCopyTexSubImage2D(target, level, xoffset, yoffset, x, y, width, height);
+}
+
+static GLuint glCreateProgram(glFunctions *f) {
+ return f->glCreateProgram();
+}
+
+static GLuint glCreateShader(glFunctions *f, GLenum type) {
+ return f->glCreateShader(type);
+}
+
+static void glDeleteBuffers(glFunctions *f, GLsizei n, const GLuint *buffers) {
+ f->glDeleteBuffers(n, buffers);
+}
+
+static void glDeleteFramebuffers(glFunctions *f, GLsizei n, const GLuint *framebuffers) {
+ f->glDeleteFramebuffers(n, framebuffers);
+}
+
+static void glDeleteProgram(glFunctions *f, GLuint program) {
+ f->glDeleteProgram(program);
+}
+
+static void glDeleteRenderbuffers(glFunctions *f, GLsizei n, const GLuint *renderbuffers) {
+ f->glDeleteRenderbuffers(n, renderbuffers);
+}
+
+static void glDeleteShader(glFunctions *f, GLuint shader) {
+ f->glDeleteShader(shader);
+}
+
+static void glDeleteTextures(glFunctions *f, GLsizei n, const GLuint *textures) {
+ f->glDeleteTextures(n, textures);
+}
+
+static void glDepthFunc(glFunctions *f, GLenum func) {
+ f->glDepthFunc(func);
+}
+
+static void glDepthMask(glFunctions *f, GLboolean flag) {
+ f->glDepthMask(flag);
+}
+
+static void glDisable(glFunctions *f, GLenum cap) {
+ f->glDisable(cap);
+}
+
+static void glDisableVertexAttribArray(glFunctions *f, GLuint index) {
+ f->glDisableVertexAttribArray(index);
+}
+
+static void glDrawArrays(glFunctions *f, GLenum mode, GLint first, GLsizei count) {
+ f->glDrawArrays(mode, first, count);
+}
+
+// offset is defined as an uintptr_t to omit Cgo pointer checks.
+static void glDrawElements(glFunctions *f, GLenum mode, GLsizei count, GLenum type, const uintptr_t offset) {
+ f->glDrawElements(mode, count, type, (const void *)offset);
+}
+
+static void glEnable(glFunctions *f, GLenum cap) {
+ f->glEnable(cap);
+}
+
+static void glEnableVertexAttribArray(glFunctions *f, GLuint index) {
+ f->glEnableVertexAttribArray(index);
+}
+
+static void glFinish(glFunctions *f) {
+ f->glFinish();
+}
+
+static void glFlush(glFunctions *f) {
+ f->glFlush();
+}
+
+static void glFramebufferRenderbuffer(glFunctions *f, GLenum target, GLenum attachment, GLenum renderbuffertarget, GLuint renderbuffer) {
+ f->glFramebufferRenderbuffer(target, attachment, renderbuffertarget, renderbuffer);
+}
+
+static void glFramebufferTexture2D(glFunctions *f, GLenum target, GLenum attachment, GLenum textarget, GLuint texture, GLint level) {
+ f->glFramebufferTexture2D(target, attachment, textarget, texture, level);
+}
+
+static void glGenBuffers(glFunctions *f, GLsizei n, GLuint *buffers) {
+ f->glGenBuffers(n, buffers);
+}
+
+static void glGenFramebuffers(glFunctions *f, GLsizei n, GLuint *framebuffers) {
+ f->glGenFramebuffers(n, framebuffers);
+}
+
+static void glGenRenderbuffers(glFunctions *f, GLsizei n, GLuint *renderbuffers) {
+ f->glGenRenderbuffers(n, renderbuffers);
+}
+
+static void glGenTextures(glFunctions *f, GLsizei n, GLuint *textures) {
+ f->glGenTextures(n, textures);
+}
+
+static GLenum glGetError(glFunctions *f) {
+ return f->glGetError();
+}
+
+static void glGetFramebufferAttachmentParameteriv(glFunctions *f, GLenum target, GLenum attachment, GLenum pname, GLint *params) {
+ f->glGetFramebufferAttachmentParameteriv(target, attachment, pname, params);
+}
+
+static void glGetIntegerv(glFunctions *f, GLenum pname, GLint *data) {
+ f->glGetIntegerv(pname, data);
+}
+
+static void glGetFloatv(glFunctions *f, GLenum pname, GLfloat *data) {
+ f->glGetFloatv(pname, data);
+}
+
+static void glGetIntegeri_v(glFunctions *f, GLenum pname, GLuint idx, GLint *data) {
+ f->glGetIntegeri_v(pname, idx, data);
+}
+
+static void glGetProgramiv(glFunctions *f, GLuint program, GLenum pname, GLint *params) {
+ f->glGetProgramiv(program, pname, params);
+}
+
+static void glGetProgramInfoLog(glFunctions *f, GLuint program, GLsizei bufSize, GLsizei *length, GLchar *infoLog) {
+ f->glGetProgramInfoLog(program, bufSize, length, infoLog);
+}
+
+static void glGetRenderbufferParameteriv(glFunctions *f, GLenum target, GLenum pname, GLint *params) {
+ f->glGetRenderbufferParameteriv(target, pname, params);
+}
+
+static void glGetShaderiv(glFunctions *f, GLuint shader, GLenum pname, GLint *params) {
+ f->glGetShaderiv(shader, pname, params);
+}
+
+static void glGetShaderInfoLog(glFunctions *f, GLuint shader, GLsizei bufSize, GLsizei *length, GLchar *infoLog) {
+ f->glGetShaderInfoLog(shader, bufSize, length, infoLog);
+}
+
+static const GLubyte *glGetString(glFunctions *f, GLenum name) {
+ return f->glGetString(name);
+}
+
+static GLint glGetUniformLocation(glFunctions *f, GLuint program, const GLchar *name) {
+ return f->glGetUniformLocation(program, name);
+}
+
+static void glGetVertexAttribiv(glFunctions *f, GLuint index, GLenum pname, GLint *data) {
+ f->glGetVertexAttribiv(index, pname, data);
+}
+
+// Return uintptr_t to avoid Cgo pointer check.
+static uintptr_t glGetVertexAttribPointerv(glFunctions *f, GLuint index, GLenum pname) {
+ void *ptrs;
+ f->glGetVertexAttribPointerv(index, pname, &ptrs);
+ return (uintptr_t)ptrs;
+}
+
+static GLboolean glIsEnabled(glFunctions *f, GLenum cap) {
+ return f->glIsEnabled(cap);
+}
+
+static void glLinkProgram(glFunctions *f, GLuint program) {
+ f->glLinkProgram(program);
+}
+
+static void glPixelStorei(glFunctions *f, GLenum pname, GLint param) {
+ f->glPixelStorei(pname, param);
+}
+
+static void glReadPixels(glFunctions *f, GLint x, GLint y, GLsizei width, GLsizei height, GLenum format, GLenum type, void *pixels) {
+ f->glReadPixels(x, y, width, height, format, type, pixels);
+}
+
+static void glRenderbufferStorage(glFunctions *f, GLenum target, GLenum internalformat, GLsizei width, GLsizei height) {
+ f->glRenderbufferStorage(target, internalformat, width, height);
+}
+
+static void glScissor(glFunctions *f, GLint x, GLint y, GLsizei width, GLsizei height) {
+ f->glScissor(x, y, width, height);
+}
+
+static void glShaderSource(glFunctions *f, GLuint shader, GLsizei count, const GLchar *const*string, const GLint *length) {
+ f->glShaderSource(shader, count, string, length);
+}
+
+static void glTexImage2D(glFunctions *f, GLenum target, GLint level, GLint internalformat, GLsizei width, GLsizei height, GLint border, GLenum format, GLenum type, const void *pixels) {
+ f->glTexImage2D(target, level, internalformat, width, height, border, format, type, pixels);
+}
+
+static void glTexParameteri(glFunctions *f, GLenum target, GLenum pname, GLint param) {
+ f->glTexParameteri(target, pname, param);
+}
+
+static void glTexSubImage2D(glFunctions *f, GLenum target, GLint level, GLint xoffset, GLint yoffset, GLsizei width, GLsizei height, GLenum format, GLenum type, const void *pixels) {
+ f->glTexSubImage2D(target, level, xoffset, yoffset, width, height, format, type, pixels);
+}
+
+static void glUniform1f(glFunctions *f, GLint location, GLfloat v0) {
+ f->glUniform1f(location, v0);
+}
+
+static void glUniform1i(glFunctions *f, GLint location, GLint v0) {
+ f->glUniform1i(location, v0);
+}
+
+static void glUniform2f(glFunctions *f, GLint location, GLfloat v0, GLfloat v1) {
+ f->glUniform2f(location, v0, v1);
+}
+
+static void glUniform3f(glFunctions *f, GLint location, GLfloat v0, GLfloat v1, GLfloat v2) {
+ f->glUniform3f(location, v0, v1, v2);
+}
+
+static void glUniform4f(glFunctions *f, GLint location, GLfloat v0, GLfloat v1, GLfloat v2, GLfloat v3) {
+ f->glUniform4f(location, v0, v1, v2, v3);
+}
+
+static void glUseProgram(glFunctions *f, GLuint program) {
+ f->glUseProgram(program);
+}
+
+// offset is defined as an uintptr_t to omit Cgo pointer checks.
+static void glVertexAttribPointer(glFunctions *f, GLuint index, GLint size, GLenum type, GLboolean normalized, GLsizei stride, uintptr_t offset) {
+ f->glVertexAttribPointer(index, size, type, normalized, stride, (const void *)offset);
+}
+
+static void glViewport(glFunctions *f, GLint x, GLint y, GLsizei width, GLsizei height) {
+ f->glViewport(x, y, width, height);
+}
+
+static void glBindBufferBase(glFunctions *f, GLenum target, GLuint index, GLuint buffer) {
+ f->glBindBufferBase(target, index, buffer);
+}
+
+static void glUniformBlockBinding(glFunctions *f, GLuint program, GLuint uniformBlockIndex, GLuint uniformBlockBinding) {
+ f->glUniformBlockBinding(program, uniformBlockIndex, uniformBlockBinding);
+}
+
+static GLuint glGetUniformBlockIndex(glFunctions *f, GLuint program, const GLchar *uniformBlockName) {
+ return f->glGetUniformBlockIndex(program, uniformBlockName);
+}
+
+static void glInvalidateFramebuffer(glFunctions *f, GLenum target, GLenum attachment) {
+ // Framebuffer invalidation is just a hint and can safely be ignored.
+ if (f->glInvalidateFramebuffer != NULL) {
+ f->glInvalidateFramebuffer(target, 1, &attachment);
+ }
+}
+
+static void glBeginQuery(glFunctions *f, GLenum target, GLenum attachment) {
+ f->glBeginQuery(target, attachment);
+}
+
+static void glDeleteQueries(glFunctions *f, GLsizei n, const GLuint *ids) {
+ f->glDeleteQueries(n, ids);
+}
+
+static void glDeleteVertexArrays(glFunctions *f, GLsizei n, const GLuint *ids) {
+ f->glDeleteVertexArrays(n, ids);
+}
+
+static void glEndQuery(glFunctions *f, GLenum target) {
+ f->glEndQuery(target);
+}
+
+static const GLubyte* glGetStringi(glFunctions *f, GLenum name, GLuint index) {
+ return f->glGetStringi(name, index);
+}
+
+static void glGenQueries(glFunctions *f, GLsizei n, GLuint *ids) {
+ f->glGenQueries(n, ids);
+}
+
+static void glGenVertexArrays(glFunctions *f, GLsizei n, GLuint *ids) {
+ f->glGenVertexArrays(n, ids);
+}
+
+static void glGetProgramBinary(glFunctions *f, GLuint program, GLsizei bufsize, GLsizei *length, GLenum *binaryFormat, void *binary) {
+ f->glGetProgramBinary(program, bufsize, length, binaryFormat, binary);
+}
+
+static void glGetQueryObjectuiv(glFunctions *f, GLuint id, GLenum pname, GLuint *params) {
+ f->glGetQueryObjectuiv(id, pname, params);
+}
+
+static void glMemoryBarrier(glFunctions *f, GLbitfield barriers) {
+ f->glMemoryBarrier(barriers);
+}
+
+static void glDispatchCompute(glFunctions *f, GLuint x, GLuint y, GLuint z) {
+ f->glDispatchCompute(x, y, z);
+}
+
+static void *glMapBufferRange(glFunctions *f, GLenum target, GLintptr offset, GLsizeiptr length, GLbitfield access) {
+ return f->glMapBufferRange(target, offset, length, access);
+}
+
+static GLboolean glUnmapBuffer(glFunctions *f, GLenum target) {
+ return f->glUnmapBuffer(target);
+}
+
+static void glBindImageTexture(glFunctions *f, GLuint unit, GLuint texture, GLint level, GLboolean layered, GLint layer, GLenum access, GLenum format) {
+ f->glBindImageTexture(unit, texture, level, layered, layer, access, format);
+}
+
+static void glTexStorage2D(glFunctions *f, GLenum target, GLsizei levels, GLenum internalFormat, GLsizei width, GLsizei height) {
+ f->glTexStorage2D(target, levels, internalFormat, width, height);
+}
+
+static void glBlitFramebuffer(glFunctions *f, GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1, GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1, GLbitfield mask, GLenum filter) {
+ f->glBlitFramebuffer(srcX0, srcY0, srcX1, srcY1, dstX0, dstY0, dstX1, dstY1, mask, filter);
+}
+*/
+import "C"
+
+type Context interface{}
+
+type Functions struct {
+ // Query caches.
+ uints [100]C.GLuint
+ ints [100]C.GLint
+ floats [100]C.GLfloat
+
+ f C.glFunctions
+}
+
+// NewFunctions loads an OpenGL (ES) implementation via dlopen and returns a
+// Functions bound to it. forceES selects a GLES library on platforms (darwin)
+// that otherwise default to desktop OpenGL. ctx must be nil on this backend;
+// a non-nil context is a programmer error and panics.
+func NewFunctions(ctx Context, forceES bool) (*Functions, error) {
+	if ctx != nil {
+		panic("non-nil context")
+	}
+	f := new(Functions)
+	err := f.load(forceES)
+	if err != nil {
+		return nil, err
+	}
+	return f, nil
+}
+
+// dlsym resolves symbol s in handle, converting the Go string to a C string
+// for the call and freeing it afterwards. Returns nil if the symbol is not
+// found.
+func dlsym(handle unsafe.Pointer, s string) unsafe.Pointer {
+	cs := C.CString(s)
+	defer C.free(unsafe.Pointer(cs))
+	return C.dlsym(handle, cs)
+}
+
+// dlopen opens the shared library lib with immediate binding and local
+// visibility (RTLD_NOW|RTLD_LOCAL). Returns nil if the library cannot be
+// loaded.
+func dlopen(lib string) unsafe.Pointer {
+	clib := C.CString(lib)
+	defer C.free(unsafe.Pointer(clib))
+	return C.dlopen(clib, C.RTLD_NOW|C.RTLD_LOCAL)
+}
+
+func (f *Functions) load(forceES bool) error {
+ var (
+ loadErr error
+ libNames []string
+ handles []unsafe.Pointer
+ )
+ switch {
+ case runtime.GOOS == "darwin" && !forceES:
+ libNames = []string{"/System/Library/Frameworks/OpenGL.framework/OpenGL"}
+ case runtime.GOOS == "darwin" && forceES:
+ libNames = []string{"libGLESv2.dylib"}
+ case runtime.GOOS == "ios":
+ libNames = []string{"/System/Library/Frameworks/OpenGLES.framework/OpenGLES"}
+ case runtime.GOOS == "android":
+ libNames = []string{"libGLESv2.so", "libGLESv3.so"}
+ default:
+ libNames = []string{"libGLESv2.so.2"}
+ }
+ for _, lib := range libNames {
+ if h := dlopen(lib); h != nil {
+ handles = append(handles, h)
+ }
+ }
+ if len(handles) == 0 {
+ return fmt.Errorf("gl: no OpenGL implementation could be loaded (tried %q)", libNames)
+ }
+ load := func(s string) *[0]byte {
+ for _, h := range handles {
+ if f := dlsym(h, s); f != nil {
+ return (*[0]byte)(f)
+ }
+ }
+ return nil
+ }
+ must := func(s string) *[0]byte {
+ ptr := load(s)
+ if ptr == nil {
+ loadErr = fmt.Errorf("gl: failed to load symbol %q", s)
+ }
+ return ptr
+ }
+ // GL ES 2.0 functions.
+ f.f.glActiveTexture = must("glActiveTexture")
+ f.f.glAttachShader = must("glAttachShader")
+ f.f.glBindAttribLocation = must("glBindAttribLocation")
+ f.f.glBindBuffer = must("glBindBuffer")
+ f.f.glBindFramebuffer = must("glBindFramebuffer")
+ f.f.glBindRenderbuffer = must("glBindRenderbuffer")
+ f.f.glBindTexture = must("glBindTexture")
+ f.f.glBlendEquation = must("glBlendEquation")
+ f.f.glBlendFuncSeparate = must("glBlendFuncSeparate")
+ f.f.glBufferData = must("glBufferData")
+ f.f.glBufferSubData = must("glBufferSubData")
+ f.f.glCheckFramebufferStatus = must("glCheckFramebufferStatus")
+ f.f.glClear = must("glClear")
+ f.f.glClearColor = must("glClearColor")
+ f.f.glClearDepthf = must("glClearDepthf")
+ f.f.glCompileShader = must("glCompileShader")
+ f.f.glCopyTexSubImage2D = must("glCopyTexSubImage2D")
+ f.f.glCreateProgram = must("glCreateProgram")
+ f.f.glCreateShader = must("glCreateShader")
+ f.f.glDeleteBuffers = must("glDeleteBuffers")
+ f.f.glDeleteFramebuffers = must("glDeleteFramebuffers")
+ f.f.glDeleteProgram = must("glDeleteProgram")
+ f.f.glDeleteRenderbuffers = must("glDeleteRenderbuffers")
+ f.f.glDeleteShader = must("glDeleteShader")
+ f.f.glDeleteTextures = must("glDeleteTextures")
+ f.f.glDepthFunc = must("glDepthFunc")
+ f.f.glDepthMask = must("glDepthMask")
+ f.f.glDisable = must("glDisable")
+ f.f.glDisableVertexAttribArray = must("glDisableVertexAttribArray")
+ f.f.glDrawArrays = must("glDrawArrays")
+ f.f.glDrawElements = must("glDrawElements")
+ f.f.glEnable = must("glEnable")
+ f.f.glEnableVertexAttribArray = must("glEnableVertexAttribArray")
+ f.f.glFinish = must("glFinish")
+ f.f.glFlush = must("glFlush")
+ f.f.glFramebufferRenderbuffer = must("glFramebufferRenderbuffer")
+ f.f.glFramebufferTexture2D = must("glFramebufferTexture2D")
+ f.f.glGenBuffers = must("glGenBuffers")
+ f.f.glGenFramebuffers = must("glGenFramebuffers")
+ f.f.glGenRenderbuffers = must("glGenRenderbuffers")
+ f.f.glGenTextures = must("glGenTextures")
+ f.f.glGetError = must("glGetError")
+ f.f.glGetFramebufferAttachmentParameteriv = must("glGetFramebufferAttachmentParameteriv")
+ f.f.glGetIntegerv = must("glGetIntegerv")
+ f.f.glGetFloatv = must("glGetFloatv")
+ f.f.glGetProgramiv = must("glGetProgramiv")
+ f.f.glGetProgramInfoLog = must("glGetProgramInfoLog")
+ f.f.glGetRenderbufferParameteriv = must("glGetRenderbufferParameteriv")
+ f.f.glGetShaderiv = must("glGetShaderiv")
+ f.f.glGetShaderInfoLog = must("glGetShaderInfoLog")
+ f.f.glGetString = must("glGetString")
+ f.f.glGetUniformLocation = must("glGetUniformLocation")
+ f.f.glGetVertexAttribiv = must("glGetVertexAttribiv")
+ f.f.glGetVertexAttribPointerv = must("glGetVertexAttribPointerv")
+ f.f.glIsEnabled = must("glIsEnabled")
+ f.f.glLinkProgram = must("glLinkProgram")
+ f.f.glPixelStorei = must("glPixelStorei")
+ f.f.glReadPixels = must("glReadPixels")
+ f.f.glRenderbufferStorage = must("glRenderbufferStorage")
+ f.f.glScissor = must("glScissor")
+ f.f.glShaderSource = must("glShaderSource")
+ f.f.glTexImage2D = must("glTexImage2D")
+ f.f.glTexParameteri = must("glTexParameteri")
+ f.f.glTexSubImage2D = must("glTexSubImage2D")
+ f.f.glUniform1f = must("glUniform1f")
+ f.f.glUniform1i = must("glUniform1i")
+ f.f.glUniform2f = must("glUniform2f")
+ f.f.glUniform3f = must("glUniform3f")
+ f.f.glUniform4f = must("glUniform4f")
+ f.f.glUseProgram = must("glUseProgram")
+ f.f.glVertexAttribPointer = must("glVertexAttribPointer")
+ f.f.glViewport = must("glViewport")
+
+ // Extensions and GL ES 3 functions.
+ f.f.glBindBufferBase = load("glBindBufferBase")
+ f.f.glBindVertexArray = load("glBindVertexArray")
+ f.f.glGetIntegeri_v = load("glGetIntegeri_v")
+ f.f.glGetUniformBlockIndex = load("glGetUniformBlockIndex")
+ f.f.glUniformBlockBinding = load("glUniformBlockBinding")
+ f.f.glInvalidateFramebuffer = load("glInvalidateFramebuffer")
+ f.f.glGetStringi = load("glGetStringi")
+ // Fall back to EXT_invalidate_framebuffer if available.
+ if f.f.glInvalidateFramebuffer == nil {
+ f.f.glInvalidateFramebuffer = load("glDiscardFramebufferEXT")
+ }
+
+ f.f.glBeginQuery = load("glBeginQuery")
+ if f.f.glBeginQuery == nil {
+ f.f.glBeginQuery = load("glBeginQueryEXT")
+ }
+ f.f.glDeleteQueries = load("glDeleteQueries")
+ if f.f.glDeleteQueries == nil {
+ f.f.glDeleteQueries = load("glDeleteQueriesEXT")
+ }
+ f.f.glEndQuery = load("glEndQuery")
+ if f.f.glEndQuery == nil {
+ f.f.glEndQuery = load("glEndQueryEXT")
+ }
+ f.f.glGenQueries = load("glGenQueries")
+ if f.f.glGenQueries == nil {
+ f.f.glGenQueries = load("glGenQueriesEXT")
+ }
+ f.f.glGetQueryObjectuiv = load("glGetQueryObjectuiv")
+ if f.f.glGetQueryObjectuiv == nil {
+ f.f.glGetQueryObjectuiv = load("glGetQueryObjectuivEXT")
+ }
+
+ f.f.glDeleteVertexArrays = load("glDeleteVertexArrays")
+ f.f.glGenVertexArrays = load("glGenVertexArrays")
+ f.f.glMemoryBarrier = load("glMemoryBarrier")
+ f.f.glDispatchCompute = load("glDispatchCompute")
+ f.f.glMapBufferRange = load("glMapBufferRange")
+ f.f.glUnmapBuffer = load("glUnmapBuffer")
+ f.f.glBindImageTexture = load("glBindImageTexture")
+ f.f.glTexStorage2D = load("glTexStorage2D")
+ f.f.glBlitFramebuffer = load("glBlitFramebuffer")
+ f.f.glGetProgramBinary = load("glGetProgramBinary")
+
+ return loadErr
+}
+
+func (f *Functions) ActiveTexture(texture Enum) {
+ C.glActiveTexture(&f.f, C.GLenum(texture))
+}
+
+func (f *Functions) AttachShader(p Program, s Shader) {
+ C.glAttachShader(&f.f, C.GLuint(p.V), C.GLuint(s.V))
+}
+
+func (f *Functions) BeginQuery(target Enum, query Query) {
+ C.glBeginQuery(&f.f, C.GLenum(target), C.GLenum(query.V))
+}
+
+func (f *Functions) BindAttribLocation(p Program, a Attrib, name string) {
+ cname := C.CString(name)
+ defer C.free(unsafe.Pointer(cname))
+ C.glBindAttribLocation(&f.f, C.GLuint(p.V), C.GLuint(a), cname)
+}
+
+func (f *Functions) BindBufferBase(target Enum, index int, b Buffer) {
+ C.glBindBufferBase(&f.f, C.GLenum(target), C.GLuint(index), C.GLuint(b.V))
+}
+
+func (f *Functions) BindBuffer(target Enum, b Buffer) {
+ C.glBindBuffer(&f.f, C.GLenum(target), C.GLuint(b.V))
+}
+
+func (f *Functions) BindFramebuffer(target Enum, fb Framebuffer) {
+ C.glBindFramebuffer(&f.f, C.GLenum(target), C.GLuint(fb.V))
+}
+
+func (f *Functions) BindRenderbuffer(target Enum, fb Renderbuffer) {
+ C.glBindRenderbuffer(&f.f, C.GLenum(target), C.GLuint(fb.V))
+}
+
+// BindImageTexture binds a level of texture t to image unit `unit` for shader
+// image load/store access.
+func (f *Functions) BindImageTexture(unit int, t Texture, level int, layered bool, layer int, access, format Enum) {
+	// Explicit C.GLboolean conversion for consistency with DepthMask.
+	l := C.GLboolean(FALSE)
+	if layered {
+		l = C.GLboolean(TRUE)
+	}
+	C.glBindImageTexture(&f.f, C.GLuint(unit), C.GLuint(t.V), C.GLint(level), l, C.GLint(layer), C.GLenum(access), C.GLenum(format))
+}
+
+func (f *Functions) BindTexture(target Enum, t Texture) {
+ C.glBindTexture(&f.f, C.GLenum(target), C.GLuint(t.V))
+}
+
+func (f *Functions) BindVertexArray(a VertexArray) {
+ C.glBindVertexArray(&f.f, C.GLuint(a.V))
+}
+
+func (f *Functions) BlendEquation(mode Enum) {
+ C.glBlendEquation(&f.f, C.GLenum(mode))
+}
+
+func (f *Functions) BlendFuncSeparate(srcRGB, dstRGB, srcA, dstA Enum) {
+ C.glBlendFuncSeparate(&f.f, C.GLenum(srcRGB), C.GLenum(dstRGB), C.GLenum(srcA), C.GLenum(dstA))
+}
+
+// BlitFramebuffer copies the source rectangle of the read framebuffer to the
+// destination rectangle of the draw framebuffer, filtering with `filter`.
+func (f *Functions) BlitFramebuffer(sx0, sy0, sx1, sy1, dx0, dy0, dx1, dy1 int, mask Enum, filter Enum) {
+	C.glBlitFramebuffer(&f.f,
+		C.GLint(sx0), C.GLint(sy0), C.GLint(sx1), C.GLint(sy1),
+		C.GLint(dx0), C.GLint(dy0), C.GLint(dx1), C.GLint(dy1),
+		// The C prototype declares mask as GLbitfield; convert with the
+		// matching type rather than GLenum (they only happen to share an
+		// underlying unsigned int).
+		C.GLbitfield(mask), C.GLenum(filter),
+	)
+}
+
+func (f *Functions) BufferData(target Enum, size int, usage Enum, data []byte) {
+ var p unsafe.Pointer
+ if len(data) > 0 {
+ p = unsafe.Pointer(&data[0])
+ }
+ C.glBufferData(&f.f, C.GLenum(target), C.GLsizeiptr(size), p, C.GLenum(usage))
+}
+
+func (f *Functions) BufferSubData(target Enum, offset int, src []byte) {
+ var p unsafe.Pointer
+ if len(src) > 0 {
+ p = unsafe.Pointer(&src[0])
+ }
+ C.glBufferSubData(&f.f, C.GLenum(target), C.GLintptr(offset), C.GLsizeiptr(len(src)), p)
+}
+
+func (f *Functions) CheckFramebufferStatus(target Enum) Enum {
+ return Enum(C.glCheckFramebufferStatus(&f.f, C.GLenum(target)))
+}
+
+func (f *Functions) Clear(mask Enum) {
+ C.glClear(&f.f, C.GLbitfield(mask))
+}
+
+func (f *Functions) ClearColor(red float32, green float32, blue float32, alpha float32) {
+ C.glClearColor(&f.f, C.GLfloat(red), C.GLfloat(green), C.GLfloat(blue), C.GLfloat(alpha))
+}
+
+func (f *Functions) ClearDepthf(d float32) {
+ C.glClearDepthf(&f.f, C.GLfloat(d))
+}
+
+func (f *Functions) CompileShader(s Shader) {
+ C.glCompileShader(&f.f, C.GLuint(s.V))
+}
+
+func (f *Functions) CopyTexSubImage2D(target Enum, level, xoffset, yoffset, x, y, width, height int) {
+ C.glCopyTexSubImage2D(&f.f, C.GLenum(target), C.GLint(level), C.GLint(xoffset), C.GLint(yoffset), C.GLint(x), C.GLint(y), C.GLsizei(width), C.GLsizei(height))
+}
+
+func (f *Functions) CreateBuffer() Buffer {
+ C.glGenBuffers(&f.f, 1, &f.uints[0])
+ return Buffer{uint(f.uints[0])}
+}
+
+func (f *Functions) CreateFramebuffer() Framebuffer {
+ C.glGenFramebuffers(&f.f, 1, &f.uints[0])
+ return Framebuffer{uint(f.uints[0])}
+}
+
+func (f *Functions) CreateProgram() Program {
+ return Program{uint(C.glCreateProgram(&f.f))}
+}
+
+func (f *Functions) CreateQuery() Query {
+ C.glGenQueries(&f.f, 1, &f.uints[0])
+ return Query{uint(f.uints[0])}
+}
+
+func (f *Functions) CreateRenderbuffer() Renderbuffer {
+ C.glGenRenderbuffers(&f.f, 1, &f.uints[0])
+ return Renderbuffer{uint(f.uints[0])}
+}
+
+func (f *Functions) CreateShader(ty Enum) Shader {
+ return Shader{uint(C.glCreateShader(&f.f, C.GLenum(ty)))}
+}
+
+func (f *Functions) CreateTexture() Texture {
+ C.glGenTextures(&f.f, 1, &f.uints[0])
+ return Texture{uint(f.uints[0])}
+}
+
+func (f *Functions) CreateVertexArray() VertexArray {
+ C.glGenVertexArrays(&f.f, 1, &f.uints[0])
+ return VertexArray{uint(f.uints[0])}
+}
+
+func (f *Functions) DeleteBuffer(v Buffer) {
+ f.uints[0] = C.GLuint(v.V)
+ C.glDeleteBuffers(&f.f, 1, &f.uints[0])
+}
+
+func (f *Functions) DeleteFramebuffer(v Framebuffer) {
+ f.uints[0] = C.GLuint(v.V)
+ C.glDeleteFramebuffers(&f.f, 1, &f.uints[0])
+}
+
+func (f *Functions) DeleteProgram(p Program) {
+ C.glDeleteProgram(&f.f, C.GLuint(p.V))
+}
+
+func (f *Functions) DeleteQuery(query Query) {
+ f.uints[0] = C.GLuint(query.V)
+ C.glDeleteQueries(&f.f, 1, &f.uints[0])
+}
+
+func (f *Functions) DeleteVertexArray(array VertexArray) {
+ f.uints[0] = C.GLuint(array.V)
+ C.glDeleteVertexArrays(&f.f, 1, &f.uints[0])
+}
+
+func (f *Functions) DeleteRenderbuffer(v Renderbuffer) {
+ f.uints[0] = C.GLuint(v.V)
+ C.glDeleteRenderbuffers(&f.f, 1, &f.uints[0])
+}
+
+func (f *Functions) DeleteShader(s Shader) {
+ C.glDeleteShader(&f.f, C.GLuint(s.V))
+}
+
+func (f *Functions) DeleteTexture(v Texture) {
+ f.uints[0] = C.GLuint(v.V)
+ C.glDeleteTextures(&f.f, 1, &f.uints[0])
+}
+
+func (f *Functions) DepthFunc(v Enum) {
+ C.glDepthFunc(&f.f, C.GLenum(v))
+}
+
+func (f *Functions) DepthMask(mask bool) {
+ m := C.GLboolean(FALSE)
+ if mask {
+ m = C.GLboolean(TRUE)
+ }
+ C.glDepthMask(&f.f, m)
+}
+
+func (f *Functions) DisableVertexAttribArray(a Attrib) {
+ C.glDisableVertexAttribArray(&f.f, C.GLuint(a))
+}
+
+func (f *Functions) Disable(cap Enum) {
+ C.glDisable(&f.f, C.GLenum(cap))
+}
+
+func (f *Functions) DrawArrays(mode Enum, first int, count int) {
+ C.glDrawArrays(&f.f, C.GLenum(mode), C.GLint(first), C.GLsizei(count))
+}
+
+func (f *Functions) DrawElements(mode Enum, count int, ty Enum, offset int) {
+ C.glDrawElements(&f.f, C.GLenum(mode), C.GLsizei(count), C.GLenum(ty), C.uintptr_t(offset))
+}
+
+func (f *Functions) DispatchCompute(x, y, z int) {
+ C.glDispatchCompute(&f.f, C.GLuint(x), C.GLuint(y), C.GLuint(z))
+}
+
+func (f *Functions) Enable(cap Enum) {
+ C.glEnable(&f.f, C.GLenum(cap))
+}
+
+func (f *Functions) EndQuery(target Enum) {
+ C.glEndQuery(&f.f, C.GLenum(target))
+}
+
+func (f *Functions) EnableVertexAttribArray(a Attrib) {
+ C.glEnableVertexAttribArray(&f.f, C.GLuint(a))
+}
+
+func (f *Functions) Finish() {
+ C.glFinish(&f.f)
+}
+
+func (f *Functions) Flush() {
+ C.glFlush(&f.f)
+}
+
+func (f *Functions) FramebufferRenderbuffer(target, attachment, renderbuffertarget Enum, renderbuffer Renderbuffer) {
+ C.glFramebufferRenderbuffer(&f.f, C.GLenum(target), C.GLenum(attachment), C.GLenum(renderbuffertarget), C.GLuint(renderbuffer.V))
+}
+
+func (f *Functions) FramebufferTexture2D(target, attachment, texTarget Enum, t Texture, level int) {
+ C.glFramebufferTexture2D(&f.f, C.GLenum(target), C.GLenum(attachment), C.GLenum(texTarget), C.GLuint(t.V), C.GLint(level))
+}
+
+func (c *Functions) GetBinding(pname Enum) Object {
+ return Object{uint(c.GetInteger(pname))}
+}
+
+func (c *Functions) GetBindingi(pname Enum, idx int) Object {
+ return Object{uint(c.GetIntegeri(pname, idx))}
+}
+
+func (f *Functions) GetError() Enum {
+ return Enum(C.glGetError(&f.f))
+}
+
+func (f *Functions) GetRenderbufferParameteri(target, pname Enum) int {
+ C.glGetRenderbufferParameteriv(&f.f, C.GLenum(target), C.GLenum(pname), &f.ints[0])
+ return int(f.ints[0])
+}
+
+func (f *Functions) GetFramebufferAttachmentParameteri(target, attachment, pname Enum) int {
+ C.glGetFramebufferAttachmentParameteriv(&f.f, C.GLenum(target), C.GLenum(attachment), C.GLenum(pname), &f.ints[0])
+ return int(f.ints[0])
+}
+
+func (f *Functions) GetFloat4(pname Enum) [4]float32 {
+ C.glGetFloatv(&f.f, C.GLenum(pname), &f.floats[0])
+ var r [4]float32
+ for i := range r {
+ r[i] = float32(f.floats[i])
+ }
+ return r
+}
+
+func (f *Functions) GetFloat(pname Enum) float32 {
+ C.glGetFloatv(&f.f, C.GLenum(pname), &f.floats[0])
+ return float32(f.floats[0])
+}
+
+func (f *Functions) GetInteger4(pname Enum) [4]int {
+ C.glGetIntegerv(&f.f, C.GLenum(pname), &f.ints[0])
+ var r [4]int
+ for i := range r {
+ r[i] = int(f.ints[i])
+ }
+ return r
+}
+
+func (f *Functions) GetInteger(pname Enum) int {
+ C.glGetIntegerv(&f.f, C.GLenum(pname), &f.ints[0])
+ return int(f.ints[0])
+}
+
+func (f *Functions) GetIntegeri(pname Enum, idx int) int {
+ C.glGetIntegeri_v(&f.f, C.GLenum(pname), C.GLuint(idx), &f.ints[0])
+ return int(f.ints[0])
+}
+
+func (f *Functions) GetProgrami(p Program, pname Enum) int {
+ C.glGetProgramiv(&f.f, C.GLuint(p.V), C.GLenum(pname), &f.ints[0])
+ return int(f.ints[0])
+}
+
+func (f *Functions) GetProgramBinary(p Program) []byte {
+ sz := f.GetProgrami(p, PROGRAM_BINARY_LENGTH)
+ if sz == 0 {
+ return nil
+ }
+ buf := make([]byte, sz)
+ var format C.GLenum
+ C.glGetProgramBinary(&f.f, C.GLuint(p.V), C.GLsizei(sz), nil, &format, unsafe.Pointer(&buf[0]))
+ return buf
+}
+
+func (f *Functions) GetProgramInfoLog(p Program) string {
+ n := f.GetProgrami(p, INFO_LOG_LENGTH)
+ buf := make([]byte, n)
+ C.glGetProgramInfoLog(&f.f, C.GLuint(p.V), C.GLsizei(len(buf)), nil, (*C.GLchar)(unsafe.Pointer(&buf[0])))
+ return string(buf)
+}
+
+func (f *Functions) GetQueryObjectuiv(query Query, pname Enum) uint {
+ C.glGetQueryObjectuiv(&f.f, C.GLuint(query.V), C.GLenum(pname), &f.uints[0])
+ return uint(f.uints[0])
+}
+
+func (f *Functions) GetShaderi(s Shader, pname Enum) int {
+ C.glGetShaderiv(&f.f, C.GLuint(s.V), C.GLenum(pname), &f.ints[0])
+ return int(f.ints[0])
+}
+
+func (f *Functions) GetShaderInfoLog(s Shader) string {
+ n := f.GetShaderi(s, INFO_LOG_LENGTH)
+ buf := make([]byte, n)
+ C.glGetShaderInfoLog(&f.f, C.GLuint(s.V), C.GLsizei(len(buf)), nil, (*C.GLchar)(unsafe.Pointer(&buf[0])))
+ return string(buf)
+}
+
+func (f *Functions) getStringi(pname Enum, index int) string {
+ str := C.glGetStringi(&f.f, C.GLenum(pname), C.GLuint(index))
+ if str == nil {
+ return ""
+ }
+ return C.GoString((*C.char)(unsafe.Pointer(str)))
+}
+
+func (f *Functions) GetString(pname Enum) string {
+ switch {
+ case runtime.GOOS == "darwin" && pname == EXTENSIONS:
+ // macOS OpenGL 3 core profile doesn't support glGetString(GL_EXTENSIONS).
+ // Use glGetStringi(GL_EXTENSIONS, ).
+ var exts []string
+ nexts := f.GetInteger(NUM_EXTENSIONS)
+ for i := 0; i < nexts; i++ {
+ ext := f.getStringi(EXTENSIONS, i)
+ exts = append(exts, ext)
+ }
+ return strings.Join(exts, " ")
+ default:
+ str := C.glGetString(&f.f, C.GLenum(pname))
+ return C.GoString((*C.char)(unsafe.Pointer(str)))
+ }
+}
+
+func (f *Functions) GetUniformBlockIndex(p Program, name string) uint {
+ cname := C.CString(name)
+ defer C.free(unsafe.Pointer(cname))
+ return uint(C.glGetUniformBlockIndex(&f.f, C.GLuint(p.V), cname))
+}
+
+func (f *Functions) GetUniformLocation(p Program, name string) Uniform {
+ cname := C.CString(name)
+ defer C.free(unsafe.Pointer(cname))
+ return Uniform{int(C.glGetUniformLocation(&f.f, C.GLuint(p.V), cname))}
+}
+
+func (f *Functions) GetVertexAttrib(index int, pname Enum) int {
+ C.glGetVertexAttribiv(&f.f, C.GLuint(index), C.GLenum(pname), &f.ints[0])
+ return int(f.ints[0])
+}
+
+func (f *Functions) GetVertexAttribBinding(index int, pname Enum) Object {
+ return Object{uint(f.GetVertexAttrib(index, pname))}
+}
+
+func (f *Functions) GetVertexAttribPointer(index int, pname Enum) uintptr {
+ ptr := C.glGetVertexAttribPointerv(&f.f, C.GLuint(index), C.GLenum(pname))
+ return uintptr(ptr)
+}
+
+func (f *Functions) InvalidateFramebuffer(target, attachment Enum) {
+ C.glInvalidateFramebuffer(&f.f, C.GLenum(target), C.GLenum(attachment))
+}
+
+func (f *Functions) IsEnabled(cap Enum) bool {
+ return C.glIsEnabled(&f.f, C.GLenum(cap)) == TRUE
+}
+
+func (f *Functions) LinkProgram(p Program) {
+ C.glLinkProgram(&f.f, C.GLuint(p.V))
+}
+
+func (f *Functions) PixelStorei(pname Enum, param int) {
+ C.glPixelStorei(&f.f, C.GLenum(pname), C.GLint(param))
+}
+
+func (f *Functions) MemoryBarrier(barriers Enum) {
+ C.glMemoryBarrier(&f.f, C.GLbitfield(barriers))
+}
+
+func (f *Functions) MapBufferRange(target Enum, offset, length int, access Enum) []byte {
+ p := C.glMapBufferRange(&f.f, C.GLenum(target), C.GLintptr(offset), C.GLsizeiptr(length), C.GLbitfield(access))
+ if p == nil {
+ return nil
+ }
+ return (*[1 << 30]byte)(p)[:length:length]
+}
+
+func (f *Functions) Scissor(x, y, width, height int32) {
+ C.glScissor(&f.f, C.GLint(x), C.GLint(y), C.GLsizei(width), C.GLsizei(height))
+}
+
+func (f *Functions) ReadPixels(x, y, width, height int, format, ty Enum, data []byte) {
+ var p unsafe.Pointer
+ if len(data) > 0 {
+ p = unsafe.Pointer(&data[0])
+ }
+ C.glReadPixels(&f.f, C.GLint(x), C.GLint(y), C.GLsizei(width), C.GLsizei(height), C.GLenum(format), C.GLenum(ty), p)
+}
+
+func (f *Functions) RenderbufferStorage(target, internalformat Enum, width, height int) {
+ C.glRenderbufferStorage(&f.f, C.GLenum(target), C.GLenum(internalformat), C.GLsizei(width), C.GLsizei(height))
+}
+
+func (f *Functions) ShaderSource(s Shader, src string) {
+ csrc := C.CString(src)
+ defer C.free(unsafe.Pointer(csrc))
+ strlen := C.GLint(len(src))
+ C.glShaderSource(&f.f, C.GLuint(s.V), 1, &csrc, &strlen)
+}
+
+func (f *Functions) TexImage2D(target Enum, level int, internalFormat Enum, width int, height int, format Enum, ty Enum) {
+ C.glTexImage2D(&f.f, C.GLenum(target), C.GLint(level), C.GLint(internalFormat), C.GLsizei(width), C.GLsizei(height), 0, C.GLenum(format), C.GLenum(ty), nil)
+}
+
+func (f *Functions) TexStorage2D(target Enum, levels int, internalFormat Enum, width, height int) {
+ C.glTexStorage2D(&f.f, C.GLenum(target), C.GLsizei(levels), C.GLenum(internalFormat), C.GLsizei(width), C.GLsizei(height))
+}
+
+func (f *Functions) TexSubImage2D(target Enum, level int, x int, y int, width int, height int, format Enum, ty Enum, data []byte) {
+ var p unsafe.Pointer
+ if len(data) > 0 {
+ p = unsafe.Pointer(&data[0])
+ }
+ C.glTexSubImage2D(&f.f, C.GLenum(target), C.GLint(level), C.GLint(x), C.GLint(y), C.GLsizei(width), C.GLsizei(height), C.GLenum(format), C.GLenum(ty), p)
+}
+
+func (f *Functions) TexParameteri(target, pname Enum, param int) {
+ C.glTexParameteri(&f.f, C.GLenum(target), C.GLenum(pname), C.GLint(param))
+}
+
+func (f *Functions) UniformBlockBinding(p Program, uniformBlockIndex uint, uniformBlockBinding uint) {
+ C.glUniformBlockBinding(&f.f, C.GLuint(p.V), C.GLuint(uniformBlockIndex), C.GLuint(uniformBlockBinding))
+}
+
+func (f *Functions) Uniform1f(dst Uniform, v float32) {
+ C.glUniform1f(&f.f, C.GLint(dst.V), C.GLfloat(v))
+}
+
+func (f *Functions) Uniform1i(dst Uniform, v int) {
+ C.glUniform1i(&f.f, C.GLint(dst.V), C.GLint(v))
+}
+
+func (f *Functions) Uniform2f(dst Uniform, v0 float32, v1 float32) {
+ C.glUniform2f(&f.f, C.GLint(dst.V), C.GLfloat(v0), C.GLfloat(v1))
+}
+
+func (f *Functions) Uniform3f(dst Uniform, v0 float32, v1 float32, v2 float32) {
+ C.glUniform3f(&f.f, C.GLint(dst.V), C.GLfloat(v0), C.GLfloat(v1), C.GLfloat(v2))
+}
+
+func (f *Functions) Uniform4f(dst Uniform, v0 float32, v1 float32, v2 float32, v3 float32) {
+ C.glUniform4f(&f.f, C.GLint(dst.V), C.GLfloat(v0), C.GLfloat(v1), C.GLfloat(v2), C.GLfloat(v3))
+}
+
+func (f *Functions) UseProgram(p Program) {
+ C.glUseProgram(&f.f, C.GLuint(p.V))
+}
+
+func (f *Functions) UnmapBuffer(target Enum) bool {
+ r := C.glUnmapBuffer(&f.f, C.GLenum(target))
+ return r == TRUE
+}
+
+func (f *Functions) VertexAttribPointer(dst Attrib, size int, ty Enum, normalized bool, stride int, offset int) {
+ var n C.GLboolean = FALSE
+ if normalized {
+ n = TRUE
+ }
+ C.glVertexAttribPointer(&f.f, C.GLuint(dst), C.GLint(size), C.GLenum(ty), n, C.GLsizei(stride), C.uintptr_t(offset))
+}
+
+func (f *Functions) Viewport(x int, y int, width int, height int) {
+ C.glViewport(&f.f, C.GLint(x), C.GLint(y), C.GLsizei(width), C.GLsizei(height))
+}
diff --git a/vendor/gioui.org/internal/gl/gl_windows.go b/vendor/gioui.org/internal/gl/gl_windows.go
new file mode 100644
index 0000000..99b1694
--- /dev/null
+++ b/vendor/gioui.org/internal/gl/gl_windows.go
@@ -0,0 +1,502 @@
+// SPDX-License-Identifier: Unlicense OR MIT
+
+package gl
+
+import (
+ "math"
+ "runtime"
+ "syscall"
+ "unsafe"
+
+ "golang.org/x/sys/windows"
+)
+
+var (
+ LibGLESv2 = windows.NewLazyDLL("libGLESv2.dll")
+ _glActiveTexture = LibGLESv2.NewProc("glActiveTexture")
+ _glAttachShader = LibGLESv2.NewProc("glAttachShader")
+ _glBeginQuery = LibGLESv2.NewProc("glBeginQuery")
+ _glBindAttribLocation = LibGLESv2.NewProc("glBindAttribLocation")
+ _glBindBuffer = LibGLESv2.NewProc("glBindBuffer")
+ _glBindBufferBase = LibGLESv2.NewProc("glBindBufferBase")
+ _glBindFramebuffer = LibGLESv2.NewProc("glBindFramebuffer")
+ _glBindRenderbuffer = LibGLESv2.NewProc("glBindRenderbuffer")
+ _glBindTexture = LibGLESv2.NewProc("glBindTexture")
+ _glBindVertexArray = LibGLESv2.NewProc("glBindVertexArray")
+ _glBlendEquation = LibGLESv2.NewProc("glBlendEquation")
+ _glBlendFuncSeparate = LibGLESv2.NewProc("glBlendFuncSeparate")
+ _glBufferData = LibGLESv2.NewProc("glBufferData")
+ _glBufferSubData = LibGLESv2.NewProc("glBufferSubData")
+ _glCheckFramebufferStatus = LibGLESv2.NewProc("glCheckFramebufferStatus")
+ _glClear = LibGLESv2.NewProc("glClear")
+ _glClearColor = LibGLESv2.NewProc("glClearColor")
+ _glClearDepthf = LibGLESv2.NewProc("glClearDepthf")
+ _glDeleteQueries = LibGLESv2.NewProc("glDeleteQueries")
+ _glDeleteVertexArrays = LibGLESv2.NewProc("glDeleteVertexArrays")
+ _glCompileShader = LibGLESv2.NewProc("glCompileShader")
+ _glCopyTexSubImage2D = LibGLESv2.NewProc("glCopyTexSubImage2D")
+ _glGenBuffers = LibGLESv2.NewProc("glGenBuffers")
+ _glGenFramebuffers = LibGLESv2.NewProc("glGenFramebuffers")
+ _glGenVertexArrays = LibGLESv2.NewProc("glGenVertexArrays")
+ _glGetUniformBlockIndex = LibGLESv2.NewProc("glGetUniformBlockIndex")
+ _glCreateProgram = LibGLESv2.NewProc("glCreateProgram")
+ _glGenRenderbuffers = LibGLESv2.NewProc("glGenRenderbuffers")
+ _glCreateShader = LibGLESv2.NewProc("glCreateShader")
+ _glGenTextures = LibGLESv2.NewProc("glGenTextures")
+ _glDeleteBuffers = LibGLESv2.NewProc("glDeleteBuffers")
+ _glDeleteFramebuffers = LibGLESv2.NewProc("glDeleteFramebuffers")
+ _glDeleteProgram = LibGLESv2.NewProc("glDeleteProgram")
+ _glDeleteShader = LibGLESv2.NewProc("glDeleteShader")
+ _glDeleteRenderbuffers = LibGLESv2.NewProc("glDeleteRenderbuffers")
+ _glDeleteTextures = LibGLESv2.NewProc("glDeleteTextures")
+ _glDepthFunc = LibGLESv2.NewProc("glDepthFunc")
+ _glDepthMask = LibGLESv2.NewProc("glDepthMask")
+ _glDisableVertexAttribArray = LibGLESv2.NewProc("glDisableVertexAttribArray")
+ _glDisable = LibGLESv2.NewProc("glDisable")
+ _glDrawArrays = LibGLESv2.NewProc("glDrawArrays")
+ _glDrawElements = LibGLESv2.NewProc("glDrawElements")
+ _glEnable = LibGLESv2.NewProc("glEnable")
+ _glEnableVertexAttribArray = LibGLESv2.NewProc("glEnableVertexAttribArray")
+ _glEndQuery = LibGLESv2.NewProc("glEndQuery")
+ _glFinish = LibGLESv2.NewProc("glFinish")
+ _glFlush = LibGLESv2.NewProc("glFlush")
+ _glFramebufferRenderbuffer = LibGLESv2.NewProc("glFramebufferRenderbuffer")
+ _glFramebufferTexture2D = LibGLESv2.NewProc("glFramebufferTexture2D")
+ _glGenQueries = LibGLESv2.NewProc("glGenQueries")
+ _glGetError = LibGLESv2.NewProc("glGetError")
+ _glGetRenderbufferParameteriv = LibGLESv2.NewProc("glGetRenderbufferParameteriv")
+ _glGetFloatv = LibGLESv2.NewProc("glGetFloatv")
+ _glGetFramebufferAttachmentParameteriv = LibGLESv2.NewProc("glGetFramebufferAttachmentParameteriv")
+ _glGetIntegerv = LibGLESv2.NewProc("glGetIntegerv")
+ _glGetIntegeri_v = LibGLESv2.NewProc("glGetIntegeri_v")
+ _glGetProgramiv = LibGLESv2.NewProc("glGetProgramiv")
+ _glGetProgramInfoLog = LibGLESv2.NewProc("glGetProgramInfoLog")
+ _glGetQueryObjectuiv = LibGLESv2.NewProc("glGetQueryObjectuiv")
+ _glGetShaderiv = LibGLESv2.NewProc("glGetShaderiv")
+ _glGetShaderInfoLog = LibGLESv2.NewProc("glGetShaderInfoLog")
+ _glGetString = LibGLESv2.NewProc("glGetString")
+ _glGetUniformLocation = LibGLESv2.NewProc("glGetUniformLocation")
+ _glGetVertexAttribiv = LibGLESv2.NewProc("glGetVertexAttribiv")
+ _glGetVertexAttribPointerv = LibGLESv2.NewProc("glGetVertexAttribPointerv")
+ _glInvalidateFramebuffer = LibGLESv2.NewProc("glInvalidateFramebuffer")
+ _glIsEnabled = LibGLESv2.NewProc("glIsEnabled")
+ _glLinkProgram = LibGLESv2.NewProc("glLinkProgram")
+ _glPixelStorei = LibGLESv2.NewProc("glPixelStorei")
+ _glReadPixels = LibGLESv2.NewProc("glReadPixels")
+ _glRenderbufferStorage = LibGLESv2.NewProc("glRenderbufferStorage")
+ _glScissor = LibGLESv2.NewProc("glScissor")
+ _glShaderSource = LibGLESv2.NewProc("glShaderSource")
+ _glTexImage2D = LibGLESv2.NewProc("glTexImage2D")
+ _glTexStorage2D = LibGLESv2.NewProc("glTexStorage2D")
+ _glTexSubImage2D = LibGLESv2.NewProc("glTexSubImage2D")
+ _glTexParameteri = LibGLESv2.NewProc("glTexParameteri")
+ _glUniformBlockBinding = LibGLESv2.NewProc("glUniformBlockBinding")
+ _glUniform1f = LibGLESv2.NewProc("glUniform1f")
+ _glUniform1i = LibGLESv2.NewProc("glUniform1i")
+ _glUniform2f = LibGLESv2.NewProc("glUniform2f")
+ _glUniform3f = LibGLESv2.NewProc("glUniform3f")
+ _glUniform4f = LibGLESv2.NewProc("glUniform4f")
+ _glUseProgram = LibGLESv2.NewProc("glUseProgram")
+ _glVertexAttribPointer = LibGLESv2.NewProc("glVertexAttribPointer")
+ _glViewport = LibGLESv2.NewProc("glViewport")
+)
+
+type Functions struct {
+ // Query caches.
+ int32s [100]int32
+ float32s [100]float32
+ uintptrs [100]uintptr
+}
+
+type Context interface{}
+
+func NewFunctions(ctx Context, forceES bool) (*Functions, error) {
+ if ctx != nil {
+ panic("non-nil context")
+ }
+ return new(Functions), nil
+}
+
+func (c *Functions) ActiveTexture(t Enum) {
+ syscall.Syscall(_glActiveTexture.Addr(), 1, uintptr(t), 0, 0)
+}
+func (c *Functions) AttachShader(p Program, s Shader) {
+ syscall.Syscall(_glAttachShader.Addr(), 2, uintptr(p.V), uintptr(s.V), 0)
+}
+func (f *Functions) BeginQuery(target Enum, query Query) {
+ syscall.Syscall(_glBeginQuery.Addr(), 2, uintptr(target), uintptr(query.V), 0)
+}
+func (c *Functions) BindAttribLocation(p Program, a Attrib, name string) {
+ cname := cString(name)
+ c0 := &cname[0]
+ syscall.Syscall(_glBindAttribLocation.Addr(), 3, uintptr(p.V), uintptr(a), uintptr(unsafe.Pointer(c0)))
+ issue34474KeepAlive(c)
+}
+func (c *Functions) BindBuffer(target Enum, b Buffer) {
+ syscall.Syscall(_glBindBuffer.Addr(), 2, uintptr(target), uintptr(b.V), 0)
+}
+func (c *Functions) BindBufferBase(target Enum, index int, b Buffer) {
+ syscall.Syscall(_glBindBufferBase.Addr(), 3, uintptr(target), uintptr(index), uintptr(b.V))
+}
+func (c *Functions) BindFramebuffer(target Enum, fb Framebuffer) {
+ syscall.Syscall(_glBindFramebuffer.Addr(), 2, uintptr(target), uintptr(fb.V), 0)
+}
+func (c *Functions) BindRenderbuffer(target Enum, rb Renderbuffer) {
+ syscall.Syscall(_glBindRenderbuffer.Addr(), 2, uintptr(target), uintptr(rb.V), 0)
+}
+func (f *Functions) BindImageTexture(unit int, t Texture, level int, layered bool, layer int, access, format Enum) {
+ panic("not implemented")
+}
+func (c *Functions) BindTexture(target Enum, t Texture) {
+ syscall.Syscall(_glBindTexture.Addr(), 2, uintptr(target), uintptr(t.V), 0)
+}
+func (c *Functions) BindVertexArray(a VertexArray) {
+ syscall.Syscall(_glBindVertexArray.Addr(), 1, uintptr(a.V), 0, 0)
+}
+func (c *Functions) BlendEquation(mode Enum) {
+ syscall.Syscall(_glBlendEquation.Addr(), 1, uintptr(mode), 0, 0)
+}
+func (c *Functions) BlendFuncSeparate(srcRGB, dstRGB, srcA, dstA Enum) {
+ syscall.Syscall6(_glBlendFuncSeparate.Addr(), 4, uintptr(srcRGB), uintptr(dstRGB), uintptr(srcA), uintptr(dstA), 0, 0)
+}
+func (c *Functions) BufferData(target Enum, size int, usage Enum, data []byte) {
+ var p unsafe.Pointer
+ if len(data) > 0 {
+ p = unsafe.Pointer(&data[0])
+ }
+ syscall.Syscall6(_glBufferData.Addr(), 4, uintptr(target), uintptr(size), uintptr(p), uintptr(usage), 0, 0)
+}
+func (f *Functions) BufferSubData(target Enum, offset int, src []byte) {
+ if n := len(src); n > 0 {
+ s0 := &src[0]
+ syscall.Syscall6(_glBufferSubData.Addr(), 4, uintptr(target), uintptr(offset), uintptr(n), uintptr(unsafe.Pointer(s0)), 0, 0)
+ issue34474KeepAlive(s0)
+ }
+}
+func (c *Functions) CheckFramebufferStatus(target Enum) Enum {
+ s, _, _ := syscall.Syscall(_glCheckFramebufferStatus.Addr(), 1, uintptr(target), 0, 0)
+ return Enum(s)
+}
+func (c *Functions) Clear(mask Enum) {
+ syscall.Syscall(_glClear.Addr(), 1, uintptr(mask), 0, 0)
+}
+func (c *Functions) ClearColor(red, green, blue, alpha float32) {
+ syscall.Syscall6(_glClearColor.Addr(), 4, uintptr(math.Float32bits(red)), uintptr(math.Float32bits(green)), uintptr(math.Float32bits(blue)), uintptr(math.Float32bits(alpha)), 0, 0)
+}
+func (c *Functions) ClearDepthf(d float32) {
+ syscall.Syscall(_glClearDepthf.Addr(), 1, uintptr(math.Float32bits(d)), 0, 0)
+}
+func (c *Functions) CompileShader(s Shader) {
+ syscall.Syscall(_glCompileShader.Addr(), 1, uintptr(s.V), 0, 0)
+}
+func (f *Functions) CopyTexSubImage2D(target Enum, level, xoffset, yoffset, x, y, width, height int) {
+ syscall.Syscall9(_glCopyTexSubImage2D.Addr(), 8, uintptr(target), uintptr(level), uintptr(xoffset), uintptr(yoffset), uintptr(x), uintptr(y), uintptr(width), uintptr(height), 0)
+}
+func (c *Functions) CreateBuffer() Buffer {
+ var buf uintptr
+ syscall.Syscall(_glGenBuffers.Addr(), 2, 1, uintptr(unsafe.Pointer(&buf)), 0)
+ return Buffer{uint(buf)}
+}
+func (c *Functions) CreateFramebuffer() Framebuffer {
+ var fb uintptr
+ syscall.Syscall(_glGenFramebuffers.Addr(), 2, 1, uintptr(unsafe.Pointer(&fb)), 0)
+ return Framebuffer{uint(fb)}
+}
+func (c *Functions) CreateProgram() Program {
+ p, _, _ := syscall.Syscall(_glCreateProgram.Addr(), 0, 0, 0, 0)
+ return Program{uint(p)}
+}
+func (f *Functions) CreateQuery() Query {
+ var q uintptr
+ syscall.Syscall(_glGenQueries.Addr(), 2, 1, uintptr(unsafe.Pointer(&q)), 0)
+ return Query{uint(q)}
+}
+func (c *Functions) CreateRenderbuffer() Renderbuffer {
+ var rb uintptr
+ syscall.Syscall(_glGenRenderbuffers.Addr(), 2, 1, uintptr(unsafe.Pointer(&rb)), 0)
+ return Renderbuffer{uint(rb)}
+}
+func (c *Functions) CreateShader(ty Enum) Shader {
+ s, _, _ := syscall.Syscall(_glCreateShader.Addr(), 1, uintptr(ty), 0, 0)
+ return Shader{uint(s)}
+}
+func (c *Functions) CreateTexture() Texture {
+ var t uintptr
+ syscall.Syscall(_glGenTextures.Addr(), 2, 1, uintptr(unsafe.Pointer(&t)), 0)
+ return Texture{uint(t)}
+}
+func (c *Functions) CreateVertexArray() VertexArray {
+ var t uintptr
+ syscall.Syscall(_glGenVertexArrays.Addr(), 2, 1, uintptr(unsafe.Pointer(&t)), 0)
+ return VertexArray{uint(t)}
+}
+func (c *Functions) DeleteBuffer(v Buffer) {
+ syscall.Syscall(_glDeleteBuffers.Addr(), 2, 1, uintptr(unsafe.Pointer(&v)), 0)
+}
+func (c *Functions) DeleteFramebuffer(v Framebuffer) {
+ syscall.Syscall(_glDeleteFramebuffers.Addr(), 2, 1, uintptr(unsafe.Pointer(&v.V)), 0)
+}
+func (c *Functions) DeleteProgram(p Program) {
+ syscall.Syscall(_glDeleteProgram.Addr(), 1, uintptr(p.V), 0, 0)
+}
+func (f *Functions) DeleteQuery(query Query) {
+ syscall.Syscall(_glDeleteQueries.Addr(), 2, 1, uintptr(unsafe.Pointer(&query.V)), 0)
+}
+func (c *Functions) DeleteShader(s Shader) {
+ syscall.Syscall(_glDeleteShader.Addr(), 1, uintptr(s.V), 0, 0)
+}
+func (c *Functions) DeleteRenderbuffer(v Renderbuffer) {
+ syscall.Syscall(_glDeleteRenderbuffers.Addr(), 2, 1, uintptr(unsafe.Pointer(&v.V)), 0)
+}
+func (c *Functions) DeleteTexture(v Texture) {
+ syscall.Syscall(_glDeleteTextures.Addr(), 2, 1, uintptr(unsafe.Pointer(&v.V)), 0)
+}
+func (f *Functions) DeleteVertexArray(array VertexArray) {
+ syscall.Syscall(_glDeleteVertexArrays.Addr(), 2, 1, uintptr(unsafe.Pointer(&array.V)), 0)
+}
+func (c *Functions) DepthFunc(f Enum) {
+ syscall.Syscall(_glDepthFunc.Addr(), 1, uintptr(f), 0, 0)
+}
+func (c *Functions) DepthMask(mask bool) {
+ var m uintptr
+ if mask {
+ m = 1
+ }
+ syscall.Syscall(_glDepthMask.Addr(), 1, m, 0, 0)
+}
+func (c *Functions) DisableVertexAttribArray(a Attrib) {
+ syscall.Syscall(_glDisableVertexAttribArray.Addr(), 1, uintptr(a), 0, 0)
+}
+func (c *Functions) Disable(cap Enum) {
+ syscall.Syscall(_glDisable.Addr(), 1, uintptr(cap), 0, 0)
+}
+func (c *Functions) DrawArrays(mode Enum, first, count int) {
+ syscall.Syscall(_glDrawArrays.Addr(), 3, uintptr(mode), uintptr(first), uintptr(count))
+}
+func (c *Functions) DrawElements(mode Enum, count int, ty Enum, offset int) {
+ syscall.Syscall6(_glDrawElements.Addr(), 4, uintptr(mode), uintptr(count), uintptr(ty), uintptr(offset), 0, 0)
+}
+func (f *Functions) DispatchCompute(x, y, z int) {
+ panic("not implemented")
+}
+func (c *Functions) Enable(cap Enum) {
+ syscall.Syscall(_glEnable.Addr(), 1, uintptr(cap), 0, 0)
+}
+func (c *Functions) EnableVertexAttribArray(a Attrib) {
+ syscall.Syscall(_glEnableVertexAttribArray.Addr(), 1, uintptr(a), 0, 0)
+}
+func (f *Functions) EndQuery(target Enum) {
+ syscall.Syscall(_glEndQuery.Addr(), 1, uintptr(target), 0, 0)
+}
+func (c *Functions) Finish() {
+ syscall.Syscall(_glFinish.Addr(), 0, 0, 0, 0)
+}
+func (c *Functions) Flush() {
+ syscall.Syscall(_glFlush.Addr(), 0, 0, 0, 0)
+}
+func (c *Functions) FramebufferRenderbuffer(target, attachment, renderbuffertarget Enum, renderbuffer Renderbuffer) {
+ syscall.Syscall6(_glFramebufferRenderbuffer.Addr(), 4, uintptr(target), uintptr(attachment), uintptr(renderbuffertarget), uintptr(renderbuffer.V), 0, 0)
+}
+func (c *Functions) FramebufferTexture2D(target, attachment, texTarget Enum, t Texture, level int) {
+ syscall.Syscall6(_glFramebufferTexture2D.Addr(), 5, uintptr(target), uintptr(attachment), uintptr(texTarget), uintptr(t.V), uintptr(level), 0)
+}
+func (f *Functions) GetUniformBlockIndex(p Program, name string) uint {
+ cname := cString(name)
+ c0 := &cname[0]
+ u, _, _ := syscall.Syscall(_glGetUniformBlockIndex.Addr(), 2, uintptr(p.V), uintptr(unsafe.Pointer(c0)), 0)
+ issue34474KeepAlive(c0)
+ return uint(u)
+}
+func (c *Functions) GetBinding(pname Enum) Object {
+ return Object{uint(c.GetInteger(pname))}
+}
+func (c *Functions) GetBindingi(pname Enum, idx int) Object {
+ return Object{uint(c.GetIntegeri(pname, idx))}
+}
+func (c *Functions) GetError() Enum {
+ e, _, _ := syscall.Syscall(_glGetError.Addr(), 0, 0, 0, 0)
+ return Enum(e)
+}
+func (c *Functions) GetRenderbufferParameteri(target, pname Enum) int {
+ syscall.Syscall(_glGetRenderbufferParameteriv.Addr(), 3, uintptr(target), uintptr(pname), uintptr(unsafe.Pointer(&c.int32s[0])))
+ return int(c.int32s[0])
+}
+func (c *Functions) GetFramebufferAttachmentParameteri(target, attachment, pname Enum) int {
+ syscall.Syscall6(_glGetFramebufferAttachmentParameteriv.Addr(), 4, uintptr(target), uintptr(attachment), uintptr(pname), uintptr(unsafe.Pointer(&c.int32s[0])), 0, 0)
+ return int(c.int32s[0])
+}
+func (c *Functions) GetInteger4(pname Enum) [4]int {
+ syscall.Syscall(_glGetIntegerv.Addr(), 2, uintptr(pname), uintptr(unsafe.Pointer(&c.int32s[0])), 0)
+ var r [4]int
+ for i := range r {
+ r[i] = int(c.int32s[i])
+ }
+ return r
+}
+func (c *Functions) GetInteger(pname Enum) int {
+ syscall.Syscall(_glGetIntegerv.Addr(), 2, uintptr(pname), uintptr(unsafe.Pointer(&c.int32s[0])), 0)
+ return int(c.int32s[0])
+}
+func (c *Functions) GetIntegeri(pname Enum, idx int) int {
+ syscall.Syscall(_glGetIntegeri_v.Addr(), 3, uintptr(pname), uintptr(idx), uintptr(unsafe.Pointer(&c.int32s[0])))
+ return int(c.int32s[0])
+}
+func (c *Functions) GetFloat(pname Enum) float32 {
+ syscall.Syscall(_glGetFloatv.Addr(), 2, uintptr(pname), uintptr(unsafe.Pointer(&c.float32s[0])), 0)
+ return c.float32s[0]
+}
+func (c *Functions) GetFloat4(pname Enum) [4]float32 {
+ syscall.Syscall(_glGetFloatv.Addr(), 2, uintptr(pname), uintptr(unsafe.Pointer(&c.float32s[0])), 0)
+ var r [4]float32
+ copy(r[:], c.float32s[:])
+ return r
+}
+func (c *Functions) GetProgrami(p Program, pname Enum) int {
+ syscall.Syscall(_glGetProgramiv.Addr(), 3, uintptr(p.V), uintptr(pname), uintptr(unsafe.Pointer(&c.int32s[0])))
+ return int(c.int32s[0])
+}
+func (c *Functions) GetProgramInfoLog(p Program) string {
+ n := c.GetProgrami(p, INFO_LOG_LENGTH)
+ buf := make([]byte, n)
+ syscall.Syscall6(_glGetProgramInfoLog.Addr(), 4, uintptr(p.V), uintptr(len(buf)), 0, uintptr(unsafe.Pointer(&buf[0])), 0, 0)
+ return string(buf)
+}
+func (c *Functions) GetQueryObjectuiv(query Query, pname Enum) uint {
+ syscall.Syscall(_glGetQueryObjectuiv.Addr(), 3, uintptr(query.V), uintptr(pname), uintptr(unsafe.Pointer(&c.int32s[0])))
+ return uint(c.int32s[0])
+}
+func (c *Functions) GetShaderi(s Shader, pname Enum) int {
+ syscall.Syscall(_glGetShaderiv.Addr(), 3, uintptr(s.V), uintptr(pname), uintptr(unsafe.Pointer(&c.int32s[0])))
+ return int(c.int32s[0])
+}
+func (c *Functions) GetShaderInfoLog(s Shader) string {
+ n := c.GetShaderi(s, INFO_LOG_LENGTH)
+ buf := make([]byte, n)
+ syscall.Syscall6(_glGetShaderInfoLog.Addr(), 4, uintptr(s.V), uintptr(len(buf)), 0, uintptr(unsafe.Pointer(&buf[0])), 0, 0)
+ return string(buf)
+}
+func (c *Functions) GetString(pname Enum) string {
+ s, _, _ := syscall.Syscall(_glGetString.Addr(), 1, uintptr(pname), 0, 0)
+ return windows.BytePtrToString((*byte)(unsafe.Pointer(s)))
+}
+func (c *Functions) GetUniformLocation(p Program, name string) Uniform {
+ cname := cString(name)
+ c0 := &cname[0]
+ u, _, _ := syscall.Syscall(_glGetUniformLocation.Addr(), 2, uintptr(p.V), uintptr(unsafe.Pointer(c0)), 0)
+ issue34474KeepAlive(c0)
+ return Uniform{int(u)}
+}
+func (c *Functions) GetVertexAttrib(index int, pname Enum) int {
+ syscall.Syscall(_glGetVertexAttribiv.Addr(), 3, uintptr(index), uintptr(pname), uintptr(unsafe.Pointer(&c.int32s[0])))
+ return int(c.int32s[0])
+}
+
+func (c *Functions) GetVertexAttribBinding(index int, pname Enum) Object {
+ return Object{uint(c.GetVertexAttrib(index, pname))}
+}
+
+func (c *Functions) GetVertexAttribPointer(index int, pname Enum) uintptr {
+ syscall.Syscall(_glGetVertexAttribPointerv.Addr(), 3, uintptr(index), uintptr(pname), uintptr(unsafe.Pointer(&c.uintptrs[0])))
+ return c.uintptrs[0]
+}
+func (c *Functions) InvalidateFramebuffer(target, attachment Enum) {
+ addr := _glInvalidateFramebuffer.Addr()
+ if addr == 0 {
+ // InvalidateFramebuffer is just a hint. Skip it if not supported.
+ return
+ }
+ syscall.Syscall(addr, 3, uintptr(target), 1, uintptr(unsafe.Pointer(&attachment)))
+}
+func (f *Functions) IsEnabled(cap Enum) bool {
+ u, _, _ := syscall.Syscall(_glIsEnabled.Addr(), 1, uintptr(cap), 0, 0)
+ return u == TRUE
+}
+func (c *Functions) LinkProgram(p Program) {
+ syscall.Syscall(_glLinkProgram.Addr(), 1, uintptr(p.V), 0, 0)
+}
+func (c *Functions) PixelStorei(pname Enum, param int) {
+ syscall.Syscall(_glPixelStorei.Addr(), 2, uintptr(pname), uintptr(param), 0)
+}
+func (f *Functions) MemoryBarrier(barriers Enum) {
+ panic("not implemented")
+}
+func (f *Functions) MapBufferRange(target Enum, offset, length int, access Enum) []byte {
+ panic("not implemented")
+}
+func (f *Functions) ReadPixels(x, y, width, height int, format, ty Enum, data []byte) {
+ d0 := &data[0]
+ syscall.Syscall9(_glReadPixels.Addr(), 7, uintptr(x), uintptr(y), uintptr(width), uintptr(height), uintptr(format), uintptr(ty), uintptr(unsafe.Pointer(d0)), 0, 0)
+ issue34474KeepAlive(d0)
+}
+func (c *Functions) RenderbufferStorage(target, internalformat Enum, width, height int) {
+ syscall.Syscall6(_glRenderbufferStorage.Addr(), 4, uintptr(target), uintptr(internalformat), uintptr(width), uintptr(height), 0, 0)
+}
+func (c *Functions) Scissor(x, y, width, height int32) {
+ syscall.Syscall6(_glScissor.Addr(), 4, uintptr(x), uintptr(y), uintptr(width), uintptr(height), 0, 0)
+}
+func (c *Functions) ShaderSource(s Shader, src string) {
+ var n uintptr = uintptr(len(src))
+ psrc := &src
+ syscall.Syscall6(_glShaderSource.Addr(), 4, uintptr(s.V), 1, uintptr(unsafe.Pointer(psrc)), uintptr(unsafe.Pointer(&n)), 0, 0)
+ issue34474KeepAlive(psrc)
+}
+func (f *Functions) TexImage2D(target Enum, level int, internalFormat Enum, width int, height int, format Enum, ty Enum) {
+ syscall.Syscall9(_glTexImage2D.Addr(), 9, uintptr(target), uintptr(level), uintptr(internalFormat), uintptr(width), uintptr(height), 0, uintptr(format), uintptr(ty), 0)
+}
+func (f *Functions) TexStorage2D(target Enum, levels int, internalFormat Enum, width, height int) {
+ syscall.Syscall6(_glTexStorage2D.Addr(), 5, uintptr(target), uintptr(levels), uintptr(internalFormat), uintptr(width), uintptr(height), 0)
+}
+func (c *Functions) TexSubImage2D(target Enum, level int, x, y, width, height int, format, ty Enum, data []byte) {
+ d0 := &data[0]
+ syscall.Syscall9(_glTexSubImage2D.Addr(), 9, uintptr(target), uintptr(level), uintptr(x), uintptr(y), uintptr(width), uintptr(height), uintptr(format), uintptr(ty), uintptr(unsafe.Pointer(d0)))
+ issue34474KeepAlive(d0)
+}
+func (c *Functions) TexParameteri(target, pname Enum, param int) {
+ syscall.Syscall(_glTexParameteri.Addr(), 3, uintptr(target), uintptr(pname), uintptr(param))
+}
+func (f *Functions) UniformBlockBinding(p Program, uniformBlockIndex uint, uniformBlockBinding uint) {
+ syscall.Syscall(_glUniformBlockBinding.Addr(), 3, uintptr(p.V), uintptr(uniformBlockIndex), uintptr(uniformBlockBinding))
+}
+func (c *Functions) Uniform1f(dst Uniform, v float32) {
+ syscall.Syscall(_glUniform1f.Addr(), 2, uintptr(dst.V), uintptr(math.Float32bits(v)), 0)
+}
+func (c *Functions) Uniform1i(dst Uniform, v int) {
+ syscall.Syscall(_glUniform1i.Addr(), 2, uintptr(dst.V), uintptr(v), 0)
+}
+func (c *Functions) Uniform2f(dst Uniform, v0, v1 float32) {
+ syscall.Syscall(_glUniform2f.Addr(), 3, uintptr(dst.V), uintptr(math.Float32bits(v0)), uintptr(math.Float32bits(v1)))
+}
+func (c *Functions) Uniform3f(dst Uniform, v0, v1, v2 float32) {
+ syscall.Syscall6(_glUniform3f.Addr(), 4, uintptr(dst.V), uintptr(math.Float32bits(v0)), uintptr(math.Float32bits(v1)), uintptr(math.Float32bits(v2)), 0, 0)
+}
+func (c *Functions) Uniform4f(dst Uniform, v0, v1, v2, v3 float32) {
+ syscall.Syscall6(_glUniform4f.Addr(), 5, uintptr(dst.V), uintptr(math.Float32bits(v0)), uintptr(math.Float32bits(v1)), uintptr(math.Float32bits(v2)), uintptr(math.Float32bits(v3)), 0)
+}
+func (c *Functions) UseProgram(p Program) {
+ syscall.Syscall(_glUseProgram.Addr(), 1, uintptr(p.V), 0, 0)
+}
+func (f *Functions) UnmapBuffer(target Enum) bool {
+ panic("not implemented")
+}
+func (c *Functions) VertexAttribPointer(dst Attrib, size int, ty Enum, normalized bool, stride, offset int) {
+ var norm uintptr
+ if normalized {
+ norm = 1
+ }
+ syscall.Syscall6(_glVertexAttribPointer.Addr(), 6, uintptr(dst), uintptr(size), uintptr(ty), norm, uintptr(stride), uintptr(offset))
+}
+func (c *Functions) Viewport(x, y, width, height int) {
+ syscall.Syscall6(_glViewport.Addr(), 4, uintptr(x), uintptr(y), uintptr(width), uintptr(height), 0, 0)
+}
+
+func cString(s string) []byte {
+ b := make([]byte, len(s)+1)
+ copy(b, s)
+ return b
+}
+
+// issue34474KeepAlive calls runtime.KeepAlive as a
+// workaround for golang.org/issue/34474.
+func issue34474KeepAlive(v interface{}) {
+ runtime.KeepAlive(v)
+}
diff --git a/vendor/gioui.org/internal/gl/types.go b/vendor/gioui.org/internal/gl/types.go
new file mode 100644
index 0000000..dd24963
--- /dev/null
+++ b/vendor/gioui.org/internal/gl/types.go
@@ -0,0 +1,77 @@
+//go:build !js
+// +build !js
+
+package gl
+
+type (
+ Object struct{ V uint }
+ Buffer Object
+ Framebuffer Object
+ Program Object
+ Renderbuffer Object
+ Shader Object
+ Texture Object
+ Query Object
+ Uniform struct{ V int }
+ VertexArray Object
+)
+
+func (o Object) valid() bool {
+ return o.V != 0
+}
+
+func (o Object) equal(o2 Object) bool {
+ return o == o2
+}
+
+func (u Framebuffer) Valid() bool {
+ return Object(u).valid()
+}
+
+func (u Uniform) Valid() bool {
+ return u.V != -1
+}
+
+func (p Program) Valid() bool {
+ return Object(p).valid()
+}
+
+func (s Shader) Valid() bool {
+ return Object(s).valid()
+}
+
+func (a VertexArray) Valid() bool {
+ return Object(a).valid()
+}
+
+func (f Framebuffer) Equal(f2 Framebuffer) bool {
+ return Object(f).equal(Object(f2))
+}
+
+func (p Program) Equal(p2 Program) bool {
+ return Object(p).equal(Object(p2))
+}
+
+func (s Shader) Equal(s2 Shader) bool {
+ return Object(s).equal(Object(s2))
+}
+
+func (u Uniform) Equal(u2 Uniform) bool {
+ return u == u2
+}
+
+func (a VertexArray) Equal(a2 VertexArray) bool {
+ return Object(a).equal(Object(a2))
+}
+
+func (r Renderbuffer) Equal(r2 Renderbuffer) bool {
+ return Object(r).equal(Object(r2))
+}
+
+func (t Texture) Equal(t2 Texture) bool {
+ return Object(t).equal(Object(t2))
+}
+
+func (b Buffer) Equal(b2 Buffer) bool {
+ return Object(b).equal(Object(b2))
+}
diff --git a/vendor/gioui.org/internal/gl/types_js.go b/vendor/gioui.org/internal/gl/types_js.go
new file mode 100644
index 0000000..8d91a6b
--- /dev/null
+++ b/vendor/gioui.org/internal/gl/types_js.go
@@ -0,0 +1,90 @@
+// SPDX-License-Identifier: Unlicense OR MIT
+
+package gl
+
+import "syscall/js"
+
+type (
+ Object js.Value
+ Buffer Object
+ Framebuffer Object
+ Program Object
+ Renderbuffer Object
+ Shader Object
+ Texture Object
+ Query Object
+ Uniform Object
+ VertexArray Object
+)
+
+func (o Object) valid() bool {
+ return js.Value(o).Truthy()
+}
+
+func (o Object) equal(o2 Object) bool {
+ return js.Value(o).Equal(js.Value(o2))
+}
+
+func (b Buffer) Valid() bool {
+ return Object(b).valid()
+}
+
+func (f Framebuffer) Valid() bool {
+ return Object(f).valid()
+}
+
+func (p Program) Valid() bool {
+ return Object(p).valid()
+}
+
+func (r Renderbuffer) Valid() bool {
+ return Object(r).valid()
+}
+
+func (s Shader) Valid() bool {
+ return Object(s).valid()
+}
+
+func (t Texture) Valid() bool {
+ return Object(t).valid()
+}
+
+func (u Uniform) Valid() bool {
+ return Object(u).valid()
+}
+
+func (a VertexArray) Valid() bool {
+ return Object(a).valid()
+}
+
+func (f Framebuffer) Equal(f2 Framebuffer) bool {
+ return Object(f).equal(Object(f2))
+}
+
+func (p Program) Equal(p2 Program) bool {
+ return Object(p).equal(Object(p2))
+}
+
+func (s Shader) Equal(s2 Shader) bool {
+ return Object(s).equal(Object(s2))
+}
+
+func (u Uniform) Equal(u2 Uniform) bool {
+ return Object(u).equal(Object(u2))
+}
+
+func (a VertexArray) Equal(a2 VertexArray) bool {
+ return Object(a).equal(Object(a2))
+}
+
+func (r Renderbuffer) Equal(r2 Renderbuffer) bool {
+ return Object(r).equal(Object(r2))
+}
+
+func (t Texture) Equal(t2 Texture) bool {
+ return Object(t).equal(Object(t2))
+}
+
+func (b Buffer) Equal(b2 Buffer) bool {
+ return Object(b).equal(Object(b2))
+}
diff --git a/vendor/gioui.org/internal/gl/util.go b/vendor/gioui.org/internal/gl/util.go
new file mode 100644
index 0000000..c696b69
--- /dev/null
+++ b/vendor/gioui.org/internal/gl/util.go
@@ -0,0 +1,87 @@
+// SPDX-License-Identifier: Unlicense OR MIT
+
+package gl
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+)
+
+func CreateProgram(ctx *Functions, vsSrc, fsSrc string, attribs []string) (Program, error) {
+ vs, err := CreateShader(ctx, VERTEX_SHADER, vsSrc)
+ if err != nil {
+ return Program{}, err
+ }
+ defer ctx.DeleteShader(vs)
+ fs, err := CreateShader(ctx, FRAGMENT_SHADER, fsSrc)
+ if err != nil {
+ return Program{}, err
+ }
+ defer ctx.DeleteShader(fs)
+ prog := ctx.CreateProgram()
+ if !prog.Valid() {
+ return Program{}, errors.New("glCreateProgram failed")
+ }
+ ctx.AttachShader(prog, vs)
+ ctx.AttachShader(prog, fs)
+ for i, a := range attribs {
+ ctx.BindAttribLocation(prog, Attrib(i), a)
+ }
+ ctx.LinkProgram(prog)
+ if ctx.GetProgrami(prog, LINK_STATUS) == 0 {
+ log := ctx.GetProgramInfoLog(prog)
+ ctx.DeleteProgram(prog)
+ return Program{}, fmt.Errorf("program link failed: %s", strings.TrimSpace(log))
+ }
+ return prog, nil
+}
+
+func CreateComputeProgram(ctx *Functions, src string) (Program, error) {
+ cs, err := CreateShader(ctx, COMPUTE_SHADER, src)
+ if err != nil {
+ return Program{}, err
+ }
+ defer ctx.DeleteShader(cs)
+ prog := ctx.CreateProgram()
+ if !prog.Valid() {
+ return Program{}, errors.New("glCreateProgram failed")
+ }
+ ctx.AttachShader(prog, cs)
+ ctx.LinkProgram(prog)
+ if ctx.GetProgrami(prog, LINK_STATUS) == 0 {
+ log := ctx.GetProgramInfoLog(prog)
+ ctx.DeleteProgram(prog)
+ return Program{}, fmt.Errorf("program link failed: %s", strings.TrimSpace(log))
+ }
+ return prog, nil
+}
+
+func CreateShader(ctx *Functions, typ Enum, src string) (Shader, error) {
+ sh := ctx.CreateShader(typ)
+ if !sh.Valid() {
+ return Shader{}, errors.New("glCreateShader failed")
+ }
+ ctx.ShaderSource(sh, src)
+ ctx.CompileShader(sh)
+ if ctx.GetShaderi(sh, COMPILE_STATUS) == 0 {
+ log := ctx.GetShaderInfoLog(sh)
+ ctx.DeleteShader(sh)
+ return Shader{}, fmt.Errorf("shader compilation failed: %s", strings.TrimSpace(log))
+ }
+ return sh, nil
+}
+
+func ParseGLVersion(glVer string) (version [2]int, gles bool, err error) {
+ var ver [2]int
+ if _, err := fmt.Sscanf(glVer, "OpenGL ES %d.%d", &ver[0], &ver[1]); err == nil {
+ return ver, true, nil
+ } else if _, err := fmt.Sscanf(glVer, "WebGL %d.%d", &ver[0], &ver[1]); err == nil {
+ // WebGL major version v corresponds to OpenGL ES version v + 1
+ ver[0]++
+ return ver, true, nil
+ } else if _, err := fmt.Sscanf(glVer, "%d.%d", &ver[0], &ver[1]); err == nil {
+ return ver, false, nil
+ }
+ return ver, false, fmt.Errorf("failed to parse OpenGL ES version (%s)", glVer)
+}
diff --git a/vendor/gioui.org/internal/ops/ops.go b/vendor/gioui.org/internal/ops/ops.go
new file mode 100644
index 0000000..faf16a4
--- /dev/null
+++ b/vendor/gioui.org/internal/ops/ops.go
@@ -0,0 +1,450 @@
+// SPDX-License-Identifier: Unlicense OR MIT
+
+package ops
+
+import (
+ "encoding/binary"
+ "image"
+ "math"
+
+ "gioui.org/f32"
+ "gioui.org/internal/byteslice"
+ "gioui.org/internal/scene"
+)
+
+type Ops struct {
+ // version is incremented at each Reset.
+ version int
+ // data contains the serialized operations.
+ data []byte
+ // refs hold external references for operations.
+ refs []interface{}
+ // nextStateID is the id allocated for the next
+ // StateOp.
+ nextStateID int
+
+ macroStack stack
+ stacks [5]stack
+}
+
+type OpType byte
+
+type Shape byte
+
+// Start at a high number for easier debugging.
+const firstOpIndex = 200
+
+const (
+ TypeMacro OpType = iota + firstOpIndex
+ TypeCall
+ TypeDefer
+ TypePushTransform
+ TypeTransform
+ TypePopTransform
+ TypeInvalidate
+ TypeImage
+ TypePaint
+ TypeColor
+ TypeLinearGradient
+ TypePass
+ TypePopPass
+ TypePointerInput
+ TypeClipboardRead
+ TypeClipboardWrite
+ TypeKeyInput
+ TypeKeyFocus
+ TypeKeySoftKeyboard
+ TypeSave
+ TypeLoad
+ TypeAux
+ TypeClip
+ TypePopClip
+ TypeProfile
+ TypeCursor
+ TypePath
+ TypeStroke
+ TypeSemanticLabel
+ TypeSemanticDesc
+ TypeSemanticClass
+ TypeSemanticSelected
+ TypeSemanticDisabled
+)
+
+type StackID struct {
+ id int
+ prev int
+}
+
+// StateOp represents a saved operation snapshop to be restored
+// later.
+type StateOp struct {
+ id int
+ macroID int
+ ops *Ops
+}
+
+// stack tracks the integer identities of stack operations to ensure correct
+// pairing of their push and pop methods.
+type stack struct {
+ currentID int
+ nextID int
+}
+
+type StackKind uint8
+
+// ClipOp is the shadow of clip.Op.
+type ClipOp struct {
+ Bounds image.Rectangle
+ Outline bool
+ Shape Shape
+}
+
+const (
+ ClipStack StackKind = iota
+ TransStack
+ PassStack
+ MetaStack
+)
+
+const (
+ Path Shape = iota
+ Ellipse
+ Rect
+)
+
+const (
+ TypeMacroLen = 1 + 4 + 4
+ TypeCallLen = 1 + 4 + 4
+ TypeDeferLen = 1
+ TypePushTransformLen = 1 + 4*6
+ TypeTransformLen = 1 + 1 + 4*6
+ TypePopTransformLen = 1
+ TypeRedrawLen = 1 + 8
+ TypeImageLen = 1
+ TypePaintLen = 1
+ TypeColorLen = 1 + 4
+ TypeLinearGradientLen = 1 + 8*2 + 4*2
+ TypePassLen = 1
+ TypePopPassLen = 1
+ TypePointerInputLen = 1 + 1 + 1*2 + 2*4 + 2*4
+ TypeClipboardReadLen = 1
+ TypeClipboardWriteLen = 1
+ TypeKeyInputLen = 1 + 1
+ TypeKeyFocusLen = 1 + 1
+ TypeKeySoftKeyboardLen = 1 + 1
+ TypeSaveLen = 1 + 4
+ TypeLoadLen = 1 + 4
+ TypeAuxLen = 1
+ TypeClipLen = 1 + 4*4 + 1 + 1
+ TypePopClipLen = 1
+ TypeProfileLen = 1
+ TypeCursorLen = 1 + 1
+ TypePathLen = 8 + 1
+ TypeStrokeLen = 1 + 4
+ TypeSemanticLabelLen = 1
+ TypeSemanticDescLen = 1
+ TypeSemanticClassLen = 2
+ TypeSemanticSelectedLen = 2
+ TypeSemanticDisabledLen = 2
+)
+
+func (op *ClipOp) Decode(data []byte) {
+ if OpType(data[0]) != TypeClip {
+ panic("invalid op")
+ }
+ bo := binary.LittleEndian
+ r := image.Rectangle{
+ Min: image.Point{
+ X: int(int32(bo.Uint32(data[1:]))),
+ Y: int(int32(bo.Uint32(data[5:]))),
+ },
+ Max: image.Point{
+ X: int(int32(bo.Uint32(data[9:]))),
+ Y: int(int32(bo.Uint32(data[13:]))),
+ },
+ }
+ *op = ClipOp{
+ Bounds: r,
+ Outline: data[17] == 1,
+ Shape: Shape(data[18]),
+ }
+}
+
+func Reset(o *Ops) {
+ o.macroStack = stack{}
+ for i := range o.stacks {
+ o.stacks[i] = stack{}
+ }
+ // Leave references to the GC.
+ for i := range o.refs {
+ o.refs[i] = nil
+ }
+ o.data = o.data[:0]
+ o.refs = o.refs[:0]
+ o.nextStateID = 0
+ o.version++
+}
+
+func Write(o *Ops, n int) []byte {
+ o.data = append(o.data, make([]byte, n)...)
+ return o.data[len(o.data)-n:]
+}
+
+func PushMacro(o *Ops) StackID {
+ return o.macroStack.push()
+}
+
+func PopMacro(o *Ops, id StackID) {
+ o.macroStack.pop(id)
+}
+
+func FillMacro(o *Ops, startPC PC) {
+ pc := PCFor(o)
+ // Fill out the macro definition reserved in Record.
+ data := o.data[startPC.data:]
+ data = data[:TypeMacroLen]
+ data[0] = byte(TypeMacro)
+ bo := binary.LittleEndian
+ bo.PutUint32(data[1:], uint32(pc.data))
+ bo.PutUint32(data[5:], uint32(pc.refs))
+}
+
+func AddCall(o *Ops, callOps *Ops, pc PC) {
+ data := Write1(o, TypeCallLen, callOps)
+ data[0] = byte(TypeCall)
+ bo := binary.LittleEndian
+ bo.PutUint32(data[1:], uint32(pc.data))
+ bo.PutUint32(data[5:], uint32(pc.refs))
+}
+
+func PushOp(o *Ops, kind StackKind) (StackID, int) {
+ return o.stacks[kind].push(), o.macroStack.currentID
+}
+
+func PopOp(o *Ops, kind StackKind, sid StackID, macroID int) {
+ if o.macroStack.currentID != macroID {
+ panic("stack push and pop must not cross macro boundary")
+ }
+ o.stacks[kind].pop(sid)
+}
+
+func Write1(o *Ops, n int, ref1 interface{}) []byte {
+ o.data = append(o.data, make([]byte, n)...)
+ o.refs = append(o.refs, ref1)
+ return o.data[len(o.data)-n:]
+}
+
+func Write2(o *Ops, n int, ref1, ref2 interface{}) []byte {
+ o.data = append(o.data, make([]byte, n)...)
+ o.refs = append(o.refs, ref1, ref2)
+ return o.data[len(o.data)-n:]
+}
+
+func PCFor(o *Ops) PC {
+ return PC{data: len(o.data), refs: len(o.refs)}
+}
+
+func (s *stack) push() StackID {
+ s.nextID++
+ sid := StackID{
+ id: s.nextID,
+ prev: s.currentID,
+ }
+ s.currentID = s.nextID
+ return sid
+}
+
+func (s *stack) check(sid StackID) {
+ if s.currentID != sid.id {
+ panic("unbalanced operation")
+ }
+}
+
+func (s *stack) pop(sid StackID) {
+ s.check(sid)
+ s.currentID = sid.prev
+}
+
+// Save the effective transformation.
+func Save(o *Ops) StateOp {
+ o.nextStateID++
+ s := StateOp{
+ ops: o,
+ id: o.nextStateID,
+ macroID: o.macroStack.currentID,
+ }
+ bo := binary.LittleEndian
+ data := Write(o, TypeSaveLen)
+ data[0] = byte(TypeSave)
+ bo.PutUint32(data[1:], uint32(s.id))
+ return s
+}
+
+// Load a previously saved operations state given
+// its ID.
+func (s StateOp) Load() {
+ bo := binary.LittleEndian
+ data := Write(s.ops, TypeLoadLen)
+ data[0] = byte(TypeLoad)
+ bo.PutUint32(data[1:], uint32(s.id))
+}
+
+func DecodeCommand(d []byte) scene.Command {
+ var cmd scene.Command
+ copy(byteslice.Uint32(cmd[:]), d)
+ return cmd
+}
+
+func EncodeCommand(out []byte, cmd scene.Command) {
+ copy(out, byteslice.Uint32(cmd[:]))
+}
+
+func DecodeTransform(data []byte) (t f32.Affine2D, push bool) {
+ if OpType(data[0]) != TypeTransform {
+ panic("invalid op")
+ }
+ push = data[1] != 0
+ data = data[2:]
+ data = data[:4*6]
+
+ bo := binary.LittleEndian
+ a := math.Float32frombits(bo.Uint32(data))
+ b := math.Float32frombits(bo.Uint32(data[4*1:]))
+ c := math.Float32frombits(bo.Uint32(data[4*2:]))
+ d := math.Float32frombits(bo.Uint32(data[4*3:]))
+ e := math.Float32frombits(bo.Uint32(data[4*4:]))
+ f := math.Float32frombits(bo.Uint32(data[4*5:]))
+ return f32.NewAffine2D(a, b, c, d, e, f), push
+}
+
+// DecodeSave decodes the state id of a save op.
+func DecodeSave(data []byte) int {
+ if OpType(data[0]) != TypeSave {
+ panic("invalid op")
+ }
+ bo := binary.LittleEndian
+ return int(bo.Uint32(data[1:]))
+}
+
+// DecodeLoad decodes the state id of a load op.
+func DecodeLoad(data []byte) int {
+ if OpType(data[0]) != TypeLoad {
+ panic("invalid op")
+ }
+ bo := binary.LittleEndian
+ return int(bo.Uint32(data[1:]))
+}
+
+func (t OpType) Size() int {
+ return [...]int{
+ TypeMacroLen,
+ TypeCallLen,
+ TypeDeferLen,
+ TypePushTransformLen,
+ TypeTransformLen,
+ TypePopTransformLen,
+ TypeRedrawLen,
+ TypeImageLen,
+ TypePaintLen,
+ TypeColorLen,
+ TypeLinearGradientLen,
+ TypePassLen,
+ TypePopPassLen,
+ TypePointerInputLen,
+ TypeClipboardReadLen,
+ TypeClipboardWriteLen,
+ TypeKeyInputLen,
+ TypeKeyFocusLen,
+ TypeKeySoftKeyboardLen,
+ TypeSaveLen,
+ TypeLoadLen,
+ TypeAuxLen,
+ TypeClipLen,
+ TypePopClipLen,
+ TypeProfileLen,
+ TypeCursorLen,
+ TypePathLen,
+ TypeStrokeLen,
+ TypeSemanticLabelLen,
+ TypeSemanticDescLen,
+ TypeSemanticClassLen,
+ TypeSemanticSelectedLen,
+ TypeSemanticDisabledLen,
+ }[t-firstOpIndex]
+}
+
+func (t OpType) NumRefs() int {
+ switch t {
+ case TypeKeyInput, TypeKeyFocus, TypePointerInput, TypeProfile, TypeCall, TypeClipboardRead, TypeClipboardWrite, TypeCursor, TypeSemanticLabel, TypeSemanticDesc:
+ return 1
+ case TypeImage:
+ return 2
+ default:
+ return 0
+ }
+}
+
+func (t OpType) String() string {
+ switch t {
+ case TypeMacro:
+ return "Macro"
+ case TypeCall:
+ return "Call"
+ case TypeDefer:
+ return "Defer"
+ case TypePushTransform:
+ return "PushTransform"
+ case TypeTransform:
+ return "Transform"
+ case TypePopTransform:
+ return "PopTransform"
+ case TypeInvalidate:
+ return "Invalidate"
+ case TypeImage:
+ return "Image"
+ case TypePaint:
+ return "Paint"
+ case TypeColor:
+ return "Color"
+ case TypeLinearGradient:
+ return "LinearGradient"
+ case TypePass:
+ return "Pass"
+ case TypePopPass:
+ return "PopPass"
+ case TypePointerInput:
+ return "PointerInput"
+ case TypeClipboardRead:
+ return "ClipboardRead"
+ case TypeClipboardWrite:
+ return "ClipboardWrite"
+ case TypeKeyInput:
+ return "KeyInput"
+ case TypeKeyFocus:
+ return "KeyFocus"
+ case TypeKeySoftKeyboard:
+ return "KeySoftKeyboard"
+ case TypeSave:
+ return "Save"
+ case TypeLoad:
+ return "Load"
+ case TypeAux:
+ return "Aux"
+ case TypeClip:
+ return "Clip"
+ case TypePopClip:
+ return "PopClip"
+ case TypeProfile:
+ return "Profile"
+ case TypeCursor:
+ return "Cursor"
+ case TypePath:
+ return "Path"
+ case TypeStroke:
+ return "Stroke"
+ case TypeSemanticLabel:
+ return "SemanticDescription"
+ default:
+ panic("unknown OpType")
+ }
+}
diff --git a/vendor/gioui.org/internal/ops/reader.go b/vendor/gioui.org/internal/ops/reader.go
new file mode 100644
index 0000000..99b8cb6
--- /dev/null
+++ b/vendor/gioui.org/internal/ops/reader.go
@@ -0,0 +1,193 @@
+// SPDX-License-Identifier: Unlicense OR MIT
+
+package ops
+
+import (
+ "encoding/binary"
+)
+
+// Reader parses an ops list.
+type Reader struct {
+ pc PC
+ stack []macro
+ ops *Ops
+ deferOps Ops
+ deferDone bool
+}
+
+// EncodedOp represents an encoded op returned by
+// Reader.
+type EncodedOp struct {
+ Key Key
+ Data []byte
+ Refs []interface{}
+}
+
+// Key is a unique key for a given op.
+type Key struct {
+ ops *Ops
+ pc int
+ version int
+}
+
+// Shadow of op.MacroOp.
+type macroOp struct {
+ ops *Ops
+ pc PC
+}
+
+// PC is an instruction counter for an operation list.
+type PC struct {
+ data int
+ refs int
+}
+
+type macro struct {
+ ops *Ops
+ retPC PC
+ endPC PC
+}
+
+type opMacroDef struct {
+ endpc PC
+}
+
+// Reset start reading from the beginning of ops.
+func (r *Reader) Reset(ops *Ops) {
+ r.ResetAt(ops, PC{})
+}
+
+// ResetAt is like Reset, except it starts reading from pc.
+func (r *Reader) ResetAt(ops *Ops, pc PC) {
+ r.stack = r.stack[:0]
+ Reset(&r.deferOps)
+ r.deferDone = false
+ r.pc = pc
+ r.ops = ops
+}
+
+func (r *Reader) Decode() (EncodedOp, bool) {
+ if r.ops == nil {
+ return EncodedOp{}, false
+ }
+ deferring := false
+ for {
+ if len(r.stack) > 0 {
+ b := r.stack[len(r.stack)-1]
+ if r.pc == b.endPC {
+ r.ops = b.ops
+ r.pc = b.retPC
+ r.stack = r.stack[:len(r.stack)-1]
+ continue
+ }
+ }
+ data := r.ops.data
+ data = data[r.pc.data:]
+ refs := r.ops.refs
+ if len(data) == 0 {
+ if r.deferDone {
+ return EncodedOp{}, false
+ }
+ r.deferDone = true
+ // Execute deferred macros.
+ r.ops = &r.deferOps
+ r.pc = PC{}
+ continue
+ }
+ key := Key{ops: r.ops, pc: r.pc.data, version: r.ops.version}
+ t := OpType(data[0])
+ n := t.Size()
+ nrefs := t.NumRefs()
+ data = data[:n]
+ refs = refs[r.pc.refs:]
+ refs = refs[:nrefs]
+ switch t {
+ case TypeDefer:
+ deferring = true
+ r.pc.data += n
+ r.pc.refs += nrefs
+ continue
+ case TypeAux:
+ // An Aux operations is always wrapped in a macro, and
+ // its length is the remaining space.
+ block := r.stack[len(r.stack)-1]
+ n += block.endPC.data - r.pc.data - TypeAuxLen
+ data = data[:n]
+ case TypeCall:
+ if deferring {
+ deferring = false
+ // Copy macro for deferred execution.
+ if t.NumRefs() != 1 {
+ panic("internal error: unexpected number of macro refs")
+ }
+ deferData := Write1(&r.deferOps, t.Size(), refs[0])
+ copy(deferData, data)
+ r.pc.data += n
+ r.pc.refs += nrefs
+ continue
+ }
+ var op macroOp
+ op.decode(data, refs)
+ macroData := op.ops.data[op.pc.data:]
+ if OpType(macroData[0]) != TypeMacro {
+ panic("invalid macro reference")
+ }
+ var opDef opMacroDef
+ opDef.decode(macroData[:TypeMacro.Size()])
+ retPC := r.pc
+ retPC.data += n
+ retPC.refs += nrefs
+ r.stack = append(r.stack, macro{
+ ops: r.ops,
+ retPC: retPC,
+ endPC: opDef.endpc,
+ })
+ r.ops = op.ops
+ r.pc = op.pc
+ r.pc.data += TypeMacro.Size()
+ r.pc.refs += TypeMacro.NumRefs()
+ continue
+ case TypeMacro:
+ var op opMacroDef
+ op.decode(data)
+ r.pc = op.endpc
+ continue
+ }
+ r.pc.data += n
+ r.pc.refs += nrefs
+ return EncodedOp{Key: key, Data: data, Refs: refs}, true
+ }
+}
+
+func (op *opMacroDef) decode(data []byte) {
+ if OpType(data[0]) != TypeMacro {
+ panic("invalid op")
+ }
+ bo := binary.LittleEndian
+ data = data[:9]
+ dataIdx := int(int32(bo.Uint32(data[1:])))
+ refsIdx := int(int32(bo.Uint32(data[5:])))
+ *op = opMacroDef{
+ endpc: PC{
+ data: dataIdx,
+ refs: refsIdx,
+ },
+ }
+}
+
+func (m *macroOp) decode(data []byte, refs []interface{}) {
+ if OpType(data[0]) != TypeCall {
+ panic("invalid op")
+ }
+ data = data[:9]
+ bo := binary.LittleEndian
+ dataIdx := int(int32(bo.Uint32(data[1:])))
+ refsIdx := int(int32(bo.Uint32(data[5:])))
+ *m = macroOp{
+ ops: refs[0].(*Ops),
+ pc: PC{
+ data: dataIdx,
+ refs: refsIdx,
+ },
+ }
+}
diff --git a/vendor/gioui.org/internal/scene/scene.go b/vendor/gioui.org/internal/scene/scene.go
new file mode 100644
index 0000000..21fd92c
--- /dev/null
+++ b/vendor/gioui.org/internal/scene/scene.go
@@ -0,0 +1,228 @@
+// SPDX-License-Identifier: Unlicense OR MIT
+
+// Package scene encodes and decodes graphics commands in the format used by the
+// compute renderer.
+package scene
+
+import (
+ "fmt"
+ "image"
+ "image/color"
+ "math"
+ "unsafe"
+
+ "gioui.org/f32"
+)
+
+type Op uint32
+
+type Command [sceneElemSize / 4]uint32
+
+// GPU commands from scene.h
+const (
+ OpNop Op = iota
+ OpLine
+ OpQuad
+ OpCubic
+ OpFillColor
+ OpLineWidth
+ OpTransform
+ OpBeginClip
+ OpEndClip
+ OpFillImage
+ OpSetFillMode
+)
+
+// FillModes, from setup.h.
+type FillMode uint32
+
+const (
+ FillModeNonzero = 0
+ FillModeStroke = 1
+)
+
+const CommandSize = int(unsafe.Sizeof(Command{}))
+
+const sceneElemSize = 36
+
+func (c Command) Op() Op {
+ return Op(c[0])
+}
+
+func (c Command) String() string {
+ switch Op(c[0]) {
+ case OpNop:
+ return "nop"
+ case OpLine:
+ from, to := DecodeLine(c)
+ return fmt.Sprintf("line(%v, %v)", from, to)
+ case OpQuad:
+ from, ctrl, to := DecodeQuad(c)
+ return fmt.Sprintf("quad(%v, %v, %v)", from, ctrl, to)
+ case OpCubic:
+ from, ctrl0, ctrl1, to := DecodeCubic(c)
+ return fmt.Sprintf("cubic(%v, %v, %v, %v)", from, ctrl0, ctrl1, to)
+ case OpFillColor:
+ return fmt.Sprintf("fillcolor %#.8x", c[1])
+ case OpLineWidth:
+ return "linewidth"
+ case OpTransform:
+ t := f32.NewAffine2D(
+ math.Float32frombits(c[1]),
+ math.Float32frombits(c[3]),
+ math.Float32frombits(c[5]),
+ math.Float32frombits(c[2]),
+ math.Float32frombits(c[4]),
+ math.Float32frombits(c[6]),
+ )
+ return fmt.Sprintf("transform (%v)", t)
+ case OpBeginClip:
+ bounds := f32.Rectangle{
+ Min: f32.Pt(math.Float32frombits(c[1]), math.Float32frombits(c[2])),
+ Max: f32.Pt(math.Float32frombits(c[3]), math.Float32frombits(c[4])),
+ }
+ return fmt.Sprintf("beginclip (%v)", bounds)
+ case OpEndClip:
+ bounds := f32.Rectangle{
+ Min: f32.Pt(math.Float32frombits(c[1]), math.Float32frombits(c[2])),
+ Max: f32.Pt(math.Float32frombits(c[3]), math.Float32frombits(c[4])),
+ }
+ return fmt.Sprintf("endclip (%v)", bounds)
+ case OpFillImage:
+ return "fillimage"
+ case OpSetFillMode:
+ return "setfillmode"
+ default:
+ panic("unreachable")
+ }
+}
+
+func Line(start, end f32.Point) Command {
+ return Command{
+ 0: uint32(OpLine),
+ 1: math.Float32bits(start.X),
+ 2: math.Float32bits(start.Y),
+ 3: math.Float32bits(end.X),
+ 4: math.Float32bits(end.Y),
+ }
+}
+
+func Cubic(start, ctrl0, ctrl1, end f32.Point) Command {
+ return Command{
+ 0: uint32(OpCubic),
+ 1: math.Float32bits(start.X),
+ 2: math.Float32bits(start.Y),
+ 3: math.Float32bits(ctrl0.X),
+ 4: math.Float32bits(ctrl0.Y),
+ 5: math.Float32bits(ctrl1.X),
+ 6: math.Float32bits(ctrl1.Y),
+ 7: math.Float32bits(end.X),
+ 8: math.Float32bits(end.Y),
+ }
+}
+
+func Quad(start, ctrl, end f32.Point) Command {
+ return Command{
+ 0: uint32(OpQuad),
+ 1: math.Float32bits(start.X),
+ 2: math.Float32bits(start.Y),
+ 3: math.Float32bits(ctrl.X),
+ 4: math.Float32bits(ctrl.Y),
+ 5: math.Float32bits(end.X),
+ 6: math.Float32bits(end.Y),
+ }
+}
+
+func Transform(m f32.Affine2D) Command {
+ sx, hx, ox, hy, sy, oy := m.Elems()
+ return Command{
+ 0: uint32(OpTransform),
+ 1: math.Float32bits(sx),
+ 2: math.Float32bits(hy),
+ 3: math.Float32bits(hx),
+ 4: math.Float32bits(sy),
+ 5: math.Float32bits(ox),
+ 6: math.Float32bits(oy),
+ }
+}
+
+func SetLineWidth(width float32) Command {
+ return Command{
+ 0: uint32(OpLineWidth),
+ 1: math.Float32bits(width),
+ }
+}
+
+func BeginClip(bbox f32.Rectangle) Command {
+ return Command{
+ 0: uint32(OpBeginClip),
+ 1: math.Float32bits(bbox.Min.X),
+ 2: math.Float32bits(bbox.Min.Y),
+ 3: math.Float32bits(bbox.Max.X),
+ 4: math.Float32bits(bbox.Max.Y),
+ }
+}
+
+func EndClip(bbox f32.Rectangle) Command {
+ return Command{
+ 0: uint32(OpEndClip),
+ 1: math.Float32bits(bbox.Min.X),
+ 2: math.Float32bits(bbox.Min.Y),
+ 3: math.Float32bits(bbox.Max.X),
+ 4: math.Float32bits(bbox.Max.Y),
+ }
+}
+
+func FillColor(col color.RGBA) Command {
+ return Command{
+ 0: uint32(OpFillColor),
+ 1: uint32(col.R)<<24 | uint32(col.G)<<16 | uint32(col.B)<<8 | uint32(col.A),
+ }
+}
+
+func FillImage(index int, offset image.Point) Command {
+ x := int16(offset.X)
+ y := int16(offset.Y)
+ return Command{
+ 0: uint32(OpFillImage),
+ 1: uint32(index),
+ 2: uint32(uint16(x)) | uint32(uint16(y))<<16,
+ }
+}
+
+func SetFillMode(mode FillMode) Command {
+ return Command{
+ 0: uint32(OpSetFillMode),
+ 1: uint32(mode),
+ }
+}
+
+func DecodeLine(cmd Command) (from, to f32.Point) {
+ if cmd[0] != uint32(OpLine) {
+ panic("invalid command")
+ }
+ from = f32.Pt(math.Float32frombits(cmd[1]), math.Float32frombits(cmd[2]))
+ to = f32.Pt(math.Float32frombits(cmd[3]), math.Float32frombits(cmd[4]))
+ return
+}
+
+func DecodeQuad(cmd Command) (from, ctrl, to f32.Point) {
+ if cmd[0] != uint32(OpQuad) {
+ panic("invalid command")
+ }
+ from = f32.Pt(math.Float32frombits(cmd[1]), math.Float32frombits(cmd[2]))
+ ctrl = f32.Pt(math.Float32frombits(cmd[3]), math.Float32frombits(cmd[4]))
+ to = f32.Pt(math.Float32frombits(cmd[5]), math.Float32frombits(cmd[6]))
+ return
+}
+
+func DecodeCubic(cmd Command) (from, ctrl0, ctrl1, to f32.Point) {
+ if cmd[0] != uint32(OpCubic) {
+ panic("invalid command")
+ }
+ from = f32.Pt(math.Float32frombits(cmd[1]), math.Float32frombits(cmd[2]))
+ ctrl0 = f32.Pt(math.Float32frombits(cmd[3]), math.Float32frombits(cmd[4]))
+ ctrl1 = f32.Pt(math.Float32frombits(cmd[5]), math.Float32frombits(cmd[6]))
+ to = f32.Pt(math.Float32frombits(cmd[7]), math.Float32frombits(cmd[8]))
+ return
+}
diff --git a/vendor/gioui.org/internal/stroke/stroke.go b/vendor/gioui.org/internal/stroke/stroke.go
new file mode 100644
index 0000000..f60dab0
--- /dev/null
+++ b/vendor/gioui.org/internal/stroke/stroke.go
@@ -0,0 +1,737 @@
+// SPDX-License-Identifier: Unlicense OR MIT
+
+// Most of the algorithms to compute strokes and their offsets have been
+// extracted, adapted from (and used as a reference implementation):
+// - github.com/tdewolff/canvas (Licensed under MIT)
+//
+// These algorithms have been implemented from:
+// Fast, precise flattening of cubic Bézier path and offset curves
+// Thomas F. Hain, et al.
+//
+// An electronic version is available at:
+// https://seant23.files.wordpress.com/2010/11/fastpreciseflatteningofbeziercurve.pdf
+//
+// Possible improvements (in term of speed and/or accuracy) on these
+// algorithms are:
+//
+// - Polar Stroking: New Theory and Methods for Stroking Paths,
+// M. Kilgard
+// https://arxiv.org/pdf/2007.00308.pdf
+//
+// - https://raphlinus.github.io/graphics/curves/2019/12/23/flatten-quadbez.html
+// R. Levien
+
+// Package stroke implements conversion of strokes to filled outlines. It is used as a
+// fallback for stroke configurations not natively supported by the renderer.
+package stroke
+
+import (
+ "encoding/binary"
+ "math"
+
+ "gioui.org/f32"
+ "gioui.org/internal/ops"
+ "gioui.org/internal/scene"
+)
+
+// The following are copies of types from op/clip to avoid a circular import of
+// that package.
+// TODO: when the old renderer is gone, this package can be merged with
+// op/clip, eliminating the duplicate types.
+type StrokeStyle struct {
+ Width float32
+}
+
+// strokeTolerance is used to reconcile rounding errors arising
+// when splitting quads into smaller and smaller segments to approximate
+// them into straight lines, and when joining back segments.
+//
+// The magic value of 0.01 was found by striking a compromise between
+// aesthetic looking (curves did look like curves, even after linearization)
+// and speed.
+const strokeTolerance = 0.01
+
+type QuadSegment struct {
+ From, Ctrl, To f32.Point
+}
+
+type StrokeQuad struct {
+ Contour uint32
+ Quad QuadSegment
+}
+
+type strokeState struct {
+ p0, p1 f32.Point // p0 is the start point, p1 the end point.
+ n0, n1 f32.Point // n0 is the normal vector at the start point, n1 at the end point.
+ r0, r1 float32 // r0 is the curvature at the start point, r1 at the end point.
+ ctl f32.Point // ctl is the control point of the quadratic Bézier segment.
+}
+
+type StrokeQuads []StrokeQuad
+
+func (qs *StrokeQuads) setContour(n uint32) {
+ for i := range *qs {
+ (*qs)[i].Contour = n
+ }
+}
+
+func (qs *StrokeQuads) pen() f32.Point {
+ return (*qs)[len(*qs)-1].Quad.To
+}
+
+func (qs *StrokeQuads) lineTo(pt f32.Point) {
+ end := qs.pen()
+ *qs = append(*qs, StrokeQuad{
+ Quad: QuadSegment{
+ From: end,
+ Ctrl: end.Add(pt).Mul(0.5),
+ To: pt,
+ },
+ })
+}
+
+func (qs *StrokeQuads) arc(f1, f2 f32.Point, angle float32) {
+ const segments = 16
+ pen := qs.pen()
+ m := ArcTransform(pen, f1.Add(pen), f2.Add(pen), angle, segments)
+ for i := 0; i < segments; i++ {
+ p0 := qs.pen()
+ p1 := m.Transform(p0)
+ p2 := m.Transform(p1)
+ ctl := p1.Mul(2).Sub(p0.Add(p2).Mul(.5))
+ *qs = append(*qs, StrokeQuad{
+ Quad: QuadSegment{
+ From: p0, Ctrl: ctl, To: p2,
+ },
+ })
+ }
+}
+
+// split splits a slice of quads into slices of quads grouped
+// by contours (ie: splitted at move-to boundaries).
+func (qs StrokeQuads) split() []StrokeQuads {
+ if len(qs) == 0 {
+ return nil
+ }
+
+ var (
+ c uint32
+ o []StrokeQuads
+ i = len(o)
+ )
+ for _, q := range qs {
+ if q.Contour != c {
+ c = q.Contour
+ i = len(o)
+ o = append(o, StrokeQuads{})
+ }
+ o[i] = append(o[i], q)
+ }
+
+ return o
+}
+
+func (qs StrokeQuads) stroke(stroke StrokeStyle) StrokeQuads {
+ var (
+ o StrokeQuads
+ hw = 0.5 * stroke.Width
+ )
+
+ for _, ps := range qs.split() {
+ rhs, lhs := ps.offset(hw, stroke)
+ switch lhs {
+ case nil:
+ o = o.append(rhs)
+ default:
+ // Closed path.
+ // Inner path should go opposite direction to cancel outer path.
+ switch {
+ case ps.ccw():
+ lhs = lhs.reverse()
+ o = o.append(rhs)
+ o = o.append(lhs)
+ default:
+ rhs = rhs.reverse()
+ o = o.append(lhs)
+ o = o.append(rhs)
+ }
+ }
+ }
+
+ return o
+}
+
+// offset returns the right-hand and left-hand sides of the path, offset by
+// the half-width hw.
+// The stroke handles how segments are joined and ends are capped.
+func (qs StrokeQuads) offset(hw float32, stroke StrokeStyle) (rhs, lhs StrokeQuads) {
+ var (
+ states []strokeState
+ beg = qs[0].Quad.From
+ end = qs[len(qs)-1].Quad.To
+ closed = beg == end
+ )
+ for i := range qs {
+ q := qs[i].Quad
+
+ var (
+ n0 = strokePathNorm(q.From, q.Ctrl, q.To, 0, hw)
+ n1 = strokePathNorm(q.From, q.Ctrl, q.To, 1, hw)
+ r0 = strokePathCurv(q.From, q.Ctrl, q.To, 0)
+ r1 = strokePathCurv(q.From, q.Ctrl, q.To, 1)
+ )
+ states = append(states, strokeState{
+ p0: q.From,
+ p1: q.To,
+ n0: n0,
+ n1: n1,
+ r0: r0,
+ r1: r1,
+ ctl: q.Ctrl,
+ })
+ }
+
+ for i, state := range states {
+ rhs = rhs.append(strokeQuadBezier(state, +hw, strokeTolerance))
+ lhs = lhs.append(strokeQuadBezier(state, -hw, strokeTolerance))
+
+ // join the current and next segments
+ if hasNext := i+1 < len(states); hasNext || closed {
+ var next strokeState
+ switch {
+ case hasNext:
+ next = states[i+1]
+ case closed:
+ next = states[0]
+ }
+ if state.n1 != next.n0 {
+ strokePathJoin(stroke, &rhs, &lhs, hw, state.p1, state.n1, next.n0, state.r1, next.r0)
+ }
+ }
+ }
+
+ if closed {
+ rhs.close()
+ lhs.close()
+ return rhs, lhs
+ }
+
+ qbeg := &states[0]
+ qend := &states[len(states)-1]
+
+ // Default to counter-clockwise direction.
+ lhs = lhs.reverse()
+ strokePathCap(stroke, &rhs, hw, qend.p1, qend.n1)
+
+ rhs = rhs.append(lhs)
+ strokePathCap(stroke, &rhs, hw, qbeg.p0, qbeg.n0.Mul(-1))
+
+ rhs.close()
+
+ return rhs, nil
+}
+
+func (qs *StrokeQuads) close() {
+ p0 := (*qs)[len(*qs)-1].Quad.To
+ p1 := (*qs)[0].Quad.From
+
+ if p1 == p0 {
+ return
+ }
+
+ *qs = append(*qs, StrokeQuad{
+ Quad: QuadSegment{
+ From: p0,
+ Ctrl: p0.Add(p1).Mul(0.5),
+ To: p1,
+ },
+ })
+}
+
+// ccw returns whether the path is counter-clockwise.
+func (qs StrokeQuads) ccw() bool {
+ // Use the Shoelace formula:
+ // https://en.wikipedia.org/wiki/Shoelace_formula
+ var area float32
+ for _, ps := range qs.split() {
+ for i := 1; i < len(ps); i++ {
+ pi := ps[i].Quad.To
+ pj := ps[i-1].Quad.To
+ area += (pi.X - pj.X) * (pi.Y + pj.Y)
+ }
+ }
+ return area <= 0.0
+}
+
+// reverse returns a copy of qs with the quads in opposite order and each
+// quad's endpoints swapped, so the path is traversed end-to-start.
+// It returns nil for an empty path.
+func (qs StrokeQuads) reverse() StrokeQuads {
+	if len(qs) == 0 {
+		return nil
+	}
+
+	rev := make(StrokeQuads, 0, len(qs))
+	for i := len(qs) - 1; i >= 0; i-- {
+		q := qs[i] // copy; the original slice is left untouched
+		q.Quad.From, q.Quad.To = q.Quad.To, q.Quad.From
+		rev = append(rev, q)
+	}
+
+	return rev
+}
+
+// append concatenates ps onto qs. When the gap between the two paths is
+// non-zero but smaller than strokeTolerance, a tiny connecting segment
+// is inserted to smooth out rounding errors; larger gaps are preserved,
+// so join/cap points and on-purpose disjoint quads stay disjoint.
+func (qs StrokeQuads) append(ps StrokeQuads) StrokeQuads {
+	if len(ps) == 0 {
+		return qs
+	}
+	if len(qs) == 0 {
+		return ps
+	}
+
+	last := qs[len(qs)-1].Quad.To
+	first := ps[0].Quad.From
+	if last != first && lenPt(last.Sub(first)) < strokeTolerance {
+		// Bridge the sub-tolerance gap with a degenerate quad.
+		qs = append(qs, StrokeQuad{
+			Quad: QuadSegment{
+				From: last,
+				Ctrl: last.Add(first).Mul(0.5),
+				To:   first,
+			},
+		})
+	}
+	return append(qs, ps...)
+}
+
+// Transform returns q with all three of its points mapped through t.
+func (q QuadSegment) Transform(t f32.Affine2D) QuadSegment {
+	return QuadSegment{
+		From: t.Transform(q.From),
+		Ctrl: t.Transform(q.Ctrl),
+		To:   t.Transform(q.To),
+	}
+}
+
+// strokePathNorm returns the normal vector at t.
+func strokePathNorm(p0, p1, p2 f32.Point, t, d float32) f32.Point {
+ switch t {
+ case 0:
+ n := p1.Sub(p0)
+ if n.X == 0 && n.Y == 0 {
+ return f32.Point{}
+ }
+ n = rot90CW(n)
+ return normPt(n, d)
+ case 1:
+ n := p2.Sub(p1)
+ if n.X == 0 && n.Y == 0 {
+ return f32.Point{}
+ }
+ n = rot90CW(n)
+ return normPt(n, d)
+ }
+ panic("impossible")
+}
+
+// rot90CW maps (x, y) to (y, -x): a 90° clockwise rotation when the y-axis points down (screen coordinates).
+func rot90CW(p f32.Point) f32.Point { return f32.Pt(+p.Y, -p.X) }
+// rot90CCW maps (x, y) to (-y, x): a 90° counter-clockwise rotation in the same convention.
+func rot90CCW(p f32.Point) f32.Point { return f32.Pt(-p.Y, +p.X) }
+
+// cosPt returns the cosine of the opening angle between p and q:
+// dot(p, q) / (|p|·|q|). If either vector is zero the result is NaN
+// (0/0), which then propagates through math.Acos in the callers.
+func cosPt(p, q f32.Point) float32 {
+	np := math.Hypot(float64(p.X), float64(p.Y))
+	nq := math.Hypot(float64(q.X), float64(q.Y))
+	return dotPt(p, q) / float32(np*nq)
+}
+
+// normPt scales p to length l.
+// NOTE(review): when |p| is already within 1e-10 of l the function
+// returns the zero point rather than p itself — presumably callers
+// treat a zero normal as "no offset needed"; confirm against
+// strokePathNorm and addLine. Also note that for |p| == 0 and l != 0
+// the division yields ±Inf components — TODO confirm inputs exclude
+// the zero vector.
+func normPt(p f32.Point, l float32) f32.Point {
+	d := math.Hypot(float64(p.X), float64(p.Y))
+	l64 := float64(l)
+	if math.Abs(d-l64) < 1e-10 {
+		return f32.Point{}
+	}
+	n := float32(l64 / d)
+	return f32.Point{X: p.X * n, Y: p.Y * n}
+}
+
+// lenPt returns the Euclidean length of p.
+func lenPt(p f32.Point) float32 {
+	x, y := float64(p.X), float64(p.Y)
+	return float32(math.Hypot(x, y))
+}
+
+// dotPt returns the dot product of p and q.
+func dotPt(p, q f32.Point) float32 {
+	return p.X*q.X + p.Y*q.Y
+}
+
+// perpDot returns the perpendicular dot product (2D cross product) of
+// p and q; its sign tells on which side of p the vector q lies.
+func perpDot(p, q f32.Point) float32 {
+	return p.X*q.Y - p.Y*q.X
+}
+
+// strokePathCurv returns the signed radius of curvature at t, along the
+// quadratic Bézier curve defined by the triplet (beg, ctl, end):
+// |B'(t)|³ / (B'(t) × B''(t)). It is negative when the curve bends
+// right (CW at this point) and NaN when the segment is too line-like
+// for the cross product to be trustworthy.
+// NOTE(review): despite the name this is the radius of curvature
+// (1/κ), and the r0/r1 values it feeds in the caller read as radii —
+// confirm intent before renaming.
+func strokePathCurv(beg, ctl, end f32.Point, t float32) float32 {
+	var (
+		d1p = quadBezierD1(beg, ctl, end, t)
+		d2p = quadBezierD2(beg, ctl, end, t)
+
+		// Negative when bending right, ie: the curve is CW at this point.
+		a = float64(perpDot(d1p, d2p))
+	)
+
+	// We check early that the segment isn't too line-like and
+	// save a costly call to math.Pow that will be discarded by dividing
+	// with a too small 'a'.
+	if math.Abs(a) < 1e-10 {
+		return float32(math.NaN())
+	}
+	// Pow(|B'|², 1.5) == |B'|³.
+	return float32(math.Pow(float64(d1p.X*d1p.X+d1p.Y*d1p.Y), 1.5) / a)
+}
+
+// quadBezierSample returns the point on the Bézier curve at t.
+// B(t) = (1-t)^2 P0 + 2(1-t)t P1 + t^2 P2
+func quadBezierSample(p0, p1, p2 f32.Point, t float32) f32.Point {
+	u := 1 - t
+	w0 := u * u
+	w1 := 2 * u * t
+	w2 := t * t
+	return p0.Mul(w0).Add(p1.Mul(w1)).Add(p2.Mul(w2))
+}
+
+// quadBezierD1 returns the first derivative of the Bézier curve with respect to t.
+// B'(t) = 2(1-t)(P1 - P0) + 2t(P2 - P1)
+func quadBezierD1(p0, p1, p2 f32.Point, t float32) f32.Point {
+	a := p1.Sub(p0).Mul(2 * (1 - t))
+	b := p2.Sub(p1).Mul(2 * t)
+	return a.Add(b)
+}
+
+// quadBezierD2 returns the second derivative of the Bézier curve with
+// respect to t: B''(t) = 2(P2 - 2P1 + P0).
+// The second derivative of a quadratic is constant, so t is unused; the
+// parameter is kept for symmetry with quadBezierD1.
+func quadBezierD2(p0, p1, p2 f32.Point, t float32) f32.Point {
+	sum := p2.Sub(p1.Mul(2)).Add(p0)
+	return sum.Mul(2)
+}
+
+func strokeQuadBezier(state strokeState, d, flatness float32) StrokeQuads {
+ // Gio strokes are only quadratic Bézier curves, w/o any inflection point.
+ // So we just have to flatten them.
+ var qs StrokeQuads
+ return flattenQuadBezier(qs, state.p0, state.ctl, state.p1, d, flatness)
+}
+
+// flattenQuadBezier splits a Bézier quadratic curve into linear sub-segments,
+// themselves also encoded as Bézier (degenerate, flat) quadratic curves.
+// The sub-segments, offset along the normal by d, are appended to qs and
+// the result is returned.
+func flattenQuadBezier(qs StrokeQuads, p0, p1, p2 f32.Point, d, flatness float32) StrokeQuads {
+	var (
+		t      float32
+		flat64 = float64(flatness)
+	)
+	for t < 1 {
+		// s2/den is the distance of p2 from the line through p0 in the
+		// direction of p1 — a measure of how curved the remainder is.
+		s2 := float64((p2.X-p0.X)*(p1.Y-p0.Y) - (p2.Y-p0.Y)*(p1.X-p0.X))
+		den := math.Hypot(float64(p1.X-p0.X), float64(p1.Y-p0.Y))
+		if s2*den == 0.0 {
+			// Degenerate curve (collinear points or zero-length start
+			// tangent): emit the remainder as a single segment.
+			break
+		}
+
+		s2 /= den
+		// Parametric step that keeps the chord deviation within the
+		// requested flatness.
+		t = 2.0 * float32(math.Sqrt(flat64/3.0/math.Abs(s2)))
+		if t >= 1.0 {
+			break
+		}
+		// Split off the [0, t] part as a line and continue with the rest.
+		var q0, q1, q2 f32.Point
+		q0, q1, q2, p0, p1, p2 = quadBezierSplit(p0, p1, p2, t)
+		qs.addLine(q0, q1, q2, 0, d)
+	}
+	qs.addLine(p0, p1, p2, 1, d)
+	return qs
+}
+
+// addLine appends the chord of the sub-curve (p0, ctrl, p1), offset
+// along its normal by d, encoded as a degenerate (flat) quad.
+// NOTE(review): the t parameter is unused — callers pass 0/1 but the
+// body hard-codes both endpoint normals; confirm before removing.
+func (qs *StrokeQuads) addLine(p0, ctrl, p1 f32.Point, t, d float32) {
+
+	switch i := len(*qs); i {
+	case 0:
+		// First segment: offset the start point by the start normal.
+		p0 = p0.Add(strokePathNorm(p0, ctrl, p1, 0, d))
+	default:
+		// Address possible rounding errors and use previous point.
+		p0 = (*qs)[i-1].Quad.To
+	}
+
+	// p0 may already be offset here, but the t==1 normal only depends
+	// on ctrl and p1 (see strokePathNorm), so passing it is harmless.
+	p1 = p1.Add(strokePathNorm(p0, ctrl, p1, 1, d))
+
+	*qs = append(*qs,
+		StrokeQuad{
+			Quad: QuadSegment{
+				From: p0,
+				Ctrl: p0.Add(p1).Mul(0.5),
+				To:   p1,
+			},
+		},
+	)
+}
+
+// quadInterp returns the linear interpolation between p and q at t.
+func quadInterp(p, q f32.Point, t float32) f32.Point {
+	u := 1 - t
+	return f32.Pt(u*p.X+t*q.X, u*p.Y+t*q.Y)
+}
+
+// quadBezierSplit returns the pair of triplets (from,ctrl,to) Bézier curve,
+// split before (resp. after) the provided parametric t value.
+func quadBezierSplit(p0, p1, p2 f32.Point, t float32) (f32.Point, f32.Point, f32.Point, f32.Point, f32.Point, f32.Point) {
+
+ var (
+ b0 = p0
+ b1 = quadInterp(p0, p1, t)
+ b2 = quadBezierSample(p0, p1, p2, t)
+
+ a0 = b2
+ a1 = quadInterp(p1, p2, t)
+ a2 = p2
+ )
+
+ return b0, b1, b2, a0, a1, a2
+}
+
+// strokePathJoin joins the two paths rhs and lhs, according to the provided
+// stroke operation. Only round joins are currently implemented, so the
+// stroke style is not consulted.
+func strokePathJoin(stroke StrokeStyle, rhs, lhs *StrokeQuads, hw float32, pivot, n0, n1 f32.Point, r0, r1 float32) {
+	strokePathRoundJoin(rhs, lhs, hw, pivot, n0, n1, r0, r1)
+}
+
+// strokePathRoundJoin joins rhs (right-hand side) and lhs with a round
+// joint around pivot, turning from normal n0 to n1: the outer side of
+// the turn receives an arc, the inner side a straight line.
+// NOTE(review): hw, r0 and r1 are unused here — the radius is implied
+// by the distance from the pen to the pivot; confirm that is always hw.
+func strokePathRoundJoin(rhs, lhs *StrokeQuads, hw float32, pivot, n0, n1 f32.Point, r0, r1 float32) {
+	// Offset points of the next segment's start on either side of pivot.
+	rp := pivot.Add(n1)
+	lp := pivot.Sub(n1)
+	// The sign of n1 against the rotated n0 gives the turn direction.
+	cw := dotPt(rot90CW(n0), n1) >= 0.0
+	switch {
+	case cw:
+		// Path bends to the right, ie. CW (or 180 degree turn).
+		// NOTE(review): arc(c, c, angle) presumably takes offsets from
+		// the pen to the two foci (a circle centered at pivot) —
+		// confirm against (*StrokeQuads).arc.
+		c := pivot.Sub(lhs.pen())
+		angle := -math.Acos(float64(cosPt(n0, n1)))
+		lhs.arc(c, c, float32(angle))
+		lhs.lineTo(lp) // Add a line to accommodate for rounding errors.
+		rhs.lineTo(rp)
+	default:
+		// Path bends to the left, ie. CCW.
+		angle := math.Acos(float64(cosPt(n0, n1)))
+		c := pivot.Sub(rhs.pen())
+		rhs.arc(c, c, float32(angle))
+		rhs.lineTo(rp) // Add a line to accommodate for rounding errors.
+		lhs.lineTo(lp)
+	}
+}
+
+// strokePathCap caps the provided path qs, according to the provided
+// stroke operation. Only round caps are currently implemented, so the
+// stroke style is not consulted.
+func strokePathCap(stroke StrokeStyle, qs *StrokeQuads, hw float32, pivot, n0 f32.Point) {
+	strokePathRoundCap(qs, hw, pivot, n0)
+}
+
+// strokePathRoundCap caps the start or end of a path with a half-circle
+// (a π arc) around pivot, drawn from the current pen position.
+// NOTE(review): hw and n0 are unused — the radius is implied by the
+// pen-to-pivot distance; confirm it always equals hw.
+func strokePathRoundCap(qs *StrokeQuads, hw float32, pivot, n0 f32.Point) {
+	c := pivot.Sub(qs.pen())
+	qs.arc(c, c, math.Pi)
+}
+
+// ArcTransform computes a transformation that can be used for generating quadratic bézier
+// curve approximations for an arc.
+//
+// The math is extracted from the following paper:
+// "Drawing an elliptical arc using polylines, quadratic or
+// cubic Bezier curves", L. Maisonobe
+// An electronic version may be found at:
+// http://spaceroots.org/documents/ellipse/elliptical-arc.pdf
+//
+// p is a point on the ellipse with foci f1 and f2, angle is the total
+// sweep and segments is the number of Bézier segments it is divided
+// into. The returned matrix advances a point by half a segment's angle
+// around the ellipse.
+// NOTE(review): the rotation uses θ/2 per application — presumably the
+// caller applies it twice per segment (control point, then endpoint);
+// confirm against the call site.
+func ArcTransform(p, f1, f2 f32.Point, angle float32, segments int) f32.Affine2D {
+	var rx, ry, alpha float64
+	if f1 == f2 {
+		// degenerate case of a circle.
+		rx = dist(f1, p)
+		ry = rx
+	} else {
+		// semi-major axis: 2a = |PF1| + |PF2|
+		a := 0.5 * (dist(f1, p) + dist(f2, p))
+		// semi-minor axis: c^2 = a^2 - b^2 (c: focal distance)
+		c := dist(f1, f2) * 0.5
+		// NOTE(review): rounding could make a < c here, yielding NaN —
+		// confirm callers guarantee p lies on the ellipse.
+		b := math.Sqrt(a*a - c*c)
+		switch {
+		case a > b:
+			rx = a
+			ry = b
+		default:
+			rx = b
+			ry = a
+		}
+		if f1.X == f2.X {
+			// special case of a "vertical" ellipse.
+			alpha = math.Pi / 2
+			if f1.Y < f2.Y {
+				alpha = -alpha
+			}
+		} else {
+			// Tilt of the major axis from the x-axis.
+			x := float64(f1.X-f2.X) * 0.5
+			if x < 0 {
+				x = -x
+			}
+			alpha = math.Acos(x / c)
+		}
+	}
+
+	var (
+		θ   = angle / float32(segments)
+		ref f32.Affine2D // transform from absolute frame to ellipse-based one
+		rot f32.Affine2D // rotation matrix for each segment
+		inv f32.Affine2D // transform from ellipse-based frame to absolute one
+	)
+	// The ellipse center is the midpoint between the foci.
+	center := f32.Point{
+		X: 0.5 * (f1.X + f2.X),
+		Y: 0.5 * (f1.Y + f2.Y),
+	}
+	ref = ref.Offset(f32.Point{}.Sub(center))
+	ref = ref.Rotate(f32.Point{}, float32(-alpha))
+	ref = ref.Scale(f32.Point{}, f32.Point{
+		X: float32(1 / rx),
+		Y: float32(1 / ry),
+	})
+	inv = ref.Invert()
+	rot = rot.Rotate(f32.Point{}, 0.5*θ)
+
+	// Instead of invoking math.Sincos for every segment, compute a rotation
+	// matrix once and apply for each segment.
+	// Before applying the rotation matrix rot, transform the coordinates
+	// to a frame centered to the ellipse (and warped into a unit circle), then rotate.
+	// Finally, transform back into the original frame.
+	return inv.Mul(rot).Mul(ref)
+}
+
+func dist(p1, p2 f32.Point) float64 {
+ var (
+ x1 = float64(p1.X)
+ y1 = float64(p1.Y)
+ x2 = float64(p2.X)
+ y2 = float64(p2.Y)
+ dx = x2 - x1
+ dy = y2 - y1
+ )
+ return math.Hypot(dx, dy)
+}
+
+// StrokePathCommands builds the stroked outline, as a set of quads, of
+// the path encoded in scene, using the given stroke style.
+func StrokePathCommands(style StrokeStyle, scene []byte) StrokeQuads {
+	return decodeToStrokeQuads(scene).stroke(style)
+}
+
+// decodeToStrokeQuads decodes scene commands to quads ready to stroke.
+// Each record is a 4-byte little-endian contour id followed by one
+// scene command of scene.CommandSize bytes; trailing bytes shorter than
+// a full record are ignored. Lines become degenerate quads, and cubics
+// are approximated by sequences of quads.
+func decodeToStrokeQuads(pathData []byte) StrokeQuads {
+	// Rough pre-allocation; cubics may expand to several quads each.
+	quads := make(StrokeQuads, 0, 2*len(pathData)/(scene.CommandSize+4))
+	for len(pathData) >= scene.CommandSize+4 {
+		contour := binary.LittleEndian.Uint32(pathData)
+		cmd := ops.DecodeCommand(pathData[4:])
+		switch cmd.Op() {
+		case scene.OpLine:
+			// Encode the line as a flat quad (control at the midpoint).
+			var q QuadSegment
+			q.From, q.To = scene.DecodeLine(cmd)
+			q.Ctrl = q.From.Add(q.To).Mul(.5)
+			quad := StrokeQuad{
+				Contour: contour,
+				Quad:    q,
+			}
+			quads = append(quads, quad)
+		case scene.OpQuad:
+			var q QuadSegment
+			q.From, q.Ctrl, q.To = scene.DecodeQuad(cmd)
+			quad := StrokeQuad{
+				Contour: contour,
+				Quad:    q,
+			}
+			quads = append(quads, quad)
+		case scene.OpCubic:
+			// Approximate the cubic with quadratic segments.
+			for _, q := range SplitCubic(scene.DecodeCubic(cmd)) {
+				quad := StrokeQuad{
+					Contour: contour,
+					Quad:    q,
+				}
+				quads = append(quads, quad)
+			}
+		default:
+			panic("unsupported scene command")
+		}
+		pathData = pathData[scene.CommandSize+4:]
+	}
+	return quads
+}
+
+// SplitCubic approximates the cubic Bézier (from, ctrl0, ctrl1, to) by
+// a sequence of quadratic segments.
+func SplitCubic(from, ctrl0, ctrl1, to f32.Point) []QuadSegment {
+	quads := make([]QuadSegment, 0, 10)
+	// Set the maximum distance proportionally to the longest side
+	// of the bounding rectangle.
+	// NOTE(review): assumes f32.Rectangle.Add grows the rect to contain
+	// the point; if it translates the rect (image.Rectangle semantics)
+	// this is not a true bounding box — confirm against f32.Rectangle.
+	hull := f32.Rectangle{
+		Min: from,
+		Max: ctrl0,
+	}.Canon().Add(ctrl1).Add(to)
+	l := hull.Dx()
+	if h := hull.Dy(); h > l {
+		l = h
+	}
+	// Tolerance: 0.1% of the longest bounding side.
+	approxCubeTo(&quads, 0, l*0.001, from, ctrl0, ctrl1, to)
+	return quads
+}
+
+// approxCubeTo approximates a cubic Bézier by a series of quadratic
+// curves, appended to quads. splits counts the subdivisions performed
+// so far (capped at 32) and the updated count is returned; maxDist is
+// the maximum allowed deviation between the cubic and its quadratic
+// approximation.
+func approxCubeTo(quads *[]QuadSegment, splits int, maxDist float32, from, ctrl0, ctrl1, to f32.Point) int {
+	// The idea is from
+	// https://caffeineowl.com/graphics/2d/vectorial/cubic2quad01.html
+	// where a quadratic approximates a cubic by eliminating its t³ term
+	// from its polynomial expression anchored at the starting point:
+	//
+	// P(t) = pen + 3t(ctrl0 - pen) + 3t²(ctrl1 - 2ctrl0 + pen) + t³(to - 3ctrl1 + 3ctrl0 - pen)
+	//
+	// The control point for the new quadratic Q1 that shares starting point, pen, with P is
+	//
+	// C1 = (3ctrl0 - pen)/2
+	//
+	// The reverse cubic anchored at the end point has the polynomial
+	//
+	// P'(t) = to + 3t(ctrl1 - to) + 3t²(ctrl0 - 2ctrl1 + to) + t³(pen - 3ctrl0 + 3ctrl1 - to)
+	//
+	// The corresponding quadratic Q2 that shares the end point, to, with P has control
+	// point
+	//
+	// C2 = (3ctrl1 - to)/2
+	//
+	// The combined quadratic Bézier, Q, shares both start and end points with its cubic
+	// and use the midpoint between the two curves Q1 and Q2 as control point:
+	//
+	// C = (3ctrl0 - pen + 3ctrl1 - to)/4
+	c := ctrl0.Mul(3).Sub(from).Add(ctrl1.Mul(3)).Sub(to).Mul(1.0 / 4.0)
+	const maxSplits = 32
+	if splits >= maxSplits {
+		// Stop subdividing: accept the current approximation.
+		*quads = append(*quads, QuadSegment{From: from, Ctrl: c, To: to})
+		return splits
+	}
+	// The maximum distance between the cubic P and its approximation Q given t
+	// can be shown to be
+	//
+	// d = sqrt(3)/36*|to - 3ctrl1 + 3ctrl0 - pen|
+	//
+	// To save a square root, compare d² with the squared tolerance.
+	v := to.Sub(ctrl1.Mul(3)).Add(ctrl0.Mul(3)).Sub(from)
+	d2 := (v.X*v.X + v.Y*v.Y) * 3 / (36 * 36)
+	if d2 <= maxDist*maxDist {
+		// Close enough: emit the single quadratic approximation.
+		*quads = append(*quads, QuadSegment{From: from, Ctrl: c, To: to})
+		return splits
+	}
+	// De Casteljau split the curve and approximate the halves.
+	t := float32(0.5)
+	c0 := from.Add(ctrl0.Sub(from).Mul(t))
+	c1 := ctrl0.Add(ctrl1.Sub(ctrl0).Mul(t))
+	c2 := ctrl1.Add(to.Sub(ctrl1).Mul(t))
+	c01 := c0.Add(c1.Sub(c0).Mul(t))
+	c12 := c1.Add(c2.Sub(c1).Mul(t))
+	c0112 := c01.Add(c12.Sub(c01).Mul(t))
+	splits++
+	splits = approxCubeTo(quads, splits, maxDist, from, c0, c01, c0112)
+	splits = approxCubeTo(quads, splits, maxDist, c0112, c12, c2, to)
+	return splits
+}
diff --git a/vendor/gioui.org/internal/vk/vulkan.go b/vendor/gioui.org/internal/vk/vulkan.go
new file mode 100644
index 0000000..f210d40
--- /dev/null
+++ b/vendor/gioui.org/internal/vk/vulkan.go
@@ -0,0 +1,2081 @@
+// SPDX-License-Identifier: Unlicense OR MIT
+
+//go:build linux || freebsd
+// +build linux freebsd
+
+package vk
+
+/*
+#cgo linux freebsd LDFLAGS: -ldl
+#cgo freebsd CFLAGS: -I/usr/local/include
+#cgo CFLAGS: -Werror -Werror=return-type
+
+#define VK_NO_PROTOTYPES 1
+#define VK_DEFINE_NON_DISPATCHABLE_HANDLE(object) typedef uint64_t object;
+#include
+#define __USE_GNU
+#include
+#include
+
+static VkResult vkCreateInstance(PFN_vkCreateInstance f, VkInstanceCreateInfo pCreateInfo, const VkAllocationCallbacks *pAllocator, VkInstance *pInstance) {
+ return f(&pCreateInfo, pAllocator, pInstance);
+}
+
+static void vkDestroyInstance(PFN_vkDestroyInstance f, VkInstance instance, const VkAllocationCallbacks *pAllocator) {
+ f(instance, pAllocator);
+}
+
+static VkResult vkEnumeratePhysicalDevices(PFN_vkEnumeratePhysicalDevices f, VkInstance instance, uint32_t *pPhysicalDeviceCount, VkPhysicalDevice *pPhysicalDevices) {
+ return f(instance, pPhysicalDeviceCount, pPhysicalDevices);
+}
+
+static void vkGetPhysicalDeviceQueueFamilyProperties(PFN_vkGetPhysicalDeviceQueueFamilyProperties f, VkPhysicalDevice physicalDevice, uint32_t *pQueueFamilyPropertyCount, VkQueueFamilyProperties *pQueueFamilyProperties) {
+ f(physicalDevice, pQueueFamilyPropertyCount, pQueueFamilyProperties);
+}
+
+static void vkGetPhysicalDeviceFormatProperties(PFN_vkGetPhysicalDeviceFormatProperties f, VkPhysicalDevice physicalDevice, VkFormat format, VkFormatProperties *pFormatProperties) {
+ f(physicalDevice, format, pFormatProperties);
+}
+
+static VkResult vkCreateDevice(PFN_vkCreateDevice f, VkPhysicalDevice physicalDevice, VkDeviceCreateInfo pCreateInfo, VkDeviceQueueCreateInfo qinf, const VkAllocationCallbacks *pAllocator, VkDevice *pDevice) {
+ pCreateInfo.pQueueCreateInfos = &qinf;
+ return f(physicalDevice, &pCreateInfo, pAllocator, pDevice);
+}
+
+static void vkDestroyDevice(PFN_vkDestroyDevice f, VkDevice device, const VkAllocationCallbacks *pAllocator) {
+ f(device, pAllocator);
+}
+
+static void vkGetDeviceQueue(PFN_vkGetDeviceQueue f, VkDevice device, uint32_t queueFamilyIndex, uint32_t queueIndex, VkQueue *pQueue) {
+ f(device, queueFamilyIndex, queueIndex, pQueue);
+}
+
+static VkResult vkCreateImageView(PFN_vkCreateImageView f, VkDevice device, const VkImageViewCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkImageView *pView) {
+ return f(device, pCreateInfo, pAllocator, pView);
+}
+
+static void vkDestroyImageView(PFN_vkDestroyImageView f, VkDevice device, VkImageView imageView, const VkAllocationCallbacks *pAllocator) {
+ f(device, imageView, pAllocator);
+}
+
+static VkResult vkCreateFramebuffer(PFN_vkCreateFramebuffer f, VkDevice device, VkFramebufferCreateInfo pCreateInfo, const VkAllocationCallbacks *pAllocator, VkFramebuffer *pFramebuffer) {
+ return f(device, &pCreateInfo, pAllocator, pFramebuffer);
+}
+
+static void vkDestroyFramebuffer(PFN_vkDestroyFramebuffer f, VkDevice device, VkFramebuffer framebuffer, const VkAllocationCallbacks *pAllocator) {
+ f(device, framebuffer, pAllocator);
+}
+
+static VkResult vkDeviceWaitIdle(PFN_vkDeviceWaitIdle f, VkDevice device) {
+ return f(device);
+}
+
+static VkResult vkQueueWaitIdle(PFN_vkQueueWaitIdle f, VkQueue queue) {
+ return f(queue);
+}
+
+static VkResult vkCreateSemaphore(PFN_vkCreateSemaphore f, VkDevice device, const VkSemaphoreCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkSemaphore *pSemaphore) {
+ return f(device, pCreateInfo, pAllocator, pSemaphore);
+}
+
+static void vkDestroySemaphore(PFN_vkDestroySemaphore f, VkDevice device, VkSemaphore semaphore, const VkAllocationCallbacks *pAllocator) {
+ f(device, semaphore, pAllocator);
+}
+
+static VkResult vkCreateRenderPass(PFN_vkCreateRenderPass f, VkDevice device, VkRenderPassCreateInfo pCreateInfo, VkSubpassDescription subpassInf, const VkAllocationCallbacks *pAllocator, VkRenderPass *pRenderPass) {
+ pCreateInfo.pSubpasses = &subpassInf;
+ return f(device, &pCreateInfo, pAllocator, pRenderPass);
+}
+
+static void vkDestroyRenderPass(PFN_vkDestroyRenderPass f, VkDevice device, VkRenderPass renderPass, const VkAllocationCallbacks *pAllocator) {
+ f(device, renderPass, pAllocator);
+}
+
+static VkResult vkCreateCommandPool(PFN_vkCreateCommandPool f, VkDevice device, const VkCommandPoolCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkCommandPool *pCommandPool) {
+ return f(device, pCreateInfo, pAllocator, pCommandPool);
+}
+
+static void vkDestroyCommandPool(PFN_vkDestroyCommandPool f, VkDevice device, VkCommandPool commandPool, const VkAllocationCallbacks *pAllocator) {
+ f(device, commandPool, pAllocator);
+}
+
+static VkResult vkAllocateCommandBuffers(PFN_vkAllocateCommandBuffers f, VkDevice device, const VkCommandBufferAllocateInfo *pAllocateInfo, VkCommandBuffer *pCommandBuffers) {
+ return f(device, pAllocateInfo, pCommandBuffers);
+}
+
+static void vkFreeCommandBuffers(PFN_vkFreeCommandBuffers f, VkDevice device, VkCommandPool commandPool, uint32_t commandBufferCount, const VkCommandBuffer *pCommandBuffers) {
+ f(device, commandPool, commandBufferCount, pCommandBuffers);
+}
+
+static VkResult vkBeginCommandBuffer(PFN_vkBeginCommandBuffer f, VkCommandBuffer commandBuffer, VkCommandBufferBeginInfo pBeginInfo) {
+ return f(commandBuffer, &pBeginInfo);
+}
+
+static VkResult vkEndCommandBuffer(PFN_vkEndCommandBuffer f, VkCommandBuffer commandBuffer) {
+ return f(commandBuffer);
+}
+
+static VkResult vkQueueSubmit(PFN_vkQueueSubmit f, VkQueue queue, VkSubmitInfo pSubmits, VkFence fence) {
+ return f(queue, 1, &pSubmits, fence);
+}
+
+static void vkCmdBeginRenderPass(PFN_vkCmdBeginRenderPass f, VkCommandBuffer commandBuffer, VkRenderPassBeginInfo pRenderPassBegin, VkSubpassContents contents) {
+ f(commandBuffer, &pRenderPassBegin, contents);
+}
+
+static void vkCmdEndRenderPass(PFN_vkCmdEndRenderPass f, VkCommandBuffer commandBuffer) {
+ f(commandBuffer);
+}
+
+static void vkCmdCopyBuffer(PFN_vkCmdCopyBuffer f, VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer, uint32_t regionCount, const VkBufferCopy *pRegions) {
+ f(commandBuffer, srcBuffer, dstBuffer, regionCount, pRegions);
+}
+
+static void vkCmdCopyBufferToImage(PFN_vkCmdCopyBufferToImage f, VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount, const VkBufferImageCopy *pRegions) {
+ f(commandBuffer, srcBuffer, dstImage, dstImageLayout, regionCount, pRegions);
+}
+
+static void vkCmdPipelineBarrier(PFN_vkCmdPipelineBarrier f, VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags, uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers, uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers, uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
+ f(commandBuffer, srcStageMask, dstStageMask, dependencyFlags, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
+}
+
+static void vkCmdPushConstants(PFN_vkCmdPushConstants f, VkCommandBuffer commandBuffer, VkPipelineLayout layout, VkShaderStageFlags stageFlags, uint32_t offset, uint32_t size, const void *pValues) {
+ f(commandBuffer, layout, stageFlags, offset, size, pValues);
+}
+
+static void vkCmdBindPipeline(PFN_vkCmdBindPipeline f, VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipeline pipeline) {
+ f(commandBuffer, pipelineBindPoint, pipeline);
+}
+
+static void vkCmdBindVertexBuffers(PFN_vkCmdBindVertexBuffers f, VkCommandBuffer commandBuffer, uint32_t firstBinding, uint32_t bindingCount, const VkBuffer *pBuffers, const VkDeviceSize *pOffsets) {
+ f(commandBuffer, firstBinding, bindingCount, pBuffers, pOffsets);
+}
+
+static void vkCmdSetViewport(PFN_vkCmdSetViewport f, VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount, const VkViewport *pViewports) {
+ f(commandBuffer, firstViewport, viewportCount, pViewports);
+}
+
+static void vkCmdBindIndexBuffer(PFN_vkCmdBindIndexBuffer f, VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkIndexType indexType) {
+ f(commandBuffer, buffer, offset, indexType);
+}
+
+static void vkCmdDraw(PFN_vkCmdDraw f, VkCommandBuffer commandBuffer, uint32_t vertexCount, uint32_t instanceCount, uint32_t firstVertex, uint32_t firstInstance) {
+ f(commandBuffer, vertexCount, instanceCount, firstVertex, firstInstance);
+}
+
+static void vkCmdDrawIndexed(PFN_vkCmdDrawIndexed f, VkCommandBuffer commandBuffer, uint32_t indexCount, uint32_t instanceCount, uint32_t firstIndex, int32_t vertexOffset, uint32_t firstInstance) {
+ f(commandBuffer, indexCount, instanceCount, firstIndex, vertexOffset, firstInstance);
+}
+
+static void vkCmdBindDescriptorSets(PFN_vkCmdBindDescriptorSets f, VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout layout, uint32_t firstSet, uint32_t descriptorSetCount, const VkDescriptorSet *pDescriptorSets, uint32_t dynamicOffsetCount, const uint32_t *pDynamicOffsets) {
+ f(commandBuffer, pipelineBindPoint, layout, firstSet, descriptorSetCount, pDescriptorSets, dynamicOffsetCount, pDynamicOffsets);
+}
+
+static void vkCmdCopyImageToBuffer(PFN_vkCmdCopyImageToBuffer f, VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkBuffer dstBuffer, uint32_t regionCount, const VkBufferImageCopy *pRegions) {
+ f(commandBuffer, srcImage, srcImageLayout, dstBuffer, regionCount, pRegions);
+}
+
+static void vkCmdDispatch(PFN_vkCmdDispatch f, VkCommandBuffer commandBuffer, uint32_t groupCountX, uint32_t groupCountY, uint32_t groupCountZ) {
+ f(commandBuffer, groupCountX, groupCountY, groupCountZ);
+}
+
+static VkResult vkCreateImage(PFN_vkCreateImage f, VkDevice device, const VkImageCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkImage *pImage) {
+ return f(device, pCreateInfo, pAllocator, pImage);
+}
+
+static void vkDestroyImage(PFN_vkDestroyImage f, VkDevice device, VkImage image, const VkAllocationCallbacks *pAllocator) {
+ f(device, image, pAllocator);
+}
+
+static void vkGetImageMemoryRequirements(PFN_vkGetImageMemoryRequirements f, VkDevice device, VkImage image, VkMemoryRequirements *pMemoryRequirements) {
+ f(device, image, pMemoryRequirements);
+}
+
+static VkResult vkAllocateMemory(PFN_vkAllocateMemory f, VkDevice device, const VkMemoryAllocateInfo *pAllocateInfo, const VkAllocationCallbacks *pAllocator, VkDeviceMemory *pMemory) {
+ return f(device, pAllocateInfo, pAllocator, pMemory);
+}
+
+static VkResult vkBindImageMemory(PFN_vkBindImageMemory f, VkDevice device, VkImage image, VkDeviceMemory memory, VkDeviceSize memoryOffset) {
+ return f(device, image, memory, memoryOffset);
+}
+
+static void vkFreeMemory(PFN_vkFreeMemory f, VkDevice device, VkDeviceMemory memory, const VkAllocationCallbacks *pAllocator) {
+ f(device, memory, pAllocator);
+}
+
+static void vkGetPhysicalDeviceMemoryProperties(PFN_vkGetPhysicalDeviceMemoryProperties f, VkPhysicalDevice physicalDevice, VkPhysicalDeviceMemoryProperties *pMemoryProperties) {
+ f(physicalDevice, pMemoryProperties);
+}
+
+static VkResult vkCreateSampler(PFN_vkCreateSampler f,VkDevice device, const VkSamplerCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkSampler *pSampler) {
+ return f(device, pCreateInfo, pAllocator, pSampler);
+}
+
+static void vkDestroySampler(PFN_vkDestroySampler f, VkDevice device, VkSampler sampler, const VkAllocationCallbacks *pAllocator) {
+ f(device, sampler, pAllocator);
+}
+
+static VkResult vkCreateBuffer(PFN_vkCreateBuffer f, VkDevice device, const VkBufferCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkBuffer *pBuffer) {
+ return f(device, pCreateInfo, pAllocator, pBuffer);
+}
+
+static void vkDestroyBuffer(PFN_vkDestroyBuffer f, VkDevice device, VkBuffer buffer, const VkAllocationCallbacks *pAllocator) {
+ f(device, buffer, pAllocator);
+}
+
+static void vkGetBufferMemoryRequirements(PFN_vkGetBufferMemoryRequirements f, VkDevice device, VkBuffer buffer, VkMemoryRequirements *pMemoryRequirements) {
+ f(device, buffer, pMemoryRequirements);
+}
+
+static VkResult vkBindBufferMemory(PFN_vkBindBufferMemory f, VkDevice device, VkBuffer buffer, VkDeviceMemory memory, VkDeviceSize memoryOffset) {
+ return f(device, buffer, memory, memoryOffset);
+}
+
+static VkResult vkCreateShaderModule(PFN_vkCreateShaderModule f, VkDevice device, VkShaderModuleCreateInfo pCreateInfo, const VkAllocationCallbacks *pAllocator, VkShaderModule *pShaderModule) {
+ return f(device, &pCreateInfo, pAllocator, pShaderModule);
+}
+
+static void vkDestroyShaderModule(PFN_vkDestroyShaderModule f, VkDevice device, VkShaderModule shaderModule, const VkAllocationCallbacks *pAllocator) {
+ f(device, shaderModule, pAllocator);
+}
+
+static VkResult vkCreateGraphicsPipelines(PFN_vkCreateGraphicsPipelines f, VkDevice device, VkPipelineCache pipelineCache, VkGraphicsPipelineCreateInfo pCreateInfo, VkPipelineDynamicStateCreateInfo dynInf, VkPipelineColorBlendStateCreateInfo blendInf, VkPipelineVertexInputStateCreateInfo vertexInf, VkPipelineViewportStateCreateInfo viewportInf, const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines) {
+ pCreateInfo.pDynamicState = &dynInf;
+ pCreateInfo.pViewportState = &viewportInf;
+ pCreateInfo.pColorBlendState = &blendInf;
+ pCreateInfo.pVertexInputState = &vertexInf;
+ return f(device, pipelineCache, 1, &pCreateInfo, pAllocator, pPipelines);
+}
+
+static void vkDestroyPipeline(PFN_vkDestroyPipeline f, VkDevice device, VkPipeline pipeline, const VkAllocationCallbacks *pAllocator) {
+ f(device, pipeline, pAllocator);
+}
+
+static VkResult vkCreatePipelineLayout(PFN_vkCreatePipelineLayout f, VkDevice device, VkPipelineLayoutCreateInfo pCreateInfo, const VkAllocationCallbacks *pAllocator, VkPipelineLayout *pPipelineLayout) {
+ return f(device, &pCreateInfo, pAllocator, pPipelineLayout);
+}
+
+static void vkDestroyPipelineLayout(PFN_vkDestroyPipelineLayout f, VkDevice device, VkPipelineLayout pipelineLayout, const VkAllocationCallbacks *pAllocator) {
+ f(device, pipelineLayout, pAllocator);
+}
+
+static VkResult vkCreateDescriptorSetLayout(PFN_vkCreateDescriptorSetLayout f, VkDevice device, VkDescriptorSetLayoutCreateInfo pCreateInfo, const VkAllocationCallbacks *pAllocator, VkDescriptorSetLayout *pSetLayout) {
+ return f(device, &pCreateInfo, pAllocator, pSetLayout);
+}
+
+static void vkDestroyDescriptorSetLayout(PFN_vkDestroyDescriptorSetLayout f, VkDevice device, VkDescriptorSetLayout descriptorSetLayout, const VkAllocationCallbacks *pAllocator) {
+ f(device, descriptorSetLayout, pAllocator);
+}
+
+static VkResult vkMapMemory(PFN_vkMapMemory f, VkDevice device, VkDeviceMemory memory, VkDeviceSize offset, VkDeviceSize size, VkMemoryMapFlags flags, void **ppData) {
+ return f(device, memory, offset, size, flags, ppData);
+}
+
+static void vkUnmapMemory(PFN_vkUnmapMemory f, VkDevice device, VkDeviceMemory memory) {
+ f(device, memory);
+}
+
+static VkResult vkResetCommandBuffer(PFN_vkResetCommandBuffer f, VkCommandBuffer commandBuffer, VkCommandBufferResetFlags flags) {
+ return f(commandBuffer, flags);
+}
+
+static VkResult vkCreateDescriptorPool(PFN_vkCreateDescriptorPool f, VkDevice device, VkDescriptorPoolCreateInfo pCreateInfo, const VkAllocationCallbacks *pAllocator, VkDescriptorPool *pDescriptorPool) {
+ return f(device, &pCreateInfo, pAllocator, pDescriptorPool);
+}
+
+static void vkDestroyDescriptorPool(PFN_vkDestroyDescriptorPool f, VkDevice device, VkDescriptorPool descriptorPool, const VkAllocationCallbacks *pAllocator) {
+ f(device, descriptorPool, pAllocator);
+}
+
+static VkResult vkAllocateDescriptorSets(PFN_vkAllocateDescriptorSets f, VkDevice device, VkDescriptorSetAllocateInfo pAllocateInfo, VkDescriptorSet *pDescriptorSets) {
+ return f(device, &pAllocateInfo, pDescriptorSets);
+}
+
+static VkResult vkFreeDescriptorSets(PFN_vkFreeDescriptorSets f, VkDevice device, VkDescriptorPool descriptorPool, uint32_t descriptorSetCount, const VkDescriptorSet *pDescriptorSets) {
+ return f(device, descriptorPool, descriptorSetCount, pDescriptorSets);
+}
+
+static void vkUpdateDescriptorSets(PFN_vkUpdateDescriptorSets f, VkDevice device, VkWriteDescriptorSet pDescriptorWrite, uint32_t descriptorCopyCount, const VkCopyDescriptorSet *pDescriptorCopies) {
+ f(device, 1, &pDescriptorWrite, descriptorCopyCount, pDescriptorCopies);
+}
+
+static VkResult vkResetDescriptorPool(PFN_vkResetDescriptorPool f, VkDevice device, VkDescriptorPool descriptorPool, VkDescriptorPoolResetFlags flags) {
+ return f(device, descriptorPool, flags);
+}
+
+static void vkCmdCopyImage(PFN_vkCmdCopyImage f, VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageCopy *pRegions) {
+ f(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount, pRegions);
+}
+
+static VkResult vkCreateComputePipelines(PFN_vkCreateComputePipelines f, VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkComputePipelineCreateInfo *pCreateInfos, const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines) {
+ return f(device, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines);
+}
+
+static VkResult vkCreateFence(PFN_vkCreateFence f, VkDevice device, const VkFenceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkFence *pFence) {
+ return f(device, pCreateInfo, pAllocator, pFence);
+}
+
+static void vkDestroyFence(PFN_vkDestroyFence f, VkDevice device, VkFence fence, const VkAllocationCallbacks *pAllocator) {
+ f(device, fence, pAllocator);
+}
+
+static VkResult vkWaitForFences(PFN_vkWaitForFences f, VkDevice device, uint32_t fenceCount, const VkFence *pFences, VkBool32 waitAll, uint64_t timeout) {
+ return f(device, fenceCount, pFences, waitAll, timeout);
+}
+
+static VkResult vkResetFences(PFN_vkResetFences f, VkDevice device, uint32_t fenceCount, const VkFence *pFences) {
+ return f(device, fenceCount, pFences);
+}
+
+static void vkGetPhysicalDeviceProperties(PFN_vkGetPhysicalDeviceProperties f, VkPhysicalDevice physicalDevice, VkPhysicalDeviceProperties *pProperties) {
+ f(physicalDevice, pProperties);
+}
+
+// Trampoline: forwards to the dynamically resolved vkGetPhysicalDeviceSurfaceSupportKHR pointer f.
+static VkResult vkGetPhysicalDeviceSurfaceSupportKHR(PFN_vkGetPhysicalDeviceSurfaceSupportKHR f, VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex, VkSurfaceKHR surface, VkBool32 *pSupported) {
+ return f(physicalDevice, queueFamilyIndex, surface, pSupported);
+}
+
+// Trampoline: forwards to the dynamically resolved vkDestroySurfaceKHR pointer f.
+static void vkDestroySurfaceKHR(PFN_vkDestroySurfaceKHR f, VkInstance instance, VkSurfaceKHR surface, const VkAllocationCallbacks *pAllocator) {
+ f(instance, surface, pAllocator);
+}
+
+// Trampoline: forwards to the dynamically resolved vkGetPhysicalDeviceSurfaceFormatsKHR pointer f.
+static VkResult vkGetPhysicalDeviceSurfaceFormatsKHR(PFN_vkGetPhysicalDeviceSurfaceFormatsKHR f, VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, uint32_t *pSurfaceFormatCount, VkSurfaceFormatKHR *pSurfaceFormats) {
+ return f(physicalDevice, surface, pSurfaceFormatCount, pSurfaceFormats);
+}
+
+// Trampoline: forwards to the dynamically resolved vkGetPhysicalDeviceSurfacePresentModesKHR pointer f.
+static VkResult vkGetPhysicalDeviceSurfacePresentModesKHR(PFN_vkGetPhysicalDeviceSurfacePresentModesKHR f, VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, uint32_t *pPresentModeCount, VkPresentModeKHR *pPresentModes) {
+ return f(physicalDevice, surface, pPresentModeCount, pPresentModes);
+}
+
+// Trampoline: forwards to the dynamically resolved vkGetPhysicalDeviceSurfaceCapabilitiesKHR pointer f.
+static VkResult vkGetPhysicalDeviceSurfaceCapabilitiesKHR(PFN_vkGetPhysicalDeviceSurfaceCapabilitiesKHR f, VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, VkSurfaceCapabilitiesKHR *pSurfaceCapabilities) {
+ return f(physicalDevice, surface, pSurfaceCapabilities);
+}
+
+// Trampoline: forwards to the dynamically resolved vkCreateSwapchainKHR pointer f.
+static VkResult vkCreateSwapchainKHR(PFN_vkCreateSwapchainKHR f, VkDevice device, const VkSwapchainCreateInfoKHR *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkSwapchainKHR *pSwapchain) {
+ return f(device, pCreateInfo, pAllocator, pSwapchain);
+}
+
+// Trampoline: forwards to the dynamically resolved vkDestroySwapchainKHR pointer f.
+static void vkDestroySwapchainKHR(PFN_vkDestroySwapchainKHR f, VkDevice device, VkSwapchainKHR swapchain, const VkAllocationCallbacks *pAllocator) {
+ f(device, swapchain, pAllocator);
+}
+
+// Trampoline: forwards to the dynamically resolved vkGetSwapchainImagesKHR pointer f.
+static VkResult vkGetSwapchainImagesKHR(PFN_vkGetSwapchainImagesKHR f, VkDevice device, VkSwapchainKHR swapchain, uint32_t *pSwapchainImageCount, VkImage *pSwapchainImages) {
+ return f(device, swapchain, pSwapchainImageCount, pSwapchainImages);
+}
+
+// intAndResult holds both an integer and a result returned by value, to
+// avoid Go heap allocation of the integer with Vulkan's output-pointer
+// return style.
+struct intAndResult {
+ uint32_t uint;
+ VkResult res;
+};
+
+// Trampoline for vkAcquireNextImageKHR; the acquired image index is returned
+// by value inside intAndResult instead of through an output pointer.
+static struct intAndResult vkAcquireNextImageKHR(PFN_vkAcquireNextImageKHR f, VkDevice device, VkSwapchainKHR swapchain, uint64_t timeout, VkSemaphore semaphore, VkFence fence) {
+ struct intAndResult res;
+ res.res = f(device, swapchain, timeout, semaphore, fence, &res.uint);
+ return res;
+}
+
+// Trampoline for vkQueuePresentKHR. pPresentInfo is taken by value so the Go
+// caller can pass the struct directly (see PresentQueue); the address-of here
+// refers to the C copy — presumably this sidesteps cgo pointer rules, TODO confirm.
+static VkResult vkQueuePresentKHR(PFN_vkQueuePresentKHR f, VkQueue queue, const VkPresentInfoKHR pPresentInfo) {
+ return f(queue, &pPresentInfo);
+}
+*/
+import "C"
+import (
+ "errors"
+ "fmt"
+ "image"
+ "math"
+ "reflect"
+ "runtime"
+ "sync"
+ "unsafe"
+)
+
+// Go aliases for the Vulkan C handle, enum, flag and struct types used by
+// this package; they are identical to (not distinct from) the C types.
+type (
+ AttachmentLoadOp = C.VkAttachmentLoadOp
+ AccessFlags = C.VkAccessFlags
+ BlendFactor = C.VkBlendFactor
+ Buffer = C.VkBuffer
+ BufferImageCopy = C.VkBufferImageCopy
+ BufferMemoryBarrier = C.VkBufferMemoryBarrier
+ BufferUsageFlags = C.VkBufferUsageFlags
+ CommandPool = C.VkCommandPool
+ CommandBuffer = C.VkCommandBuffer
+ DependencyFlags = C.VkDependencyFlags
+ DescriptorPool = C.VkDescriptorPool
+ DescriptorPoolSize = C.VkDescriptorPoolSize
+ DescriptorSet = C.VkDescriptorSet
+ DescriptorSetLayout = C.VkDescriptorSetLayout
+ DescriptorType = C.VkDescriptorType
+ Device = C.VkDevice
+ DeviceMemory = C.VkDeviceMemory
+ DeviceSize = C.VkDeviceSize
+ Fence = C.VkFence
+ Queue = C.VkQueue
+ IndexType = C.VkIndexType
+ Image = C.VkImage
+ ImageCopy = C.VkImageCopy
+ ImageLayout = C.VkImageLayout
+ ImageMemoryBarrier = C.VkImageMemoryBarrier
+ ImageUsageFlags = C.VkImageUsageFlags
+ ImageView = C.VkImageView
+ Instance = C.VkInstance
+ Filter = C.VkFilter
+ Format = C.VkFormat
+ FormatFeatureFlags = C.VkFormatFeatureFlags
+ Framebuffer = C.VkFramebuffer
+ MemoryBarrier = C.VkMemoryBarrier
+ MemoryPropertyFlags = C.VkMemoryPropertyFlags
+ Pipeline = C.VkPipeline
+ PipelineBindPoint = C.VkPipelineBindPoint
+ PipelineLayout = C.VkPipelineLayout
+ PipelineStageFlags = C.VkPipelineStageFlags
+ PhysicalDevice = C.VkPhysicalDevice
+ PrimitiveTopology = C.VkPrimitiveTopology
+ PushConstantRange = C.VkPushConstantRange
+ QueueFamilyProperties = C.VkQueueFamilyProperties
+ QueueFlags = C.VkQueueFlags
+ RenderPass = C.VkRenderPass
+ Sampler = C.VkSampler
+ SamplerMipmapMode = C.VkSamplerMipmapMode
+ Semaphore = C.VkSemaphore
+ ShaderModule = C.VkShaderModule
+ ShaderStageFlags = C.VkShaderStageFlags
+ SubpassDependency = C.VkSubpassDependency
+ Viewport = C.VkViewport
+ WriteDescriptorSet = C.VkWriteDescriptorSet
+
+ // VK_KHR_surface types.
+ Surface = C.VkSurfaceKHR
+ SurfaceCapabilities = C.VkSurfaceCapabilitiesKHR
+
+ // VK_KHR_swapchain type.
+ Swapchain = C.VkSwapchainKHR
+)
+
+// VertexInputBindingDescription is a Go-friendly subset of
+// VkVertexInputBindingDescription: a vertex buffer binding slot and its
+// per-vertex stride in bytes.
+type VertexInputBindingDescription struct {
+ Binding int
+ Stride int
+}
+
+// VertexInputAttributeDescription is a Go-friendly subset of
+// VkVertexInputAttributeDescription: shader location, source binding,
+// component format and byte offset of one vertex attribute.
+type VertexInputAttributeDescription struct {
+ Location int
+ Binding int
+ Format Format
+ Offset int
+}
+
+// DescriptorSetLayoutBinding is a Go-friendly subset of
+// VkDescriptorSetLayoutBinding: one descriptor binding slot, its type and
+// the shader stages that access it.
+type DescriptorSetLayoutBinding struct {
+ Binding int
+ DescriptorType DescriptorType
+ StageFlags ShaderStageFlags
+}
+
+// Error is a VkResult error code returned by a failed Vulkan call.
+type Error C.VkResult
+
+// Vulkan enum and flag values re-exported under their C spelling (minus the
+// VK_ prefix) so callers can avoid cgo.
+const (
+ FORMAT_R8G8B8A8_UNORM Format = C.VK_FORMAT_R8G8B8A8_UNORM
+ FORMAT_B8G8R8A8_SRGB Format = C.VK_FORMAT_B8G8R8A8_SRGB
+ FORMAT_R8G8B8A8_SRGB Format = C.VK_FORMAT_R8G8B8A8_SRGB
+ FORMAT_R16_SFLOAT Format = C.VK_FORMAT_R16_SFLOAT
+ FORMAT_R32_SFLOAT Format = C.VK_FORMAT_R32_SFLOAT
+ FORMAT_R32G32_SFLOAT Format = C.VK_FORMAT_R32G32_SFLOAT
+ FORMAT_R32G32B32_SFLOAT Format = C.VK_FORMAT_R32G32B32_SFLOAT
+ FORMAT_R32G32B32A32_SFLOAT Format = C.VK_FORMAT_R32G32B32A32_SFLOAT
+
+ FORMAT_FEATURE_COLOR_ATTACHMENT_BIT FormatFeatureFlags = C.VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT
+ FORMAT_FEATURE_COLOR_ATTACHMENT_BLEND_BIT FormatFeatureFlags = C.VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BLEND_BIT
+ FORMAT_FEATURE_SAMPLED_IMAGE_BIT FormatFeatureFlags = C.VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT
+
+ IMAGE_USAGE_SAMPLED_BIT ImageUsageFlags = C.VK_IMAGE_USAGE_SAMPLED_BIT
+ IMAGE_USAGE_COLOR_ATTACHMENT_BIT ImageUsageFlags = C.VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT
+ IMAGE_USAGE_STORAGE_BIT ImageUsageFlags = C.VK_IMAGE_USAGE_STORAGE_BIT
+ IMAGE_USAGE_TRANSFER_DST_BIT ImageUsageFlags = C.VK_IMAGE_USAGE_TRANSFER_DST_BIT
+ IMAGE_USAGE_TRANSFER_SRC_BIT ImageUsageFlags = C.VK_IMAGE_USAGE_TRANSFER_SRC_BIT
+
+ FILTER_NEAREST Filter = C.VK_FILTER_NEAREST
+ FILTER_LINEAR Filter = C.VK_FILTER_LINEAR
+
+ ATTACHMENT_LOAD_OP_CLEAR AttachmentLoadOp = C.VK_ATTACHMENT_LOAD_OP_CLEAR
+ ATTACHMENT_LOAD_OP_DONT_CARE AttachmentLoadOp = C.VK_ATTACHMENT_LOAD_OP_DONT_CARE
+ ATTACHMENT_LOAD_OP_LOAD AttachmentLoadOp = C.VK_ATTACHMENT_LOAD_OP_LOAD
+
+ IMAGE_LAYOUT_UNDEFINED ImageLayout = C.VK_IMAGE_LAYOUT_UNDEFINED
+ IMAGE_LAYOUT_PRESENT_SRC_KHR ImageLayout = C.VK_IMAGE_LAYOUT_PRESENT_SRC_KHR
+ IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL ImageLayout = C.VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
+ IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL ImageLayout = C.VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL
+ IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL ImageLayout = C.VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL
+ IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL ImageLayout = C.VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL
+ IMAGE_LAYOUT_GENERAL ImageLayout = C.VK_IMAGE_LAYOUT_GENERAL
+
+ BUFFER_USAGE_TRANSFER_DST_BIT BufferUsageFlags = C.VK_BUFFER_USAGE_TRANSFER_DST_BIT
+ BUFFER_USAGE_TRANSFER_SRC_BIT BufferUsageFlags = C.VK_BUFFER_USAGE_TRANSFER_SRC_BIT
+ BUFFER_USAGE_UNIFORM_BUFFER_BIT BufferUsageFlags = C.VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT
+ BUFFER_USAGE_STORAGE_BUFFER_BIT BufferUsageFlags = C.VK_BUFFER_USAGE_STORAGE_BUFFER_BIT
+ BUFFER_USAGE_INDEX_BUFFER_BIT BufferUsageFlags = C.VK_BUFFER_USAGE_INDEX_BUFFER_BIT
+ BUFFER_USAGE_VERTEX_BUFFER_BIT BufferUsageFlags = C.VK_BUFFER_USAGE_VERTEX_BUFFER_BIT
+
+ // Result codes callers are expected to inspect (e.g. to recreate a
+ // stale swapchain).
+ ERROR_OUT_OF_DATE_KHR = Error(C.VK_ERROR_OUT_OF_DATE_KHR)
+ ERROR_SURFACE_LOST_KHR = Error(C.VK_ERROR_SURFACE_LOST_KHR)
+ ERROR_DEVICE_LOST = Error(C.VK_ERROR_DEVICE_LOST)
+ SUBOPTIMAL_KHR = Error(C.VK_SUBOPTIMAL_KHR)
+
+ BLEND_FACTOR_ZERO BlendFactor = C.VK_BLEND_FACTOR_ZERO
+ BLEND_FACTOR_ONE BlendFactor = C.VK_BLEND_FACTOR_ONE
+ BLEND_FACTOR_ONE_MINUS_SRC_ALPHA BlendFactor = C.VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA
+ BLEND_FACTOR_DST_COLOR BlendFactor = C.VK_BLEND_FACTOR_DST_COLOR
+
+ PRIMITIVE_TOPOLOGY_TRIANGLE_LIST PrimitiveTopology = C.VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST
+ PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP PrimitiveTopology = C.VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP
+
+ SHADER_STAGE_VERTEX_BIT ShaderStageFlags = C.VK_SHADER_STAGE_VERTEX_BIT
+ SHADER_STAGE_FRAGMENT_BIT ShaderStageFlags = C.VK_SHADER_STAGE_FRAGMENT_BIT
+ SHADER_STAGE_COMPUTE_BIT ShaderStageFlags = C.VK_SHADER_STAGE_COMPUTE_BIT
+
+ DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER DescriptorType = C.VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER
+ DESCRIPTOR_TYPE_UNIFORM_BUFFER DescriptorType = C.VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER
+ DESCRIPTOR_TYPE_STORAGE_BUFFER DescriptorType = C.VK_DESCRIPTOR_TYPE_STORAGE_BUFFER
+ DESCRIPTOR_TYPE_STORAGE_IMAGE DescriptorType = C.VK_DESCRIPTOR_TYPE_STORAGE_IMAGE
+
+ MEMORY_PROPERTY_DEVICE_LOCAL_BIT MemoryPropertyFlags = C.VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT
+ MEMORY_PROPERTY_HOST_VISIBLE_BIT MemoryPropertyFlags = C.VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT
+ MEMORY_PROPERTY_HOST_COHERENT_BIT MemoryPropertyFlags = C.VK_MEMORY_PROPERTY_HOST_COHERENT_BIT
+
+ DEPENDENCY_BY_REGION_BIT DependencyFlags = C.VK_DEPENDENCY_BY_REGION_BIT
+
+ PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT PipelineStageFlags = C.VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT
+ PIPELINE_STAGE_TRANSFER_BIT PipelineStageFlags = C.VK_PIPELINE_STAGE_TRANSFER_BIT
+ PIPELINE_STAGE_FRAGMENT_SHADER_BIT PipelineStageFlags = C.VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT
+ PIPELINE_STAGE_COMPUTE_SHADER_BIT PipelineStageFlags = C.VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT
+ PIPELINE_STAGE_TOP_OF_PIPE_BIT PipelineStageFlags = C.VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT
+ PIPELINE_STAGE_HOST_BIT PipelineStageFlags = C.VK_PIPELINE_STAGE_HOST_BIT
+ PIPELINE_STAGE_VERTEX_INPUT_BIT PipelineStageFlags = C.VK_PIPELINE_STAGE_VERTEX_INPUT_BIT
+ PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT PipelineStageFlags = C.VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT
+
+ ACCESS_MEMORY_READ_BIT AccessFlags = C.VK_ACCESS_MEMORY_READ_BIT
+ ACCESS_MEMORY_WRITE_BIT AccessFlags = C.VK_ACCESS_MEMORY_WRITE_BIT
+ ACCESS_TRANSFER_READ_BIT AccessFlags = C.VK_ACCESS_TRANSFER_READ_BIT
+ ACCESS_TRANSFER_WRITE_BIT AccessFlags = C.VK_ACCESS_TRANSFER_WRITE_BIT
+ ACCESS_SHADER_READ_BIT AccessFlags = C.VK_ACCESS_SHADER_READ_BIT
+ ACCESS_SHADER_WRITE_BIT AccessFlags = C.VK_ACCESS_SHADER_WRITE_BIT
+ ACCESS_COLOR_ATTACHMENT_READ_BIT AccessFlags = C.VK_ACCESS_COLOR_ATTACHMENT_READ_BIT
+ ACCESS_COLOR_ATTACHMENT_WRITE_BIT AccessFlags = C.VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT
+ ACCESS_HOST_READ_BIT AccessFlags = C.VK_ACCESS_HOST_READ_BIT
+ ACCESS_HOST_WRITE_BIT AccessFlags = C.VK_ACCESS_HOST_WRITE_BIT
+ ACCESS_VERTEX_ATTRIBUTE_READ_BIT AccessFlags = C.VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT
+ ACCESS_INDEX_READ_BIT AccessFlags = C.VK_ACCESS_INDEX_READ_BIT
+
+ PIPELINE_BIND_POINT_COMPUTE PipelineBindPoint = C.VK_PIPELINE_BIND_POINT_COMPUTE
+ PIPELINE_BIND_POINT_GRAPHICS PipelineBindPoint = C.VK_PIPELINE_BIND_POINT_GRAPHICS
+
+ INDEX_TYPE_UINT16 IndexType = C.VK_INDEX_TYPE_UINT16
+ INDEX_TYPE_UINT32 IndexType = C.VK_INDEX_TYPE_UINT32
+
+ QUEUE_GRAPHICS_BIT QueueFlags = C.VK_QUEUE_GRAPHICS_BIT
+ QUEUE_COMPUTE_BIT QueueFlags = C.VK_QUEUE_COMPUTE_BIT
+)
+
+var (
+ // once/loadErr guard the one-time library load performed by vkInit;
+ // loadErr caches the first failure for all subsequent callers.
+ once sync.Once
+ loadErr error
+
+ // loadFuncs are extra symbol-resolution hooks run at the end of vkInit —
+ // presumably registered by platform-specific files; verify against callers.
+ loadFuncs []func(dlopen func(name string) *[0]byte)
+)
+
+// funcs holds every dynamically resolved Vulkan entry point, populated by
+// vkInit. Core entry points are mandatory; the KHR surface/swapchain ones at
+// the bottom are resolved best-effort and may be nil.
+var funcs struct {
+ vkCreateInstance C.PFN_vkCreateInstance
+ vkDestroyInstance C.PFN_vkDestroyInstance
+ vkEnumeratePhysicalDevices C.PFN_vkEnumeratePhysicalDevices
+ vkGetPhysicalDeviceQueueFamilyProperties C.PFN_vkGetPhysicalDeviceQueueFamilyProperties
+ vkGetPhysicalDeviceFormatProperties C.PFN_vkGetPhysicalDeviceFormatProperties
+ vkCreateDevice C.PFN_vkCreateDevice
+ vkDestroyDevice C.PFN_vkDestroyDevice
+ vkGetDeviceQueue C.PFN_vkGetDeviceQueue
+ vkCreateImageView C.PFN_vkCreateImageView
+ vkDestroyImageView C.PFN_vkDestroyImageView
+ vkCreateFramebuffer C.PFN_vkCreateFramebuffer
+ vkDestroyFramebuffer C.PFN_vkDestroyFramebuffer
+ vkDeviceWaitIdle C.PFN_vkDeviceWaitIdle
+ vkQueueWaitIdle C.PFN_vkQueueWaitIdle
+ vkCreateSemaphore C.PFN_vkCreateSemaphore
+ vkDestroySemaphore C.PFN_vkDestroySemaphore
+ vkCreateRenderPass C.PFN_vkCreateRenderPass
+ vkDestroyRenderPass C.PFN_vkDestroyRenderPass
+ vkCreateCommandPool C.PFN_vkCreateCommandPool
+ vkDestroyCommandPool C.PFN_vkDestroyCommandPool
+ vkAllocateCommandBuffers C.PFN_vkAllocateCommandBuffers
+ vkFreeCommandBuffers C.PFN_vkFreeCommandBuffers
+ vkBeginCommandBuffer C.PFN_vkBeginCommandBuffer
+ vkEndCommandBuffer C.PFN_vkEndCommandBuffer
+ vkQueueSubmit C.PFN_vkQueueSubmit
+ vkCmdBeginRenderPass C.PFN_vkCmdBeginRenderPass
+ vkCmdEndRenderPass C.PFN_vkCmdEndRenderPass
+ vkCmdCopyBuffer C.PFN_vkCmdCopyBuffer
+ vkCmdCopyBufferToImage C.PFN_vkCmdCopyBufferToImage
+ vkCmdPipelineBarrier C.PFN_vkCmdPipelineBarrier
+ vkCmdPushConstants C.PFN_vkCmdPushConstants
+ vkCmdBindPipeline C.PFN_vkCmdBindPipeline
+ vkCmdBindVertexBuffers C.PFN_vkCmdBindVertexBuffers
+ vkCmdSetViewport C.PFN_vkCmdSetViewport
+ vkCmdBindIndexBuffer C.PFN_vkCmdBindIndexBuffer
+ vkCmdDraw C.PFN_vkCmdDraw
+ vkCmdDrawIndexed C.PFN_vkCmdDrawIndexed
+ vkCmdBindDescriptorSets C.PFN_vkCmdBindDescriptorSets
+ vkCmdCopyImageToBuffer C.PFN_vkCmdCopyImageToBuffer
+ vkCmdDispatch C.PFN_vkCmdDispatch
+ vkCreateImage C.PFN_vkCreateImage
+ vkDestroyImage C.PFN_vkDestroyImage
+ vkGetImageMemoryRequirements C.PFN_vkGetImageMemoryRequirements
+ vkAllocateMemory C.PFN_vkAllocateMemory
+ vkBindImageMemory C.PFN_vkBindImageMemory
+ vkFreeMemory C.PFN_vkFreeMemory
+ vkGetPhysicalDeviceMemoryProperties C.PFN_vkGetPhysicalDeviceMemoryProperties
+ vkCreateSampler C.PFN_vkCreateSampler
+ vkDestroySampler C.PFN_vkDestroySampler
+ vkCreateBuffer C.PFN_vkCreateBuffer
+ vkDestroyBuffer C.PFN_vkDestroyBuffer
+ vkGetBufferMemoryRequirements C.PFN_vkGetBufferMemoryRequirements
+ vkBindBufferMemory C.PFN_vkBindBufferMemory
+ vkCreateShaderModule C.PFN_vkCreateShaderModule
+ vkDestroyShaderModule C.PFN_vkDestroyShaderModule
+ vkCreateGraphicsPipelines C.PFN_vkCreateGraphicsPipelines
+ vkDestroyPipeline C.PFN_vkDestroyPipeline
+ vkCreatePipelineLayout C.PFN_vkCreatePipelineLayout
+ vkDestroyPipelineLayout C.PFN_vkDestroyPipelineLayout
+ vkCreateDescriptorSetLayout C.PFN_vkCreateDescriptorSetLayout
+ vkDestroyDescriptorSetLayout C.PFN_vkDestroyDescriptorSetLayout
+ vkMapMemory C.PFN_vkMapMemory
+ vkUnmapMemory C.PFN_vkUnmapMemory
+ vkResetCommandBuffer C.PFN_vkResetCommandBuffer
+ vkCreateDescriptorPool C.PFN_vkCreateDescriptorPool
+ vkDestroyDescriptorPool C.PFN_vkDestroyDescriptorPool
+ vkAllocateDescriptorSets C.PFN_vkAllocateDescriptorSets
+ vkFreeDescriptorSets C.PFN_vkFreeDescriptorSets
+ vkUpdateDescriptorSets C.PFN_vkUpdateDescriptorSets
+ vkResetDescriptorPool C.PFN_vkResetDescriptorPool
+ vkCmdCopyImage C.PFN_vkCmdCopyImage
+ vkCreateComputePipelines C.PFN_vkCreateComputePipelines
+ vkCreateFence C.PFN_vkCreateFence
+ vkDestroyFence C.PFN_vkDestroyFence
+ vkWaitForFences C.PFN_vkWaitForFences
+ vkResetFences C.PFN_vkResetFences
+ vkGetPhysicalDeviceProperties C.PFN_vkGetPhysicalDeviceProperties
+
+ vkGetPhysicalDeviceSurfaceSupportKHR C.PFN_vkGetPhysicalDeviceSurfaceSupportKHR
+ vkDestroySurfaceKHR C.PFN_vkDestroySurfaceKHR
+ vkGetPhysicalDeviceSurfaceFormatsKHR C.PFN_vkGetPhysicalDeviceSurfaceFormatsKHR
+ vkGetPhysicalDeviceSurfacePresentModesKHR C.PFN_vkGetPhysicalDeviceSurfacePresentModesKHR
+ vkGetPhysicalDeviceSurfaceCapabilitiesKHR C.PFN_vkGetPhysicalDeviceSurfaceCapabilitiesKHR
+
+ vkCreateSwapchainKHR C.PFN_vkCreateSwapchainKHR
+ vkDestroySwapchainKHR C.PFN_vkDestroySwapchainKHR
+ vkGetSwapchainImagesKHR C.PFN_vkGetSwapchainImagesKHR
+ vkAcquireNextImageKHR C.PFN_vkAcquireNextImageKHR
+ vkQueuePresentKHR C.PFN_vkQueuePresentKHR
+}
+
+// Typed zero values used as "null handle" sentinels for comparisons and
+// error returns.
+var (
+ nilSurface C.VkSurfaceKHR
+ nilSwapchain C.VkSwapchainKHR
+ nilSemaphore C.VkSemaphore
+ nilImageView C.VkImageView
+ nilRenderPass C.VkRenderPass
+ nilFramebuffer C.VkFramebuffer
+ nilCommandPool C.VkCommandPool
+ nilImage C.VkImage
+ nilDeviceMemory C.VkDeviceMemory
+ nilSampler C.VkSampler
+ nilBuffer C.VkBuffer
+ nilShaderModule C.VkShaderModule
+ nilPipeline C.VkPipeline
+ nilPipelineCache C.VkPipelineCache
+ nilPipelineLayout C.VkPipelineLayout
+ nilDescriptorSetLayout C.VkDescriptorSetLayout
+ nilDescriptorPool C.VkDescriptorPool
+ nilDescriptorSet C.VkDescriptorSet
+ nilFence C.VkFence
+)
+
+// vkInit loads the Vulkan shared library and resolves all entry points into
+// funcs. It runs at most once; every call returns the cached load error (nil
+// on success). Core functions are mandatory; the KHR surface/swapchain
+// functions are resolved best-effort and may remain nil.
+func vkInit() error {
+ once.Do(func() {
+ var libName string
+ switch {
+ case runtime.GOOS == "android":
+ // Android ships an unversioned libvulkan.so.
+ libName = "libvulkan.so"
+ default:
+ libName = "libvulkan.so.1"
+ }
+ lib := dlopen(libName)
+ if lib == nil {
+ loadErr = fmt.Errorf("vulkan: %s", C.GoString(C.dlerror()))
+ return
+ }
+ // Shadows the outer dlopen: resolve a symbol from the loaded library.
+ dlopen := func(name string) *[0]byte {
+ return (*[0]byte)(dlsym(lib, name))
+ }
+ // must resolves a mandatory symbol, recording the first failure in
+ // loadErr while letting resolution continue.
+ must := func(name string) *[0]byte {
+ ptr := dlopen(name)
+ if ptr != nil {
+ return ptr
+ }
+ if loadErr == nil {
+ loadErr = fmt.Errorf("vulkan: function %q not found: %s", name, C.GoString(C.dlerror()))
+ }
+ return nil
+ }
+ funcs.vkCreateInstance = must("vkCreateInstance")
+ funcs.vkDestroyInstance = must("vkDestroyInstance")
+ funcs.vkEnumeratePhysicalDevices = must("vkEnumeratePhysicalDevices")
+ funcs.vkGetPhysicalDeviceQueueFamilyProperties = must("vkGetPhysicalDeviceQueueFamilyProperties")
+ funcs.vkGetPhysicalDeviceFormatProperties = must("vkGetPhysicalDeviceFormatProperties")
+ funcs.vkCreateDevice = must("vkCreateDevice")
+ funcs.vkDestroyDevice = must("vkDestroyDevice")
+ funcs.vkGetDeviceQueue = must("vkGetDeviceQueue")
+ funcs.vkCreateImageView = must("vkCreateImageView")
+ funcs.vkDestroyImageView = must("vkDestroyImageView")
+ funcs.vkCreateFramebuffer = must("vkCreateFramebuffer")
+ funcs.vkDestroyFramebuffer = must("vkDestroyFramebuffer")
+ funcs.vkDeviceWaitIdle = must("vkDeviceWaitIdle")
+ funcs.vkQueueWaitIdle = must("vkQueueWaitIdle")
+ funcs.vkCreateSemaphore = must("vkCreateSemaphore")
+ funcs.vkDestroySemaphore = must("vkDestroySemaphore")
+ funcs.vkCreateRenderPass = must("vkCreateRenderPass")
+ funcs.vkDestroyRenderPass = must("vkDestroyRenderPass")
+ funcs.vkCreateCommandPool = must("vkCreateCommandPool")
+ funcs.vkDestroyCommandPool = must("vkDestroyCommandPool")
+ funcs.vkAllocateCommandBuffers = must("vkAllocateCommandBuffers")
+ funcs.vkFreeCommandBuffers = must("vkFreeCommandBuffers")
+ funcs.vkBeginCommandBuffer = must("vkBeginCommandBuffer")
+ funcs.vkEndCommandBuffer = must("vkEndCommandBuffer")
+ funcs.vkQueueSubmit = must("vkQueueSubmit")
+ funcs.vkCmdBeginRenderPass = must("vkCmdBeginRenderPass")
+ funcs.vkCmdEndRenderPass = must("vkCmdEndRenderPass")
+ funcs.vkCmdCopyBuffer = must("vkCmdCopyBuffer")
+ funcs.vkCmdCopyBufferToImage = must("vkCmdCopyBufferToImage")
+ funcs.vkCmdPipelineBarrier = must("vkCmdPipelineBarrier")
+ funcs.vkCmdPushConstants = must("vkCmdPushConstants")
+ funcs.vkCmdBindPipeline = must("vkCmdBindPipeline")
+ funcs.vkCmdBindVertexBuffers = must("vkCmdBindVertexBuffers")
+ funcs.vkCmdSetViewport = must("vkCmdSetViewport")
+ funcs.vkCmdBindIndexBuffer = must("vkCmdBindIndexBuffer")
+ funcs.vkCmdDraw = must("vkCmdDraw")
+ funcs.vkCmdDrawIndexed = must("vkCmdDrawIndexed")
+ funcs.vkCmdBindDescriptorSets = must("vkCmdBindDescriptorSets")
+ funcs.vkCmdCopyImageToBuffer = must("vkCmdCopyImageToBuffer")
+ funcs.vkCmdDispatch = must("vkCmdDispatch")
+ funcs.vkCreateImage = must("vkCreateImage")
+ funcs.vkDestroyImage = must("vkDestroyImage")
+ funcs.vkGetImageMemoryRequirements = must("vkGetImageMemoryRequirements")
+ funcs.vkAllocateMemory = must("vkAllocateMemory")
+ funcs.vkBindImageMemory = must("vkBindImageMemory")
+ funcs.vkFreeMemory = must("vkFreeMemory")
+ funcs.vkGetPhysicalDeviceMemoryProperties = must("vkGetPhysicalDeviceMemoryProperties")
+ funcs.vkCreateSampler = must("vkCreateSampler")
+ funcs.vkDestroySampler = must("vkDestroySampler")
+ funcs.vkCreateBuffer = must("vkCreateBuffer")
+ funcs.vkDestroyBuffer = must("vkDestroyBuffer")
+ funcs.vkGetBufferMemoryRequirements = must("vkGetBufferMemoryRequirements")
+ funcs.vkBindBufferMemory = must("vkBindBufferMemory")
+ funcs.vkCreateShaderModule = must("vkCreateShaderModule")
+ funcs.vkDestroyShaderModule = must("vkDestroyShaderModule")
+ funcs.vkCreateGraphicsPipelines = must("vkCreateGraphicsPipelines")
+ funcs.vkDestroyPipeline = must("vkDestroyPipeline")
+ funcs.vkCreatePipelineLayout = must("vkCreatePipelineLayout")
+ funcs.vkDestroyPipelineLayout = must("vkDestroyPipelineLayout")
+ funcs.vkCreateDescriptorSetLayout = must("vkCreateDescriptorSetLayout")
+ funcs.vkDestroyDescriptorSetLayout = must("vkDestroyDescriptorSetLayout")
+ funcs.vkMapMemory = must("vkMapMemory")
+ funcs.vkUnmapMemory = must("vkUnmapMemory")
+ funcs.vkResetCommandBuffer = must("vkResetCommandBuffer")
+ funcs.vkCreateDescriptorPool = must("vkCreateDescriptorPool")
+ funcs.vkDestroyDescriptorPool = must("vkDestroyDescriptorPool")
+ funcs.vkAllocateDescriptorSets = must("vkAllocateDescriptorSets")
+ funcs.vkFreeDescriptorSets = must("vkFreeDescriptorSets")
+ funcs.vkUpdateDescriptorSets = must("vkUpdateDescriptorSets")
+ funcs.vkResetDescriptorPool = must("vkResetDescriptorPool")
+ funcs.vkCmdCopyImage = must("vkCmdCopyImage")
+ funcs.vkCreateComputePipelines = must("vkCreateComputePipelines")
+ funcs.vkCreateFence = must("vkCreateFence")
+ funcs.vkDestroyFence = must("vkDestroyFence")
+ funcs.vkWaitForFences = must("vkWaitForFences")
+ funcs.vkResetFences = must("vkResetFences")
+ funcs.vkGetPhysicalDeviceProperties = must("vkGetPhysicalDeviceProperties")
+
+ // WSI entry points are resolved without must: their absence is
+ // tolerated (the pointers stay nil).
+ funcs.vkGetPhysicalDeviceSurfaceSupportKHR = dlopen("vkGetPhysicalDeviceSurfaceSupportKHR")
+ funcs.vkDestroySurfaceKHR = dlopen("vkDestroySurfaceKHR")
+ funcs.vkGetPhysicalDeviceSurfaceFormatsKHR = dlopen("vkGetPhysicalDeviceSurfaceFormatsKHR")
+ funcs.vkGetPhysicalDeviceSurfacePresentModesKHR = dlopen("vkGetPhysicalDeviceSurfacePresentModesKHR")
+ funcs.vkGetPhysicalDeviceSurfaceCapabilitiesKHR = dlopen("vkGetPhysicalDeviceSurfaceCapabilitiesKHR")
+
+ funcs.vkCreateSwapchainKHR = dlopen("vkCreateSwapchainKHR")
+ funcs.vkDestroySwapchainKHR = dlopen("vkDestroySwapchainKHR")
+ funcs.vkGetSwapchainImagesKHR = dlopen("vkGetSwapchainImagesKHR")
+ funcs.vkAcquireNextImageKHR = dlopen("vkAcquireNextImageKHR")
+ funcs.vkQueuePresentKHR = dlopen("vkQueuePresentKHR")
+
+ // Let registered hooks resolve any additional symbols they need.
+ for _, f := range loadFuncs {
+ f(dlopen)
+ }
+ })
+ return loadErr
+}
+
+// CreateInstance loads the Vulkan library if necessary and creates a
+// VkInstance with the given extension names enabled. The returned instance
+// must be released with DestroyInstance.
+func CreateInstance(exts ...string) (Instance, error) {
+ if err := vkInit(); err != nil {
+ return nil, err
+ }
+ inf := C.VkInstanceCreateInfo{
+ sType: C.VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO,
+ }
+ if len(exts) > 0 {
+ // C copies of the extension names; freed after the call returns.
+ cexts := mallocCStringArr(exts)
+ defer freeCStringArr(cexts)
+ inf.enabledExtensionCount = C.uint32_t(len(exts))
+ inf.ppEnabledExtensionNames = &cexts[0]
+ }
+ var inst Instance
+ // inf is passed by value to the C-side wrapper (declared above this view).
+ if err := vkErr(C.vkCreateInstance(funcs.vkCreateInstance, inf, nil, &inst)); err != nil {
+ return nil, fmt.Errorf("vulkan: vkCreateInstance: %w", err)
+ }
+ return inst, nil
+}
+
+// mallocCStringArr converts s into C-allocated NUL-terminated strings.
+// The caller owns the result and must release it with freeCStringArr.
+func mallocCStringArr(s []string) []*C.char {
+ out := make([]*C.char, len(s))
+ for idx := 0; idx < len(s); idx++ {
+ out[idx] = C.CString(s[idx])
+ }
+ return out
+}
+
+// freeCStringArr releases strings allocated by mallocCStringArr and nils
+// out each entry to guard against accidental reuse.
+func freeCStringArr(s []*C.char) {
+ for idx, cstr := range s {
+ C.free(unsafe.Pointer(cstr))
+ s[idx] = nil
+ }
+}
+
+// DestroyInstance releases an instance created by CreateInstance.
+func DestroyInstance(inst Instance) {
+ C.vkDestroyInstance(funcs.vkDestroyInstance, inst, nil)
+}
+
+// GetPhysicalDeviceQueueFamilyProperties returns the queue families of pd,
+// or nil if the device reports none. Uses the standard two-call
+// count-then-fill Vulkan enumeration pattern.
+func GetPhysicalDeviceQueueFamilyProperties(pd PhysicalDevice) []QueueFamilyProperties {
+ var count C.uint32_t
+ C.vkGetPhysicalDeviceQueueFamilyProperties(funcs.vkGetPhysicalDeviceQueueFamilyProperties, pd, &count, nil)
+ if count == 0 {
+ return nil
+ }
+ queues := make([]C.VkQueueFamilyProperties, count)
+ C.vkGetPhysicalDeviceQueueFamilyProperties(funcs.vkGetPhysicalDeviceQueueFamilyProperties, pd, &count, &queues[0])
+ return queues
+}
+
+// EnumeratePhysicalDevices returns every physical device exposed by inst,
+// or (nil, nil) when there are none. Uses the two-call count-then-fill
+// Vulkan enumeration pattern.
+func EnumeratePhysicalDevices(inst Instance) ([]PhysicalDevice, error) {
+ var count C.uint32_t
+ if err := vkErr(C.vkEnumeratePhysicalDevices(funcs.vkEnumeratePhysicalDevices, inst, &count, nil)); err != nil {
+ return nil, fmt.Errorf("vulkan: vkEnumeratePhysicalDevices: %w", err)
+ }
+ if count == 0 {
+ return nil, nil
+ }
+ devs := make([]C.VkPhysicalDevice, count)
+ if err := vkErr(C.vkEnumeratePhysicalDevices(funcs.vkEnumeratePhysicalDevices, inst, &count, &devs[0])); err != nil {
+ return nil, fmt.Errorf("vulkan: vkEnumeratePhysicalDevices: %w", err)
+ }
+ return devs, nil
+}
+
+// ChoosePhysicalDevice returns the first physical device that offers a queue
+// family supporting both graphics and compute, along with that family's
+// index. If surf is a valid surface, the device must additionally support
+// presenting to it with at least one surface format and present mode.
+//
+// Fix: the original compared surf against the untyped literal 0 in one place
+// and against nilSurface in another; use nilSurface consistently (the literal
+// comparison does not compile where VkSurfaceKHR is a pointer typedef).
+func ChoosePhysicalDevice(inst Instance, surf Surface) (PhysicalDevice, int, error) {
+ devs, err := EnumeratePhysicalDevices(inst)
+ if err != nil {
+ return nil, 0, err
+ }
+ for _, pd := range devs {
+ var props C.VkPhysicalDeviceProperties
+ C.vkGetPhysicalDeviceProperties(funcs.vkGetPhysicalDeviceProperties, pd, &props)
+ // The lavapipe software implementation doesn't work well rendering to a surface.
+ // See https://gitlab.freedesktop.org/mesa/mesa/-/issues/5473.
+ if surf != nilSurface && props.deviceType == C.VK_PHYSICAL_DEVICE_TYPE_CPU {
+ continue
+ }
+ const caps = C.VK_QUEUE_GRAPHICS_BIT | C.VK_QUEUE_COMPUTE_BIT
+ queueIdx, ok, err := chooseQueue(pd, surf, caps)
+ if err != nil {
+ return nil, 0, err
+ }
+ if !ok {
+ continue
+ }
+ if surf != nilSurface {
+ // Presentation requires at least one supported format and mode.
+ _, fmtFound, err := chooseFormat(pd, surf)
+ if err != nil {
+ return nil, 0, err
+ }
+ _, modFound, err := choosePresentMode(pd, surf)
+ if err != nil {
+ return nil, 0, err
+ }
+ if !fmtFound || !modFound {
+ continue
+ }
+ }
+ return pd, queueIdx, nil
+ }
+ return nil, 0, errors.New("vulkan: no suitable device found")
+}
+
+// CreateDeviceAndQueue creates a logical device on pd with one queue from
+// queueIdx's family and the given device extensions enabled. Release with
+// vkDestroyDevice (via this package's destroy wrapper).
+func CreateDeviceAndQueue(pd C.VkPhysicalDevice, queueIdx int, exts ...string) (Device, error) {
+ priority := C.float(1.0)
+ qinf := C.VkDeviceQueueCreateInfo{
+ sType: C.VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO,
+ queueCount: 1,
+ queueFamilyIndex: C.uint32_t(queueIdx),
+ pQueuePriorities: &priority,
+ }
+ inf := C.VkDeviceCreateInfo{
+ sType: C.VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO,
+ queueCreateInfoCount: 1,
+ enabledExtensionCount: C.uint32_t(len(exts)),
+ }
+ if len(exts) > 0 {
+ cexts := mallocCStringArr(exts)
+ defer freeCStringArr(cexts)
+ inf.ppEnabledExtensionNames = &cexts[0]
+ }
+ var dev Device
+ // inf and qinf are passed by value; presumably the C-side wrapper (outside
+ // this view) wires qinf into inf.pQueueCreateInfos — TODO confirm.
+ if err := vkErr(C.vkCreateDevice(funcs.vkCreateDevice, pd, inf, qinf, nil, &dev)); err != nil {
+ return nil, fmt.Errorf("vulkan: vkCreateDevice: %w", err)
+ }
+ return dev, nil
+}
+
+// GetDeviceQueue returns the queueIndex'th queue of the given queue family
+// on device d.
+func GetDeviceQueue(d Device, queueFamily, queueIndex int) Queue {
+ var queue Queue
+ C.vkGetDeviceQueue(funcs.vkGetDeviceQueue, d, C.uint32_t(queueFamily), C.uint32_t(queueIndex), &queue)
+ return queue
+}
+
+// GetPhysicalDeviceSurfaceCapabilities queries the capabilities (image
+// counts, extents, transforms, composite alpha modes) of surf on pd.
+func GetPhysicalDeviceSurfaceCapabilities(pd PhysicalDevice, surf Surface) (SurfaceCapabilities, error) {
+ var caps C.VkSurfaceCapabilitiesKHR
+ err := vkErr(C.vkGetPhysicalDeviceSurfaceCapabilitiesKHR(funcs.vkGetPhysicalDeviceSurfaceCapabilitiesKHR, pd, surf, &caps))
+ if err != nil {
+ return SurfaceCapabilities{}, fmt.Errorf("vulkan: vkGetPhysicalDeviceSurfaceCapabilitiesKHR: %w", err)
+ }
+ return caps, nil
+}
+
+// CreateSwapchain creates a swapchain for surf of the given dimensions,
+// replacing old (which may be nilSwapchain), and returns the swapchain, its
+// backing images and the chosen image format.
+//
+// Fix: the composite-alpha search loop could spin forever if
+// caps.supportedCompositeAlpha had no bit set (once the probe bit shifted
+// out to zero, the masked test stayed zero indefinitely); it now fails with
+// an error instead.
+func CreateSwapchain(pd PhysicalDevice, d Device, surf Surface, width, height int, old Swapchain) (Swapchain, []Image, Format, error) {
+ caps, err := GetPhysicalDeviceSurfaceCapabilities(pd, surf)
+ if err != nil {
+ return nilSwapchain, nil, 0, err
+ }
+ mode, modeOK, err := choosePresentMode(pd, surf)
+ if err != nil {
+ return nilSwapchain, nil, 0, err
+ }
+ format, fmtOK, err := chooseFormat(pd, surf)
+ if err != nil {
+ return nilSwapchain, nil, 0, err
+ }
+ if !modeOK || !fmtOK {
+ // This shouldn't happen because CreateDeviceAndQueue found at least
+ // one valid format and present mode.
+ return nilSwapchain, nil, 0, errors.New("vulkan: no valid format and present mode found")
+ }
+ // Find supported alpha composite mode. It doesn't matter which one, because rendering is
+ // always opaque.
+ alphaComp := C.VkCompositeAlphaFlagBitsKHR(1)
+ for caps.supportedCompositeAlpha&C.VkCompositeAlphaFlagsKHR(alphaComp) == 0 {
+ alphaComp <<= 1
+ if alphaComp == 0 {
+ // Probe bit shifted past the flag width without a match; the
+ // spec requires at least one supported mode, but guard anyway.
+ return nilSwapchain, nil, 0, errors.New("vulkan: no composite alpha mode supported")
+ }
+ }
+ trans := C.VkSurfaceTransformFlagBitsKHR(C.VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR)
+ if caps.supportedTransforms&C.VkSurfaceTransformFlagsKHR(trans) == 0 {
+ return nilSwapchain, nil, 0, errors.New("vulkan: VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR not supported")
+ }
+ inf := C.VkSwapchainCreateInfoKHR{
+ sType: C.VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR,
+ surface: surf,
+ minImageCount: caps.minImageCount,
+ imageFormat: format.format,
+ imageColorSpace: format.colorSpace,
+ imageExtent: C.VkExtent2D{width: C.uint32_t(width), height: C.uint32_t(height)},
+ imageArrayLayers: 1,
+ imageUsage: C.VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT,
+ imageSharingMode: C.VK_SHARING_MODE_EXCLUSIVE,
+ preTransform: trans,
+ presentMode: mode,
+ compositeAlpha: C.VkCompositeAlphaFlagBitsKHR(alphaComp),
+ clipped: C.VK_TRUE,
+ oldSwapchain: old,
+ }
+ var swchain Swapchain
+ if err := vkErr(C.vkCreateSwapchainKHR(funcs.vkCreateSwapchainKHR, d, &inf, nil, &swchain)); err != nil {
+ return nilSwapchain, nil, 0, fmt.Errorf("vulkan: vkCreateSwapchainKHR: %w", err)
+ }
+ // Fetch the backing images (two-call enumeration); on any failure the
+ // just-created swapchain is destroyed so nothing leaks.
+ var count C.uint32_t
+ if err := vkErr(C.vkGetSwapchainImagesKHR(funcs.vkGetSwapchainImagesKHR, d, swchain, &count, nil)); err != nil {
+ DestroySwapchain(d, swchain)
+ return nilSwapchain, nil, 0, fmt.Errorf("vulkan: vkGetSwapchainImagesKHR: %w", err)
+ }
+ if count == 0 {
+ DestroySwapchain(d, swchain)
+ return nilSwapchain, nil, 0, errors.New("vulkan: vkGetSwapchainImagesKHR returned no images")
+ }
+ imgs := make([]Image, count)
+ if err := vkErr(C.vkGetSwapchainImagesKHR(funcs.vkGetSwapchainImagesKHR, d, swchain, &count, &imgs[0])); err != nil {
+ DestroySwapchain(d, swchain)
+ return nilSwapchain, nil, 0, fmt.Errorf("vulkan: vkGetSwapchainImagesKHR: %w", err)
+ }
+ return swchain, imgs, format.format, nil
+}
+
+// DestroySwapchain releases a swapchain created by CreateSwapchain.
+func DestroySwapchain(d Device, swchain Swapchain) {
+ C.vkDestroySwapchainKHR(funcs.vkDestroySwapchainKHR, d, swchain, nil)
+}
+
+// AcquireNextImage blocks (timeout math.MaxUint64 = wait forever) until a
+// swapchain image is available, signalling sem and/or fence, and returns the
+// image's index. Errors wrap Error codes such as ERROR_OUT_OF_DATE_KHR.
+func AcquireNextImage(d Device, swchain Swapchain, sem Semaphore, fence Fence) (int, error) {
+ res := C.vkAcquireNextImageKHR(funcs.vkAcquireNextImageKHR, d, swchain, math.MaxUint64, sem, fence)
+ if err := vkErr(res.res); err != nil {
+ return 0, fmt.Errorf("vulkan: vkAcquireNextImageKHR: %w", err)
+ }
+ return int(res.uint), nil
+}
+
+// PresentQueue presents image imgIdx of swchain on q, optionally waiting for
+// sem (pass nilSemaphore to present without waiting).
+func PresentQueue(q Queue, swchain Swapchain, sem Semaphore, imgIdx int) error {
+ cidx := C.uint32_t(imgIdx)
+ inf := C.VkPresentInfoKHR{
+ sType: C.VK_STRUCTURE_TYPE_PRESENT_INFO_KHR,
+ swapchainCount: 1,
+ pSwapchains: &swchain,
+ pImageIndices: &cidx,
+ }
+ if sem != nilSemaphore {
+ inf.waitSemaphoreCount = 1
+ inf.pWaitSemaphores = &sem
+ }
+ // inf is passed by value; the C-side vkQueuePresentKHR wrapper takes the
+ // struct's address itself.
+ if err := vkErr(C.vkQueuePresentKHR(funcs.vkQueuePresentKHR, q, inf)); err != nil {
+ return fmt.Errorf("vulkan: vkQueuePresentKHR: %w", err)
+ }
+ return nil
+}
+
+// CreateImageView creates a 2D color view covering all mip levels and array
+// layers of img. Release with DestroyImageView.
+func CreateImageView(d Device, img Image, format Format) (ImageView, error) {
+ inf := C.VkImageViewCreateInfo{
+ sType: C.VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
+ image: img,
+ viewType: C.VK_IMAGE_VIEW_TYPE_2D,
+ format: format,
+ subresourceRange: C.VkImageSubresourceRange{
+ aspectMask: C.VK_IMAGE_ASPECT_COLOR_BIT,
+ levelCount: C.VK_REMAINING_MIP_LEVELS,
+ layerCount: C.VK_REMAINING_ARRAY_LAYERS,
+ },
+ }
+ var view C.VkImageView
+ if err := vkErr(C.vkCreateImageView(funcs.vkCreateImageView, d, &inf, nil, &view)); err != nil {
+ return nilImageView, fmt.Errorf("vulkan: vkCreateImageView: %w", err)
+ }
+ return view, nil
+}
+
+// DestroyImageView releases a view created by CreateImageView.
+func DestroyImageView(d Device, view ImageView) {
+ C.vkDestroyImageView(funcs.vkDestroyImageView, d, view, nil)
+}
+
+// CreateRenderPass creates a single-subpass render pass with one color
+// attachment of the given format, load op, layout transition and optional
+// subpass dependencies. Release with DestroyRenderPass.
+func CreateRenderPass(d Device, format Format, loadOp AttachmentLoadOp, initialLayout, finalLayout ImageLayout, passDeps []SubpassDependency) (RenderPass, error) {
+ att := C.VkAttachmentDescription{
+ format: format,
+ samples: C.VK_SAMPLE_COUNT_1_BIT,
+ loadOp: loadOp,
+ storeOp: C.VK_ATTACHMENT_STORE_OP_STORE,
+ stencilLoadOp: C.VK_ATTACHMENT_LOAD_OP_DONT_CARE,
+ stencilStoreOp: C.VK_ATTACHMENT_STORE_OP_DONT_CARE,
+ initialLayout: initialLayout,
+ finalLayout: finalLayout,
+ }
+
+ ref := C.VkAttachmentReference{
+ attachment: 0,
+ layout: C.VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
+ }
+
+ sub := C.VkSubpassDescription{
+ pipelineBindPoint: C.VK_PIPELINE_BIND_POINT_GRAPHICS,
+ colorAttachmentCount: 1,
+ pColorAttachments: &ref,
+ }
+
+ inf := C.VkRenderPassCreateInfo{
+ sType: C.VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO,
+ attachmentCount: 1,
+ pAttachments: &att,
+ subpassCount: 1,
+ }
+ if n := len(passDeps); n > 0 {
+ inf.dependencyCount = C.uint32_t(n)
+ inf.pDependencies = &passDeps[0]
+ }
+
+ var pass RenderPass
+ // inf and sub are passed by value; presumably the C-side wrapper (outside
+ // this view) wires sub into inf.pSubpasses — TODO confirm.
+ if err := vkErr(C.vkCreateRenderPass(funcs.vkCreateRenderPass, d, inf, sub, nil, &pass)); err != nil {
+ return nilRenderPass, fmt.Errorf("vulkan: vkCreateRenderPass: %w", err)
+ }
+ return pass, nil
+}
+
+func DestroyRenderPass(d Device, r RenderPass) {
+ C.vkDestroyRenderPass(funcs.vkDestroyRenderPass, d, r, nil)
+}
+
+func CreateFramebuffer(d Device, rp RenderPass, view ImageView, width, height int) (Framebuffer, error) {
+ inf := C.VkFramebufferCreateInfo{
+ sType: C.VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO,
+ renderPass: rp,
+ attachmentCount: 1,
+ pAttachments: &view,
+ width: C.uint32_t(width),
+ height: C.uint32_t(height),
+ layers: 1,
+ }
+ var fbo Framebuffer
+ if err := vkErr(C.vkCreateFramebuffer(funcs.vkCreateFramebuffer, d, inf, nil, &fbo)); err != nil {
+ return nilFramebuffer, fmt.Errorf("vulkan: vkCreateFramebuffer: %w", err)
+ }
+ return fbo, nil
+
+}
+
+func DestroyFramebuffer(d Device, f Framebuffer) {
+ C.vkDestroyFramebuffer(funcs.vkDestroyFramebuffer, d, f, nil)
+}
+
+func DeviceWaitIdle(d Device) error {
+ if err := vkErr(C.vkDeviceWaitIdle(funcs.vkDeviceWaitIdle, d)); err != nil {
+ return fmt.Errorf("vulkan: vkDeviceWaitIdle: %w", err)
+ }
+ return nil
+}
+
+func QueueWaitIdle(q Queue) error {
+ if err := vkErr(C.vkQueueWaitIdle(funcs.vkQueueWaitIdle, q)); err != nil {
+ return fmt.Errorf("vulkan: vkQueueWaitIdle: %w", err)
+ }
+ return nil
+}
+
+func CreateSemaphore(d Device) (Semaphore, error) {
+ inf := C.VkSemaphoreCreateInfo{
+ sType: C.VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO,
+ }
+ var sem Semaphore
+ err := vkErr(C.vkCreateSemaphore(funcs.vkCreateSemaphore, d, &inf, nil, &sem))
+ if err != nil {
+ return nilSemaphore, fmt.Errorf("vulkan: vkCreateSemaphore: %w", err)
+ }
+ return sem, err
+}
+
+func DestroySemaphore(d Device, sem Semaphore) {
+ C.vkDestroySemaphore(funcs.vkDestroySemaphore, d, sem, nil)
+}
+
+func DestroyDevice(dev Device) {
+ C.vkDestroyDevice(funcs.vkDestroyDevice, dev, nil)
+}
+
+func DestroySurface(inst Instance, s Surface) {
+ C.vkDestroySurfaceKHR(funcs.vkDestroySurfaceKHR, inst, s, nil)
+}
+
+func CreateCommandPool(d Device, queueIndex int) (CommandPool, error) {
+ inf := C.VkCommandPoolCreateInfo{
+ sType: C.VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO,
+ queueFamilyIndex: C.uint32_t(queueIndex),
+ flags: C.VK_COMMAND_POOL_CREATE_TRANSIENT_BIT | C.VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT,
+ }
+
+ var pool CommandPool
+ if err := vkErr(C.vkCreateCommandPool(funcs.vkCreateCommandPool, d, &inf, nil, &pool)); err != nil {
+ return nilCommandPool, fmt.Errorf("vulkan: vkCreateCommandPool: %w", err)
+ }
+ return pool, nil
+}
+
+func DestroyCommandPool(d Device, pool CommandPool) {
+ C.vkDestroyCommandPool(funcs.vkDestroyCommandPool, d, pool, nil)
+}
+
+func AllocateCommandBuffer(d Device, pool CommandPool) (CommandBuffer, error) {
+ inf := C.VkCommandBufferAllocateInfo{
+ sType: C.VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,
+ commandPool: pool,
+ level: C.VK_COMMAND_BUFFER_LEVEL_PRIMARY,
+ commandBufferCount: 1,
+ }
+
+ var buf CommandBuffer
+ if err := vkErr(C.vkAllocateCommandBuffers(funcs.vkAllocateCommandBuffers, d, &inf, &buf)); err != nil {
+ return nil, fmt.Errorf("vulkan: vkAllocateCommandBuffers: %w", err)
+ }
+ return buf, nil
+}
+
+func FreeCommandBuffers(d Device, pool CommandPool, bufs ...CommandBuffer) {
+ if len(bufs) == 0 {
+ return
+ }
+ C.vkFreeCommandBuffers(funcs.vkFreeCommandBuffers, d, pool, C.uint32_t(len(bufs)), &bufs[0])
+}
+
+func BeginCommandBuffer(buf CommandBuffer) error {
+ inf := C.VkCommandBufferBeginInfo{
+ sType: C.VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,
+ flags: C.VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT,
+ }
+ if err := vkErr(C.vkBeginCommandBuffer(funcs.vkBeginCommandBuffer, buf, inf)); err != nil {
+ return fmt.Errorf("vulkan: vkBeginCommandBuffer: %w", err)
+ }
+ return nil
+}
+
+func EndCommandBuffer(buf CommandBuffer) error {
+ if err := vkErr(C.vkEndCommandBuffer(funcs.vkEndCommandBuffer, buf)); err != nil {
+ return fmt.Errorf("vulkan: vkEndCommandBuffer: %w", err)
+ }
+ return nil
+}
+
+func QueueSubmit(q Queue, buf CommandBuffer, waitSems []Semaphore, waitStages []PipelineStageFlags, sigSems []Semaphore, fence Fence) error {
+ inf := C.VkSubmitInfo{
+ sType: C.VK_STRUCTURE_TYPE_SUBMIT_INFO,
+ commandBufferCount: 1,
+ pCommandBuffers: &buf,
+ }
+ if len(waitSems) > 0 {
+ if len(waitSems) != len(waitStages) {
+ panic("len(waitSems) != len(waitStages)")
+ }
+ inf.waitSemaphoreCount = C.uint32_t(len(waitSems))
+ inf.pWaitSemaphores = &waitSems[0]
+ inf.pWaitDstStageMask = &waitStages[0]
+ }
+ if len(sigSems) > 0 {
+ inf.signalSemaphoreCount = C.uint32_t(len(sigSems))
+ inf.pSignalSemaphores = &sigSems[0]
+ }
+ if err := vkErr(C.vkQueueSubmit(funcs.vkQueueSubmit, q, inf, fence)); err != nil {
+ return fmt.Errorf("vulkan: vkQueueSubmit: %w", err)
+ }
+ return nil
+}
+
+func CmdBeginRenderPass(buf CommandBuffer, rp RenderPass, fbo Framebuffer, width, height int, clearCol [4]float32) {
+ cclearCol := [4]C.float{C.float(clearCol[0]), C.float(clearCol[1]), C.float(clearCol[2]), C.float(clearCol[3])}
+ inf := C.VkRenderPassBeginInfo{
+ sType: C.VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO,
+ renderPass: rp,
+ framebuffer: fbo,
+ renderArea: C.VkRect2D{extent: C.VkExtent2D{width: C.uint32_t(width), height: C.uint32_t(height)}},
+ clearValueCount: 1,
+ pClearValues: (*C.VkClearValue)(unsafe.Pointer(&cclearCol)),
+ }
+ C.vkCmdBeginRenderPass(funcs.vkCmdBeginRenderPass, buf, inf, C.VK_SUBPASS_CONTENTS_INLINE)
+}
+
+func CmdEndRenderPass(buf CommandBuffer) {
+ C.vkCmdEndRenderPass(funcs.vkCmdEndRenderPass, buf)
+}
+
+func CmdCopyBuffer(cmdBuf CommandBuffer, src, dst Buffer, srcOff, dstOff, size int) {
+ C.vkCmdCopyBuffer(funcs.vkCmdCopyBuffer, cmdBuf, src, dst, 1, &C.VkBufferCopy{
+ srcOffset: C.VkDeviceSize(srcOff),
+ dstOffset: C.VkDeviceSize(dstOff),
+ size: C.VkDeviceSize(size),
+ })
+}
+
+func CmdCopyBufferToImage(cmdBuf CommandBuffer, src Buffer, dst Image, layout ImageLayout, copy BufferImageCopy) {
+	C.vkCmdCopyBufferToImage(funcs.vkCmdCopyBufferToImage, cmdBuf, src, dst, layout, 1, &copy)
+}
+
+func CmdPipelineBarrier(cmdBuf CommandBuffer, srcStage, dstStage PipelineStageFlags, flags DependencyFlags, memBarriers []MemoryBarrier, bufBarriers []BufferMemoryBarrier, imgBarriers []ImageMemoryBarrier) {
+ var memPtr *MemoryBarrier
+ if len(memBarriers) > 0 {
+ memPtr = &memBarriers[0]
+ }
+ var bufPtr *BufferMemoryBarrier
+ if len(bufBarriers) > 0 {
+ bufPtr = &bufBarriers[0]
+ }
+ var imgPtr *ImageMemoryBarrier
+ if len(imgBarriers) > 0 {
+ imgPtr = &imgBarriers[0]
+ }
+ C.vkCmdPipelineBarrier(funcs.vkCmdPipelineBarrier, cmdBuf, srcStage, dstStage, flags,
+ C.uint32_t(len(memBarriers)), memPtr,
+ C.uint32_t(len(bufBarriers)), bufPtr,
+ C.uint32_t(len(imgBarriers)), imgPtr)
+}
+
+func CmdPushConstants(cmdBuf CommandBuffer, layout PipelineLayout, stages ShaderStageFlags, offset int, data []byte) {
+ if len(data) == 0 {
+ return
+ }
+ C.vkCmdPushConstants(funcs.vkCmdPushConstants, cmdBuf, layout, stages, C.uint32_t(offset), C.uint32_t(len(data)), unsafe.Pointer(&data[0]))
+}
+
+func CmdBindPipeline(cmdBuf CommandBuffer, bindPoint PipelineBindPoint, pipe Pipeline) {
+ C.vkCmdBindPipeline(funcs.vkCmdBindPipeline, cmdBuf, bindPoint, pipe)
+}
+
+func CmdBindVertexBuffers(cmdBuf CommandBuffer, first int, buffers []Buffer, sizes []DeviceSize) {
+ if len(buffers) == 0 {
+ return
+ }
+ C.vkCmdBindVertexBuffers(funcs.vkCmdBindVertexBuffers, cmdBuf, C.uint32_t(first), C.uint32_t(len(buffers)), &buffers[0], &sizes[0])
+}
+
+func CmdSetViewport(cmdBuf CommandBuffer, first int, viewports ...Viewport) {
+ if len(viewports) == 0 {
+ return
+ }
+ C.vkCmdSetViewport(funcs.vkCmdSetViewport, cmdBuf, C.uint32_t(first), C.uint32_t(len(viewports)), &viewports[0])
+}
+
+func CmdBindIndexBuffer(cmdBuf CommandBuffer, buffer Buffer, offset int, typ IndexType) {
+ C.vkCmdBindIndexBuffer(funcs.vkCmdBindIndexBuffer, cmdBuf, buffer, C.VkDeviceSize(offset), typ)
+}
+
+func CmdDraw(cmdBuf CommandBuffer, vertCount, instCount, firstVert, firstInst int) {
+ C.vkCmdDraw(funcs.vkCmdDraw, cmdBuf, C.uint32_t(vertCount), C.uint32_t(instCount), C.uint32_t(firstVert), C.uint32_t(firstInst))
+}
+
+func CmdDrawIndexed(cmdBuf CommandBuffer, idxCount, instCount, firstIdx, vertOff, firstInst int) {
+ C.vkCmdDrawIndexed(funcs.vkCmdDrawIndexed, cmdBuf, C.uint32_t(idxCount), C.uint32_t(instCount), C.uint32_t(firstIdx), C.int32_t(vertOff), C.uint32_t(firstInst))
+}
+
+func GetPhysicalDeviceFormatProperties(physDev PhysicalDevice, format Format) FormatFeatureFlags {
+ var props C.VkFormatProperties
+ C.vkGetPhysicalDeviceFormatProperties(funcs.vkGetPhysicalDeviceFormatProperties, physDev, format, &props)
+ return FormatFeatureFlags(props.optimalTilingFeatures)
+}
+
+func CmdBindDescriptorSets(cmdBuf CommandBuffer, point PipelineBindPoint, layout PipelineLayout, firstSet int, sets []DescriptorSet) {
+ C.vkCmdBindDescriptorSets(funcs.vkCmdBindDescriptorSets, cmdBuf, point, layout, C.uint32_t(firstSet), C.uint32_t(len(sets)), &sets[0], 0, nil)
+}
+
+func CmdCopyImage(cmdBuf CommandBuffer, src Image, srcLayout ImageLayout, dst Image, dstLayout ImageLayout, regions []ImageCopy) {
+ if len(regions) == 0 {
+ return
+ }
+	C.vkCmdCopyImage(funcs.vkCmdCopyImage, cmdBuf, src, srcLayout, dst, dstLayout, C.uint32_t(len(regions)), &regions[0])
+}
+
+func CmdCopyImageToBuffer(cmdBuf CommandBuffer, src Image, srcLayout ImageLayout, dst Buffer, regions []BufferImageCopy) {
+ if len(regions) == 0 {
+ return
+ }
+	C.vkCmdCopyImageToBuffer(funcs.vkCmdCopyImageToBuffer, cmdBuf, src, srcLayout, dst, C.uint32_t(len(regions)), &regions[0])
+}
+
+func CmdDispatch(cmdBuf CommandBuffer, x, y, z int) {
+ C.vkCmdDispatch(funcs.vkCmdDispatch, cmdBuf, C.uint32_t(x), C.uint32_t(y), C.uint32_t(z))
+}
+
+func CreateImage(pd PhysicalDevice, d Device, format Format, width, height int, usage ImageUsageFlags) (Image, DeviceMemory, error) {
+ inf := C.VkImageCreateInfo{
+ sType: C.VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
+ imageType: C.VK_IMAGE_TYPE_2D,
+ format: format,
+ extent: C.VkExtent3D{
+ width: C.uint32_t(width),
+ height: C.uint32_t(height),
+ depth: 1,
+ },
+ mipLevels: 1,
+ arrayLayers: 1,
+ samples: C.VK_SAMPLE_COUNT_1_BIT,
+ tiling: C.VK_IMAGE_TILING_OPTIMAL,
+ usage: usage,
+ initialLayout: C.VK_IMAGE_LAYOUT_UNDEFINED,
+ }
+ var img C.VkImage
+ if err := vkErr(C.vkCreateImage(funcs.vkCreateImage, d, &inf, nil, &img)); err != nil {
+ return nilImage, nilDeviceMemory, fmt.Errorf("vulkan: vkCreateImage: %w", err)
+ }
+ var memReqs C.VkMemoryRequirements
+ C.vkGetImageMemoryRequirements(funcs.vkGetImageMemoryRequirements, d, img, &memReqs)
+
+ memIdx, found := findMemoryTypeIndex(pd, memReqs.memoryTypeBits, C.VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT)
+ if !found {
+ DestroyImage(d, img)
+ return nilImage, nilDeviceMemory, errors.New("vulkan: no memory type suitable for images found")
+ }
+
+ memInf := C.VkMemoryAllocateInfo{
+ sType: C.VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
+ allocationSize: memReqs.size,
+ memoryTypeIndex: C.uint32_t(memIdx),
+ }
+ var imgMem C.VkDeviceMemory
+ if err := vkErr(C.vkAllocateMemory(funcs.vkAllocateMemory, d, &memInf, nil, &imgMem)); err != nil {
+ DestroyImage(d, img)
+ return nilImage, nilDeviceMemory, fmt.Errorf("vulkan: vkAllocateMemory: %w", err)
+ }
+
+ if err := vkErr(C.vkBindImageMemory(funcs.vkBindImageMemory, d, img, imgMem, 0)); err != nil {
+ FreeMemory(d, imgMem)
+ DestroyImage(d, img)
+ return nilImage, nilDeviceMemory, fmt.Errorf("vulkan: vkBindImageMemory: %w", err)
+ }
+ return img, imgMem, nil
+}
+
+func DestroyImage(d Device, img Image) {
+ C.vkDestroyImage(funcs.vkDestroyImage, d, img, nil)
+}
+
+func FreeMemory(d Device, mem DeviceMemory) {
+ C.vkFreeMemory(funcs.vkFreeMemory, d, mem, nil)
+}
+
+func CreateSampler(d Device, minFilter, magFilter Filter) (Sampler, error) {
+ inf := C.VkSamplerCreateInfo{
+ sType: C.VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO,
+ minFilter: minFilter,
+ magFilter: magFilter,
+ addressModeU: C.VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE,
+ addressModeV: C.VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE,
+ }
+ var s C.VkSampler
+ if err := vkErr(C.vkCreateSampler(funcs.vkCreateSampler, d, &inf, nil, &s)); err != nil {
+ return nilSampler, fmt.Errorf("vulkan: vkCreateSampler: %w", err)
+ }
+ return s, nil
+}
+
+func DestroySampler(d Device, sampler Sampler) {
+ C.vkDestroySampler(funcs.vkDestroySampler, d, sampler, nil)
+}
+
+func CreateBuffer(pd PhysicalDevice, d Device, size int, usage BufferUsageFlags, props MemoryPropertyFlags) (Buffer, DeviceMemory, error) {
+ inf := C.VkBufferCreateInfo{
+ sType: C.VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
+ size: C.VkDeviceSize(size),
+ usage: usage,
+ }
+ var buf C.VkBuffer
+ if err := vkErr(C.vkCreateBuffer(funcs.vkCreateBuffer, d, &inf, nil, &buf)); err != nil {
+ return nilBuffer, nilDeviceMemory, fmt.Errorf("vulkan: vkCreateBuffer: %w", err)
+ }
+
+ var memReqs C.VkMemoryRequirements
+ C.vkGetBufferMemoryRequirements(funcs.vkGetBufferMemoryRequirements, d, buf, &memReqs)
+
+ memIdx, found := findMemoryTypeIndex(pd, memReqs.memoryTypeBits, props)
+ if !found {
+ DestroyBuffer(d, buf)
+ return nilBuffer, nilDeviceMemory, errors.New("vulkan: no memory suitable for buffers found")
+ }
+ memInf := C.VkMemoryAllocateInfo{
+ sType: C.VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
+ allocationSize: memReqs.size,
+ memoryTypeIndex: C.uint32_t(memIdx),
+ }
+
+ var mem C.VkDeviceMemory
+ if err := vkErr(C.vkAllocateMemory(funcs.vkAllocateMemory, d, &memInf, nil, &mem)); err != nil {
+ DestroyBuffer(d, buf)
+ return nilBuffer, nilDeviceMemory, fmt.Errorf("vulkan: vkAllocateMemory: %w", err)
+ }
+
+ if err := vkErr(C.vkBindBufferMemory(funcs.vkBindBufferMemory, d, buf, mem, 0)); err != nil {
+ FreeMemory(d, mem)
+ DestroyBuffer(d, buf)
+ return nilBuffer, nilDeviceMemory, fmt.Errorf("vulkan: vkBindBufferMemory: %w", err)
+ }
+ return buf, mem, nil
+}
+
+func DestroyBuffer(d Device, buf Buffer) {
+ C.vkDestroyBuffer(funcs.vkDestroyBuffer, d, buf, nil)
+}
+
+func CreateShaderModule(d Device, spirv string) (ShaderModule, error) {
+ ptr := unsafe.Pointer((*reflect.StringHeader)(unsafe.Pointer(&spirv)).Data)
+ inf := C.VkShaderModuleCreateInfo{
+ sType: C.VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO,
+ codeSize: C.size_t(len(spirv)),
+ pCode: (*C.uint32_t)(ptr),
+ }
+
+ var mod C.VkShaderModule
+ if err := vkErr(C.vkCreateShaderModule(funcs.vkCreateShaderModule, d, inf, nil, &mod)); err != nil {
+ return nilShaderModule, fmt.Errorf("vulkan: vkCreateShaderModule: %w", err)
+ }
+ return mod, nil
+}
+
+func DestroyShaderModule(d Device, mod ShaderModule) {
+ C.vkDestroyShaderModule(funcs.vkDestroyShaderModule, d, mod, nil)
+}
+
+func CreateGraphicsPipeline(d Device, pass RenderPass, vmod, fmod ShaderModule, blend bool, srcFactor, dstFactor BlendFactor, topology PrimitiveTopology, bindings []VertexInputBindingDescription, attrs []VertexInputAttributeDescription, layout PipelineLayout) (Pipeline, error) {
+ main := C.CString("main")
+ defer C.free(unsafe.Pointer(main))
+ stages := []C.VkPipelineShaderStageCreateInfo{
+ {
+ sType: C.VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
+ stage: C.VK_SHADER_STAGE_VERTEX_BIT,
+ module: vmod,
+ pName: main,
+ },
+ {
+ sType: C.VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
+ stage: C.VK_SHADER_STAGE_FRAGMENT_BIT,
+ module: fmod,
+ pName: main,
+ },
+ }
+ dynStates := []C.VkDynamicState{C.VK_DYNAMIC_STATE_VIEWPORT}
+ dynInf := C.VkPipelineDynamicStateCreateInfo{
+ sType: C.VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO,
+ dynamicStateCount: C.uint32_t(len(dynStates)),
+ pDynamicStates: &dynStates[0],
+ }
+ const maxDim = 0x7fffffff
+ scissors := []C.VkRect2D{{extent: C.VkExtent2D{width: maxDim, height: maxDim}}}
+ viewportInf := C.VkPipelineViewportStateCreateInfo{
+ sType: C.VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO,
+ viewportCount: 1,
+ scissorCount: C.uint32_t(len(scissors)),
+ pScissors: &scissors[0],
+ }
+ enable := C.VkBool32(0)
+ if blend {
+ enable = 1
+ }
+ attBlendInf := C.VkPipelineColorBlendAttachmentState{
+ blendEnable: enable,
+ srcColorBlendFactor: srcFactor,
+ srcAlphaBlendFactor: srcFactor,
+ dstColorBlendFactor: dstFactor,
+ dstAlphaBlendFactor: dstFactor,
+ colorBlendOp: C.VK_BLEND_OP_ADD,
+ alphaBlendOp: C.VK_BLEND_OP_ADD,
+ colorWriteMask: C.VK_COLOR_COMPONENT_R_BIT | C.VK_COLOR_COMPONENT_G_BIT | C.VK_COLOR_COMPONENT_B_BIT | C.VK_COLOR_COMPONENT_A_BIT,
+ }
+ blendInf := C.VkPipelineColorBlendStateCreateInfo{
+ sType: C.VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO,
+ attachmentCount: 1,
+ pAttachments: &attBlendInf,
+ }
+ var vkBinds []C.VkVertexInputBindingDescription
+ var vkAttrs []C.VkVertexInputAttributeDescription
+ for _, b := range bindings {
+ vkBinds = append(vkBinds, C.VkVertexInputBindingDescription{
+ binding: C.uint32_t(b.Binding),
+ stride: C.uint32_t(b.Stride),
+ })
+ }
+ for _, a := range attrs {
+ vkAttrs = append(vkAttrs, C.VkVertexInputAttributeDescription{
+ location: C.uint32_t(a.Location),
+ binding: C.uint32_t(a.Binding),
+ format: a.Format,
+ offset: C.uint32_t(a.Offset),
+ })
+ }
+ vertexInf := C.VkPipelineVertexInputStateCreateInfo{
+ sType: C.VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO,
+ }
+ if n := len(vkBinds); n > 0 {
+ vertexInf.vertexBindingDescriptionCount = C.uint32_t(n)
+ vertexInf.pVertexBindingDescriptions = &vkBinds[0]
+ }
+ if n := len(vkAttrs); n > 0 {
+ vertexInf.vertexAttributeDescriptionCount = C.uint32_t(n)
+ vertexInf.pVertexAttributeDescriptions = &vkAttrs[0]
+ }
+ inf := C.VkGraphicsPipelineCreateInfo{
+ sType: C.VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO,
+ stageCount: C.uint32_t(len(stages)),
+ pStages: &stages[0],
+ renderPass: pass,
+ layout: layout,
+ pRasterizationState: &C.VkPipelineRasterizationStateCreateInfo{
+ sType: C.VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO,
+ lineWidth: 1.0,
+ },
+ pMultisampleState: &C.VkPipelineMultisampleStateCreateInfo{
+ sType: C.VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO,
+ rasterizationSamples: C.VK_SAMPLE_COUNT_1_BIT,
+ },
+ pInputAssemblyState: &C.VkPipelineInputAssemblyStateCreateInfo{
+ sType: C.VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO,
+ topology: topology,
+ },
+ }
+
+ var pipe C.VkPipeline
+ if err := vkErr(C.vkCreateGraphicsPipelines(funcs.vkCreateGraphicsPipelines, d, nilPipelineCache, inf, dynInf, blendInf, vertexInf, viewportInf, nil, &pipe)); err != nil {
+ return nilPipeline, fmt.Errorf("vulkan: vkCreateGraphicsPipelines: %w", err)
+ }
+ return pipe, nil
+}
+
+func DestroyPipeline(d Device, p Pipeline) {
+ C.vkDestroyPipeline(funcs.vkDestroyPipeline, d, p, nil)
+}
+
+func CreatePipelineLayout(d Device, pushRanges []PushConstantRange, sets []DescriptorSetLayout) (PipelineLayout, error) {
+ inf := C.VkPipelineLayoutCreateInfo{
+ sType: C.VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,
+ }
+ if n := len(sets); n > 0 {
+ inf.setLayoutCount = C.uint32_t(n)
+ inf.pSetLayouts = &sets[0]
+ }
+ if n := len(pushRanges); n > 0 {
+ inf.pushConstantRangeCount = C.uint32_t(n)
+ inf.pPushConstantRanges = &pushRanges[0]
+ }
+ var l C.VkPipelineLayout
+ if err := vkErr(C.vkCreatePipelineLayout(funcs.vkCreatePipelineLayout, d, inf, nil, &l)); err != nil {
+ return nilPipelineLayout, fmt.Errorf("vulkan: vkCreatePipelineLayout: %w", err)
+ }
+ return l, nil
+}
+
+func DestroyPipelineLayout(d Device, l PipelineLayout) {
+ C.vkDestroyPipelineLayout(funcs.vkDestroyPipelineLayout, d, l, nil)
+}
+
+func CreateDescriptorSetLayout(d Device, bindings []DescriptorSetLayoutBinding) (DescriptorSetLayout, error) {
+ var vkbinds []C.VkDescriptorSetLayoutBinding
+ for _, b := range bindings {
+ vkbinds = append(vkbinds, C.VkDescriptorSetLayoutBinding{
+ binding: C.uint32_t(b.Binding),
+ descriptorType: b.DescriptorType,
+ descriptorCount: 1,
+ stageFlags: b.StageFlags,
+ })
+ }
+ inf := C.VkDescriptorSetLayoutCreateInfo{
+ sType: C.VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
+ }
+ if n := len(vkbinds); n > 0 {
+ inf.bindingCount = C.uint32_t(len(vkbinds))
+ inf.pBindings = &vkbinds[0]
+ }
+ var l C.VkDescriptorSetLayout
+ if err := vkErr(C.vkCreateDescriptorSetLayout(funcs.vkCreateDescriptorSetLayout, d, inf, nil, &l)); err != nil {
+ return nilDescriptorSetLayout, fmt.Errorf("vulkan: vkCreateDescriptorSetLayout: %w", err)
+ }
+ return l, nil
+}
+
+func DestroyDescriptorSetLayout(d Device, l DescriptorSetLayout) {
+ C.vkDestroyDescriptorSetLayout(funcs.vkDestroyDescriptorSetLayout, d, l, nil)
+}
+
+func MapMemory(d Device, mem DeviceMemory, offset, size int) ([]byte, error) {
+ var ptr unsafe.Pointer
+ if err := vkErr(C.vkMapMemory(funcs.vkMapMemory, d, mem, C.VkDeviceSize(offset), C.VkDeviceSize(size), 0, &ptr)); err != nil {
+ return nil, fmt.Errorf("vulkan: vkMapMemory: %w", err)
+ }
+ return ((*[1 << 30]byte)(ptr))[:size:size], nil
+}
+
+func UnmapMemory(d Device, mem DeviceMemory) {
+ C.vkUnmapMemory(funcs.vkUnmapMemory, d, mem)
+}
+
+func ResetCommandBuffer(buf CommandBuffer) error {
+ if err := vkErr(C.vkResetCommandBuffer(funcs.vkResetCommandBuffer, buf, 0)); err != nil {
+		return fmt.Errorf("vulkan: vkResetCommandBuffer: %w", err)
+ }
+ return nil
+}
+
+func CreateDescriptorPool(d Device, maxSets int, sizes []DescriptorPoolSize) (DescriptorPool, error) {
+ inf := C.VkDescriptorPoolCreateInfo{
+ sType: C.VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
+ maxSets: C.uint32_t(maxSets),
+ poolSizeCount: C.uint32_t(len(sizes)),
+ pPoolSizes: &sizes[0],
+ }
+ var pool C.VkDescriptorPool
+ if err := vkErr(C.vkCreateDescriptorPool(funcs.vkCreateDescriptorPool, d, inf, nil, &pool)); err != nil {
+ return nilDescriptorPool, fmt.Errorf("vulkan: vkCreateDescriptorPool: %w", err)
+ }
+ return pool, nil
+}
+
+func DestroyDescriptorPool(d Device, pool DescriptorPool) {
+ C.vkDestroyDescriptorPool(funcs.vkDestroyDescriptorPool, d, pool, nil)
+}
+
+func ResetDescriptorPool(d Device, pool DescriptorPool) error {
+ if err := vkErr(C.vkResetDescriptorPool(funcs.vkResetDescriptorPool, d, pool, 0)); err != nil {
+ return fmt.Errorf("vulkan: vkResetDescriptorPool: %w", err)
+ }
+ return nil
+}
+
+func UpdateDescriptorSet(d Device, write WriteDescriptorSet) {
+ C.vkUpdateDescriptorSets(funcs.vkUpdateDescriptorSets, d, write, 0, nil)
+}
+
+func AllocateDescriptorSet(d Device, pool DescriptorPool, layout DescriptorSetLayout) (DescriptorSet, error) {
+ inf := C.VkDescriptorSetAllocateInfo{
+ sType: C.VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
+ descriptorPool: pool,
+ descriptorSetCount: 1,
+ pSetLayouts: &layout,
+ }
+ var set C.VkDescriptorSet
+ if err := vkErr(C.vkAllocateDescriptorSets(funcs.vkAllocateDescriptorSets, d, inf, &set)); err != nil {
+ return nilDescriptorSet, fmt.Errorf("vulkan: vkAllocateDescriptorSets: %w", err)
+ }
+ return set, nil
+}
+
+func CreateComputePipeline(d Device, mod ShaderModule, layout PipelineLayout) (Pipeline, error) {
+ main := C.CString("main")
+ defer C.free(unsafe.Pointer(main))
+ inf := C.VkComputePipelineCreateInfo{
+ sType: C.VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,
+ stage: C.VkPipelineShaderStageCreateInfo{
+ sType: C.VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
+ stage: C.VK_SHADER_STAGE_COMPUTE_BIT,
+ module: mod,
+ pName: main,
+ },
+ layout: layout,
+ }
+ var pipe C.VkPipeline
+ if err := vkErr(C.vkCreateComputePipelines(funcs.vkCreateComputePipelines, d, nilPipelineCache, 1, &inf, nil, &pipe)); err != nil {
+ return nilPipeline, fmt.Errorf("vulkan: vkCreateComputePipelines: %w", err)
+ }
+ return pipe, nil
+}
+
+func CreateFence(d Device) (Fence, error) {
+ inf := C.VkFenceCreateInfo{
+ sType: C.VK_STRUCTURE_TYPE_FENCE_CREATE_INFO,
+ }
+ var f C.VkFence
+ if err := vkErr(C.vkCreateFence(funcs.vkCreateFence, d, &inf, nil, &f)); err != nil {
+ return nilFence, fmt.Errorf("vulkan: vkCreateFence: %w", err)
+ }
+ return f, nil
+}
+
+func DestroyFence(d Device, f Fence) {
+ C.vkDestroyFence(funcs.vkDestroyFence, d, f, nil)
+}
+
+func WaitForFences(d Device, fences ...Fence) error {
+ if len(fences) == 0 {
+ return nil
+ }
+ err := vkErr(C.vkWaitForFences(funcs.vkWaitForFences, d, C.uint32_t(len(fences)), &fences[0], C.VK_TRUE, 0xffffffffffffffff))
+ if err != nil {
+ return fmt.Errorf("vulkan: vkWaitForFences: %w", err)
+ }
+ return nil
+}
+
+func ResetFences(d Device, fences ...Fence) error {
+ if len(fences) == 0 {
+ return nil
+ }
+ err := vkErr(C.vkResetFences(funcs.vkResetFences, d, C.uint32_t(len(fences)), &fences[0]))
+ if err != nil {
+ return fmt.Errorf("vulkan: vkResetFences: %w", err)
+ }
+ return nil
+}
+
+func BuildSubpassDependency(srcStage, dstStage PipelineStageFlags, srcMask, dstMask AccessFlags, flags DependencyFlags) SubpassDependency {
+ return C.VkSubpassDependency{
+ srcSubpass: C.VK_SUBPASS_EXTERNAL,
+ srcStageMask: srcStage,
+ srcAccessMask: srcMask,
+ dstSubpass: 0,
+ dstStageMask: dstStage,
+ dstAccessMask: dstMask,
+ dependencyFlags: flags,
+ }
+}
+
+func BuildPushConstantRange(stages ShaderStageFlags, offset, size int) PushConstantRange {
+ return C.VkPushConstantRange{
+ stageFlags: stages,
+ offset: C.uint32_t(offset),
+ size: C.uint32_t(size),
+ }
+}
+
+func BuildDescriptorPoolSize(typ DescriptorType, count int) DescriptorPoolSize {
+ return C.VkDescriptorPoolSize{
+ _type: typ,
+ descriptorCount: C.uint32_t(count),
+ }
+}
+
+func BuildWriteDescriptorSetImage(set DescriptorSet, binding int, typ DescriptorType, sampler Sampler, view ImageView, layout ImageLayout) WriteDescriptorSet {
+ return C.VkWriteDescriptorSet{
+ sType: C.VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
+ dstSet: set,
+ dstBinding: C.uint32_t(binding),
+ descriptorCount: 1,
+ descriptorType: typ,
+ pImageInfo: &C.VkDescriptorImageInfo{
+ sampler: sampler,
+ imageView: view,
+ imageLayout: layout,
+ },
+ }
+}
+
+func BuildWriteDescriptorSetBuffer(set DescriptorSet, binding int, typ DescriptorType, buf Buffer) WriteDescriptorSet {
+ return C.VkWriteDescriptorSet{
+ sType: C.VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
+ dstSet: set,
+ dstBinding: C.uint32_t(binding),
+ descriptorCount: 1,
+ descriptorType: typ,
+ pBufferInfo: &C.VkDescriptorBufferInfo{
+ buffer: buf,
+ _range: C.VK_WHOLE_SIZE,
+ },
+ }
+}
+
+func (r PushConstantRange) StageFlags() ShaderStageFlags {
+ return r.stageFlags
+}
+
+func (r PushConstantRange) Offset() int {
+ return int(r.offset)
+}
+
+func (r PushConstantRange) Size() int {
+ return int(r.size)
+}
+
+func (p QueueFamilyProperties) Flags() QueueFlags {
+ return p.queueFlags
+}
+
+func (c SurfaceCapabilities) MinExtent() image.Point {
+ return image.Pt(int(c.minImageExtent.width), int(c.minImageExtent.height))
+}
+
+func (c SurfaceCapabilities) MaxExtent() image.Point {
+ return image.Pt(int(c.maxImageExtent.width), int(c.maxImageExtent.height))
+}
+
+func BuildViewport(x, y, width, height float32) Viewport {
+ return C.VkViewport{
+ x: C.float(x),
+ y: C.float(y),
+ width: C.float(width),
+ height: C.float(height),
+ maxDepth: 1.0,
+ }
+}
+
+func BuildImageMemoryBarrier(img Image, srcMask, dstMask AccessFlags, oldLayout, newLayout ImageLayout) ImageMemoryBarrier {
+ return C.VkImageMemoryBarrier{
+ sType: C.VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
+ srcAccessMask: srcMask,
+ dstAccessMask: dstMask,
+ oldLayout: oldLayout,
+ newLayout: newLayout,
+ image: img,
+ subresourceRange: C.VkImageSubresourceRange{
+ aspectMask: C.VK_IMAGE_ASPECT_COLOR_BIT,
+ levelCount: C.VK_REMAINING_MIP_LEVELS,
+ layerCount: C.VK_REMAINING_ARRAY_LAYERS,
+ },
+ }
+}
+
+func BuildBufferMemoryBarrier(buf Buffer, srcMask, dstMask AccessFlags) BufferMemoryBarrier {
+ return C.VkBufferMemoryBarrier{
+ sType: C.VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,
+ srcAccessMask: srcMask,
+ dstAccessMask: dstMask,
+ buffer: buf,
+ size: C.VK_WHOLE_SIZE,
+ }
+}
+
+func BuildMemoryBarrier(srcMask, dstMask AccessFlags) MemoryBarrier {
+ return C.VkMemoryBarrier{
+ sType: C.VK_STRUCTURE_TYPE_MEMORY_BARRIER,
+ srcAccessMask: srcMask,
+ dstAccessMask: dstMask,
+ }
+}
+
+func BuildBufferImageCopy(bufOff, bufStride, x, y, width, height int) BufferImageCopy {
+ return C.VkBufferImageCopy{
+ bufferOffset: C.VkDeviceSize(bufOff),
+ bufferRowLength: C.uint32_t(bufStride),
+ imageSubresource: C.VkImageSubresourceLayers{
+ aspectMask: C.VK_IMAGE_ASPECT_COLOR_BIT,
+ layerCount: 1,
+ },
+ imageOffset: C.VkOffset3D{
+ x: C.int32_t(x), y: C.int32_t(y), z: 0,
+ },
+ imageExtent: C.VkExtent3D{
+ width: C.uint32_t(width), height: C.uint32_t(height), depth: 1,
+ },
+ }
+}
+
+func BuildImageCopy(srcX, srcY, dstX, dstY, width, height int) ImageCopy {
+ return C.VkImageCopy{
+ srcSubresource: C.VkImageSubresourceLayers{
+ aspectMask: C.VK_IMAGE_ASPECT_COLOR_BIT,
+ layerCount: 1,
+ },
+ srcOffset: C.VkOffset3D{
+ x: C.int32_t(srcX),
+ y: C.int32_t(srcY),
+ },
+ dstSubresource: C.VkImageSubresourceLayers{
+ aspectMask: C.VK_IMAGE_ASPECT_COLOR_BIT,
+ layerCount: 1,
+ },
+ dstOffset: C.VkOffset3D{
+ x: C.int32_t(dstX),
+ y: C.int32_t(dstY),
+ },
+ extent: C.VkExtent3D{
+ width: C.uint32_t(width),
+ height: C.uint32_t(height),
+ depth: 1,
+ },
+ }
+}
+
+func findMemoryTypeIndex(pd C.VkPhysicalDevice, constraints C.uint32_t, wantProps C.VkMemoryPropertyFlags) (int, bool) {
+ var memProps C.VkPhysicalDeviceMemoryProperties
+ C.vkGetPhysicalDeviceMemoryProperties(funcs.vkGetPhysicalDeviceMemoryProperties, pd, &memProps)
+
+ for i := 0; i < int(memProps.memoryTypeCount); i++ {
+ if (constraints & (1 << i)) == 0 {
+ continue
+ }
+ if (memProps.memoryTypes[i].propertyFlags & wantProps) != wantProps {
+ continue
+ }
+ return i, true
+ }
+
+ return 0, false
+}
+
+func choosePresentMode(pd C.VkPhysicalDevice, surf Surface) (C.VkPresentModeKHR, bool, error) {
+ var count C.uint32_t
+ err := vkErr(C.vkGetPhysicalDeviceSurfacePresentModesKHR(funcs.vkGetPhysicalDeviceSurfacePresentModesKHR, pd, surf, &count, nil))
+ if err != nil {
+ return 0, false, fmt.Errorf("vulkan: vkGetPhysicalDeviceSurfacePresentModesKHR: %w", err)
+ }
+ if count == 0 {
+ return 0, false, nil
+ }
+ modes := make([]C.VkPresentModeKHR, count)
+ err = vkErr(C.vkGetPhysicalDeviceSurfacePresentModesKHR(funcs.vkGetPhysicalDeviceSurfacePresentModesKHR, pd, surf, &count, &modes[0]))
+ if err != nil {
+		return 0, false, fmt.Errorf("vulkan: vkGetPhysicalDeviceSurfacePresentModesKHR: %w", err)
+ }
+ for _, m := range modes {
+ if m == C.VK_PRESENT_MODE_MAILBOX_KHR || m == C.VK_PRESENT_MODE_FIFO_KHR {
+ return m, true, nil
+ }
+ }
+ return 0, false, nil
+}
+
+func chooseFormat(pd C.VkPhysicalDevice, surf Surface) (C.VkSurfaceFormatKHR, bool, error) {
+ var count C.uint32_t
+ err := vkErr(C.vkGetPhysicalDeviceSurfaceFormatsKHR(funcs.vkGetPhysicalDeviceSurfaceFormatsKHR, pd, surf, &count, nil))
+ if err != nil {
+ return C.VkSurfaceFormatKHR{}, false, fmt.Errorf("vulkan: vkGetPhysicalDeviceSurfaceFormatsKHR: %w", err)
+ }
+ if count == 0 {
+ return C.VkSurfaceFormatKHR{}, false, nil
+ }
+ formats := make([]C.VkSurfaceFormatKHR, count)
+ err = vkErr(C.vkGetPhysicalDeviceSurfaceFormatsKHR(funcs.vkGetPhysicalDeviceSurfaceFormatsKHR, pd, surf, &count, &formats[0]))
+ if err != nil {
+ return C.VkSurfaceFormatKHR{}, false, fmt.Errorf("vulkan: vkGetPhysicalDeviceSurfaceFormatsKHR: %w", err)
+ }
+ // Query for format with sRGB support.
+ // TODO: Support devices without sRGB.
+ for _, f := range formats {
+ if f.colorSpace != C.VK_COLOR_SPACE_SRGB_NONLINEAR_KHR {
+ continue
+ }
+ switch f.format {
+ case C.VK_FORMAT_B8G8R8A8_SRGB, C.VK_FORMAT_R8G8B8A8_SRGB:
+ return f, true, nil
+ }
+ }
+ return C.VkSurfaceFormatKHR{}, false, nil
+}
+
+func chooseQueue(pd C.VkPhysicalDevice, surf Surface, flags C.VkQueueFlags) (int, bool, error) {
+ queues := GetPhysicalDeviceQueueFamilyProperties(pd)
+ for i, q := range queues {
+ // Check for presentation and feature support.
+ if q.queueFlags&flags != flags {
+ continue
+ }
+ if surf != nilSurface {
+ // Check for presentation support. It is possible that a device has no
+ // queue with both rendering and presentation support, but not in reality.
+ // See https://github.com/KhronosGroup/Vulkan-Docs/issues/1234.
+ var support C.VkBool32
+ if err := vkErr(C.vkGetPhysicalDeviceSurfaceSupportKHR(funcs.vkGetPhysicalDeviceSurfaceSupportKHR, pd, C.uint32_t(i), surf, &support)); err != nil {
+ return 0, false, fmt.Errorf("vulkan: vkGetPhysicalDeviceSurfaceSupportKHR: %w", err)
+ }
+ if support != C.VK_TRUE {
+ continue
+ }
+ }
+ return i, true, nil
+ }
+ return 0, false, nil
+}
+
+func dlsym(handle unsafe.Pointer, s string) unsafe.Pointer {
+ cs := C.CString(s)
+ defer C.free(unsafe.Pointer(cs))
+ return C.dlsym(handle, cs)
+}
+
+func dlopen(lib string) unsafe.Pointer {
+ clib := C.CString(lib)
+ defer C.free(unsafe.Pointer(clib))
+ return C.dlopen(clib, C.RTLD_NOW|C.RTLD_LOCAL)
+}
+
+func vkErr(res C.VkResult) error {
+ switch res {
+ case C.VK_SUCCESS:
+ return nil
+ default:
+ return Error(res)
+ }
+}
+
+func (e Error) Error() string {
+ return fmt.Sprintf("error %d", e)
+}
diff --git a/vendor/gioui.org/internal/vk/vulkan_android.go b/vendor/gioui.org/internal/vk/vulkan_android.go
new file mode 100644
index 0000000..143146e
--- /dev/null
+++ b/vendor/gioui.org/internal/vk/vulkan_android.go
@@ -0,0 +1,45 @@
+// SPDX-License-Identifier: Unlicense OR MIT
+
+//go:build !nowayland
+// +build !nowayland
+
+package vk
+
+/*
+#define VK_USE_PLATFORM_ANDROID_KHR
+#define VK_NO_PROTOTYPES 1
+#define VK_DEFINE_NON_DISPATCHABLE_HANDLE(object) typedef uint64_t object;
+#include
+#include
+
+static VkResult vkCreateAndroidSurfaceKHR(PFN_vkCreateAndroidSurfaceKHR f, VkInstance instance, const VkAndroidSurfaceCreateInfoKHR *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
+ return f(instance, pCreateInfo, pAllocator, pSurface);
+}
+*/
+import "C"
+import (
+ "fmt"
+ "unsafe"
+)
+
+var wlFuncs struct {
+ vkCreateAndroidSurfaceKHR C.PFN_vkCreateAndroidSurfaceKHR
+}
+
+func init() {
+ loadFuncs = append(loadFuncs, func(dlopen func(name string) *[0]byte) {
+ wlFuncs.vkCreateAndroidSurfaceKHR = dlopen("vkCreateAndroidSurfaceKHR")
+ })
+}
+
+func CreateAndroidSurface(inst Instance, window unsafe.Pointer) (Surface, error) {
+ inf := C.VkAndroidSurfaceCreateInfoKHR{
+ sType: C.VK_STRUCTURE_TYPE_ANDROID_SURFACE_CREATE_INFO_KHR,
+ window: (*C.ANativeWindow)(window),
+ }
+ var surf Surface
+ if err := vkErr(C.vkCreateAndroidSurfaceKHR(wlFuncs.vkCreateAndroidSurfaceKHR, inst, &inf, nil, &surf)); err != nil {
+ return 0, fmt.Errorf("vulkan: vkCreateAndroidSurfaceKHR: %w", err)
+ }
+ return surf, nil
+}
diff --git a/vendor/gioui.org/internal/vk/vulkan_wayland.go b/vendor/gioui.org/internal/vk/vulkan_wayland.go
new file mode 100644
index 0000000..cb057bc
--- /dev/null
+++ b/vendor/gioui.org/internal/vk/vulkan_wayland.go
@@ -0,0 +1,46 @@
+// SPDX-License-Identifier: Unlicense OR MIT
+
+//go:build ((linux && !android) || freebsd) && !nowayland
+// +build linux,!android freebsd
+// +build !nowayland
+
+package vk
+
+/*
+#define VK_USE_PLATFORM_WAYLAND_KHR
+#define VK_NO_PROTOTYPES 1
+#define VK_DEFINE_NON_DISPATCHABLE_HANDLE(object) typedef uint64_t object;
+#include
+
+static VkResult vkCreateWaylandSurfaceKHR(PFN_vkCreateWaylandSurfaceKHR f, VkInstance instance, const VkWaylandSurfaceCreateInfoKHR *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
+ return f(instance, pCreateInfo, pAllocator, pSurface);
+}
+*/
+import "C"
+import (
+ "fmt"
+ "unsafe"
+)
+
+var wlFuncs struct {
+ vkCreateWaylandSurfaceKHR C.PFN_vkCreateWaylandSurfaceKHR
+}
+
+func init() {
+ loadFuncs = append(loadFuncs, func(dlopen func(name string) *[0]byte) {
+ wlFuncs.vkCreateWaylandSurfaceKHR = dlopen("vkCreateWaylandSurfaceKHR")
+ })
+}
+
+func CreateWaylandSurface(inst Instance, disp unsafe.Pointer, wlSurf unsafe.Pointer) (Surface, error) {
+ inf := C.VkWaylandSurfaceCreateInfoKHR{
+ sType: C.VK_STRUCTURE_TYPE_WAYLAND_SURFACE_CREATE_INFO_KHR,
+ display: (*C.struct_wl_display)(disp),
+ surface: (*C.struct_wl_surface)(wlSurf),
+ }
+ var surf Surface
+ if err := vkErr(C.vkCreateWaylandSurfaceKHR(wlFuncs.vkCreateWaylandSurfaceKHR, inst, &inf, nil, &surf)); err != nil {
+ return 0, fmt.Errorf("vulkan: vkCreateWaylandSurfaceKHR: %w", err)
+ }
+ return surf, nil
+}
diff --git a/vendor/gioui.org/internal/vk/vulkan_x11.go b/vendor/gioui.org/internal/vk/vulkan_x11.go
new file mode 100644
index 0000000..780a5d5
--- /dev/null
+++ b/vendor/gioui.org/internal/vk/vulkan_x11.go
@@ -0,0 +1,46 @@
+// SPDX-License-Identifier: Unlicense OR MIT
+
+//go:build ((linux && !android) || freebsd) && !nox11
+// +build linux,!android freebsd
+// +build !nox11
+
+package vk
+
+/*
+#define VK_USE_PLATFORM_XLIB_KHR
+#define VK_NO_PROTOTYPES 1
+#define VK_DEFINE_NON_DISPATCHABLE_HANDLE(object) typedef uint64_t object;
+#include
+
+static VkResult vkCreateXlibSurfaceKHR(PFN_vkCreateXlibSurfaceKHR f, VkInstance instance, const VkXlibSurfaceCreateInfoKHR *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
+ return f(instance, pCreateInfo, pAllocator, pSurface);
+}
+*/
+import "C"
+import (
+ "fmt"
+ "unsafe"
+)
+
+var x11Funcs struct {
+ vkCreateXlibSurfaceKHR C.PFN_vkCreateXlibSurfaceKHR
+}
+
+func init() {
+ loadFuncs = append(loadFuncs, func(dlopen func(name string) *[0]byte) {
+ x11Funcs.vkCreateXlibSurfaceKHR = dlopen("vkCreateXlibSurfaceKHR")
+ })
+}
+
+func CreateXlibSurface(inst Instance, dpy unsafe.Pointer, window uintptr) (Surface, error) {
+ inf := C.VkXlibSurfaceCreateInfoKHR{
+ sType: C.VK_STRUCTURE_TYPE_XLIB_SURFACE_CREATE_INFO_KHR,
+ dpy: (*C.Display)(dpy),
+ window: (C.Window)(window),
+ }
+ var surf Surface
+ if err := vkErr(C.vkCreateXlibSurfaceKHR(x11Funcs.vkCreateXlibSurfaceKHR, inst, &inf, nil, &surf)); err != nil {
+ return 0, fmt.Errorf("vulkan: vkCreateXlibSurfaceKHR: %w", err)
+ }
+ return surf, nil
+}
diff --git a/vendor/gioui.org/io/clipboard/clipboard.go b/vendor/gioui.org/io/clipboard/clipboard.go
new file mode 100644
index 0000000..ae4a435
--- /dev/null
+++ b/vendor/gioui.org/io/clipboard/clipboard.go
@@ -0,0 +1,37 @@
+// SPDX-License-Identifier: Unlicense OR MIT
+
+package clipboard
+
+import (
+ "gioui.org/internal/ops"
+ "gioui.org/io/event"
+ "gioui.org/op"
+)
+
+// Event is generated when the clipboard content is requested.
+type Event struct {
+ Text string
+}
+
+// ReadOp requests the text of the clipboard, delivered to
+// the current handler through an Event.
+type ReadOp struct {
+ Tag event.Tag
+}
+
+// WriteOp copies Text to the clipboard.
+type WriteOp struct {
+ Text string
+}
+
+func (h ReadOp) Add(o *op.Ops) {
+ data := ops.Write1(&o.Internal, ops.TypeClipboardReadLen, h.Tag)
+ data[0] = byte(ops.TypeClipboardRead)
+}
+
+func (h WriteOp) Add(o *op.Ops) {
+ data := ops.Write1(&o.Internal, ops.TypeClipboardWriteLen, &h.Text)
+ data[0] = byte(ops.TypeClipboardWrite)
+}
+
+func (Event) ImplementsEvent() {}
diff --git a/vendor/gioui.org/io/event/event.go b/vendor/gioui.org/io/event/event.go
new file mode 100644
index 0000000..08bfbd6
--- /dev/null
+++ b/vendor/gioui.org/io/event/event.go
@@ -0,0 +1,48 @@
+// SPDX-License-Identifier: Unlicense OR MIT
+
+/*
+Package event contains the types for event handling.
+
+The Queue interface is the protocol for receiving external events.
+
+For example:
+
+ var queue event.Queue = ...
+
+ for _, e := range queue.Events(h) {
+ switch e.(type) {
+ ...
+ }
+ }
+
+In general, handlers must be declared before events become
+available. Other packages such as pointer and key provide
+the means for declaring handlers for specific event types.
+
+The following example declares a handler ready for key input:
+
+ import gioui.org/io/key
+
+ ops := new(op.Ops)
+ var h *Handler = ...
+ key.InputOp{Tag: h}.Add(ops)
+
+*/
+package event
+
+// Queue maps an event handler key to the events
+// available to the handler.
+type Queue interface {
+ // Events returns the available events for an
+ // event handler tag.
+ Events(t Tag) []Event
+}
+
+// Tag is the stable identifier for an event handler.
+// For a handler h, the tag is typically &h.
+type Tag interface{}
+
+// Event is the marker interface for events.
+type Event interface {
+ ImplementsEvent()
+}
diff --git a/vendor/gioui.org/io/key/key.go b/vendor/gioui.org/io/key/key.go
new file mode 100644
index 0000000..7402eff
--- /dev/null
+++ b/vendor/gioui.org/io/key/key.go
@@ -0,0 +1,203 @@
+// SPDX-License-Identifier: Unlicense OR MIT
+
+/*
+Package key implements key and text events and operations.
+
+The InputOp operations is used for declaring key input handlers. Use
+an implementation of the Queue interface from package ui to receive
+events.
+*/
+package key
+
+import (
+ "fmt"
+ "strings"
+
+ "gioui.org/internal/ops"
+ "gioui.org/io/event"
+ "gioui.org/op"
+)
+
+// InputOp declares a handler ready for key events.
+// Key events are in general only delivered to the
+// focused key handler.
+type InputOp struct {
+ Tag event.Tag
+ Hint InputHint
+}
+
+// SoftKeyboardOp shows or hide the on-screen keyboard, if available.
+// It replaces any previous SoftKeyboardOp.
+type SoftKeyboardOp struct {
+ Show bool
+}
+
+// FocusOp sets or clears the keyboard focus. It replaces any previous
+// FocusOp in the same frame.
+type FocusOp struct {
+ // Tag is the new focus. The focus is cleared if Tag is nil, or if Tag
+ // has no InputOp in the same frame.
+ Tag event.Tag
+}
+
+// A FocusEvent is generated when a handler gains or loses
+// focus.
+type FocusEvent struct {
+ Focus bool
+}
+
+// An Event is generated when a key is pressed. For text input
+// use EditEvent.
+type Event struct {
+ // Name of the key. For letters, the upper case form is used, via
+ // unicode.ToUpper. The shift modifier is taken into account, all other
+ // modifiers are ignored. For example, the "shift-1" and "ctrl-shift-1"
+ // combinations both give the Name "!" with the US keyboard layout.
+ Name string
+ // Modifiers is the set of active modifiers when the key was pressed.
+ Modifiers Modifiers
+ // State is the state of the key when the event was fired.
+ State State
+}
+
+// An EditEvent is generated when text is input.
+type EditEvent struct {
+ Text string
+}
+
+// InputHint changes the on-screen-keyboard type. That hints the
+// type of data that might be entered by the user.
+type InputHint uint8
+
+const (
+ // HintAny hints that any input is expected.
+ HintAny InputHint = iota
+ // HintText hints that text input is expected. It may activate auto-correction and suggestions.
+ HintText
+ // HintNumeric hints that numeric input is expected. It may activate shortcuts for 0-9, "." and ",".
+ HintNumeric
+ // HintEmail hints that email input is expected. It may activate shortcuts for common email characters, such as "@" and ".com".
+ HintEmail
+ // HintURL hints that URL input is expected. It may activate shortcuts for common URL fragments such as "/" and ".com".
+ HintURL
+ // HintTelephone hints that telephone number input is expected. It may activate shortcuts for 0-9, "#" and "*".
+ HintTelephone
+)
+
+// State is the state of a key during an event.
+type State uint8
+
+const (
+ // Press is the state of a pressed key.
+ Press State = iota
+ // Release is the state of a key that has been released.
+ //
+ // Note: release events are only implemented on the following platforms:
+ // macOS, Linux, Windows, WebAssembly.
+ Release
+)
+
+// Modifiers
+type Modifiers uint32
+
+const (
+ // ModCtrl is the ctrl modifier key.
+ ModCtrl Modifiers = 1 << iota
+ // ModCommand is the command modifier key
+ // found on Apple keyboards.
+ ModCommand
+ // ModShift is the shift modifier key.
+ ModShift
+ // ModAlt is the alt modifier key, or the option
+ // key on Apple keyboards.
+ ModAlt
+ // ModSuper is the "logo" modifier key, often
+ // represented by a Windows logo.
+ ModSuper
+)
+
+const (
+ // Names for special keys.
+ NameLeftArrow = "←"
+ NameRightArrow = "→"
+ NameUpArrow = "↑"
+ NameDownArrow = "↓"
+ NameReturn = "⏎"
+ NameEnter = "⌤"
+ NameEscape = "⎋"
+ NameHome = "⇱"
+ NameEnd = "⇲"
+ NameDeleteBackward = "⌫"
+ NameDeleteForward = "⌦"
+ NamePageUp = "⇞"
+ NamePageDown = "⇟"
+ NameTab = "⇥"
+ NameSpace = "Space"
+)
+
+// Contain reports whether m contains all modifiers
+// in m2.
+func (m Modifiers) Contain(m2 Modifiers) bool {
+ return m&m2 == m2
+}
+
+func (h InputOp) Add(o *op.Ops) {
+ if h.Tag == nil {
+ panic("Tag must be non-nil")
+ }
+ data := ops.Write1(&o.Internal, ops.TypeKeyInputLen, h.Tag)
+ data[0] = byte(ops.TypeKeyInput)
+ data[1] = byte(h.Hint)
+}
+
+func (h SoftKeyboardOp) Add(o *op.Ops) {
+ data := ops.Write(&o.Internal, ops.TypeKeySoftKeyboardLen)
+ data[0] = byte(ops.TypeKeySoftKeyboard)
+ if h.Show {
+ data[1] = 1
+ }
+}
+
+func (h FocusOp) Add(o *op.Ops) {
+ data := ops.Write1(&o.Internal, ops.TypeKeyFocusLen, h.Tag)
+ data[0] = byte(ops.TypeKeyFocus)
+}
+
+func (EditEvent) ImplementsEvent() {}
+func (Event) ImplementsEvent() {}
+func (FocusEvent) ImplementsEvent() {}
+
+func (e Event) String() string {
+ return fmt.Sprintf("%v %v %v}", e.Name, e.Modifiers, e.State)
+}
+
+func (m Modifiers) String() string {
+ var strs []string
+ if m.Contain(ModCtrl) {
+ strs = append(strs, "ModCtrl")
+ }
+ if m.Contain(ModCommand) {
+ strs = append(strs, "ModCommand")
+ }
+ if m.Contain(ModShift) {
+ strs = append(strs, "ModShift")
+ }
+ if m.Contain(ModAlt) {
+ strs = append(strs, "ModAlt")
+ }
+ if m.Contain(ModSuper) {
+ strs = append(strs, "ModSuper")
+ }
+ return strings.Join(strs, "|")
+}
+
+func (s State) String() string {
+ switch s {
+ case Press:
+ return "Press"
+ case Release:
+ return "Release"
+ default:
+ panic("invalid State")
+ }
+}
diff --git a/vendor/gioui.org/io/key/mod.go b/vendor/gioui.org/io/key/mod.go
new file mode 100644
index 0000000..4b23d32
--- /dev/null
+++ b/vendor/gioui.org/io/key/mod.go
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: Unlicense OR MIT
+
+//go:build !darwin
+// +build !darwin
+
+package key
+
+// ModShortcut is the platform's shortcut modifier, usually the Ctrl
+// key. On Apple platforms it is the Cmd key.
+const ModShortcut = ModCtrl
diff --git a/vendor/gioui.org/io/key/mod_darwin.go b/vendor/gioui.org/io/key/mod_darwin.go
new file mode 100644
index 0000000..c0f1437
--- /dev/null
+++ b/vendor/gioui.org/io/key/mod_darwin.go
@@ -0,0 +1,7 @@
+// SPDX-License-Identifier: Unlicense OR MIT
+
+package key
+
+// ModShortcut is the platform's shortcut modifier, usually the Ctrl
+// key. On Apple platforms it is the Cmd key.
+const ModShortcut = ModCommand
diff --git a/vendor/gioui.org/io/pointer/doc.go b/vendor/gioui.org/io/pointer/doc.go
new file mode 100644
index 0000000..2f521c2
--- /dev/null
+++ b/vendor/gioui.org/io/pointer/doc.go
@@ -0,0 +1,135 @@
+// SPDX-License-Identifier: Unlicense OR MIT
+
+/*
+Package pointer implements pointer events and operations.
+A pointer is either a mouse controlled cursor or a touch
+object such as a finger.
+
+The InputOp operation is used to declare a handler ready for pointer
+events. Use an event.Queue to receive events.
+
+Types
+
+Only events that match a specified list of types are delivered to a handler.
+
+For example, to receive Press, Drag, and Release events (but not Move, Enter,
+Leave, or Scroll):
+
+ var ops op.Ops
+ var h *Handler = ...
+
+ pointer.InputOp{
+ Tag: h,
+ Types: pointer.Press | pointer.Drag | pointer.Release,
+ }.Add(ops)
+
+Cancel events are always delivered.
+
+Hit areas
+
+Clip operations from package op/clip are used for specifying
+hit areas where subsequent InputOps are active.
+
+For example, to set up a handler with a rectangular hit area:
+
+ r := image.Rectangle{...}
+ area := clip.Rect(r).Push(ops)
+ pointer.InputOp{Tag: h}.Add(ops)
+ area.Pop()
+
+Note that hit areas behave similar to painting: the effective area of a stack
+of multiple area operations is the intersection of the areas.
+
+BUG: Clip operations other than clip.Rect and clip.Ellipse are approximated
+with their bounding boxes.
+
+Matching events
+
+Areas form an implicit tree, with input handlers as leaves. The children of
+an area is every area and handler added between its Push and corresponding Pop.
+
+For example:
+
+ ops := new(op.Ops)
+ var h1, h2 *Handler
+
+ area := clip.Rect(...).Push(ops)
+ pointer.InputOp{Tag: h1}.Add(Ops)
+ area.Pop()
+
+ area := clip.Rect(...).Push(ops)
+ pointer.InputOp{Tag: h2}.Add(ops)
+ area.Pop()
+
+implies a tree of two inner nodes, each with one pointer handler attached.
+
+The matching proceeds as follows.
+
+First, the foremost area that contains the event is found. Only areas whose
+parent areas all contain the event is considered.
+
+Then, every handler attached to the area is matched with the event.
+
+If all attached handlers are marked pass-through or if no handlers are
+attached, the matching repeats with the next foremost (sibling) area. Otherwise
+the matching repeats with the parent area.
+
+In the example above, all events will go to h2 because it and h1 are siblings
+and none are pass-through.
+
+Pass-through
+
+The PassOp operations controls the pass-through setting. All handlers added
+inside one or more PassOp scopes are marked pass-through.
+
+Pass-through is useful for overlay widgets. Consider a hidden side drawer: when
+the user touches the side, both the (transparent) drawer handle and the
+interface below should receive pointer events. This effect is achieved by
+marking the drawer handle pass-through.
+
+Disambiguation
+
+When more than one handler matches a pointer event, the event queue
+follows a set of rules for distributing the event.
+
+As long as the pointer has not received a Press event, all
+matching handlers receive all events.
+
+When a pointer is pressed, the set of matching handlers is
+recorded. The set is not updated according to the pointer position
+and hit areas. Rather, handlers stay in the matching set until they
+no longer appear in a InputOp or when another handler in the set
+grabs the pointer.
+
+A handler can exclude all other handler from its matching sets
+by setting the Grab flag in its InputOp. The Grab flag is sticky
+and stays in effect until the handler no longer appears in any
+matching sets.
+
+The losing handlers are notified by a Cancel event.
+
+For multiple grabbing handlers, the foremost handler wins.
+
+Priorities
+
+Handlers know their position in a matching set of a pointer through
+event priorities. The Shared priority is for matching sets with
+multiple handlers; the Grabbed priority indicate exclusive access.
+
+Priorities are useful for deferred gesture matching.
+
+Consider a scrollable list of clickable elements. When the user touches an
+element, it is unknown whether the gesture is a click on the element
+or a drag (scroll) of the list. While the click handler might light up
+the element in anticipation of a click, the scrolling handler does not
+scroll on finger movements with lower than Grabbed priority.
+
+Should the user release the finger, the click handler registers a click.
+
+However, if the finger moves beyond a threshold, the scrolling handler
+determines that the gesture is a drag and sets its Grab flag. The
+click handler receives a Cancel (removing the highlight) and further
+movements for the scroll handler has priority Grabbed, scrolling the
+list.
+*/
+package pointer
diff --git a/vendor/gioui.org/io/pointer/pointer.go b/vendor/gioui.org/io/pointer/pointer.go
new file mode 100644
index 0000000..476878f
--- /dev/null
+++ b/vendor/gioui.org/io/pointer/pointer.go
@@ -0,0 +1,326 @@
+// SPDX-License-Identifier: Unlicense OR MIT
+
+package pointer
+
+import (
+ "encoding/binary"
+ "fmt"
+ "image"
+ "strings"
+ "time"
+
+ "gioui.org/f32"
+ "gioui.org/internal/ops"
+ "gioui.org/io/event"
+ "gioui.org/io/key"
+ "gioui.org/op"
+ "gioui.org/op/clip"
+)
+
+// Event is a pointer event.
+type Event struct {
+ Type Type
+ Source Source
+ // PointerID is the id for the pointer and can be used
+ // to track a particular pointer from Press to
+ // Release or Cancel.
+ PointerID ID
+ // Priority is the priority of the receiving handler
+ // for this event.
+ Priority Priority
+ // Time is when the event was received. The
+ // timestamp is relative to an undefined base.
+ Time time.Duration
+ // Buttons are the set of pressed mouse buttons for this event.
+ Buttons Buttons
+ // Position is the position of the event, relative to
+ // the current transformation, as set by op.TransformOp.
+ Position f32.Point
+ // Scroll is the scroll amount, if any.
+ Scroll f32.Point
+ // Modifiers is the set of active modifiers when
+ // the mouse button was pressed.
+ Modifiers key.Modifiers
+}
+
+// PassOp sets the pass-through mode. InputOps added while the pass-through
+// mode is set don't block events to siblings.
+type PassOp struct {
+}
+
+// PassStack represents a PassOp on the pass stack.
+type PassStack struct {
+ ops *ops.Ops
+ id ops.StackID
+ macroID int
+}
+
+// CursorNameOp sets the cursor for the current area.
+type CursorNameOp struct {
+ Name CursorName
+}
+
+// InputOp declares an input handler ready for pointer
+// events.
+type InputOp struct {
+ Tag event.Tag
+ // Grab, if set, request that the handler get
+ // Grabbed priority.
+ Grab bool
+ // Types is a bitwise-or of event types to receive.
+ Types Type
+ // ScrollBounds describe the maximum scrollable distances in both
+ // axes. Specifically, any Event e delivered to Tag will satisfy
+ //
+ // ScrollBounds.Min.X <= e.Scroll.X <= ScrollBounds.Max.X (horizontal axis)
+ // ScrollBounds.Min.Y <= e.Scroll.Y <= ScrollBounds.Max.Y (vertical axis)
+ ScrollBounds image.Rectangle
+}
+
+type ID uint16
+
+// Type of an Event.
+type Type uint
+
+// Priority of an Event.
+type Priority uint8
+
+// Source of an Event.
+type Source uint8
+
+// Buttons is a set of mouse buttons
+type Buttons uint8
+
+// CursorName is the name of a cursor.
+type CursorName string
+
+const (
+ // CursorDefault is the default cursor.
+ CursorDefault CursorName = ""
+ // CursorText is the cursor for text.
+ CursorText CursorName = "text"
+ // CursorPointer is the cursor for a link.
+ CursorPointer CursorName = "pointer"
+ // CursorCrossHair is the cursor for precise location.
+ CursorCrossHair CursorName = "crosshair"
+ // CursorColResize is the cursor for vertical resize.
+ CursorColResize CursorName = "col-resize"
+ // CursorRowResize is the cursor for horizontal resize.
+ CursorRowResize CursorName = "row-resize"
+ // CursorGrab is the cursor for moving object in any direction.
+ CursorGrab CursorName = "grab"
+ // CursorNone hides the cursor. To show it again, use any other cursor.
+ CursorNone CursorName = "none"
+)
+
+const (
+ // A Cancel event is generated when the current gesture is
+ // interrupted by other handlers or the system.
+ Cancel Type = (1 << iota) >> 1
+ // Press of a pointer.
+ Press
+ // Release of a pointer.
+ Release
+ // Move of a pointer.
+ Move
+ // Drag of a pointer.
+ Drag
+ // Pointer enters an area watching for pointer input
+ Enter
+ // Pointer leaves an area watching for pointer input
+ Leave
+ // Scroll of a pointer.
+ Scroll
+)
+
+const (
+ // Mouse generated event.
+ Mouse Source = iota
+ // Touch generated event.
+ Touch
+)
+
+const (
+ // Shared priority is for handlers that
+ // are part of a matching set larger than 1.
+ Shared Priority = iota
+ // Foremost priority is like Shared, but the
+ // handler is the foremost of the matching set.
+ Foremost
+ // Grabbed is used for matching sets of size 1.
+ Grabbed
+)
+
+const (
+ // ButtonPrimary is the primary button, usually the left button for a
+ // right-handed user.
+ ButtonPrimary Buttons = 1 << iota
+ // ButtonSecondary is the secondary button, usually the right button for a
+ // right-handed user.
+ ButtonSecondary
+ // ButtonTertiary is the tertiary button, usually the middle button.
+ ButtonTertiary
+)
+
+// Rect constructs a rectangular hit area.
+//
+// Deprecated: use clip.Rect instead.
+func Rect(size image.Rectangle) clip.Op {
+ return clip.Rect(size).Op()
+}
+
+// Ellipse constructs an ellipsoid hit area.
+//
+// Deprecated: use clip.Ellipse instead.
+func Ellipse(size image.Rectangle) clip.Ellipse {
+ return clip.Ellipse(frect(size))
+}
+
+// frect converts a rectangle to a f32.Rectangle.
+func frect(r image.Rectangle) f32.Rectangle {
+ return f32.Rectangle{
+ Min: fpt(r.Min), Max: fpt(r.Max),
+ }
+}
+
+// fpt converts an point to a f32.Point.
+func fpt(p image.Point) f32.Point {
+ return f32.Point{
+ X: float32(p.X), Y: float32(p.Y),
+ }
+}
+
+// Push the current pass mode to the pass stack and set the pass mode.
+func (p PassOp) Push(o *op.Ops) PassStack {
+ id, mid := ops.PushOp(&o.Internal, ops.PassStack)
+ data := ops.Write(&o.Internal, ops.TypePassLen)
+ data[0] = byte(ops.TypePass)
+ return PassStack{ops: &o.Internal, id: id, macroID: mid}
+}
+
+func (p PassStack) Pop() {
+ ops.PopOp(p.ops, ops.PassStack, p.id, p.macroID)
+ data := ops.Write(p.ops, ops.TypePopPassLen)
+ data[0] = byte(ops.TypePopPass)
+}
+
+func (op CursorNameOp) Add(o *op.Ops) {
+ data := ops.Write1(&o.Internal, ops.TypeCursorLen, op.Name)
+ data[0] = byte(ops.TypeCursor)
+}
+
+// Add panics if the scroll range does not contain zero.
+func (op InputOp) Add(o *op.Ops) {
+ if op.Tag == nil {
+ panic("Tag must be non-nil")
+ }
+ if b := op.ScrollBounds; b.Min.X > 0 || b.Max.X < 0 || b.Min.Y > 0 || b.Max.Y < 0 {
+ panic(fmt.Errorf("invalid scroll range value %v", b))
+ }
+ if op.Types>>16 > 0 {
+ panic(fmt.Errorf("value in Types overflows uint16"))
+ }
+ data := ops.Write1(&o.Internal, ops.TypePointerInputLen, op.Tag)
+ data[0] = byte(ops.TypePointerInput)
+ if op.Grab {
+ data[1] = 1
+ }
+ bo := binary.LittleEndian
+ bo.PutUint16(data[2:], uint16(op.Types))
+ bo.PutUint32(data[4:], uint32(op.ScrollBounds.Min.X))
+ bo.PutUint32(data[8:], uint32(op.ScrollBounds.Min.Y))
+ bo.PutUint32(data[12:], uint32(op.ScrollBounds.Max.X))
+ bo.PutUint32(data[16:], uint32(op.ScrollBounds.Max.Y))
+}
+
+func (t Type) String() string {
+ if t == Cancel {
+ return "Cancel"
+ }
+ var buf strings.Builder
+ for tt := Type(1); tt > 0; tt <<= 1 {
+ if t&tt > 0 {
+ if buf.Len() > 0 {
+ buf.WriteByte('|')
+ }
+ buf.WriteString((t & tt).string())
+ }
+ }
+ return buf.String()
+}
+
+func (t Type) string() string {
+ switch t {
+ case Press:
+ return "Press"
+ case Release:
+ return "Release"
+ case Cancel:
+ return "Cancel"
+ case Move:
+ return "Move"
+ case Drag:
+ return "Drag"
+ case Enter:
+ return "Enter"
+ case Leave:
+ return "Leave"
+ case Scroll:
+ return "Scroll"
+ default:
+ panic("unknown Type")
+ }
+}
+
+func (p Priority) String() string {
+ switch p {
+ case Shared:
+ return "Shared"
+ case Foremost:
+ return "Foremost"
+ case Grabbed:
+ return "Grabbed"
+ default:
+ panic("unknown priority")
+ }
+}
+
+func (s Source) String() string {
+ switch s {
+ case Mouse:
+ return "Mouse"
+ case Touch:
+ return "Touch"
+ default:
+ panic("unknown source")
+ }
+}
+
+// Contain reports whether the set b contains
+// all of the buttons.
+func (b Buttons) Contain(buttons Buttons) bool {
+ return b&buttons == buttons
+}
+
+func (b Buttons) String() string {
+ var strs []string
+ if b.Contain(ButtonPrimary) {
+ strs = append(strs, "ButtonPrimary")
+ }
+ if b.Contain(ButtonSecondary) {
+ strs = append(strs, "ButtonSecondary")
+ }
+ if b.Contain(ButtonTertiary) {
+ strs = append(strs, "ButtonTertiary")
+ }
+ return strings.Join(strs, "|")
+}
+
+func (c CursorName) String() string {
+ if c == CursorDefault {
+ return "default"
+ }
+ return string(c)
+}
+
+func (Event) ImplementsEvent() {}
diff --git a/vendor/gioui.org/io/profile/profile.go b/vendor/gioui.org/io/profile/profile.go
new file mode 100644
index 0000000..b9a4476
--- /dev/null
+++ b/vendor/gioui.org/io/profile/profile.go
@@ -0,0 +1,31 @@
+// SPDX-License-Identifier: Unlicense OR MIT
+
+// Package profiles provides access to rendering
+// profiles.
+package profile
+
+import (
+ "gioui.org/internal/ops"
+ "gioui.org/io/event"
+ "gioui.org/op"
+)
+
+// Op registers a handler for receiving
+// Events.
+type Op struct {
+ Tag event.Tag
+}
+
+// Event contains profile data from a single
+// rendered frame.
+type Event struct {
+ // Timings. Very likely to change.
+ Timings string
+}
+
+func (p Op) Add(o *op.Ops) {
+ data := ops.Write1(&o.Internal, ops.TypeProfileLen, p.Tag)
+ data[0] = byte(ops.TypeProfile)
+}
+
+func (p Event) ImplementsEvent() {}
diff --git a/vendor/gioui.org/io/router/clipboard.go b/vendor/gioui.org/io/router/clipboard.go
new file mode 100644
index 0000000..5f1623c
--- /dev/null
+++ b/vendor/gioui.org/io/router/clipboard.go
@@ -0,0 +1,57 @@
+// SPDX-License-Identifier: Unlicense OR MIT
+
+package router
+
+import (
+ "gioui.org/io/event"
+)
+
+type clipboardQueue struct {
+ receivers map[event.Tag]struct{}
+ // request avoid read clipboard every frame while waiting.
+ requested bool
+ text *string
+}
+
+// WriteClipboard returns the most recent text to be copied
+// to the clipboard, if any.
+func (q *clipboardQueue) WriteClipboard() (string, bool) {
+ if q.text == nil {
+ return "", false
+ }
+ text := *q.text
+ q.text = nil
+ return text, true
+}
+
+// ReadClipboard reports if any new handler is waiting
+// to read the clipboard.
+func (q *clipboardQueue) ReadClipboard() bool {
+ if len(q.receivers) == 0 || q.requested {
+ return false
+ }
+ q.requested = true
+ return true
+}
+
+func (q *clipboardQueue) Push(e event.Event, events *handlerEvents) {
+ for r := range q.receivers {
+ events.Add(r, e)
+ delete(q.receivers, r)
+ }
+}
+
+func (q *clipboardQueue) ProcessWriteClipboard(refs []interface{}) {
+ q.text = refs[0].(*string)
+}
+
+func (q *clipboardQueue) ProcessReadClipboard(refs []interface{}) {
+ if q.receivers == nil {
+ q.receivers = make(map[event.Tag]struct{})
+ }
+ tag := refs[0].(event.Tag)
+ if _, ok := q.receivers[tag]; !ok {
+ q.receivers[tag] = struct{}{}
+ q.requested = false
+ }
+}
diff --git a/vendor/gioui.org/io/router/key.go b/vendor/gioui.org/io/router/key.go
new file mode 100644
index 0000000..9fef7dc
--- /dev/null
+++ b/vendor/gioui.org/io/router/key.go
@@ -0,0 +1,143 @@
+// SPDX-License-Identifier: Unlicense OR MIT
+
+package router
+
+import (
+ "gioui.org/io/event"
+ "gioui.org/io/key"
+)
+
+type TextInputState uint8
+
+type keyQueue struct {
+ focus event.Tag
+ handlers map[event.Tag]*keyHandler
+ state TextInputState
+ hint key.InputHint
+}
+
+type keyHandler struct {
+ // visible will be true if the InputOp is present
+ // in the current frame.
+ visible bool
+ new bool
+ hint key.InputHint
+}
+
+// keyCollector tracks state required to update a keyQueue
+// from key ops.
+type keyCollector struct {
+ q *keyQueue
+ focus event.Tag
+ changed bool
+}
+
+const (
+ TextInputKeep TextInputState = iota
+ TextInputClose
+ TextInputOpen
+)
+
+// InputState returns the last text input state as
+// determined in Frame.
+func (q *keyQueue) InputState() TextInputState {
+ return q.state
+}
+
+// InputHint returns the input mode from the most recent key.InputOp.
+func (q *keyQueue) InputHint() (key.InputHint, bool) {
+ if q.focus == nil {
+ return q.hint, false
+ }
+ focused, ok := q.handlers[q.focus]
+ if !ok {
+ return q.hint, false
+ }
+ old := q.hint
+ q.hint = focused.hint
+ return q.hint, old != q.hint
+}
+
+func (q *keyQueue) Reset() {
+ if q.handlers == nil {
+ q.handlers = make(map[event.Tag]*keyHandler)
+ }
+ for _, h := range q.handlers {
+ h.visible, h.new = false, false
+ }
+ q.state = TextInputKeep
+}
+
+func (q *keyQueue) Frame(events *handlerEvents, collector keyCollector) {
+ for k, h := range q.handlers {
+ if !h.visible {
+ delete(q.handlers, k)
+ if q.focus == k {
+ // Remove the focus from the handler that is no longer visible.
+ q.focus = nil
+ q.state = TextInputClose
+ }
+ } else if h.new && k != collector.focus {
+ // Reset the handler on (each) first appearance, but don't trigger redraw.
+ events.AddNoRedraw(k, key.FocusEvent{Focus: false})
+ }
+ }
+ if collector.changed && collector.focus != nil {
+ if _, exists := q.handlers[collector.focus]; !exists {
+ collector.focus = nil
+ }
+ }
+ if collector.changed && collector.focus != q.focus {
+ if q.focus != nil {
+ events.Add(q.focus, key.FocusEvent{Focus: false})
+ }
+ q.focus = collector.focus
+ if q.focus != nil {
+ events.Add(q.focus, key.FocusEvent{Focus: true})
+ } else {
+ q.state = TextInputClose
+ }
+ }
+}
+
+func (q *keyQueue) Push(e event.Event, events *handlerEvents) {
+ if q.focus != nil {
+ events.Add(q.focus, e)
+ }
+}
+
+func (k *keyCollector) focusOp(tag event.Tag) {
+ k.focus = tag
+ k.changed = true
+}
+
+func (k *keyCollector) softKeyboard(show bool) {
+ if show {
+ k.q.state = TextInputOpen
+ } else {
+ k.q.state = TextInputClose
+ }
+}
+
+func (k *keyCollector) inputOp(op key.InputOp) {
+ h, ok := k.q.handlers[op.Tag]
+ if !ok {
+ h = &keyHandler{new: true}
+ k.q.handlers[op.Tag] = h
+ }
+ h.visible = true
+ h.hint = op.Hint
+}
+
+func (t TextInputState) String() string {
+ switch t {
+ case TextInputKeep:
+ return "Keep"
+ case TextInputClose:
+ return "Close"
+ case TextInputOpen:
+ return "Open"
+ default:
+ panic("unexpected value")
+ }
+}
diff --git a/vendor/gioui.org/io/router/pointer.go b/vendor/gioui.org/io/router/pointer.go
new file mode 100644
index 0000000..2cef1cd
--- /dev/null
+++ b/vendor/gioui.org/io/router/pointer.go
@@ -0,0 +1,721 @@
+// SPDX-License-Identifier: Unlicense OR MIT
+
+package router
+
+import (
+ "image"
+
+ "gioui.org/f32"
+ "gioui.org/internal/ops"
+ "gioui.org/io/event"
+ "gioui.org/io/pointer"
+ "gioui.org/io/semantic"
+)
+
+type pointerQueue struct {
+ hitTree []hitNode
+ areas []areaNode
+ cursors []cursorNode
+ cursor pointer.CursorName
+ handlers map[event.Tag]*pointerHandler
+ pointers []pointerInfo
+
+ scratch []event.Tag
+
+ semantic struct {
+ idsAssigned bool
+ lastID SemanticID
+ // contentIDs maps semantic content to a list of semantic IDs
+ // previously assigned. It is used to maintain stable IDs across
+ // frames.
+ contentIDs map[semanticContent][]semanticID
+ }
+}
+
+type hitNode struct {
+ next int
+ area int
+
+ // For handler nodes.
+ tag event.Tag
+ pass bool
+}
+
+type cursorNode struct {
+ name pointer.CursorName
+ area int
+}
+
+type pointerInfo struct {
+ id pointer.ID
+ pressed bool
+ handlers []event.Tag
+ // last tracks the last pointer event received,
+ // used while processing frame events.
+ last pointer.Event
+
+ // entered tracks the tags that contain the pointer.
+ entered []event.Tag
+}
+
+type pointerHandler struct {
+ area int
+ active bool
+ wantsGrab bool
+ types pointer.Type
+ // min and max horizontal/vertical scroll
+ scrollRange image.Rectangle
+}
+
+type areaOp struct {
+ kind areaKind
+ rect f32.Rectangle
+}
+
+type areaNode struct {
+ trans f32.Affine2D
+ area areaOp
+
+ // Tree indices, with -1 being the sentinel.
+ parent int
+ firstChild int
+ lastChild int
+ sibling int
+
+ semantic struct {
+ valid bool
+ id SemanticID
+ content semanticContent
+ }
+}
+
+type areaKind uint8
+
+// collectState represents the state for pointerCollector.
+type collectState struct {
+ t f32.Affine2D
+ // nodePlusOne is the current node index, plus one to
+ // make the zero value collectState the initial state.
+ nodePlusOne int
+ pass int
+}
+
+// pointerCollector tracks the state needed to update an pointerQueue
+// from pointer ops.
+type pointerCollector struct {
+ q *pointerQueue
+ state collectState
+ nodeStack []int
+}
+
+type semanticContent struct {
+ tag event.Tag
+ label string
+ desc string
+ class semantic.ClassOp
+ gestures SemanticGestures
+ selected bool
+ disabled bool
+}
+
+type semanticID struct {
+ id SemanticID
+ used bool
+}
+
+const (
+ areaRect areaKind = iota
+ areaEllipse
+)
+
+func (c *pointerCollector) resetState() {
+ c.state = collectState{}
+}
+
+func (c *pointerCollector) setTrans(t f32.Affine2D) {
+ c.state.t = t
+}
+
+func (c *pointerCollector) clip(op ops.ClipOp) {
+ kind := areaRect
+ if op.Shape == ops.Ellipse {
+ kind = areaEllipse
+ }
+ c.pushArea(kind, frect(op.Bounds))
+}
+
+func (c *pointerCollector) pushArea(kind areaKind, bounds f32.Rectangle) {
+ parentID := c.currentArea()
+ areaID := len(c.q.areas)
+ areaOp := areaOp{kind: kind, rect: bounds}
+ if parentID != -1 {
+ parent := &c.q.areas[parentID]
+ if parent.firstChild == -1 {
+ parent.firstChild = areaID
+ }
+ if siblingID := parent.lastChild; siblingID != -1 {
+ c.q.areas[siblingID].sibling = areaID
+ }
+ parent.lastChild = areaID
+ }
+ an := areaNode{
+ trans: c.state.t,
+ area: areaOp,
+ parent: parentID,
+ sibling: -1,
+ firstChild: -1,
+ lastChild: -1,
+ }
+
+ c.q.areas = append(c.q.areas, an)
+ c.nodeStack = append(c.nodeStack, c.state.nodePlusOne-1)
+ c.addHitNode(hitNode{
+ area: areaID,
+ pass: true,
+ })
+}
+
+// frect converts a rectangle to a f32.Rectangle.
+func frect(r image.Rectangle) f32.Rectangle {
+ return f32.Rectangle{
+ Min: fpt(r.Min), Max: fpt(r.Max),
+ }
+}
+
+// fpt converts an point to a f32.Point.
+func fpt(p image.Point) f32.Point {
+ return f32.Point{
+ X: float32(p.X), Y: float32(p.Y),
+ }
+}
+
+func (c *pointerCollector) popArea() {
+ n := len(c.nodeStack)
+ c.state.nodePlusOne = c.nodeStack[n-1] + 1
+ c.nodeStack = c.nodeStack[:n-1]
+}
+
+func (c *pointerCollector) pass() {
+ c.state.pass++
+}
+
+func (c *pointerCollector) popPass() {
+ c.state.pass--
+}
+
+func (c *pointerCollector) currentArea() int {
+ if i := c.state.nodePlusOne - 1; i != -1 {
+ n := c.q.hitTree[i]
+ return n.area
+ }
+ return -1
+}
+
+func (c *pointerCollector) addHitNode(n hitNode) {
+ n.next = c.state.nodePlusOne - 1
+ c.q.hitTree = append(c.q.hitTree, n)
+ c.state.nodePlusOne = len(c.q.hitTree) - 1 + 1
+}
+
+func (c *pointerCollector) inputOp(op pointer.InputOp, events *handlerEvents) {
+ areaID := c.currentArea()
+ area := &c.q.areas[areaID]
+ area.semantic.content.tag = op.Tag
+ if op.Types&(pointer.Press|pointer.Release) != 0 {
+ area.semantic.content.gestures |= ClickGesture
+ }
+ area.semantic.valid = area.semantic.content.gestures != 0
+ c.addHitNode(hitNode{
+ area: areaID,
+ tag: op.Tag,
+ pass: c.state.pass > 0,
+ })
+ h, ok := c.q.handlers[op.Tag]
+ if !ok {
+ h = new(pointerHandler)
+ c.q.handlers[op.Tag] = h
+ // Cancel handlers on (each) first appearance, but don't
+ // trigger redraw.
+ events.AddNoRedraw(op.Tag, pointer.Event{Type: pointer.Cancel})
+ }
+ h.active = true
+ h.area = areaID
+ h.wantsGrab = h.wantsGrab || op.Grab
+ h.types = h.types | op.Types
+ h.scrollRange = op.ScrollBounds
+}
+
+func (c *pointerCollector) semanticLabel(lbl string) {
+ areaID := c.currentArea()
+ area := &c.q.areas[areaID]
+ area.semantic.valid = true
+ area.semantic.content.label = lbl
+}
+
+func (c *pointerCollector) semanticDesc(desc string) {
+ areaID := c.currentArea()
+ area := &c.q.areas[areaID]
+ area.semantic.valid = true
+ area.semantic.content.desc = desc
+}
+
+func (c *pointerCollector) semanticClass(class semantic.ClassOp) {
+ areaID := c.currentArea()
+ area := &c.q.areas[areaID]
+ area.semantic.valid = true
+ area.semantic.content.class = class
+}
+
+func (c *pointerCollector) semanticSelected(selected bool) {
+ areaID := c.currentArea()
+ area := &c.q.areas[areaID]
+ area.semantic.valid = true
+ area.semantic.content.selected = selected
+}
+
+func (c *pointerCollector) semanticDisabled(disabled bool) {
+ areaID := c.currentArea()
+ area := &c.q.areas[areaID]
+ area.semantic.valid = true
+ area.semantic.content.disabled = disabled
+}
+
+func (c *pointerCollector) cursor(name pointer.CursorName) {
+ c.q.cursors = append(c.q.cursors, cursorNode{
+ name: name,
+ area: len(c.q.areas) - 1,
+ })
+}
+
+func (c *pointerCollector) reset(q *pointerQueue) {
+ q.reset()
+ c.resetState()
+ c.nodeStack = c.nodeStack[:0]
+ c.q = q
+ // Add implicit root area for semantic descriptions to hang onto.
+ c.pushArea(areaRect, f32.Rect(-1e6, -1e6, 1e6, 1e6))
+ // Make it semantic to ensure a single semantic root.
+ c.q.areas[0].semantic.valid = true
+}
+
+func (q *pointerQueue) assignSemIDs() {
+ if q.semantic.idsAssigned {
+ return
+ }
+ q.semantic.idsAssigned = true
+ for i, a := range q.areas {
+ if a.semantic.valid {
+ q.areas[i].semantic.id = q.semanticIDFor(a.semantic.content)
+ }
+ }
+}
+
+func (q *pointerQueue) AppendSemantics(nodes []SemanticNode) []SemanticNode {
+ q.assignSemIDs()
+ nodes = q.appendSemanticChildren(nodes, 0)
+ nodes = q.appendSemanticArea(nodes, 0, 0)
+ return nodes
+}
+
+func (q *pointerQueue) appendSemanticArea(nodes []SemanticNode, parentID SemanticID, nodeIdx int) []SemanticNode {
+ areaIdx := nodes[nodeIdx].areaIdx
+ a := q.areas[areaIdx]
+ childStart := len(nodes)
+ nodes = q.appendSemanticChildren(nodes, a.firstChild)
+ childEnd := len(nodes)
+ for i := childStart; i < childEnd; i++ {
+ nodes = q.appendSemanticArea(nodes, a.semantic.id, i)
+ }
+ n := &nodes[nodeIdx]
+ n.ParentID = parentID
+ n.Children = nodes[childStart:childEnd]
+ return nodes
+}
+
+func (q *pointerQueue) appendSemanticChildren(nodes []SemanticNode, areaIdx int) []SemanticNode {
+ if areaIdx == -1 {
+ return nodes
+ }
+ a := q.areas[areaIdx]
+ if semID := a.semantic.id; semID != 0 {
+ cnt := a.semantic.content
+ nodes = append(nodes, SemanticNode{
+ ID: semID,
+ Desc: SemanticDesc{
+ Bounds: f32.Rectangle{
+ Min: a.trans.Transform(a.area.rect.Min),
+ Max: a.trans.Transform(a.area.rect.Max),
+ },
+ Label: cnt.label,
+ Description: cnt.desc,
+ Class: cnt.class,
+ Gestures: cnt.gestures,
+ Selected: cnt.selected,
+ Disabled: cnt.disabled,
+ },
+ areaIdx: areaIdx,
+ })
+ } else {
+ nodes = q.appendSemanticChildren(nodes, a.firstChild)
+ }
+ return q.appendSemanticChildren(nodes, a.sibling)
+}
+
+func (q *pointerQueue) semanticIDFor(content semanticContent) SemanticID {
+ ids := q.semantic.contentIDs[content]
+ for i, id := range ids {
+ if !id.used {
+ ids[i].used = true
+ return id.id
+ }
+ }
+ // No prior assigned ID; allocate a new one.
+ q.semantic.lastID++
+ id := semanticID{id: q.semantic.lastID, used: true}
+ if q.semantic.contentIDs == nil {
+ q.semantic.contentIDs = make(map[semanticContent][]semanticID)
+ }
+ q.semantic.contentIDs[content] = append(q.semantic.contentIDs[content], id)
+ return id.id
+}
+
+func (q *pointerQueue) SemanticAt(pos f32.Point) (SemanticID, bool) {
+ q.assignSemIDs()
+ for i := len(q.hitTree) - 1; i >= 0; i-- {
+ n := &q.hitTree[i]
+ hit := q.hit(n.area, pos)
+ if !hit {
+ continue
+ }
+ area := q.areas[n.area]
+ if area.semantic.id != 0 {
+ return area.semantic.id, true
+ }
+ }
+ return 0, false
+}
+
+func (q *pointerQueue) opHit(handlers *[]event.Tag, pos f32.Point) {
+ // Track whether we're passing through hits.
+ pass := true
+ idx := len(q.hitTree) - 1
+ for idx >= 0 {
+ n := &q.hitTree[idx]
+ hit := q.hit(n.area, pos)
+ if !hit {
+ idx--
+ continue
+ }
+ pass = pass && n.pass
+ if pass {
+ idx--
+ } else {
+ idx = n.next
+ }
+ if n.tag != nil {
+ if _, exists := q.handlers[n.tag]; exists {
+ *handlers = addHandler(*handlers, n.tag)
+ }
+ }
+ }
+}
+
+func (q *pointerQueue) invTransform(areaIdx int, p f32.Point) f32.Point {
+ if areaIdx == -1 {
+ return p
+ }
+ return q.areas[areaIdx].trans.Invert().Transform(p)
+}
+
+func (q *pointerQueue) hit(areaIdx int, p f32.Point) bool {
+ for areaIdx != -1 {
+ a := &q.areas[areaIdx]
+ p := a.trans.Invert().Transform(p)
+ if !a.area.Hit(p) {
+ return false
+ }
+ areaIdx = a.parent
+ }
+ return true
+}
+
+func (q *pointerQueue) reset() {
+ if q.handlers == nil {
+ q.handlers = make(map[event.Tag]*pointerHandler)
+ }
+ for _, h := range q.handlers {
+ // Reset handler.
+ h.active = false
+ h.wantsGrab = false
+ h.types = 0
+ }
+ q.hitTree = q.hitTree[:0]
+ q.areas = q.areas[:0]
+ q.cursors = q.cursors[:0]
+ q.semantic.idsAssigned = false
+ for k, ids := range q.semantic.contentIDs {
+ for i := len(ids) - 1; i >= 0; i-- {
+ if !ids[i].used {
+ ids = append(ids[:i], ids[i+1:]...)
+ } else {
+ ids[i].used = false
+ }
+ }
+ if len(ids) > 0 {
+ q.semantic.contentIDs[k] = ids
+ } else {
+ delete(q.semantic.contentIDs, k)
+ }
+ }
+}
+
+func (q *pointerQueue) Frame(events *handlerEvents) {
+ for k, h := range q.handlers {
+ if !h.active {
+ q.dropHandler(nil, k)
+ delete(q.handlers, k)
+ }
+ if h.wantsGrab {
+ for _, p := range q.pointers {
+ if !p.pressed {
+ continue
+ }
+ for i, k2 := range p.handlers {
+ if k2 == k {
+ // Drop other handlers that lost their grab.
+ dropped := q.scratch[:0]
+ dropped = append(dropped, p.handlers[:i]...)
+ dropped = append(dropped, p.handlers[i+1:]...)
+ for _, tag := range dropped {
+ q.dropHandler(events, tag)
+ }
+ break
+ }
+ }
+ }
+ }
+ }
+ for i := range q.pointers {
+ p := &q.pointers[i]
+ q.deliverEnterLeaveEvents(p, events, p.last)
+ }
+}
+
+func (q *pointerQueue) dropHandler(events *handlerEvents, tag event.Tag) {
+ if events != nil {
+ events.Add(tag, pointer.Event{Type: pointer.Cancel})
+ }
+ for i := range q.pointers {
+ p := &q.pointers[i]
+ for i := len(p.handlers) - 1; i >= 0; i-- {
+ if p.handlers[i] == tag {
+ p.handlers = append(p.handlers[:i], p.handlers[i+1:]...)
+ }
+ }
+ for i := len(p.entered) - 1; i >= 0; i-- {
+ if p.entered[i] == tag {
+ p.entered = append(p.entered[:i], p.entered[i+1:]...)
+ }
+ }
+ }
+}
+
+// pointerOf returns the pointerInfo index corresponding to the pointer in e.
+func (q *pointerQueue) pointerOf(e pointer.Event) int {
+ for i, p := range q.pointers {
+ if p.id == e.PointerID {
+ return i
+ }
+ }
+ q.pointers = append(q.pointers, pointerInfo{id: e.PointerID})
+ return len(q.pointers) - 1
+}
+
+func (q *pointerQueue) Push(e pointer.Event, events *handlerEvents) {
+ if e.Type == pointer.Cancel {
+ q.pointers = q.pointers[:0]
+ for k := range q.handlers {
+ q.dropHandler(events, k)
+ }
+ return
+ }
+ pidx := q.pointerOf(e)
+ p := &q.pointers[pidx]
+ p.last = e
+
+ switch e.Type {
+ case pointer.Press:
+ q.deliverEnterLeaveEvents(p, events, e)
+ p.pressed = true
+ q.deliverEvent(p, events, e)
+ case pointer.Move:
+ if p.pressed {
+ e.Type = pointer.Drag
+ }
+ q.deliverEnterLeaveEvents(p, events, e)
+ q.deliverEvent(p, events, e)
+ case pointer.Release:
+ q.deliverEvent(p, events, e)
+ p.pressed = false
+ q.deliverEnterLeaveEvents(p, events, e)
+ case pointer.Scroll:
+ q.deliverEnterLeaveEvents(p, events, e)
+ q.deliverScrollEvent(p, events, e)
+ default:
+ panic("unsupported pointer event type")
+ }
+
+ if !p.pressed && len(p.entered) == 0 {
+ // No longer need to track pointer.
+ q.pointers = append(q.pointers[:pidx], q.pointers[pidx+1:]...)
+ }
+}
+
+func (q *pointerQueue) deliverEvent(p *pointerInfo, events *handlerEvents, e pointer.Event) {
+ foremost := true
+ if p.pressed && len(p.handlers) == 1 {
+ e.Priority = pointer.Grabbed
+ foremost = false
+ }
+ for _, k := range p.handlers {
+ h := q.handlers[k]
+ if e.Type&h.types == 0 {
+ continue
+ }
+ e := e
+ if foremost {
+ foremost = false
+ e.Priority = pointer.Foremost
+ }
+ e.Position = q.invTransform(h.area, e.Position)
+ events.Add(k, e)
+ }
+}
+
+func (q *pointerQueue) deliverScrollEvent(p *pointerInfo, events *handlerEvents, e pointer.Event) {
+ foremost := true
+ if p.pressed && len(p.handlers) == 1 {
+ e.Priority = pointer.Grabbed
+ foremost = false
+ }
+ var sx, sy = e.Scroll.X, e.Scroll.Y
+ for _, k := range p.handlers {
+ if sx == 0 && sy == 0 {
+ return
+ }
+ h := q.handlers[k]
+ // Distribute the scroll to the handler based on its ScrollRange.
+ sx, e.Scroll.X = setScrollEvent(sx, h.scrollRange.Min.X, h.scrollRange.Max.X)
+ sy, e.Scroll.Y = setScrollEvent(sy, h.scrollRange.Min.Y, h.scrollRange.Max.Y)
+ e := e
+ if foremost {
+ foremost = false
+ e.Priority = pointer.Foremost
+ }
+ e.Position = q.invTransform(h.area, e.Position)
+ events.Add(k, e)
+ }
+}
+
+func (q *pointerQueue) deliverEnterLeaveEvents(p *pointerInfo, events *handlerEvents, e pointer.Event) {
+ q.scratch = q.scratch[:0]
+ q.opHit(&q.scratch, e.Position)
+ if p.pressed {
+ // Filter out non-participating handlers.
+ for i := len(q.scratch) - 1; i >= 0; i-- {
+ if _, found := searchTag(p.handlers, q.scratch[i]); !found {
+ q.scratch = append(q.scratch[:i], q.scratch[i+1:]...)
+ }
+ }
+ } else {
+ p.handlers = append(p.handlers[:0], q.scratch...)
+ }
+ hits := q.scratch
+ if e.Source != pointer.Mouse && !p.pressed && e.Type != pointer.Press {
+ // Consider non-mouse pointers leaving when they're released.
+ hits = nil
+ }
+ // Deliver Leave events.
+ for _, k := range p.entered {
+ if _, found := searchTag(hits, k); found {
+ continue
+ }
+ h := q.handlers[k]
+ e.Type = pointer.Leave
+
+ if e.Type&h.types != 0 {
+ e.Position = q.invTransform(h.area, e.Position)
+ events.Add(k, e)
+ }
+ }
+ // Deliver Enter events and update cursor.
+ q.cursor = pointer.CursorDefault
+ for _, k := range hits {
+ h := q.handlers[k]
+ for i := len(q.cursors) - 1; i >= 0; i-- {
+ if c := q.cursors[i]; c.area == h.area {
+ q.cursor = c.name
+ break
+ }
+ }
+ if _, found := searchTag(p.entered, k); found {
+ continue
+ }
+ e.Type = pointer.Enter
+
+ if e.Type&h.types != 0 {
+ e.Position = q.invTransform(h.area, e.Position)
+ events.Add(k, e)
+ }
+ }
+ p.entered = append(p.entered[:0], hits...)
+}
+
+func searchTag(tags []event.Tag, tag event.Tag) (int, bool) {
+ for i, t := range tags {
+ if t == tag {
+ return i, true
+ }
+ }
+ return 0, false
+}
+
+// addHandler adds tag to the slice if not present.
+func addHandler(tags []event.Tag, tag event.Tag) []event.Tag {
+ for _, t := range tags {
+ if t == tag {
+ return tags
+ }
+ }
+ return append(tags, tag)
+}
+
+func (op *areaOp) Hit(pos f32.Point) bool {
+ pos = pos.Sub(op.rect.Min)
+ size := op.rect.Size()
+ switch op.kind {
+ case areaRect:
+ return 0 <= pos.X && pos.X < size.X &&
+ 0 <= pos.Y && pos.Y < size.Y
+ case areaEllipse:
+ rx := size.X / 2
+ ry := size.Y / 2
+ xh := pos.X - rx
+ yk := pos.Y - ry
+ // The ellipse function works in all cases because
+ // 0/0 is not <= 1.
+ return (xh*xh)/(rx*rx)+(yk*yk)/(ry*ry) <= 1
+ default:
+ panic("invalid area kind")
+ }
+}
+
+func setScrollEvent(scroll float32, min, max int) (left, scrolled float32) {
+ if v := float32(max); scroll > v {
+ return scroll - v, v
+ }
+ if v := float32(min); scroll < v {
+ return scroll - v, v
+ }
+ return 0, scroll
+}
diff --git a/vendor/gioui.org/io/router/router.go b/vendor/gioui.org/io/router/router.go
new file mode 100644
index 0000000..e6906a4
--- /dev/null
+++ b/vendor/gioui.org/io/router/router.go
@@ -0,0 +1,401 @@
+// SPDX-License-Identifier: Unlicense OR MIT
+
+/*
+Package router implements Router, a event.Queue implementation
+that that disambiguates and routes events to handlers declared
+in operation lists.
+
+Router is used by app.Window and is otherwise only useful for
+using Gio with external window implementations.
+*/
+package router
+
+import (
+ "encoding/binary"
+ "image"
+ "strings"
+ "time"
+
+ "gioui.org/f32"
+ "gioui.org/internal/ops"
+ "gioui.org/io/clipboard"
+ "gioui.org/io/event"
+ "gioui.org/io/key"
+ "gioui.org/io/pointer"
+ "gioui.org/io/profile"
+ "gioui.org/io/semantic"
+ "gioui.org/op"
+)
+
+// Router is a Queue implementation that routes events
+// to handlers declared in operation lists.
+type Router struct {
+ savedTrans []f32.Affine2D
+ transStack []f32.Affine2D
+ pointer struct {
+ queue pointerQueue
+ collector pointerCollector
+ }
+ key struct {
+ queue keyQueue
+ collector keyCollector
+ }
+ cqueue clipboardQueue
+
+ handlers handlerEvents
+
+ reader ops.Reader
+
+ // InvalidateOp summary.
+ wakeup bool
+ wakeupTime time.Time
+
+ // ProfileOp summary.
+ profHandlers map[event.Tag]struct{}
+ profile profile.Event
+}
+
+// SemanticNode represents a node in the tree describing the components
+// contained in a frame.
+type SemanticNode struct {
+ ID SemanticID
+ ParentID SemanticID
+ Children []SemanticNode
+ Desc SemanticDesc
+
+ areaIdx int
+}
+
+// SemanticDesc provides a semantic description of a UI component.
+type SemanticDesc struct {
+ Class semantic.ClassOp
+ Description string
+ Label string
+ Selected bool
+ Disabled bool
+ Gestures SemanticGestures
+ Bounds f32.Rectangle
+}
+
+// SemanticGestures is a bit-set of supported gestures.
+type SemanticGestures int
+
+const (
+ ClickGesture SemanticGestures = 1 << iota
+)
+
+// SemanticID uniquely identifies a SemanticDescription.
+//
+// By convention, the zero value denotes the non-existent ID.
+type SemanticID uint64
+
+type handlerEvents struct {
+ handlers map[event.Tag][]event.Event
+ hadEvents bool
+}
+
+// Events returns the available events for the handler key.
+func (q *Router) Events(k event.Tag) []event.Event {
+ events := q.handlers.Events(k)
+ if _, isprof := q.profHandlers[k]; isprof {
+ delete(q.profHandlers, k)
+ events = append(events, q.profile)
+ }
+ return events
+}
+
+// Frame replaces the declared handlers from the supplied
+// operation list. The text input state, wakeup time and whether
+// there are active profile handlers is also saved.
+func (q *Router) Frame(frame *op.Ops) {
+ q.handlers.Clear()
+ q.wakeup = false
+ for k := range q.profHandlers {
+ delete(q.profHandlers, k)
+ }
+ var ops *ops.Ops
+ if frame != nil {
+ ops = &frame.Internal
+ }
+ q.reader.Reset(ops)
+ q.collect()
+
+ q.pointer.queue.Frame(&q.handlers)
+ q.key.queue.Frame(&q.handlers, q.key.collector)
+ if q.handlers.HadEvents() {
+ q.wakeup = true
+ q.wakeupTime = time.Time{}
+ }
+}
+
+// Queue an event and report whether at least one handler had an event queued.
+func (q *Router) Queue(events ...event.Event) bool {
+ for _, e := range events {
+ switch e := e.(type) {
+ case profile.Event:
+ q.profile = e
+ case pointer.Event:
+ q.pointer.queue.Push(e, &q.handlers)
+ case key.EditEvent, key.Event, key.FocusEvent:
+ q.key.queue.Push(e, &q.handlers)
+ case clipboard.Event:
+ q.cqueue.Push(e, &q.handlers)
+ }
+ }
+ return q.handlers.HadEvents()
+}
+
+// TextInputState returns the input state from the most recent
+// call to Frame.
+func (q *Router) TextInputState() TextInputState {
+ return q.key.queue.InputState()
+}
+
+// TextInputHint returns the input mode from the most recent key.InputOp.
+func (q *Router) TextInputHint() (key.InputHint, bool) {
+ return q.key.queue.InputHint()
+}
+
+// WriteClipboard returns the most recent text to be copied
+// to the clipboard, if any.
+func (q *Router) WriteClipboard() (string, bool) {
+ return q.cqueue.WriteClipboard()
+}
+
+// ReadClipboard reports if any new handler is waiting
+// to read the clipboard.
+func (q *Router) ReadClipboard() bool {
+ return q.cqueue.ReadClipboard()
+}
+
+// Cursor returns the last cursor set.
+func (q *Router) Cursor() pointer.CursorName {
+ return q.pointer.queue.cursor
+}
+
+// SemanticAt returns the first semantic description under pos, if any.
+func (q *Router) SemanticAt(pos f32.Point) (SemanticID, bool) {
+ return q.pointer.queue.SemanticAt(pos)
+}
+
+// AppendSemantics appends the semantic tree to nodes, and returns the result.
+// The root node is the first added.
+func (q *Router) AppendSemantics(nodes []SemanticNode) []SemanticNode {
+ return q.pointer.queue.AppendSemantics(nodes)
+}
+
+func (q *Router) collect() {
+ q.transStack = q.transStack[:0]
+ pc := &q.pointer.collector
+ pc.reset(&q.pointer.queue)
+ kc := &q.key.collector
+ *kc = keyCollector{q: &q.key.queue}
+ q.key.queue.Reset()
+ var t f32.Affine2D
+ for encOp, ok := q.reader.Decode(); ok; encOp, ok = q.reader.Decode() {
+ switch ops.OpType(encOp.Data[0]) {
+ case ops.TypeInvalidate:
+ op := decodeInvalidateOp(encOp.Data)
+ if !q.wakeup || op.At.Before(q.wakeupTime) {
+ q.wakeup = true
+ q.wakeupTime = op.At
+ }
+ case ops.TypeProfile:
+ op := decodeProfileOp(encOp.Data, encOp.Refs)
+ if q.profHandlers == nil {
+ q.profHandlers = make(map[event.Tag]struct{})
+ }
+ q.profHandlers[op.Tag] = struct{}{}
+ case ops.TypeClipboardRead:
+ q.cqueue.ProcessReadClipboard(encOp.Refs)
+ case ops.TypeClipboardWrite:
+ q.cqueue.ProcessWriteClipboard(encOp.Refs)
+ case ops.TypeSave:
+ id := ops.DecodeSave(encOp.Data)
+ if extra := id - len(q.savedTrans) + 1; extra > 0 {
+ q.savedTrans = append(q.savedTrans, make([]f32.Affine2D, extra)...)
+ }
+ q.savedTrans[id] = t
+ case ops.TypeLoad:
+ id := ops.DecodeLoad(encOp.Data)
+ t = q.savedTrans[id]
+ pc.resetState()
+ pc.setTrans(t)
+
+ case ops.TypeClip:
+ var op ops.ClipOp
+ op.Decode(encOp.Data)
+ pc.clip(op)
+ case ops.TypePopClip:
+ pc.popArea()
+ case ops.TypeTransform:
+ t2, push := ops.DecodeTransform(encOp.Data)
+ if push {
+ q.transStack = append(q.transStack, t)
+ }
+ t = t.Mul(t2)
+ pc.setTrans(t)
+ case ops.TypePopTransform:
+ n := len(q.transStack)
+ t = q.transStack[n-1]
+ q.transStack = q.transStack[:n-1]
+ pc.setTrans(t)
+
+ // Pointer ops.
+ case ops.TypePass:
+ pc.pass()
+ case ops.TypePopPass:
+ pc.popPass()
+ case ops.TypePointerInput:
+ bo := binary.LittleEndian
+ op := pointer.InputOp{
+ Tag: encOp.Refs[0].(event.Tag),
+ Grab: encOp.Data[1] != 0,
+ Types: pointer.Type(bo.Uint16(encOp.Data[2:])),
+ ScrollBounds: image.Rectangle{
+ Min: image.Point{
+ X: int(int32(bo.Uint32(encOp.Data[4:]))),
+ Y: int(int32(bo.Uint32(encOp.Data[8:]))),
+ },
+ Max: image.Point{
+ X: int(int32(bo.Uint32(encOp.Data[12:]))),
+ Y: int(int32(bo.Uint32(encOp.Data[16:]))),
+ },
+ },
+ }
+ pc.inputOp(op, &q.handlers)
+ case ops.TypeCursor:
+ name := encOp.Refs[0].(pointer.CursorName)
+ pc.cursor(name)
+
+ // Key ops.
+ case ops.TypeKeyFocus:
+ tag, _ := encOp.Refs[0].(event.Tag)
+ op := key.FocusOp{
+ Tag: tag,
+ }
+ kc.focusOp(op.Tag)
+ case ops.TypeKeySoftKeyboard:
+ op := key.SoftKeyboardOp{
+ Show: encOp.Data[1] != 0,
+ }
+ kc.softKeyboard(op.Show)
+ case ops.TypeKeyInput:
+ op := key.InputOp{
+ Tag: encOp.Refs[0].(event.Tag),
+ Hint: key.InputHint(encOp.Data[1]),
+ }
+ kc.inputOp(op)
+
+ // Semantic ops.
+ case ops.TypeSemanticLabel:
+ lbl := encOp.Refs[0].(*string)
+ pc.semanticLabel(*lbl)
+ case ops.TypeSemanticDesc:
+ desc := encOp.Refs[0].(*string)
+ pc.semanticDesc(*desc)
+ case ops.TypeSemanticClass:
+ class := semantic.ClassOp(encOp.Data[1])
+ pc.semanticClass(class)
+ case ops.TypeSemanticSelected:
+ if encOp.Data[1] != 0 {
+ pc.semanticSelected(true)
+ } else {
+ pc.semanticSelected(false)
+ }
+ case ops.TypeSemanticDisabled:
+ if encOp.Data[1] != 0 {
+ pc.semanticDisabled(true)
+ } else {
+ pc.semanticDisabled(false)
+ }
+ }
+ }
+}
+
+// Profiling reports whether there was profile handlers in the
+// most recent Frame call.
+func (q *Router) Profiling() bool {
+ return len(q.profHandlers) > 0
+}
+
+// WakeupTime returns the most recent time for doing another frame,
+// as determined from the last call to Frame.
+func (q *Router) WakeupTime() (time.Time, bool) {
+ return q.wakeupTime, q.wakeup
+}
+
+func (h *handlerEvents) init() {
+ if h.handlers == nil {
+ h.handlers = make(map[event.Tag][]event.Event)
+ }
+}
+
+func (h *handlerEvents) AddNoRedraw(k event.Tag, e event.Event) {
+ h.init()
+ h.handlers[k] = append(h.handlers[k], e)
+}
+
+func (h *handlerEvents) Add(k event.Tag, e event.Event) {
+ h.AddNoRedraw(k, e)
+ h.hadEvents = true
+}
+
+func (h *handlerEvents) HadEvents() bool {
+ u := h.hadEvents
+ h.hadEvents = false
+ return u
+}
+
+func (h *handlerEvents) Events(k event.Tag) []event.Event {
+ if events, ok := h.handlers[k]; ok {
+ h.handlers[k] = h.handlers[k][:0]
+ // Schedule another frame if we delivered events to the user
+ // to flush half-updated state. This is important when an
+ // event changes UI state that has already been laid out. In
+ // the worst case, we waste a frame, increasing power usage.
+ //
+ // Gio is expected to grow the ability to construct
+ // frame-to-frame differences and only render to changed
+ // areas. In that case, the waste of a spurious frame should
+ // be minimal.
+ h.hadEvents = h.hadEvents || len(events) > 0
+ return events
+ }
+ return nil
+}
+
+func (h *handlerEvents) Clear() {
+ for k := range h.handlers {
+ delete(h.handlers, k)
+ }
+}
+
+func decodeProfileOp(d []byte, refs []interface{}) profile.Op {
+ if ops.OpType(d[0]) != ops.TypeProfile {
+ panic("invalid op")
+ }
+ return profile.Op{
+ Tag: refs[0].(event.Tag),
+ }
+}
+
+func decodeInvalidateOp(d []byte) op.InvalidateOp {
+ bo := binary.LittleEndian
+ if ops.OpType(d[0]) != ops.TypeInvalidate {
+ panic("invalid op")
+ }
+ var o op.InvalidateOp
+ if nanos := bo.Uint64(d[1:]); nanos > 0 {
+ o.At = time.Unix(0, int64(nanos))
+ }
+ return o
+}
+
+func (s SemanticGestures) String() string {
+ var gestures []string
+ if s&ClickGesture != 0 {
+ gestures = append(gestures, "Click")
+ }
+ return strings.Join(gestures, ",")
+}
diff --git a/vendor/gioui.org/io/system/system.go b/vendor/gioui.org/io/system/system.go
new file mode 100644
index 0000000..e145825
--- /dev/null
+++ b/vendor/gioui.org/io/system/system.go
@@ -0,0 +1,99 @@
+// SPDX-License-Identifier: Unlicense OR MIT
+
+// Package system contains events usually handled at the top-level
+// program level.
+package system
+
+import (
+ "image"
+ "time"
+
+ "gioui.org/io/event"
+ "gioui.org/op"
+ "gioui.org/unit"
+)
+
+// A FrameEvent requests a new frame in the form of a list of
+// operations that describes what to display and how to handle
+// input.
+type FrameEvent struct {
+ // Now is the current animation. Use Now instead of time.Now to
+ // synchronize animation and to avoid the time.Now call overhead.
+ Now time.Time
+ // Metric converts device independent dp and sp to device pixels.
+ Metric unit.Metric
+ // Size is the dimensions of the window.
+ Size image.Point
+ // Insets is the insets to apply.
+ Insets Insets
+ // Frame completes the FrameEvent by drawing the graphical operations
+ // from ops into the window.
+ Frame func(frame *op.Ops)
+ // Queue supplies the events for event handlers.
+ Queue event.Queue
+}
+
+// DestroyEvent is the last event sent through
+// a window event channel.
+type DestroyEvent struct {
+ // Err is nil for normal window closures. If a
+ // window is prematurely closed, Err is the cause.
+ Err error
+}
+
+// Insets is the space taken up by
+// system decoration such as translucent
+// system bars and software keyboards.
+type Insets struct {
+ Top, Bottom, Left, Right unit.Value
+}
+
+// A StageEvent is generated whenever the stage of a
+// Window changes.
+type StageEvent struct {
+ Stage Stage
+}
+
+// CommandEvent is a system event. Unlike most other events, CommandEvent is
+// delivered as a pointer to allow Cancel to suppress it.
+type CommandEvent struct {
+ Type CommandType
+ // Cancel suppress the default action of the command.
+ Cancel bool
+}
+
+// Stage of a Window.
+type Stage uint8
+
+// CommandType is the type of a CommandEvent.
+type CommandType uint8
+
+const (
+ // StagePaused is the Stage for inactive Windows.
+ // Inactive Windows don't receive FrameEvents.
+ StagePaused Stage = iota
+ // StateRunning is for active Windows.
+ StageRunning
+)
+
+const (
+ // CommandBack is the command for a back action
+ // such as the Android back button.
+ CommandBack CommandType = iota
+)
+
+func (l Stage) String() string {
+ switch l {
+ case StagePaused:
+ return "StagePaused"
+ case StageRunning:
+ return "StageRunning"
+ default:
+ panic("unexpected Stage value")
+ }
+}
+
+func (FrameEvent) ImplementsEvent() {}
+func (StageEvent) ImplementsEvent() {}
+func (*CommandEvent) ImplementsEvent() {}
+func (DestroyEvent) ImplementsEvent() {}
diff --git a/vendor/gioui.org/layout/context.go b/vendor/gioui.org/layout/context.go
new file mode 100644
index 0000000..5d31496
--- /dev/null
+++ b/vendor/gioui.org/layout/context.go
@@ -0,0 +1,92 @@
+// SPDX-License-Identifier: Unlicense OR MIT
+
+package layout
+
+import (
+ "time"
+
+ "gioui.org/f32"
+ "gioui.org/io/event"
+ "gioui.org/io/system"
+ "gioui.org/op"
+ "gioui.org/unit"
+)
+
+// Context carries the state needed by almost all layouts and widgets.
+// A zero value Context never returns events, map units to pixels
+// with a scale of 1.0, and returns the zero time from Now.
+type Context struct {
+ // Constraints track the constraints for the active widget or
+ // layout.
+ Constraints Constraints
+
+ Metric unit.Metric
+ // By convention, a nil Queue is a signal to widgets to draw themselves
+ // in a disabled state.
+ Queue event.Queue
+ // Now is the animation time.
+ Now time.Time
+
+ *op.Ops
+}
+
+// NewContext is a shorthand for
+//
+// Context{
+// Ops: ops,
+// Now: e.Now,
+// Queue: e.Queue,
+// Config: e.Config,
+// Constraints: Exact(e.Size),
+// }
+//
+// NewContext calls ops.Reset and adjusts ops for e.Insets.
+func NewContext(ops *op.Ops, e system.FrameEvent) Context {
+ ops.Reset()
+
+ size := e.Size
+
+ if e.Insets != (system.Insets{}) {
+ left := e.Metric.Px(e.Insets.Left)
+ top := e.Metric.Px(e.Insets.Top)
+ op.Offset(f32.Point{
+ X: float32(left),
+ Y: float32(top),
+ }).Add(ops)
+
+ size.X -= left + e.Metric.Px(e.Insets.Right)
+ size.Y -= top + e.Metric.Px(e.Insets.Bottom)
+ }
+
+ return Context{
+ Ops: ops,
+ Now: e.Now,
+ Queue: e.Queue,
+ Metric: e.Metric,
+ Constraints: Exact(size),
+ }
+}
+
+// Px maps the value to pixels.
+func (c Context) Px(v unit.Value) int {
+ return c.Metric.Px(v)
+}
+
+// Events returns the events available for the key. If no
+// queue is configured, Events returns nil.
+func (c Context) Events(k event.Tag) []event.Event {
+ if c.Queue == nil {
+ return nil
+ }
+ return c.Queue.Events(k)
+}
+
+// Disabled returns a copy of this context with a nil Queue,
+// blocking events to widgets using it.
+//
+// By convention, a nil Queue is a signal to widgets to draw themselves
+// in a disabled state.
+func (c Context) Disabled() Context {
+ c.Queue = nil
+ return c
+}
diff --git a/vendor/gioui.org/layout/doc.go b/vendor/gioui.org/layout/doc.go
new file mode 100644
index 0000000..3824084
--- /dev/null
+++ b/vendor/gioui.org/layout/doc.go
@@ -0,0 +1,50 @@
+// SPDX-License-Identifier: Unlicense OR MIT
+
+/*
+Package layout implements layouts common to GUI programs.
+
+Constraints and dimensions
+
+Constraints and dimensions form the interface between layouts and
+interface child elements. This package operates on Widgets, functions
+that compute Dimensions from a set of constraints for acceptable
+widths and heights. Both the constraints and dimensions are maintained
+in an implicit Context to keep the Widget declaration short.
+
+For example, to add space above a widget:
+
+ var gtx layout.Context
+
+ // Configure a top inset.
+ inset := layout.Inset{Top: unit.Dp(8), ...}
+ // Use the inset to lay out a widget.
+ inset.Layout(gtx, func(gtx layout.Context) layout.Dimensions {
+ // Lay out widget and determine its size given the constraints
+ // in gtx.Constraints.
+ ...
+ return layout.Dimensions{...}
+ })
+
+Note that the example does not generate any garbage even though the
+Inset is transient. Layouts that don't accept user input are designed
+to not escape to the heap during their use.
+
+Layout operations are recursive: a child in a layout operation can
+itself be another layout. That way, complex user interfaces can
+be created from a few generic layouts.
+
+This example both aligns and insets a child:
+
+ inset := layout.Inset{...}
+ inset.Layout(gtx, func(gtx layout.Context) layout.Dimensions {
+ align := layout.Alignment(...)
+ return align.Layout(gtx, func(gtx layout.Context) layout.Dimensions {
+ return widget.Layout(gtx, ...)
+ })
+ })
+
+More complex layouts such as Stack and Flex lay out multiple children,
+and stateful layouts such as List accept user input.
+
+*/
+package layout
diff --git a/vendor/gioui.org/layout/flex.go b/vendor/gioui.org/layout/flex.go
new file mode 100644
index 0000000..7d66f8f
--- /dev/null
+++ b/vendor/gioui.org/layout/flex.go
@@ -0,0 +1,239 @@
+// SPDX-License-Identifier: Unlicense OR MIT
+
+package layout
+
+import (
+ "image"
+
+ "gioui.org/op"
+)
+
+// Flex lays out child elements along an axis,
+// according to alignment and weights.
+type Flex struct {
+ // Axis is the main axis, either Horizontal or Vertical.
+ Axis Axis
+ // Spacing controls the distribution of space left after
+ // layout.
+ Spacing Spacing
+ // Alignment is the alignment in the cross axis.
+ Alignment Alignment
+ // WeightSum is the sum of weights used for the weighted
+ // size of Flexed children. If WeightSum is zero, the sum
+ // of all Flexed weights is used.
+ WeightSum float32
+}
+
+// FlexChild is the descriptor for a Flex child.
+type FlexChild struct {
+ flex bool
+ weight float32
+
+ widget Widget
+
+ // Scratch space.
+ call op.CallOp
+ dims Dimensions
+}
+
+// Spacing determines the spacing mode for a Flex.
+type Spacing uint8
+
+const (
+ // SpaceEnd leaves space at the end.
+ SpaceEnd Spacing = iota
+ // SpaceStart leaves space at the start.
+ SpaceStart
+ // SpaceSides shares space between the start and end.
+ SpaceSides
+ // SpaceAround distributes space evenly between children,
+ // with half as much space at the start and end.
+ SpaceAround
+ // SpaceBetween distributes space evenly between children,
+ // leaving no space at the start and end.
+ SpaceBetween
+ // SpaceEvenly distributes space evenly between children and
+ // at the start and end.
+ SpaceEvenly
+)
+
+// Rigid returns a Flex child with a maximal constraint of the
+// remaining space.
+func Rigid(widget Widget) FlexChild {
+ return FlexChild{
+ widget: widget,
+ }
+}
+
+// Flexed returns a Flex child forced to take up weight fraction of the
+// space left over from Rigid children. The fraction is weight
+// divided by either the weight sum of all Flexed children or the Flex
+// WeightSum if non zero.
+func Flexed(weight float32, widget Widget) FlexChild {
+ return FlexChild{
+ flex: true,
+ weight: weight,
+ widget: widget,
+ }
+}
+
+// Layout a list of children. The position of the children are
+// determined by the specified order, but Rigid children are laid out
+// before Flexed children.
+func (f Flex) Layout(gtx Context, children ...FlexChild) Dimensions {
+ size := 0
+ cs := gtx.Constraints
+ mainMin, mainMax := f.Axis.mainConstraint(cs)
+ crossMin, crossMax := f.Axis.crossConstraint(cs)
+ remaining := mainMax
+ var totalWeight float32
+ cgtx := gtx
+ // Lay out Rigid children.
+ for i, child := range children {
+ if child.flex {
+ totalWeight += child.weight
+ continue
+ }
+ macro := op.Record(gtx.Ops)
+ cgtx.Constraints = f.Axis.constraints(0, remaining, crossMin, crossMax)
+ dims := child.widget(cgtx)
+ c := macro.Stop()
+ sz := f.Axis.Convert(dims.Size).X
+ size += sz
+ remaining -= sz
+ if remaining < 0 {
+ remaining = 0
+ }
+ children[i].call = c
+ children[i].dims = dims
+ }
+ if w := f.WeightSum; w != 0 {
+ totalWeight = w
+ }
+ // fraction is the rounding error from a Flex weighting.
+ var fraction float32
+ flexTotal := remaining
+ // Lay out Flexed children.
+ for i, child := range children {
+ if !child.flex {
+ continue
+ }
+ var flexSize int
+ if remaining > 0 && totalWeight > 0 {
+ // Apply weight and add any leftover fraction from a
+ // previous Flexed.
+ childSize := float32(flexTotal) * child.weight / totalWeight
+ flexSize = int(childSize + fraction + .5)
+ fraction = childSize - float32(flexSize)
+ if flexSize > remaining {
+ flexSize = remaining
+ }
+ }
+ macro := op.Record(gtx.Ops)
+ cgtx.Constraints = f.Axis.constraints(flexSize, flexSize, crossMin, crossMax)
+ dims := child.widget(cgtx)
+ c := macro.Stop()
+ sz := f.Axis.Convert(dims.Size).X
+ size += sz
+ remaining -= sz
+ if remaining < 0 {
+ remaining = 0
+ }
+ children[i].call = c
+ children[i].dims = dims
+ }
+ var maxCross int
+ var maxBaseline int
+ for _, child := range children {
+ if c := f.Axis.Convert(child.dims.Size).Y; c > maxCross {
+ maxCross = c
+ }
+ if b := child.dims.Size.Y - child.dims.Baseline; b > maxBaseline {
+ maxBaseline = b
+ }
+ }
+ var space int
+ if mainMin > size {
+ space = mainMin - size
+ }
+ var mainSize int
+ switch f.Spacing {
+ case SpaceSides:
+ mainSize += space / 2
+ case SpaceStart:
+ mainSize += space
+ case SpaceEvenly:
+ mainSize += space / (1 + len(children))
+ case SpaceAround:
+ if len(children) > 0 {
+ mainSize += space / (len(children) * 2)
+ }
+ }
+ for i, child := range children {
+ dims := child.dims
+ b := dims.Size.Y - dims.Baseline
+ var cross int
+ switch f.Alignment {
+ case End:
+ cross = maxCross - f.Axis.Convert(dims.Size).Y
+ case Middle:
+ cross = (maxCross - f.Axis.Convert(dims.Size).Y) / 2
+ case Baseline:
+ if f.Axis == Horizontal {
+ cross = maxBaseline - b
+ }
+ }
+ pt := f.Axis.Convert(image.Pt(mainSize, cross))
+ trans := op.Offset(FPt(pt)).Push(gtx.Ops)
+ child.call.Add(gtx.Ops)
+ trans.Pop()
+ mainSize += f.Axis.Convert(dims.Size).X
+ if i < len(children)-1 {
+ switch f.Spacing {
+ case SpaceEvenly:
+ mainSize += space / (1 + len(children))
+ case SpaceAround:
+ if len(children) > 0 {
+ mainSize += space / len(children)
+ }
+ case SpaceBetween:
+ if len(children) > 1 {
+ mainSize += space / (len(children) - 1)
+ }
+ }
+ }
+ }
+ switch f.Spacing {
+ case SpaceSides:
+ mainSize += space / 2
+ case SpaceEnd:
+ mainSize += space
+ case SpaceEvenly:
+ mainSize += space / (1 + len(children))
+ case SpaceAround:
+ if len(children) > 0 {
+ mainSize += space / (len(children) * 2)
+ }
+ }
+ sz := f.Axis.Convert(image.Pt(mainSize, maxCross))
+ return Dimensions{Size: sz, Baseline: sz.Y - maxBaseline}
+}
+
+func (s Spacing) String() string {
+ switch s {
+ case SpaceEnd:
+ return "SpaceEnd"
+ case SpaceStart:
+ return "SpaceStart"
+ case SpaceSides:
+ return "SpaceSides"
+ case SpaceAround:
+ return "SpaceAround"
+ case SpaceBetween:
+ return "SpaceBetween"
+ case SpaceEvenly:
+ return "SpaceEvenly"
+ default:
+ panic("unreachable")
+ }
+}
diff --git a/vendor/gioui.org/layout/layout.go b/vendor/gioui.org/layout/layout.go
new file mode 100644
index 0000000..2318c22
--- /dev/null
+++ b/vendor/gioui.org/layout/layout.go
@@ -0,0 +1,321 @@
+// SPDX-License-Identifier: Unlicense OR MIT
+
+package layout
+
+import (
+ "image"
+
+ "gioui.org/f32"
+ "gioui.org/op"
+ "gioui.org/unit"
+)
+
+// Constraints represent the minimum and maximum size of a widget.
+//
+// A widget does not have to treat its constraints as "hard". For
+// example, if it's passed a constraint with a minimum size that's
+// smaller than its actual minimum size, it should return its minimum
+// size dimensions instead. Parent widgets should deal appropriately
+// with child widgets that return dimensions that do not fit their
+// constraints (for example, by clipping).
+type Constraints struct {
+ Min, Max image.Point
+}
+
+// Dimensions are the resolved size and baseline for a widget.
+//
+// Baseline is the distance from the bottom of a widget to the baseline of
+// any text it contains (or 0). The purpose is to be able to align text
+// that spans multiple widgets.
+type Dimensions struct {
+ Size image.Point
+ Baseline int
+}
+
+// Axis is the Horizontal or Vertical direction.
+type Axis uint8
+
+// Alignment is the mutual alignment of a list of widgets.
+type Alignment uint8
+
+// Direction is the alignment of widgets relative to a containing
+// space.
+type Direction uint8
+
+// Widget is a function scope for drawing, processing events and
+// computing dimensions for a user interface element.
+type Widget func(gtx Context) Dimensions
+
+const (
+ Start Alignment = iota
+ End
+ Middle
+ Baseline
+)
+
+const (
+ NW Direction = iota
+ N
+ NE
+ E
+ SE
+ S
+ SW
+ W
+ Center
+)
+
+const (
+ Horizontal Axis = iota
+ Vertical
+)
+
+// Exact returns the Constraints with the minimum and maximum size
+// set to size.
+func Exact(size image.Point) Constraints {
+ return Constraints{
+ Min: size, Max: size,
+ }
+}
+
+// FPt converts an image.Point to a f32.Point.
+func FPt(p image.Point) f32.Point {
+ return f32.Point{
+ X: float32(p.X), Y: float32(p.Y),
+ }
+}
+
+// FRect converts a rectangle to a f32.Rectangle.
+func FRect(r image.Rectangle) f32.Rectangle {
+ return f32.Rectangle{
+ Min: FPt(r.Min), Max: FPt(r.Max),
+ }
+}
+
+// Constrain a size so each dimension is in the range [min;max].
+func (c Constraints) Constrain(size image.Point) image.Point {
+ if min := c.Min.X; size.X < min {
+ size.X = min
+ }
+ if min := c.Min.Y; size.Y < min {
+ size.Y = min
+ }
+ if max := c.Max.X; size.X > max {
+ size.X = max
+ }
+ if max := c.Max.Y; size.Y > max {
+ size.Y = max
+ }
+ return size
+}
+
+// Inset adds space around a widget by decreasing its maximum
+// constraints. The minimum constraints will be adjusted to ensure
+// they do not exceed the maximum.
+type Inset struct {
+ Top, Right, Bottom, Left unit.Value
+}
+
+// Layout a widget.
+func (in Inset) Layout(gtx Context, w Widget) Dimensions {
+ top := gtx.Px(in.Top)
+ right := gtx.Px(in.Right)
+ bottom := gtx.Px(in.Bottom)
+ left := gtx.Px(in.Left)
+ mcs := gtx.Constraints
+ mcs.Max.X -= left + right
+ if mcs.Max.X < 0 {
+ left = 0
+ right = 0
+ mcs.Max.X = 0
+ }
+ if mcs.Min.X > mcs.Max.X {
+ mcs.Min.X = mcs.Max.X
+ }
+ mcs.Max.Y -= top + bottom
+ if mcs.Max.Y < 0 {
+ bottom = 0
+ top = 0
+ mcs.Max.Y = 0
+ }
+ if mcs.Min.Y > mcs.Max.Y {
+ mcs.Min.Y = mcs.Max.Y
+ }
+ gtx.Constraints = mcs
+ trans := op.Offset(FPt(image.Point{X: left, Y: top})).Push(gtx.Ops)
+ dims := w(gtx)
+ trans.Pop()
+ return Dimensions{
+ Size: dims.Size.Add(image.Point{X: right + left, Y: top + bottom}),
+ Baseline: dims.Baseline + bottom,
+ }
+}
+
+// UniformInset returns an Inset with a single inset applied to all
+// edges.
+func UniformInset(v unit.Value) Inset {
+ return Inset{Top: v, Right: v, Bottom: v, Left: v}
+}
+
+// Layout a widget according to the direction.
+// The widget is called with the context constraints minimum cleared.
+func (d Direction) Layout(gtx Context, w Widget) Dimensions {
+ macro := op.Record(gtx.Ops)
+ csn := gtx.Constraints.Min
+ switch d {
+ case N, S:
+ gtx.Constraints.Min.Y = 0
+ case E, W:
+ gtx.Constraints.Min.X = 0
+ default:
+ gtx.Constraints.Min = image.Point{}
+ }
+ dims := w(gtx)
+ call := macro.Stop()
+ sz := dims.Size
+ if sz.X < csn.X {
+ sz.X = csn.X
+ }
+ if sz.Y < csn.Y {
+ sz.Y = csn.Y
+ }
+
+ p := d.Position(dims.Size, sz)
+ defer op.Offset(FPt(p)).Push(gtx.Ops).Pop()
+ call.Add(gtx.Ops)
+
+ return Dimensions{
+ Size: sz,
+ Baseline: dims.Baseline + sz.Y - dims.Size.Y - p.Y,
+ }
+}
+
+// Position calculates widget position according to the direction.
+func (d Direction) Position(widget, bounds image.Point) image.Point {
+ var p image.Point
+
+ switch d {
+ case N, S, Center:
+ p.X = (bounds.X - widget.X) / 2
+ case NE, SE, E:
+ p.X = bounds.X - widget.X
+ }
+
+ switch d {
+ case W, Center, E:
+ p.Y = (bounds.Y - widget.Y) / 2
+ case SW, S, SE:
+ p.Y = bounds.Y - widget.Y
+ }
+
+ return p
+}
+
+// Spacer adds space between widgets.
+type Spacer struct {
+ Width, Height unit.Value
+}
+
+func (s Spacer) Layout(gtx Context) Dimensions {
+ return Dimensions{
+ Size: image.Point{
+ X: gtx.Px(s.Width),
+ Y: gtx.Px(s.Height),
+ },
+ }
+}
+
+func (a Alignment) String() string {
+ switch a {
+ case Start:
+ return "Start"
+ case End:
+ return "End"
+ case Middle:
+ return "Middle"
+ case Baseline:
+ return "Baseline"
+ default:
+ panic("unreachable")
+ }
+}
+
+// Convert a point in (x, y) coordinates to (main, cross) coordinates,
+// or vice versa. Specifically, Convert((x, y)) returns (x, y) unchanged
+// for the horizontal axis, or (y, x) for the vertical axis.
+func (a Axis) Convert(pt image.Point) image.Point {
+ if a == Horizontal {
+ return pt
+ }
+ return image.Pt(pt.Y, pt.X)
+}
+
+// FConvert a point in (x, y) coordinates to (main, cross) coordinates,
+// or vice versa. Specifically, FConvert((x, y)) returns (x, y) unchanged
+// for the horizontal axis, or (y, x) for the vertical axis.
+func (a Axis) FConvert(pt f32.Point) f32.Point {
+ if a == Horizontal {
+ return pt
+ }
+ return f32.Pt(pt.Y, pt.X)
+}
+
+// mainConstraint returns the min and max main constraints for axis a.
+func (a Axis) mainConstraint(cs Constraints) (int, int) {
+ if a == Horizontal {
+ return cs.Min.X, cs.Max.X
+ }
+ return cs.Min.Y, cs.Max.Y
+}
+
+// crossConstraint returns the min and max cross constraints for axis a.
+func (a Axis) crossConstraint(cs Constraints) (int, int) {
+ if a == Horizontal {
+ return cs.Min.Y, cs.Max.Y
+ }
+ return cs.Min.X, cs.Max.X
+}
+
+// constraints returns the constraints for axis a.
+func (a Axis) constraints(mainMin, mainMax, crossMin, crossMax int) Constraints {
+ if a == Horizontal {
+ return Constraints{Min: image.Pt(mainMin, crossMin), Max: image.Pt(mainMax, crossMax)}
+ }
+ return Constraints{Min: image.Pt(crossMin, mainMin), Max: image.Pt(crossMax, mainMax)}
+}
+
+func (a Axis) String() string {
+ switch a {
+ case Horizontal:
+ return "Horizontal"
+ case Vertical:
+ return "Vertical"
+ default:
+ panic("unreachable")
+ }
+}
+
+func (d Direction) String() string {
+ switch d {
+ case NW:
+ return "NW"
+ case N:
+ return "N"
+ case NE:
+ return "NE"
+ case E:
+ return "E"
+ case SE:
+ return "SE"
+ case S:
+ return "S"
+ case SW:
+ return "SW"
+ case W:
+ return "W"
+ case Center:
+ return "Center"
+ default:
+ panic("unreachable")
+ }
+}
diff --git a/vendor/gioui.org/layout/list.go b/vendor/gioui.org/layout/list.go
new file mode 100644
index 0000000..ea68c57
--- /dev/null
+++ b/vendor/gioui.org/layout/list.go
@@ -0,0 +1,320 @@
+// SPDX-License-Identifier: Unlicense OR MIT
+
+package layout
+
+import (
+ "image"
+
+ "gioui.org/gesture"
+ "gioui.org/op"
+ "gioui.org/op/clip"
+)
+
+type scrollChild struct {
+ size image.Point
+ call op.CallOp
+}
+
+// List displays a subsection of a potentially infinitely
+// large underlying list. List accepts user input to scroll
+// the subsection.
+type List struct {
+ Axis Axis
+ // ScrollToEnd instructs the list to stay scrolled to the far end position
+ // once reached. A List with ScrollToEnd == true and Position.BeforeEnd ==
+ // false draws its content with the last item at the bottom of the list
+ // area.
+ ScrollToEnd bool
+ // Alignment is the cross axis alignment of list elements.
+ Alignment Alignment
+
+ cs Constraints
+ scroll gesture.Scroll
+ scrollDelta int
+
+ // Position is updated during Layout. To save the list scroll position,
+ // just save Position after Layout finishes. To scroll the list
+ // programmatically, update Position (e.g. restore it from a saved value)
+ // before calling Layout.
+ Position Position
+
+ len int
+
+ // maxSize is the total size of visible children.
+ maxSize int
+ children []scrollChild
+ dir iterationDir
+}
+
+// ListElement is a function that computes the dimensions of
+// a list element.
+type ListElement func(gtx Context, index int) Dimensions
+
+type iterationDir uint8
+
+// Position is a List scroll offset represented as an offset from the top edge
+// of a child element.
+type Position struct {
+ // BeforeEnd tracks whether the List position is before the very end. We
+ // use "before end" instead of "at end" so that the zero value of a
+ // Position struct is useful.
+ //
+ // When laying out a list, if ScrollToEnd is true and BeforeEnd is false,
+ // then First and Offset are ignored, and the list is drawn with the last
+ // item at the bottom. If ScrollToEnd is false then BeforeEnd is ignored.
+ BeforeEnd bool
+ // First is the index of the first visible child.
+ First int
+ // Offset is the distance in pixels from the top edge to the child at index
+ // First.
+ Offset int
+ // OffsetLast is the signed distance in pixels from the bottom edge to the
+ // bottom edge of the child at index First+Count.
+ OffsetLast int
+ // Count is the number of visible children.
+ Count int
+ // Length is the estimated total size of all children, measured in pixels.
+ Length int
+}
+
+const (
+ iterateNone iterationDir = iota
+ iterateForward
+ iterateBackward
+)
+
+const inf = 1e6
+
+// init prepares the list for iterating through its children with next.
+func (l *List) init(gtx Context, len int) {
+ if l.more() {
+ panic("unfinished child")
+ }
+ l.cs = gtx.Constraints
+ l.maxSize = 0
+ l.children = l.children[:0]
+ l.len = len
+ l.update(gtx)
+ if l.scrollToEnd() || l.Position.First > len {
+ l.Position.Offset = 0
+ l.Position.First = len
+ }
+}
+
+// Layout the List.
+func (l *List) Layout(gtx Context, len int, w ListElement) Dimensions {
+ l.init(gtx, len)
+ crossMin, crossMax := l.Axis.crossConstraint(gtx.Constraints)
+ gtx.Constraints = l.Axis.constraints(0, inf, crossMin, crossMax)
+ macro := op.Record(gtx.Ops)
+ laidOutTotalLength := 0
+ numLaidOut := 0
+
+ for l.next(); l.more(); l.next() {
+ child := op.Record(gtx.Ops)
+ dims := w(gtx, l.index())
+ call := child.Stop()
+ l.end(dims, call)
+ laidOutTotalLength += l.Axis.Convert(dims.Size).X
+ numLaidOut++
+ }
+
+ if numLaidOut > 0 {
+ l.Position.Length = laidOutTotalLength * len / numLaidOut
+ } else {
+ l.Position.Length = 0
+ }
+ return l.layout(gtx.Ops, macro)
+}
+
+func (l *List) scrollToEnd() bool {
+ return l.ScrollToEnd && !l.Position.BeforeEnd
+}
+
+// Dragging reports whether the List is being dragged.
+func (l *List) Dragging() bool {
+ return l.scroll.State() == gesture.StateDragging
+}
+
+func (l *List) update(gtx Context) {
+ d := l.scroll.Scroll(gtx.Metric, gtx, gtx.Now, gesture.Axis(l.Axis))
+ l.scrollDelta = d
+ l.Position.Offset += d
+}
+
+// next advances to the next child.
+func (l *List) next() {
+ l.dir = l.nextDir()
+ // The user scroll offset is applied after scrolling to
+ // list end.
+ if l.scrollToEnd() && !l.more() && l.scrollDelta < 0 {
+ l.Position.BeforeEnd = true
+ l.Position.Offset += l.scrollDelta
+ l.dir = l.nextDir()
+ }
+}
+
+// index is current child's position in the underlying list.
+func (l *List) index() int {
+ switch l.dir {
+ case iterateBackward:
+ return l.Position.First - 1
+ case iterateForward:
+ return l.Position.First + len(l.children)
+ default:
+ panic("Index called before Next")
+ }
+}
+
+// more reports whether more children are needed.
+func (l *List) more() bool {
+ return l.dir != iterateNone
+}
+
+func (l *List) nextDir() iterationDir {
+ _, vsize := l.Axis.mainConstraint(l.cs)
+ last := l.Position.First + len(l.children)
+ // Clamp offset.
+ if l.maxSize-l.Position.Offset < vsize && last == l.len {
+ l.Position.Offset = l.maxSize - vsize
+ }
+ if l.Position.Offset < 0 && l.Position.First == 0 {
+ l.Position.Offset = 0
+ }
+ switch {
+ case len(l.children) == l.len:
+ return iterateNone
+ case l.maxSize-l.Position.Offset < vsize:
+ return iterateForward
+ case l.Position.Offset < 0:
+ return iterateBackward
+ }
+ return iterateNone
+}
+
+// End the current child by specifying its dimensions.
+func (l *List) end(dims Dimensions, call op.CallOp) {
+ child := scrollChild{dims.Size, call}
+ mainSize := l.Axis.Convert(child.size).X
+ l.maxSize += mainSize
+ switch l.dir {
+ case iterateForward:
+ l.children = append(l.children, child)
+ case iterateBackward:
+ l.children = append(l.children, scrollChild{})
+ copy(l.children[1:], l.children)
+ l.children[0] = child
+ l.Position.First--
+ l.Position.Offset += mainSize
+ default:
+ panic("call Next before End")
+ }
+ l.dir = iterateNone
+}
+
+// Layout the List and return its dimensions.
+func (l *List) layout(ops *op.Ops, macro op.MacroOp) Dimensions {
+ if l.more() {
+ panic("unfinished child")
+ }
+ mainMin, mainMax := l.Axis.mainConstraint(l.cs)
+ children := l.children
+ // Skip invisible children
+ for len(children) > 0 {
+ sz := children[0].size
+ mainSize := l.Axis.Convert(sz).X
+ if l.Position.Offset < mainSize {
+ // First child is partially visible.
+ break
+ }
+ l.Position.First++
+ l.Position.Offset -= mainSize
+ children = children[1:]
+ }
+ size := -l.Position.Offset
+ var maxCross int
+ for i, child := range children {
+ sz := l.Axis.Convert(child.size)
+ if c := sz.Y; c > maxCross {
+ maxCross = c
+ }
+ size += sz.X
+ if size >= mainMax {
+ children = children[:i+1]
+ break
+ }
+ }
+ l.Position.Count = len(children)
+ l.Position.OffsetLast = mainMax - size
+ pos := -l.Position.Offset
+ // ScrollToEnd lists are end aligned.
+ if space := l.Position.OffsetLast; l.ScrollToEnd && space > 0 {
+ pos += space
+ }
+ for _, child := range children {
+ sz := l.Axis.Convert(child.size)
+ var cross int
+ switch l.Alignment {
+ case End:
+ cross = maxCross - sz.Y
+ case Middle:
+ cross = (maxCross - sz.Y) / 2
+ }
+ childSize := sz.X
+ max := childSize + pos
+ if max > mainMax {
+ max = mainMax
+ }
+ min := pos
+ if min < 0 {
+ min = 0
+ }
+ r := image.Rectangle{
+ Min: l.Axis.Convert(image.Pt(min, -inf)),
+ Max: l.Axis.Convert(image.Pt(max, inf)),
+ }
+ cl := clip.Rect(r).Push(ops)
+ pt := l.Axis.Convert(image.Pt(pos, cross))
+ trans := op.Offset(FPt(pt)).Push(ops)
+ child.call.Add(ops)
+ trans.Pop()
+ cl.Pop()
+ pos += childSize
+ }
+ atStart := l.Position.First == 0 && l.Position.Offset <= 0
+ atEnd := l.Position.First+len(children) == l.len && mainMax >= pos
+ if atStart && l.scrollDelta < 0 || atEnd && l.scrollDelta > 0 {
+ l.scroll.Stop()
+ }
+ l.Position.BeforeEnd = !atEnd
+ if pos < mainMin {
+ pos = mainMin
+ }
+ if pos > mainMax {
+ pos = mainMax
+ }
+ dims := l.Axis.Convert(image.Pt(pos, maxCross))
+ call := macro.Stop()
+ defer clip.Rect(image.Rectangle{Max: dims}).Push(ops).Pop()
+
+ var min, max int
+ if o := l.Position.Offset; o > 0 {
+ // Use the size of the invisible part as scroll boundary.
+ min = -o
+ } else if l.Position.First > 0 {
+ min = -inf
+ }
+ if o := l.Position.OffsetLast; o < 0 {
+ max = -o
+ } else if l.Position.First+l.Position.Count < l.len {
+ max = inf
+ }
+ scrollRange := image.Rectangle{
+ Min: l.Axis.Convert(image.Pt(min, 0)),
+ Max: l.Axis.Convert(image.Pt(max, 0)),
+ }
+ l.scroll.Add(ops, scrollRange)
+
+ call.Add(ops)
+ return Dimensions{Size: dims}
+}
diff --git a/vendor/gioui.org/layout/stack.go b/vendor/gioui.org/layout/stack.go
new file mode 100644
index 0000000..fb8b8ac
--- /dev/null
+++ b/vendor/gioui.org/layout/stack.go
@@ -0,0 +1,120 @@
+// SPDX-License-Identifier: Unlicense OR MIT
+
+package layout
+
+import (
+ "image"
+
+ "gioui.org/op"
+)
+
+// Stack lays out child elements on top of each other,
+// according to an alignment direction.
+type Stack struct {
+ // Alignment is the direction to align children
+ // smaller than the available space.
+ Alignment Direction
+}
+
+// StackChild represents a child for a Stack layout.
+type StackChild struct {
+ expanded bool
+ widget Widget
+
+ // Scratch space.
+ call op.CallOp
+ dims Dimensions
+}
+
+// Stacked returns a Stack child that is laid out with no minimum
+// constraints and the maximum constraints passed to Stack.Layout.
+func Stacked(w Widget) StackChild {
+ return StackChild{
+ widget: w,
+ }
+}
+
+// Expanded returns a Stack child with the minimum constraints set
+// to the largest Stacked child. The maximum constraints are set to
+// the same as passed to Stack.Layout.
+func Expanded(w Widget) StackChild {
+ return StackChild{
+ expanded: true,
+ widget: w,
+ }
+}
+
+// Layout a stack of children. The position of the children are
+// determined by the specified order, but Stacked children are laid out
+// before Expanded children.
+func (s Stack) Layout(gtx Context, children ...StackChild) Dimensions {
+ var maxSZ image.Point
+ // First lay out Stacked children.
+ cgtx := gtx
+ cgtx.Constraints.Min = image.Point{}
+ for i, w := range children {
+ if w.expanded {
+ continue
+ }
+ macro := op.Record(gtx.Ops)
+ dims := w.widget(cgtx)
+ call := macro.Stop()
+ if w := dims.Size.X; w > maxSZ.X {
+ maxSZ.X = w
+ }
+ if h := dims.Size.Y; h > maxSZ.Y {
+ maxSZ.Y = h
+ }
+ children[i].call = call
+ children[i].dims = dims
+ }
+ // Then lay out Expanded children.
+ for i, w := range children {
+ if !w.expanded {
+ continue
+ }
+ macro := op.Record(gtx.Ops)
+ cgtx.Constraints.Min = maxSZ
+ dims := w.widget(cgtx)
+ call := macro.Stop()
+ if w := dims.Size.X; w > maxSZ.X {
+ maxSZ.X = w
+ }
+ if h := dims.Size.Y; h > maxSZ.Y {
+ maxSZ.Y = h
+ }
+ children[i].call = call
+ children[i].dims = dims
+ }
+
+ maxSZ = gtx.Constraints.Constrain(maxSZ)
+ var baseline int
+ for _, ch := range children {
+ sz := ch.dims.Size
+ var p image.Point
+ switch s.Alignment {
+ case N, S, Center:
+ p.X = (maxSZ.X - sz.X) / 2
+ case NE, SE, E:
+ p.X = maxSZ.X - sz.X
+ }
+ switch s.Alignment {
+ case W, Center, E:
+ p.Y = (maxSZ.Y - sz.Y) / 2
+ case SW, S, SE:
+ p.Y = maxSZ.Y - sz.Y
+ }
+ trans := op.Offset(FPt(p)).Push(gtx.Ops)
+ ch.call.Add(gtx.Ops)
+ trans.Pop()
+ if baseline == 0 {
+ if b := ch.dims.Baseline; b != 0 {
+ baseline = b + maxSZ.Y - sz.Y - p.Y
+ }
+ }
+ }
+ return Dimensions{
+ Size: maxSZ,
+ Baseline: baseline,
+ }
+}
diff --git a/vendor/gioui.org/op/clip/clip.go b/vendor/gioui.org/op/clip/clip.go
new file mode 100644
index 0000000..ed8ef17
--- /dev/null
+++ b/vendor/gioui.org/op/clip/clip.go
@@ -0,0 +1,341 @@
+// SPDX-License-Identifier: Unlicense OR MIT
+
+package clip
+
+import (
+ "encoding/binary"
+ "hash/maphash"
+ "image"
+ "math"
+
+ "gioui.org/f32"
+ "gioui.org/internal/ops"
+ "gioui.org/internal/scene"
+ "gioui.org/internal/stroke"
+ "gioui.org/op"
+)
+
+// Op represents a clip area. Op intersects the current clip area with
+// itself.
+type Op struct {
+ path PathSpec
+
+ outline bool
+ width float32
+}
+
+// Stack represents an Op pushed on the clip stack.
+type Stack struct {
+ ops *ops.Ops
+ id ops.StackID
+ macroID int
+}
+
+var pathSeed maphash.Seed
+
+func init() {
+ pathSeed = maphash.MakeSeed()
+}
+
+// Push saves the current clip state on the stack and updates the current
+// state to the intersection of the current clip area and p.
+func (p Op) Push(o *op.Ops) Stack {
+ id, macroID := ops.PushOp(&o.Internal, ops.ClipStack)
+ p.add(o)
+ return Stack{ops: &o.Internal, id: id, macroID: macroID}
+}
+
+func (p Op) add(o *op.Ops) {
+ path := p.path
+
+ bo := binary.LittleEndian
+ if path.hasSegments {
+ data := ops.Write(&o.Internal, ops.TypePathLen)
+ data[0] = byte(ops.TypePath)
+ bo.PutUint64(data[1:], path.hash)
+ path.spec.Add(o)
+ }
+
+ bounds := path.bounds
+ if p.width > 0 {
+ // Expand bounds to cover stroke.
+ half := int(p.width*.5 + .5)
+ bounds.Min.X -= half
+ bounds.Min.Y -= half
+ bounds.Max.X += half
+ bounds.Max.Y += half
+ data := ops.Write(&o.Internal, ops.TypeStrokeLen)
+ data[0] = byte(ops.TypeStroke)
+ bo := binary.LittleEndian
+ bo.PutUint32(data[1:], math.Float32bits(p.width))
+ }
+
+ data := ops.Write(&o.Internal, ops.TypeClipLen)
+ data[0] = byte(ops.TypeClip)
+ bo.PutUint32(data[1:], uint32(bounds.Min.X))
+ bo.PutUint32(data[5:], uint32(bounds.Min.Y))
+ bo.PutUint32(data[9:], uint32(bounds.Max.X))
+ bo.PutUint32(data[13:], uint32(bounds.Max.Y))
+ if p.outline {
+ data[17] = byte(1)
+ }
+ data[18] = byte(path.shape)
+}
+
+func (s Stack) Pop() {
+ ops.PopOp(s.ops, ops.ClipStack, s.id, s.macroID)
+ data := ops.Write(s.ops, ops.TypePopClipLen)
+ data[0] = byte(ops.TypePopClip)
+}
+
+type PathSpec struct {
+ spec op.CallOp
+ // open is true if any path contour is not closed. A closed contour starts
+ // and ends in the same point.
+ open bool
+ // hasSegments tracks whether there are any segments in the path.
+ hasSegments bool
+ bounds image.Rectangle
+ shape ops.Shape
+ hash uint64
+}
+
+// Path constructs a Op clip path described by lines and
+// Bézier curves, where drawing outside the Path is discarded.
+// The inside-ness of a pixel is determined by the non-zero winding rule,
+// similar to the SVG rule of the same name.
+//
+// Path generates no garbage and can be used for dynamic paths; path
+// data is stored directly in the Ops list supplied to Begin.
+type Path struct {
+ ops *ops.Ops
+ open bool
+ contour int
+ pen f32.Point
+ macro op.MacroOp
+ start f32.Point
+ hasSegments bool
+ bounds f32.Rectangle
+ hash maphash.Hash
+}
+
+// Pos returns the current pen position.
+func (p *Path) Pos() f32.Point { return p.pen }
+
+// Begin the path, storing the path data and final Op into ops.
+func (p *Path) Begin(o *op.Ops) {
+ *p = Path{
+ ops: &o.Internal,
+ macro: op.Record(o),
+ contour: 1,
+ }
+ p.hash.SetSeed(pathSeed)
+ data := ops.Write(p.ops, ops.TypeAuxLen)
+ data[0] = byte(ops.TypeAux)
+}
+
+// End returns a PathSpec ready to use in clipping operations.
+func (p *Path) End() PathSpec {
+ c := p.macro.Stop()
+ return PathSpec{
+ spec: c,
+ open: p.open || p.pen != p.start,
+ hasSegments: p.hasSegments,
+ bounds: boundRectF(p.bounds),
+ hash: p.hash.Sum64(),
+ }
+}
+
+// Move moves the pen by the amount specified by delta.
+func (p *Path) Move(delta f32.Point) {
+ to := delta.Add(p.pen)
+ p.MoveTo(to)
+}
+
+// MoveTo moves the pen to the specified absolute coordinate.
+func (p *Path) MoveTo(to f32.Point) {
+ if p.pen == to {
+ return
+ }
+ p.open = p.open || p.pen != p.start
+ p.end()
+ p.pen = to
+ p.start = to
+}
+
+// end completes the current contour.
+func (p *Path) end() {
+ p.contour++
+}
+
+// Line moves the pen by the amount specified by delta, recording a line.
+func (p *Path) Line(delta f32.Point) {
+ to := delta.Add(p.pen)
+ p.LineTo(to)
+}
+
+// LineTo moves the pen to the absolute point specified, recording a line.
+func (p *Path) LineTo(to f32.Point) {
+ data := ops.Write(p.ops, scene.CommandSize+4)
+ bo := binary.LittleEndian
+ bo.PutUint32(data[0:], uint32(p.contour))
+ p.cmd(data[4:], scene.Line(p.pen, to))
+ p.pen = to
+ p.expand(to)
+}
+
+func (p *Path) cmd(data []byte, c scene.Command) {
+ ops.EncodeCommand(data, c)
+ p.hash.Write(data)
+}
+
+func (p *Path) expand(pt f32.Point) {
+ if !p.hasSegments {
+ p.hasSegments = true
+ p.bounds = f32.Rectangle{Min: pt, Max: pt}
+ } else {
+ b := p.bounds
+ if pt.X < b.Min.X {
+ b.Min.X = pt.X
+ }
+ if pt.Y < b.Min.Y {
+ b.Min.Y = pt.Y
+ }
+ if pt.X > b.Max.X {
+ b.Max.X = pt.X
+ }
+ if pt.Y > b.Max.Y {
+ b.Max.Y = pt.Y
+ }
+ p.bounds = b
+ }
+}
+
+// boundRectF returns a bounding image.Rectangle for a f32.Rectangle.
+func boundRectF(r f32.Rectangle) image.Rectangle {
+ return image.Rectangle{
+ Min: image.Point{
+ X: int(floor(r.Min.X)),
+ Y: int(floor(r.Min.Y)),
+ },
+ Max: image.Point{
+ X: int(ceil(r.Max.X)),
+ Y: int(ceil(r.Max.Y)),
+ },
+ }
+}
+
+func ceil(v float32) int {
+ return int(math.Ceil(float64(v)))
+}
+
+func floor(v float32) int {
+ return int(math.Floor(float64(v)))
+}
+
+// Quad records a quadratic Bézier from the pen to end
+// with the control point ctrl.
+func (p *Path) Quad(ctrl, to f32.Point) {
+ ctrl = ctrl.Add(p.pen)
+ to = to.Add(p.pen)
+ p.QuadTo(ctrl, to)
+}
+
+// QuadTo records a quadratic Bézier from the pen to end
+// with the control point ctrl, with absolute coordinates.
+func (p *Path) QuadTo(ctrl, to f32.Point) {
+ data := ops.Write(p.ops, scene.CommandSize+4)
+ bo := binary.LittleEndian
+ bo.PutUint32(data[0:], uint32(p.contour))
+ p.cmd(data[4:], scene.Quad(p.pen, ctrl, to))
+ p.pen = to
+ p.expand(ctrl)
+ p.expand(to)
+}
+
+// ArcTo adds an elliptical arc to the path. The implied ellipse is defined
+// by its focus points f1 and f2.
+// The arc starts in the current point and ends angle radians along the ellipse boundary.
+// The sign of angle determines the direction; positive being counter-clockwise,
+// negative clockwise.
+func (p *Path) ArcTo(f1, f2 f32.Point, angle float32) {
+ const segments = 16
+ m := stroke.ArcTransform(p.pen, f1, f2, angle, segments)
+
+ for i := 0; i < segments; i++ {
+ p0 := p.pen
+ p1 := m.Transform(p0)
+ p2 := m.Transform(p1)
+ ctl := p1.Mul(2).Sub(p0.Add(p2).Mul(.5))
+ p.QuadTo(ctl, p2)
+ }
+}
+
+// Arc is like ArcTo where f1 and f2 are relative to the current position.
+func (p *Path) Arc(f1, f2 f32.Point, angle float32) {
+ f1 = f1.Add(p.pen)
+ f2 = f2.Add(p.pen)
+ p.ArcTo(f1, f2, angle)
+}
+
+// Cube records a cubic Bézier from the pen through
+// two control points ending in to.
+func (p *Path) Cube(ctrl0, ctrl1, to f32.Point) {
+ p.CubeTo(p.pen.Add(ctrl0), p.pen.Add(ctrl1), p.pen.Add(to))
+}
+
+// CubeTo records a cubic Bézier from the pen through
+// two control points ending in to, with absolute coordinates.
+func (p *Path) CubeTo(ctrl0, ctrl1, to f32.Point) {
+ if ctrl0 == p.pen && ctrl1 == p.pen && to == p.pen {
+ return
+ }
+ data := ops.Write(p.ops, scene.CommandSize+4)
+ bo := binary.LittleEndian
+ bo.PutUint32(data[0:], uint32(p.contour))
+ p.cmd(data[4:], scene.Cubic(p.pen, ctrl0, ctrl1, to))
+ p.pen = to
+ p.expand(ctrl0)
+ p.expand(ctrl1)
+ p.expand(to)
+}
+
+// Close closes the path contour.
+func (p *Path) Close() {
+ if p.pen != p.start {
+ p.LineTo(p.start)
+ }
+ p.end()
+}
+
+// Stroke represents a stroked path.
+type Stroke struct {
+ Path PathSpec
+ // Width of the stroked path.
+ Width float32
+}
+
+// Op returns a clip operation representing the stroke.
+func (s Stroke) Op() Op {
+ return Op{
+ path: s.Path,
+ width: s.Width,
+ }
+}
+
+// Outline represents the area inside of a path, according to the
+// non-zero winding rule.
+type Outline struct {
+ Path PathSpec
+}
+
+// Op returns a clip operation representing the outline.
+func (o Outline) Op() Op {
+ if o.Path.open {
+ panic("not all path contours are closed")
+ }
+ return Op{
+ path: o.Path,
+ outline: true,
+ }
+}
diff --git a/vendor/gioui.org/op/clip/doc.go b/vendor/gioui.org/op/clip/doc.go
new file mode 100644
index 0000000..894cffd
--- /dev/null
+++ b/vendor/gioui.org/op/clip/doc.go
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: Unlicense OR MIT
+
+/*
+Package clip provides operations for defining areas that applies to operations
+such as paints and pointer handlers.
+
+The current clip is initially the infinite set. Pushing an Op sets the clip
+to the intersection of the current clip and pushed clip area. Popping the
+area restores the clip to its state before pushing.
+
+General clipping areas are constructed with Path. Common cases such as
+rectangular clip areas also exist as convenient constructors.
+*/
+package clip
diff --git a/vendor/gioui.org/op/clip/shapes.go b/vendor/gioui.org/op/clip/shapes.go
new file mode 100644
index 0000000..f361572
--- /dev/null
+++ b/vendor/gioui.org/op/clip/shapes.go
@@ -0,0 +1,212 @@
+// SPDX-License-Identifier: Unlicense OR MIT
+
+package clip
+
+import (
+ "image"
+ "math"
+
+ "gioui.org/f32"
+ "gioui.org/internal/ops"
+ "gioui.org/op"
+)
+
+// Rect represents the clip area of a pixel-aligned rectangle.
+type Rect image.Rectangle
+
+// Op returns the op for the rectangle.
+func (r Rect) Op() Op {
+ return Op{
+ outline: true,
+ path: r.Path(),
+ }
+}
+
+// Push the clip operation on the clip stack.
+func (r Rect) Push(ops *op.Ops) Stack {
+ return r.Op().Push(ops)
+}
+
+// Path returns the PathSpec for the rectangle.
+func (r Rect) Path() PathSpec {
+ return PathSpec{
+ shape: ops.Rect,
+ bounds: image.Rectangle(r),
+ }
+}
+
+// UniformRRect returns an RRect with all corner radii set to the
+// provided radius.
+func UniformRRect(rect f32.Rectangle, radius float32) RRect {
+ return RRect{
+ Rect: rect,
+ SE: radius,
+ SW: radius,
+ NE: radius,
+ NW: radius,
+ }
+}
+
+// RRect represents the clip area of a rectangle with rounded
+// corners.
+//
+// Specify a square with corner radii equal to half the square size to
+// construct a circular clip area.
+type RRect struct {
+ Rect f32.Rectangle
+ // The corner radii.
+ SE, SW, NW, NE float32
+}
+
+// Op returns the op for the rounded rectangle.
+func (rr RRect) Op(ops *op.Ops) Op {
+ if rr.SE == 0 && rr.SW == 0 && rr.NW == 0 && rr.NE == 0 {
+ r := image.Rectangle{
+ Min: image.Point{X: int(rr.Rect.Min.X), Y: int(rr.Rect.Min.Y)},
+ Max: image.Point{X: int(rr.Rect.Max.X), Y: int(rr.Rect.Max.Y)},
+ }
+ // Only use Rect if rr is pixel-aligned, as Rect is guaranteed to be.
+ if fPt(r.Min) == rr.Rect.Min && fPt(r.Max) == rr.Rect.Max {
+ return Rect(r).Op()
+ }
+ }
+ return Outline{Path: rr.Path(ops)}.Op()
+}
+
+// Push the rectangle clip on the clip stack.
+func (rr RRect) Push(ops *op.Ops) Stack {
+ return rr.Op(ops).Push(ops)
+}
+
+// Path returns the PathSpec for the rounded rectangle.
+func (rr RRect) Path(ops *op.Ops) PathSpec {
+ var p Path
+ p.Begin(ops)
+
+ // https://pomax.github.io/bezierinfo/#circles_cubic.
+ const q = 4 * (math.Sqrt2 - 1) / 3
+ const iq = 1 - q
+
+ se, sw, nw, ne := rr.SE, rr.SW, rr.NW, rr.NE
+ w, n, e, s := rr.Rect.Min.X, rr.Rect.Min.Y, rr.Rect.Max.X, rr.Rect.Max.Y
+
+ p.MoveTo(f32.Point{X: w + nw, Y: n})
+ p.LineTo(f32.Point{X: e - ne, Y: n}) // N
+ p.CubeTo( // NE
+ f32.Point{X: e - ne*iq, Y: n},
+ f32.Point{X: e, Y: n + ne*iq},
+ f32.Point{X: e, Y: n + ne})
+ p.LineTo(f32.Point{X: e, Y: s - se}) // E
+ p.CubeTo( // SE
+ f32.Point{X: e, Y: s - se*iq},
+ f32.Point{X: e - se*iq, Y: s},
+ f32.Point{X: e - se, Y: s})
+ p.LineTo(f32.Point{X: w + sw, Y: s}) // S
+ p.CubeTo( // SW
+ f32.Point{X: w + sw*iq, Y: s},
+ f32.Point{X: w, Y: s - sw*iq},
+ f32.Point{X: w, Y: s - sw})
+ p.LineTo(f32.Point{X: w, Y: n + nw}) // W
+ p.CubeTo( // NW
+ f32.Point{X: w, Y: n + nw*iq},
+ f32.Point{X: w + nw*iq, Y: n},
+ f32.Point{X: w + nw, Y: n})
+
+ return p.End()
+}
+
+// Circle represents the clip area of a circle.
+type Circle struct {
+ Center f32.Point
+ Radius float32
+}
+
+// Op returns the op for the filled circle.
+func (c Circle) Op(ops *op.Ops) Op {
+ return Outline{Path: c.Path(ops)}.Op()
+}
+
+// Push the circle clip on the clip stack.
+func (c Circle) Push(ops *op.Ops) Stack {
+ return c.Op(ops).Push(ops)
+}
+
+// Path returns the PathSpec for the circle.
+//
+// Deprecated: use Ellipse instead.
+func (c Circle) Path(ops *op.Ops) PathSpec {
+ b := f32.Rectangle{
+ Min: f32.Pt(c.Center.X-c.Radius, c.Center.Y-c.Radius),
+ Max: f32.Pt(c.Center.X+c.Radius, c.Center.Y+c.Radius),
+ }
+ return Ellipse(b).Path(ops)
+}
+
+// Ellipse represents the largest axis-aligned ellipse that
+// is contained in its bounds.
+type Ellipse f32.Rectangle
+
+// Op returns the op for the filled ellipse.
+func (e Ellipse) Op(ops *op.Ops) Op {
+ return Outline{Path: e.Path(ops)}.Op()
+}
+
+// Push the filled ellipse clip op on the clip stack.
+func (e Ellipse) Push(ops *op.Ops) Stack {
+ return e.Op(ops).Push(ops)
+}
+
+// Path constructs a path for the ellipse.
+func (e Ellipse) Path(o *op.Ops) PathSpec {
+ bounds := f32.Rectangle(e)
+ if bounds.Dx() == 0 || bounds.Dy() == 0 {
+ return PathSpec{shape: ops.Rect}
+ }
+
+ var p Path
+ p.Begin(o)
+
+ center := bounds.Max.Add(bounds.Min).Mul(.5)
+ diam := bounds.Dx()
+ r := diam * .5
+ // We'll model the ellipse as a circle scaled in the Y
+ // direction.
+ scale := bounds.Dy() / diam
+
+ // https://pomax.github.io/bezierinfo/#circles_cubic.
+ const q = 4 * (math.Sqrt2 - 1) / 3
+
+ curve := r * q
+ top := f32.Point{X: center.X, Y: center.Y - r*scale}
+
+ p.MoveTo(top)
+ p.CubeTo(
+ f32.Point{X: center.X + curve, Y: center.Y - r*scale},
+ f32.Point{X: center.X + r, Y: center.Y - curve*scale},
+ f32.Point{X: center.X + r, Y: center.Y},
+ )
+ p.CubeTo(
+ f32.Point{X: center.X + r, Y: center.Y + curve*scale},
+ f32.Point{X: center.X + curve, Y: center.Y + r*scale},
+ f32.Point{X: center.X, Y: center.Y + r*scale},
+ )
+ p.CubeTo(
+ f32.Point{X: center.X - curve, Y: center.Y + r*scale},
+ f32.Point{X: center.X - r, Y: center.Y + curve*scale},
+ f32.Point{X: center.X - r, Y: center.Y},
+ )
+ p.CubeTo(
+ f32.Point{X: center.X - r, Y: center.Y - curve*scale},
+ f32.Point{X: center.X - curve, Y: center.Y - r*scale},
+ top,
+ )
+ ellipse := p.End()
+ ellipse.shape = ops.Ellipse
+ return ellipse
+}
+
+func fPt(p image.Point) f32.Point {
+ return f32.Point{
+ X: float32(p.X), Y: float32(p.Y),
+ }
+}
diff --git a/vendor/gioui.org/op/op.go b/vendor/gioui.org/op/op.go
new file mode 100644
index 0000000..f0b2620
--- /dev/null
+++ b/vendor/gioui.org/op/op.go
@@ -0,0 +1,243 @@
+// SPDX-License-Identifier: Unlicense OR MIT
+
+/*
+
+Package op implements operations for updating a user interface.
+
+Gio programs use operations, or ops, for describing their user
+interfaces. There are operations for drawing, defining input
+handlers, changing window properties as well as operations for
+controlling the execution of other operations.
+
+Ops represents a list of operations. The most important use
+for an Ops list is to describe a complete user interface update
+to a ui/app.Window's Update method.
+
+Drawing a colored square:
+
+ import "gioui.org/unit"
+ import "gioui.org/app"
+ import "gioui.org/op/paint"
+
+ var w app.Window
+ var e system.FrameEvent
+ ops := new(op.Ops)
+ ...
+ ops.Reset()
+ paint.ColorOp{Color: ...}.Add(ops)
+ paint.PaintOp{Rect: ...}.Add(ops)
+ e.Frame(ops)
+
+State
+
+An Ops list can be viewed as a very simple virtual machine: it has state such
+as transformation and color and execution flow can be controlled with macros.
+
+Some state, such as the current color, is modified directly by operations with
+Add methods. Other state, such as transformation and clip shape, are
+represented by stacks.
+
+This example sets the simple color state and pushes an offset to the
+transformation stack.
+
+ ops := new(op.Ops)
+ // Set the color.
+ paint.ColorOp{...}.Add(ops)
+ // Apply an offset to subsequent operations.
+ stack := op.Offset(...).Push(ops)
+ ...
+ // Undo the offset transformation.
+ stack.Pop()
+
+The MacroOp records a list of operations to be executed later:
+
+ ops := new(op.Ops)
+ macro := op.Record(ops)
+ // Record operations by adding them.
+ op.InvalidateOp{}.Add(ops)
+ ...
+ // End recording.
+ call := macro.Stop()
+
+ // replay the recorded operations:
+ call.Add(ops)
+
+*/
+package op
+
+import (
+ "encoding/binary"
+ "math"
+ "time"
+
+ "gioui.org/f32"
+ "gioui.org/internal/ops"
+)
+
+// Ops holds a list of operations. Operations are stored in
+// serialized form to avoid garbage during construction of
+// the ops list.
+type Ops struct {
+ // Internal is for internal use, despite being exported.
+ Internal ops.Ops
+}
+
+// MacroOp records a list of operations for later use.
+type MacroOp struct {
+ ops *ops.Ops
+ id ops.StackID
+ pc ops.PC
+}
+
+// CallOp invokes the operations recorded by Record.
+type CallOp struct {
+ // Ops is the list of operations to invoke.
+ ops *ops.Ops
+ pc ops.PC
+}
+
+// InvalidateOp requests a redraw at the given time. Use
+// the zero value to request an immediate redraw.
+type InvalidateOp struct {
+ At time.Time
+}
+
+// TransformOp represents a transformation that can be pushed on the
+// transformation stack.
+type TransformOp struct {
+ t f32.Affine2D
+}
+
+// TransformStack represents a TransformOp pushed on the transformation stack.
+type TransformStack struct {
+ id ops.StackID
+ macroID int
+ ops *ops.Ops
+}
+
+// Defer executes c after all other operations have completed, including
+// previously deferred operations.
+// Defer saves the transformation stack and pushes it prior to executing
+// c. All other operation state is reset.
+//
+// Note that deferred operations are executed in first-in-first-out order,
+// unlike the Go facility of the same name.
+func Defer(o *Ops, c CallOp) {
+ if c.ops == nil {
+ return
+ }
+ state := ops.Save(&o.Internal)
+ // Wrap c in a macro that loads the saved state before execution.
+ m := Record(o)
+ state.Load()
+ c.Add(o)
+ c = m.Stop()
+ // A Defer is recorded as a TypeDefer followed by the
+ // wrapped macro.
+ data := ops.Write(&o.Internal, ops.TypeDeferLen)
+ data[0] = byte(ops.TypeDefer)
+ c.Add(o)
+}
+
+// Reset the Ops, preparing it for re-use. Reset invalidates
+// any recorded macros.
+func (o *Ops) Reset() {
+ ops.Reset(&o.Internal)
+}
+
+// Record a macro of operations.
+func Record(o *Ops) MacroOp {
+ m := MacroOp{
+ ops: &o.Internal,
+ id: ops.PushMacro(&o.Internal),
+ pc: ops.PCFor(&o.Internal),
+ }
+ // Reserve room for a macro definition. Updated in Stop.
+ ops.Write(m.ops, ops.TypeMacroLen)
+ m.fill()
+ return m
+}
+
+// Stop ends a previously started recording and returns an
+// operation for replaying it.
+func (m MacroOp) Stop() CallOp {
+ ops.PopMacro(m.ops, m.id)
+ m.fill()
+ return CallOp{
+ ops: m.ops,
+ pc: m.pc,
+ }
+}
+
+func (m MacroOp) fill() {
+ ops.FillMacro(m.ops, m.pc)
+}
+
+// Add the recorded list of operations. Add
+// panics if the Ops containing the recording
+// has been reset.
+func (c CallOp) Add(o *Ops) {
+ if c.ops == nil {
+ return
+ }
+ ops.AddCall(&o.Internal, c.ops, c.pc)
+}
+
+func (r InvalidateOp) Add(o *Ops) {
+ data := ops.Write(&o.Internal, ops.TypeRedrawLen)
+ data[0] = byte(ops.TypeInvalidate)
+ bo := binary.LittleEndian
+ // UnixNano cannot represent the zero time.
+ if t := r.At; !t.IsZero() {
+ nanos := t.UnixNano()
+ if nanos > 0 {
+ bo.PutUint64(data[1:], uint64(nanos))
+ }
+ }
+}
+
+// Offset creates a TransformOp with the offset o.
+func Offset(o f32.Point) TransformOp {
+ return TransformOp{t: f32.Affine2D{}.Offset(o)}
+}
+
+// Affine creates a TransformOp representing the transformation a.
+func Affine(a f32.Affine2D) TransformOp {
+ return TransformOp{t: a}
+}
+
+// Push the current transformation to the stack and then multiply the
+// current transformation with t.
+func (t TransformOp) Push(o *Ops) TransformStack {
+ id, macroID := ops.PushOp(&o.Internal, ops.TransStack)
+ t.add(o, true)
+ return TransformStack{ops: &o.Internal, id: id, macroID: macroID}
+}
+
+// Add is like Push except it doesn't push the current transformation to the
+// stack.
+func (t TransformOp) Add(o *Ops) {
+ t.add(o, false)
+}
+
+func (t TransformOp) add(o *Ops, push bool) {
+ data := ops.Write(&o.Internal, ops.TypeTransformLen)
+ data[0] = byte(ops.TypeTransform)
+ if push {
+ data[1] = 1
+ }
+ bo := binary.LittleEndian
+ a, b, c, d, e, f := t.t.Elems()
+ bo.PutUint32(data[2:], math.Float32bits(a))
+ bo.PutUint32(data[2+4*1:], math.Float32bits(b))
+ bo.PutUint32(data[2+4*2:], math.Float32bits(c))
+ bo.PutUint32(data[2+4*3:], math.Float32bits(d))
+ bo.PutUint32(data[2+4*4:], math.Float32bits(e))
+ bo.PutUint32(data[2+4*5:], math.Float32bits(f))
+}
+
+func (t TransformStack) Pop() {
+ ops.PopOp(t.ops, ops.TransStack, t.id, t.macroID)
+ data := ops.Write(t.ops, ops.TypePopTransformLen)
+ data[0] = byte(ops.TypePopTransform)
+}
diff --git a/vendor/gioui.org/op/paint/doc.go b/vendor/gioui.org/op/paint/doc.go
new file mode 100644
index 0000000..bbec006
--- /dev/null
+++ b/vendor/gioui.org/op/paint/doc.go
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: Unlicense OR MIT
+
+/*
+Package paint provides drawing operations for 2D graphics.
+
+The PaintOp operation fills the current clip with the current brush, taking the
+current transformation into account. Drawing outside the current clip area is
+ignored.
+
+The current brush is set by either a ColorOp for a constant color, or
+ImageOp for an image, or LinearGradientOp for gradients.
+
+All color.NRGBA values are in the sRGB color space.
+*/
+package paint
diff --git a/vendor/gioui.org/op/paint/paint.go b/vendor/gioui.org/op/paint/paint.go
new file mode 100644
index 0000000..1c99297
--- /dev/null
+++ b/vendor/gioui.org/op/paint/paint.go
@@ -0,0 +1,147 @@
+// SPDX-License-Identifier: Unlicense OR MIT
+
+package paint
+
+import (
+ "encoding/binary"
+ "image"
+ "image/color"
+ "image/draw"
+ "math"
+
+ "gioui.org/f32"
+ "gioui.org/internal/ops"
+ "gioui.org/op"
+ "gioui.org/op/clip"
+)
+
+// ImageOp sets the brush to an image.
+type ImageOp struct {
+ uniform bool
+ color color.NRGBA
+ src *image.RGBA
+
+ // handle is a key to uniquely identify this ImageOp
+ // in a map of cached textures.
+ handle interface{}
+}
+
+// ColorOp sets the brush to a constant color.
+type ColorOp struct {
+ Color color.NRGBA
+}
+
+// LinearGradientOp sets the brush to a gradient starting at stop1 with color1 and
+// ending at stop2 with color2.
+type LinearGradientOp struct {
+ Stop1 f32.Point
+ Color1 color.NRGBA
+ Stop2 f32.Point
+ Color2 color.NRGBA
+}
+
+// PaintOp fills the current clip area with the current brush.
+type PaintOp struct {
+}
+
+// NewImageOp creates an ImageOp backed by src.
+//
+// NewImageOp assumes the backing image is immutable, and may cache a
+// copy of its contents in a GPU-friendly way. Create new ImageOps to
+// ensure that changes to an image is reflected in the display of
+// it.
+func NewImageOp(src image.Image) ImageOp {
+ switch src := src.(type) {
+ case *image.Uniform:
+ col := color.NRGBAModel.Convert(src.C).(color.NRGBA)
+ return ImageOp{
+ uniform: true,
+ color: col,
+ }
+ case *image.RGBA:
+ return ImageOp{
+ src: src,
+ handle: new(int),
+ }
+ }
+
+ sz := src.Bounds().Size()
+ // Copy the image into a GPU friendly format.
+ dst := image.NewRGBA(image.Rectangle{
+ Max: sz,
+ })
+ draw.Draw(dst, dst.Bounds(), src, src.Bounds().Min, draw.Src)
+ return ImageOp{
+ src: dst,
+ handle: new(int),
+ }
+}
+
+func (i ImageOp) Size() image.Point {
+ if i.src == nil {
+ return image.Point{}
+ }
+ return i.src.Bounds().Size()
+}
+
+func (i ImageOp) Add(o *op.Ops) {
+ if i.uniform {
+ ColorOp{
+ Color: i.color,
+ }.Add(o)
+ return
+ } else if i.src == nil || i.src.Bounds().Empty() {
+ return
+ }
+ data := ops.Write2(&o.Internal, ops.TypeImageLen, i.src, i.handle)
+ data[0] = byte(ops.TypeImage)
+}
+
+func (c ColorOp) Add(o *op.Ops) {
+ data := ops.Write(&o.Internal, ops.TypeColorLen)
+ data[0] = byte(ops.TypeColor)
+ data[1] = c.Color.R
+ data[2] = c.Color.G
+ data[3] = c.Color.B
+ data[4] = c.Color.A
+}
+
+func (c LinearGradientOp) Add(o *op.Ops) {
+ data := ops.Write(&o.Internal, ops.TypeLinearGradientLen)
+ data[0] = byte(ops.TypeLinearGradient)
+
+ bo := binary.LittleEndian
+ bo.PutUint32(data[1:], math.Float32bits(c.Stop1.X))
+ bo.PutUint32(data[5:], math.Float32bits(c.Stop1.Y))
+ bo.PutUint32(data[9:], math.Float32bits(c.Stop2.X))
+ bo.PutUint32(data[13:], math.Float32bits(c.Stop2.Y))
+
+ data[17+0] = c.Color1.R
+ data[17+1] = c.Color1.G
+ data[17+2] = c.Color1.B
+ data[17+3] = c.Color1.A
+ data[21+0] = c.Color2.R
+ data[21+1] = c.Color2.G
+ data[21+2] = c.Color2.B
+ data[21+3] = c.Color2.A
+}
+
+func (d PaintOp) Add(o *op.Ops) {
+ data := ops.Write(&o.Internal, ops.TypePaintLen)
+ data[0] = byte(ops.TypePaint)
+}
+
+// FillShape fills the clip shape with a color.
+func FillShape(ops *op.Ops, c color.NRGBA, shape clip.Op) {
+ defer shape.Push(ops).Pop()
+ Fill(ops, c)
+}
+
+// Fill paints an infinitely large plane with the provided color. It
+// is intended to be used with a clip.Op already in place to limit
+// the painted area. Use FillShape unless you need to paint several
+// times within the same clip.Op.
+func Fill(ops *op.Ops, c color.NRGBA) {
+ ColorOp{Color: c}.Add(ops)
+ PaintOp{}.Add(ops)
+}
diff --git a/vendor/gioui.org/shader/LICENSE b/vendor/gioui.org/shader/LICENSE
new file mode 100644
index 0000000..81f4733
--- /dev/null
+++ b/vendor/gioui.org/shader/LICENSE
@@ -0,0 +1,63 @@
+This project is provided under the terms of the UNLICENSE or
+the MIT license denoted by the following SPDX identifier:
+
+SPDX-License-Identifier: Unlicense OR MIT
+
+You may use the project under the terms of either license.
+
+Both licenses are reproduced below.
+
+----
+The MIT License (MIT)
+
+Copyright (c) 2019 The Gio authors
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+---
+
+
+
+---
+The UNLICENSE
+
+This is free and unencumbered software released into the public domain.
+
+Anyone is free to copy, modify, publish, use, compile, sell, or
+distribute this software, either in source code form or as a compiled
+binary, for any purpose, commercial or non-commercial, and by any
+means.
+
+In jurisdictions that recognize copyright laws, the author or authors
+of this software dedicate any and all copyright interest in the
+software to the public domain. We make this dedication for the benefit
+of the public at large and to the detriment of our heirs and
+successors. We intend this dedication to be an overt act of
+relinquishment in perpetuity of all present and future rights to this
+software under copyright law.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+OTHER DEALINGS IN THE SOFTWARE.
+
+For more information, please refer to <https://unlicense.org/>
+---
diff --git a/vendor/gioui.org/shader/README.md b/vendor/gioui.org/shader/README.md
new file mode 100644
index 0000000..4ea4867
--- /dev/null
+++ b/vendor/gioui.org/shader/README.md
@@ -0,0 +1,18 @@
+# GPU programs for the Gio project
+
+This repository contains the source code for the [Gio](https://gioui.org)
+project. It also contains the generators and derived versions for use with the
+GPU APIs supported by Gio.
+
+# Generating CPU fallbacks
+
+The `piet/gencpu.sh` script updates the piet-gpu binaries:
+
+```
+$ cd piet
+$ ./gencpu.sh
+```
+
+## Issues and contributions
+
+See the [Gio contribution guide](https://gioui.org/doc/contribute).
diff --git a/vendor/gioui.org/shader/gio/blit.frag b/vendor/gioui.org/shader/gio/blit.frag
new file mode 100644
index 0000000..a88f4dc
--- /dev/null
+++ b/vendor/gioui.org/shader/gio/blit.frag
@@ -0,0 +1,15 @@
+#version 310 es
+
+// SPDX-License-Identifier: Unlicense OR MIT
+
+precision mediump float;
+
+layout(location=0) in highp vec2 vUV;
+
+{{.Header}}
+
+layout(location = 0) out vec4 fragColor;
+
+void main() {
+ fragColor = {{.FetchColorExpr}};
+}
diff --git a/vendor/gioui.org/shader/gio/blit.vert b/vendor/gioui.org/shader/gio/blit.vert
new file mode 100644
index 0000000..83d041f
--- /dev/null
+++ b/vendor/gioui.org/shader/gio/blit.vert
@@ -0,0 +1,27 @@
+#version 310 es
+
+// SPDX-License-Identifier: Unlicense OR MIT
+
+#extension GL_GOOGLE_include_directive : enable
+
+precision highp float;
+
+#include "common.h"
+
+layout(push_constant) uniform Block {
+ vec4 transform;
+ vec4 uvTransformR1;
+ vec4 uvTransformR2;
+} _block;
+
+layout(location = 0) in vec2 pos;
+
+layout(location = 1) in vec2 uv;
+
+layout(location = 0) out vec2 vUV;
+
+void main() {
+ vec2 p = pos*_block.transform.xy + _block.transform.zw;
+ gl_Position = vec4(transform3x2(windowTransform, vec3(p, 0)), 1);
+ vUV = transform3x2(m3x2(_block.uvTransformR1.xyz, _block.uvTransformR2.xyz), vec3(uv,1)).xy;
+}
diff --git a/vendor/gioui.org/shader/gio/common.h b/vendor/gioui.org/shader/gio/common.h
new file mode 100644
index 0000000..9b6dc59
--- /dev/null
+++ b/vendor/gioui.org/shader/gio/common.h
@@ -0,0 +1,35 @@
+// SPDX-License-Identifier: Unlicense OR MIT
+
+struct m3x2 {
+ vec3 r0;
+ vec3 r1;
+};
+
+// fboTransform is the transformation that cancels the implied transformation
+// between the clip space and the framebuffer. Only two rows are returned. The
+// last is implied to be [0, 0, 1].
+const m3x2 fboTransform = m3x2(
+#if defined(LANG_HLSL) || defined(LANG_MSL) || defined(LANG_MSLIOS)
+ vec3(1.0, 0.0, 0.0),
+ vec3(0.0, -1.0, 0.0)
+#else
+ vec3(1.0, 0.0, 0.0),
+ vec3(0.0, 1.0, 0.0)
+#endif
+);
+
+// windowTransform is the transformation that cancels the implied transformation
+// between framebuffer space and window system coordinates.
+const m3x2 windowTransform = m3x2(
+#if defined(LANG_VULKAN)
+ vec3(1.0, 0.0, 0.0),
+ vec3(0.0, 1.0, 0.0)
+#else
+ vec3(1.0, 0.0, 0.0),
+ vec3(0.0, -1.0, 0.0)
+#endif
+);
+
+vec3 transform3x2(m3x2 t, vec3 v) {
+ return vec3(dot(t.r0, v), dot(t.r1, v), dot(vec3(0.0, 0.0, 1.0), v));
+}
diff --git a/vendor/gioui.org/shader/gio/copy.frag b/vendor/gioui.org/shader/gio/copy.frag
new file mode 100644
index 0000000..048d9e7
--- /dev/null
+++ b/vendor/gioui.org/shader/gio/copy.frag
@@ -0,0 +1,24 @@
+#version 310 es
+
+// SPDX-License-Identifier: Unlicense OR MIT
+
+precision mediump float;
+
+layout(binding = 0) uniform sampler2D tex;
+
+layout(location = 0) in highp vec2 vUV;
+
+layout(location = 0) out vec4 fragColor;
+
+vec3 sRGBtoRGB(vec3 rgb) {
+ bvec3 cutoff = greaterThanEqual(rgb, vec3(0.04045));
+ vec3 below = rgb/vec3(12.92);
+ vec3 above = pow((rgb + vec3(0.055))/vec3(1.055), vec3(2.4));
+ return mix(below, above, cutoff);
+}
+
+void main() {
+ vec4 texel = texture(tex, vUV);
+ texel.rgb = sRGBtoRGB(texel.rgb);
+ fragColor = texel;
+}
diff --git a/vendor/gioui.org/shader/gio/copy.vert b/vendor/gioui.org/shader/gio/copy.vert
new file mode 100644
index 0000000..c079b96
--- /dev/null
+++ b/vendor/gioui.org/shader/gio/copy.vert
@@ -0,0 +1,26 @@
+#version 310 es
+
+// SPDX-License-Identifier: Unlicense OR MIT
+
+#extension GL_GOOGLE_include_directive : enable
+
+precision highp float;
+
+#include "common.h"
+
+layout(push_constant) uniform Block {
+ vec2 scale;
+ vec2 pos;
+ vec2 uvScale;
+} _block;
+
+layout(location = 0) in vec2 pos;
+layout(location = 1) in vec2 uv;
+
+layout(location = 0) out vec2 vUV;
+
+void main() {
+ vUV = vec2(uv*_block.uvScale);
+ vec2 p = vec2(pos*_block.scale + _block.pos);
+ gl_Position = vec4(transform3x2(windowTransform, vec3(p, 0)), 1);
+}
diff --git a/vendor/gioui.org/shader/gio/cover.frag b/vendor/gioui.org/shader/gio/cover.frag
new file mode 100644
index 0000000..34c5e4c
--- /dev/null
+++ b/vendor/gioui.org/shader/gio/cover.frag
@@ -0,0 +1,20 @@
+#version 310 es
+
+// SPDX-License-Identifier: Unlicense OR MIT
+
+precision mediump float;
+
+{{.Header}}
+
+layout(location = 0) in highp vec2 vCoverUV;
+layout(location = 1) in highp vec2 vUV;
+
+layout(binding = 1) uniform sampler2D cover;
+
+layout(location = 0) out vec4 fragColor;
+
+void main() {
+ fragColor = {{.FetchColorExpr}};
+ float c = min(abs(texture(cover, vCoverUV).r), 1.0);
+ fragColor *= c;
+}
diff --git a/vendor/gioui.org/shader/gio/cover.vert b/vendor/gioui.org/shader/gio/cover.vert
new file mode 100644
index 0000000..02fae50
--- /dev/null
+++ b/vendor/gioui.org/shader/gio/cover.vert
@@ -0,0 +1,31 @@
+#version 310 es
+
+// SPDX-License-Identifier: Unlicense OR MIT
+
+#extension GL_GOOGLE_include_directive : enable
+
+precision highp float;
+
+#include "common.h"
+
+layout(push_constant) uniform Block {
+ vec4 transform;
+ vec4 uvCoverTransform;
+ vec4 uvTransformR1;
+ vec4 uvTransformR2;
+} _block;
+
+layout(location = 0) in vec2 pos;
+
+layout(location = 0) out vec2 vCoverUV;
+
+layout(location = 1) in vec2 uv;
+layout(location = 1) out vec2 vUV;
+
+void main() {
+ vec2 p = vec2(pos*_block.transform.xy + _block.transform.zw);
+ gl_Position = vec4(transform3x2(windowTransform, vec3(p, 0)), 1);
+ vUV = transform3x2(m3x2(_block.uvTransformR1.xyz, _block.uvTransformR2.xyz), vec3(uv,1)).xy;
+ vec3 uv3 = vec3(uv, 1.0);
+ vCoverUV = (uv3*vec3(_block.uvCoverTransform.xy, 1.0)+vec3(_block.uvCoverTransform.zw, 0.0)).xy;
+}
diff --git a/vendor/gioui.org/shader/gio/gen.go b/vendor/gioui.org/shader/gio/gen.go
new file mode 100644
index 0000000..f26056a
--- /dev/null
+++ b/vendor/gioui.org/shader/gio/gen.go
@@ -0,0 +1,5 @@
+// SPDX-License-Identifier: Unlicense OR MIT
+
+package gio
+
+//go:generate go run ../cmd/convertshaders -package gio -dir .
diff --git a/vendor/gioui.org/shader/gio/input.vert b/vendor/gioui.org/shader/gio/input.vert
new file mode 100644
index 0000000..3d0cd50
--- /dev/null
+++ b/vendor/gioui.org/shader/gio/input.vert
@@ -0,0 +1,15 @@
+#version 310 es
+
+// SPDX-License-Identifier: Unlicense OR MIT
+
+#extension GL_GOOGLE_include_directive : enable
+
+precision highp float;
+
+#include "common.h"
+
+layout(location=0) in vec4 position;
+
+void main() {
+ gl_Position = vec4(transform3x2(windowTransform, position.xyz), position.w);
+}
diff --git a/vendor/gioui.org/shader/gio/intersect.frag b/vendor/gioui.org/shader/gio/intersect.frag
new file mode 100644
index 0000000..21a126f
--- /dev/null
+++ b/vendor/gioui.org/shader/gio/intersect.frag
@@ -0,0 +1,15 @@
+#version 310 es
+
+// SPDX-License-Identifier: Unlicense OR MIT
+
+precision mediump float;
+
+layout(location = 0) in highp vec2 vUV;
+
+layout(binding = 0) uniform sampler2D cover;
+
+layout(location = 0) out vec4 fragColor;
+
+void main() {
+ fragColor.r = abs(texture(cover, vUV).r);
+}
diff --git a/vendor/gioui.org/shader/gio/intersect.vert b/vendor/gioui.org/shader/gio/intersect.vert
new file mode 100644
index 0000000..e7ee2fe
--- /dev/null
+++ b/vendor/gioui.org/shader/gio/intersect.vert
@@ -0,0 +1,26 @@
+#version 310 es
+
+// SPDX-License-Identifier: Unlicense OR MIT
+
+#extension GL_GOOGLE_include_directive : enable
+
+precision highp float;
+
+#include "common.h"
+
+layout(location = 0) in vec2 pos;
+layout(location = 1) in vec2 uv;
+
+layout(push_constant) uniform Block {
+ vec4 uvTransform;
+ vec4 subUVTransform;
+} _block;
+
+layout(location = 0) out vec2 vUV;
+
+void main() {
+ vec3 p = transform3x2(fboTransform, vec3(pos, 1.0));
+ gl_Position = vec4(p, 1);
+ vUV = uv.xy*_block.subUVTransform.xy + _block.subUVTransform.zw;
+ vUV = vUV*_block.uvTransform.xy + _block.uvTransform.zw;
+}
diff --git a/vendor/gioui.org/shader/gio/material.frag b/vendor/gioui.org/shader/gio/material.frag
new file mode 100644
index 0000000..489461e
--- /dev/null
+++ b/vendor/gioui.org/shader/gio/material.frag
@@ -0,0 +1,32 @@
+#version 310 es
+
+// SPDX-License-Identifier: Unlicense OR MIT
+
+precision mediump float;
+
+layout(binding = 0) uniform sampler2D tex;
+
+layout(location = 0) in highp vec2 vUV;
+
+layout(location = 0) out vec4 fragColor;
+
+layout(push_constant) uniform Color {
+ // If emulateSRGB is set (!= 0), the input texels are sRGB encoded. We save the
+ // conversion step below, at the cost of texture filtering in sRGB space.
+ layout(offset=16) float emulateSRGB;
+} _color;
+
+vec3 RGBtosRGB(vec3 rgb) {
+ bvec3 cutoff = greaterThanEqual(rgb, vec3(0.0031308));
+ vec3 below = vec3(12.92)*rgb;
+ vec3 above = vec3(1.055)*pow(rgb, vec3(0.41666)) - vec3(0.055);
+ return mix(below, above, cutoff);
+}
+
+void main() {
+ vec4 texel = texture(tex, vUV);
+ if (_color.emulateSRGB == 0.0) {
+ texel.rgb = RGBtosRGB(texel.rgb);
+ }
+ fragColor = texel;
+}
diff --git a/vendor/gioui.org/shader/gio/material.vert b/vendor/gioui.org/shader/gio/material.vert
new file mode 100644
index 0000000..22c41a0
--- /dev/null
+++ b/vendor/gioui.org/shader/gio/material.vert
@@ -0,0 +1,25 @@
+#version 310 es
+
+// SPDX-License-Identifier: Unlicense OR MIT
+
+#extension GL_GOOGLE_include_directive : enable
+
+precision highp float;
+
+#include "common.h"
+
+layout(push_constant) uniform Block {
+ vec2 scale;
+ vec2 pos;
+} _block;
+
+layout(location = 0) in vec2 pos;
+layout(location = 1) in vec2 uv;
+
+layout(location = 0) out vec2 vUV;
+
+void main() {
+ vUV = uv;
+ vec2 p = vec2(pos*_block.scale + _block.pos);
+ gl_Position = vec4(transform3x2(fboTransform, vec3(p, 0)), 1);
+}
diff --git a/vendor/gioui.org/shader/gio/shaders.go b/vendor/gioui.org/shader/gio/shaders.go
new file mode 100644
index 0000000..1166c1c
--- /dev/null
+++ b/vendor/gioui.org/shader/gio/shaders.go
@@ -0,0 +1,796 @@
+// Code generated by build.go. DO NOT EDIT.
+
+package gio
+
+import (
+ _ "embed"
+ "runtime"
+
+ "gioui.org/shader"
+)
+
+var (
+ Shader_blit_frag = [...]shader.Sources{
+ {
+ Name: "blit.frag",
+ Inputs: []shader.InputLocation{{Name: "vUV", Location: 0, Semantic: "TEXCOORD", SemanticIndex: 0, Type: 0x0, Size: 2}},
+ Uniforms: shader.UniformsReflection{
+ Locations: []shader.UniformLocation{{Name: "_color.color", Type: 0x0, Size: 4, Offset: 112}},
+ Size: 16,
+ },
+ },
+ {
+ Name: "blit.frag",
+ Inputs: []shader.InputLocation{{Name: "vUV", Location: 0, Semantic: "TEXCOORD", SemanticIndex: 0, Type: 0x0, Size: 2}},
+ Uniforms: shader.UniformsReflection{
+ Locations: []shader.UniformLocation{{Name: "_gradient.color1", Type: 0x0, Size: 4, Offset: 96}, {Name: "_gradient.color2", Type: 0x0, Size: 4, Offset: 112}},
+ Size: 32,
+ },
+ },
+ {
+ Name: "blit.frag",
+ Inputs: []shader.InputLocation{{Name: "vUV", Location: 0, Semantic: "TEXCOORD", SemanticIndex: 0, Type: 0x0, Size: 2}},
+ Textures: []shader.TextureBinding{{Name: "tex", Binding: 0}},
+ },
+ }
+ //go:embed zblit.frag.0.spirv
+ zblit_frag_0_spirv string
+ //go:embed zblit.frag.0.glsl100es
+ zblit_frag_0_glsl100es string
+ //go:embed zblit.frag.0.glsl150
+ zblit_frag_0_glsl150 string
+ //go:embed zblit.frag.0.dxbc
+ zblit_frag_0_dxbc string
+ //go:embed zblit.frag.0.metallibmacos
+ zblit_frag_0_metallibmacos string
+ //go:embed zblit.frag.0.metallibios
+ zblit_frag_0_metallibios string
+ //go:embed zblit.frag.0.metallibiossimulator
+ zblit_frag_0_metallibiossimulator string
+ //go:embed zblit.frag.1.spirv
+ zblit_frag_1_spirv string
+ //go:embed zblit.frag.1.glsl100es
+ zblit_frag_1_glsl100es string
+ //go:embed zblit.frag.1.glsl150
+ zblit_frag_1_glsl150 string
+ //go:embed zblit.frag.1.dxbc
+ zblit_frag_1_dxbc string
+ //go:embed zblit.frag.1.metallibmacos
+ zblit_frag_1_metallibmacos string
+ //go:embed zblit.frag.1.metallibios
+ zblit_frag_1_metallibios string
+ //go:embed zblit.frag.1.metallibiossimulator
+ zblit_frag_1_metallibiossimulator string
+ //go:embed zblit.frag.2.spirv
+ zblit_frag_2_spirv string
+ //go:embed zblit.frag.2.glsl100es
+ zblit_frag_2_glsl100es string
+ //go:embed zblit.frag.2.glsl150
+ zblit_frag_2_glsl150 string
+ //go:embed zblit.frag.2.dxbc
+ zblit_frag_2_dxbc string
+ //go:embed zblit.frag.2.metallibmacos
+ zblit_frag_2_metallibmacos string
+ //go:embed zblit.frag.2.metallibios
+ zblit_frag_2_metallibios string
+ //go:embed zblit.frag.2.metallibiossimulator
+ zblit_frag_2_metallibiossimulator string
+ Shader_blit_vert = shader.Sources{
+ Name: "blit.vert",
+ Inputs: []shader.InputLocation{{Name: "pos", Location: 0, Semantic: "TEXCOORD", SemanticIndex: 0, Type: 0x0, Size: 2}, {Name: "uv", Location: 1, Semantic: "TEXCOORD", SemanticIndex: 1, Type: 0x0, Size: 2}},
+ Uniforms: shader.UniformsReflection{
+ Locations: []shader.UniformLocation{{Name: "_block.transform", Type: 0x0, Size: 4, Offset: 0}, {Name: "_block.uvTransformR1", Type: 0x0, Size: 4, Offset: 16}, {Name: "_block.uvTransformR2", Type: 0x0, Size: 4, Offset: 32}},
+ Size: 48,
+ },
+ }
+ //go:embed zblit.vert.0.spirv
+ zblit_vert_0_spirv string
+ //go:embed zblit.vert.0.glsl100es
+ zblit_vert_0_glsl100es string
+ //go:embed zblit.vert.0.glsl150
+ zblit_vert_0_glsl150 string
+ //go:embed zblit.vert.0.dxbc
+ zblit_vert_0_dxbc string
+ //go:embed zblit.vert.0.metallibmacos
+ zblit_vert_0_metallibmacos string
+ //go:embed zblit.vert.0.metallibios
+ zblit_vert_0_metallibios string
+ //go:embed zblit.vert.0.metallibiossimulator
+ zblit_vert_0_metallibiossimulator string
+ Shader_copy_frag = shader.Sources{
+ Name: "copy.frag",
+ Inputs: []shader.InputLocation{{Name: "vUV", Location: 0, Semantic: "TEXCOORD", SemanticIndex: 0, Type: 0x0, Size: 2}},
+ Textures: []shader.TextureBinding{{Name: "tex", Binding: 0}},
+ }
+ //go:embed zcopy.frag.0.spirv
+ zcopy_frag_0_spirv string
+ //go:embed zcopy.frag.0.glsl100es
+ zcopy_frag_0_glsl100es string
+ //go:embed zcopy.frag.0.glsl150
+ zcopy_frag_0_glsl150 string
+ //go:embed zcopy.frag.0.dxbc
+ zcopy_frag_0_dxbc string
+ //go:embed zcopy.frag.0.metallibmacos
+ zcopy_frag_0_metallibmacos string
+ //go:embed zcopy.frag.0.metallibios
+ zcopy_frag_0_metallibios string
+ //go:embed zcopy.frag.0.metallibiossimulator
+ zcopy_frag_0_metallibiossimulator string
+ Shader_copy_vert = shader.Sources{
+ Name: "copy.vert",
+ Inputs: []shader.InputLocation{{Name: "pos", Location: 0, Semantic: "TEXCOORD", SemanticIndex: 0, Type: 0x0, Size: 2}, {Name: "uv", Location: 1, Semantic: "TEXCOORD", SemanticIndex: 1, Type: 0x0, Size: 2}},
+ Uniforms: shader.UniformsReflection{
+ Locations: []shader.UniformLocation{{Name: "_block.scale", Type: 0x0, Size: 2, Offset: 0}, {Name: "_block.pos", Type: 0x0, Size: 2, Offset: 8}, {Name: "_block.uvScale", Type: 0x0, Size: 2, Offset: 16}},
+ Size: 24,
+ },
+ }
+ //go:embed zcopy.vert.0.spirv
+ zcopy_vert_0_spirv string
+ //go:embed zcopy.vert.0.glsl100es
+ zcopy_vert_0_glsl100es string
+ //go:embed zcopy.vert.0.glsl150
+ zcopy_vert_0_glsl150 string
+ //go:embed zcopy.vert.0.dxbc
+ zcopy_vert_0_dxbc string
+ //go:embed zcopy.vert.0.metallibmacos
+ zcopy_vert_0_metallibmacos string
+ //go:embed zcopy.vert.0.metallibios
+ zcopy_vert_0_metallibios string
+ //go:embed zcopy.vert.0.metallibiossimulator
+ zcopy_vert_0_metallibiossimulator string
+ Shader_cover_frag = [...]shader.Sources{
+ {
+ Name: "cover.frag",
+ Inputs: []shader.InputLocation{{Name: "vCoverUV", Location: 0, Semantic: "TEXCOORD", SemanticIndex: 0, Type: 0x0, Size: 2}, {Name: "vUV", Location: 1, Semantic: "TEXCOORD", SemanticIndex: 1, Type: 0x0, Size: 2}},
+ Uniforms: shader.UniformsReflection{
+ Locations: []shader.UniformLocation{{Name: "_color.color", Type: 0x0, Size: 4, Offset: 112}},
+ Size: 16,
+ },
+ Textures: []shader.TextureBinding{{Name: "cover", Binding: 1}},
+ },
+ {
+ Name: "cover.frag",
+ Inputs: []shader.InputLocation{{Name: "vCoverUV", Location: 0, Semantic: "TEXCOORD", SemanticIndex: 0, Type: 0x0, Size: 2}, {Name: "vUV", Location: 1, Semantic: "TEXCOORD", SemanticIndex: 1, Type: 0x0, Size: 2}},
+ Uniforms: shader.UniformsReflection{
+ Locations: []shader.UniformLocation{{Name: "_gradient.color1", Type: 0x0, Size: 4, Offset: 96}, {Name: "_gradient.color2", Type: 0x0, Size: 4, Offset: 112}},
+ Size: 32,
+ },
+ Textures: []shader.TextureBinding{{Name: "cover", Binding: 1}},
+ },
+ {
+ Name: "cover.frag",
+ Inputs: []shader.InputLocation{{Name: "vCoverUV", Location: 0, Semantic: "TEXCOORD", SemanticIndex: 0, Type: 0x0, Size: 2}, {Name: "vUV", Location: 1, Semantic: "TEXCOORD", SemanticIndex: 1, Type: 0x0, Size: 2}},
+ Textures: []shader.TextureBinding{{Name: "tex", Binding: 0}, {Name: "cover", Binding: 1}},
+ },
+ }
+ //go:embed zcover.frag.0.spirv
+ zcover_frag_0_spirv string
+ //go:embed zcover.frag.0.glsl100es
+ zcover_frag_0_glsl100es string
+ //go:embed zcover.frag.0.glsl150
+ zcover_frag_0_glsl150 string
+ //go:embed zcover.frag.0.dxbc
+ zcover_frag_0_dxbc string
+ //go:embed zcover.frag.0.metallibmacos
+ zcover_frag_0_metallibmacos string
+ //go:embed zcover.frag.0.metallibios
+ zcover_frag_0_metallibios string
+ //go:embed zcover.frag.0.metallibiossimulator
+ zcover_frag_0_metallibiossimulator string
+ //go:embed zcover.frag.1.spirv
+ zcover_frag_1_spirv string
+ //go:embed zcover.frag.1.glsl100es
+ zcover_frag_1_glsl100es string
+ //go:embed zcover.frag.1.glsl150
+ zcover_frag_1_glsl150 string
+ //go:embed zcover.frag.1.dxbc
+ zcover_frag_1_dxbc string
+ //go:embed zcover.frag.1.metallibmacos
+ zcover_frag_1_metallibmacos string
+ //go:embed zcover.frag.1.metallibios
+ zcover_frag_1_metallibios string
+ //go:embed zcover.frag.1.metallibiossimulator
+ zcover_frag_1_metallibiossimulator string
+ //go:embed zcover.frag.2.spirv
+ zcover_frag_2_spirv string
+ //go:embed zcover.frag.2.glsl100es
+ zcover_frag_2_glsl100es string
+ //go:embed zcover.frag.2.glsl150
+ zcover_frag_2_glsl150 string
+ //go:embed zcover.frag.2.dxbc
+ zcover_frag_2_dxbc string
+ //go:embed zcover.frag.2.metallibmacos
+ zcover_frag_2_metallibmacos string
+ //go:embed zcover.frag.2.metallibios
+ zcover_frag_2_metallibios string
+ //go:embed zcover.frag.2.metallibiossimulator
+ zcover_frag_2_metallibiossimulator string
+ Shader_cover_vert = shader.Sources{
+ Name: "cover.vert",
+ Inputs: []shader.InputLocation{{Name: "pos", Location: 0, Semantic: "TEXCOORD", SemanticIndex: 0, Type: 0x0, Size: 2}, {Name: "uv", Location: 1, Semantic: "TEXCOORD", SemanticIndex: 1, Type: 0x0, Size: 2}},
+ Uniforms: shader.UniformsReflection{
+ Locations: []shader.UniformLocation{{Name: "_block.transform", Type: 0x0, Size: 4, Offset: 0}, {Name: "_block.uvCoverTransform", Type: 0x0, Size: 4, Offset: 16}, {Name: "_block.uvTransformR1", Type: 0x0, Size: 4, Offset: 32}, {Name: "_block.uvTransformR2", Type: 0x0, Size: 4, Offset: 48}},
+ Size: 64,
+ },
+ }
+ //go:embed zcover.vert.0.spirv
+ zcover_vert_0_spirv string
+ //go:embed zcover.vert.0.glsl100es
+ zcover_vert_0_glsl100es string
+ //go:embed zcover.vert.0.glsl150
+ zcover_vert_0_glsl150 string
+ //go:embed zcover.vert.0.dxbc
+ zcover_vert_0_dxbc string
+ //go:embed zcover.vert.0.metallibmacos
+ zcover_vert_0_metallibmacos string
+ //go:embed zcover.vert.0.metallibios
+ zcover_vert_0_metallibios string
+ //go:embed zcover.vert.0.metallibiossimulator
+ zcover_vert_0_metallibiossimulator string
+ Shader_input_vert = shader.Sources{
+ Name: "input.vert",
+ Inputs: []shader.InputLocation{{Name: "position", Location: 0, Semantic: "TEXCOORD", SemanticIndex: 0, Type: 0x0, Size: 4}},
+ }
+ //go:embed zinput.vert.0.spirv
+ zinput_vert_0_spirv string
+ //go:embed zinput.vert.0.glsl100es
+ zinput_vert_0_glsl100es string
+ //go:embed zinput.vert.0.glsl150
+ zinput_vert_0_glsl150 string
+ //go:embed zinput.vert.0.dxbc
+ zinput_vert_0_dxbc string
+ //go:embed zinput.vert.0.metallibmacos
+ zinput_vert_0_metallibmacos string
+ //go:embed zinput.vert.0.metallibios
+ zinput_vert_0_metallibios string
+ //go:embed zinput.vert.0.metallibiossimulator
+ zinput_vert_0_metallibiossimulator string
+ Shader_intersect_frag = shader.Sources{
+ Name: "intersect.frag",
+ Inputs: []shader.InputLocation{{Name: "vUV", Location: 0, Semantic: "TEXCOORD", SemanticIndex: 0, Type: 0x0, Size: 2}},
+ Textures: []shader.TextureBinding{{Name: "cover", Binding: 0}},
+ }
+ //go:embed zintersect.frag.0.spirv
+ zintersect_frag_0_spirv string
+ //go:embed zintersect.frag.0.glsl100es
+ zintersect_frag_0_glsl100es string
+ //go:embed zintersect.frag.0.glsl150
+ zintersect_frag_0_glsl150 string
+ //go:embed zintersect.frag.0.dxbc
+ zintersect_frag_0_dxbc string
+ //go:embed zintersect.frag.0.metallibmacos
+ zintersect_frag_0_metallibmacos string
+ //go:embed zintersect.frag.0.metallibios
+ zintersect_frag_0_metallibios string
+ //go:embed zintersect.frag.0.metallibiossimulator
+ zintersect_frag_0_metallibiossimulator string
+ Shader_intersect_vert = shader.Sources{
+ Name: "intersect.vert",
+ Inputs: []shader.InputLocation{{Name: "pos", Location: 0, Semantic: "TEXCOORD", SemanticIndex: 0, Type: 0x0, Size: 2}, {Name: "uv", Location: 1, Semantic: "TEXCOORD", SemanticIndex: 1, Type: 0x0, Size: 2}},
+ Uniforms: shader.UniformsReflection{
+ Locations: []shader.UniformLocation{{Name: "_block.uvTransform", Type: 0x0, Size: 4, Offset: 0}, {Name: "_block.subUVTransform", Type: 0x0, Size: 4, Offset: 16}},
+ Size: 32,
+ },
+ }
+ //go:embed zintersect.vert.0.spirv
+ zintersect_vert_0_spirv string
+ //go:embed zintersect.vert.0.glsl100es
+ zintersect_vert_0_glsl100es string
+ //go:embed zintersect.vert.0.glsl150
+ zintersect_vert_0_glsl150 string
+ //go:embed zintersect.vert.0.dxbc
+ zintersect_vert_0_dxbc string
+ //go:embed zintersect.vert.0.metallibmacos
+ zintersect_vert_0_metallibmacos string
+ //go:embed zintersect.vert.0.metallibios
+ zintersect_vert_0_metallibios string
+ //go:embed zintersect.vert.0.metallibiossimulator
+ zintersect_vert_0_metallibiossimulator string
+ Shader_material_frag = shader.Sources{
+ Name: "material.frag",
+ Inputs: []shader.InputLocation{{Name: "vUV", Location: 0, Semantic: "TEXCOORD", SemanticIndex: 0, Type: 0x0, Size: 2}},
+ Uniforms: shader.UniformsReflection{
+ Locations: []shader.UniformLocation{{Name: "_color.emulateSRGB", Type: 0x0, Size: 1, Offset: 16}},
+ Size: 4,
+ },
+ Textures: []shader.TextureBinding{{Name: "tex", Binding: 0}},
+ }
+ //go:embed zmaterial.frag.0.spirv
+ zmaterial_frag_0_spirv string
+ //go:embed zmaterial.frag.0.glsl100es
+ zmaterial_frag_0_glsl100es string
+ //go:embed zmaterial.frag.0.glsl150
+ zmaterial_frag_0_glsl150 string
+ //go:embed zmaterial.frag.0.dxbc
+ zmaterial_frag_0_dxbc string
+ //go:embed zmaterial.frag.0.metallibmacos
+ zmaterial_frag_0_metallibmacos string
+ //go:embed zmaterial.frag.0.metallibios
+ zmaterial_frag_0_metallibios string
+ //go:embed zmaterial.frag.0.metallibiossimulator
+ zmaterial_frag_0_metallibiossimulator string
+ Shader_material_vert = shader.Sources{
+ Name: "material.vert",
+ Inputs: []shader.InputLocation{{Name: "pos", Location: 0, Semantic: "TEXCOORD", SemanticIndex: 0, Type: 0x0, Size: 2}, {Name: "uv", Location: 1, Semantic: "TEXCOORD", SemanticIndex: 1, Type: 0x0, Size: 2}},
+ Uniforms: shader.UniformsReflection{
+ Locations: []shader.UniformLocation{{Name: "_block.scale", Type: 0x0, Size: 2, Offset: 0}, {Name: "_block.pos", Type: 0x0, Size: 2, Offset: 8}},
+ Size: 16,
+ },
+ }
+ //go:embed zmaterial.vert.0.spirv
+ zmaterial_vert_0_spirv string
+ //go:embed zmaterial.vert.0.glsl100es
+ zmaterial_vert_0_glsl100es string
+ //go:embed zmaterial.vert.0.glsl150
+ zmaterial_vert_0_glsl150 string
+ //go:embed zmaterial.vert.0.dxbc
+ zmaterial_vert_0_dxbc string
+ //go:embed zmaterial.vert.0.metallibmacos
+ zmaterial_vert_0_metallibmacos string
+ //go:embed zmaterial.vert.0.metallibios
+ zmaterial_vert_0_metallibios string
+ //go:embed zmaterial.vert.0.metallibiossimulator
+ zmaterial_vert_0_metallibiossimulator string
+ Shader_simple_frag = shader.Sources{
+ Name: "simple.frag",
+ }
+ //go:embed zsimple.frag.0.spirv
+ zsimple_frag_0_spirv string
+ //go:embed zsimple.frag.0.glsl100es
+ zsimple_frag_0_glsl100es string
+ //go:embed zsimple.frag.0.glsl150
+ zsimple_frag_0_glsl150 string
+ //go:embed zsimple.frag.0.dxbc
+ zsimple_frag_0_dxbc string
+ //go:embed zsimple.frag.0.metallibmacos
+ zsimple_frag_0_metallibmacos string
+ //go:embed zsimple.frag.0.metallibios
+ zsimple_frag_0_metallibios string
+ //go:embed zsimple.frag.0.metallibiossimulator
+ zsimple_frag_0_metallibiossimulator string
+ Shader_stencil_frag = shader.Sources{
+ Name: "stencil.frag",
+ Inputs: []shader.InputLocation{{Name: "vFrom", Location: 0, Semantic: "TEXCOORD", SemanticIndex: 0, Type: 0x0, Size: 2}, {Name: "vCtrl", Location: 1, Semantic: "TEXCOORD", SemanticIndex: 1, Type: 0x0, Size: 2}, {Name: "vTo", Location: 2, Semantic: "TEXCOORD", SemanticIndex: 2, Type: 0x0, Size: 2}},
+ }
+ //go:embed zstencil.frag.0.spirv
+ zstencil_frag_0_spirv string
+ //go:embed zstencil.frag.0.glsl100es
+ zstencil_frag_0_glsl100es string
+ //go:embed zstencil.frag.0.glsl150
+ zstencil_frag_0_glsl150 string
+ //go:embed zstencil.frag.0.dxbc
+ zstencil_frag_0_dxbc string
+ //go:embed zstencil.frag.0.metallibmacos
+ zstencil_frag_0_metallibmacos string
+ //go:embed zstencil.frag.0.metallibios
+ zstencil_frag_0_metallibios string
+ //go:embed zstencil.frag.0.metallibiossimulator
+ zstencil_frag_0_metallibiossimulator string
+ Shader_stencil_vert = shader.Sources{
+ Name: "stencil.vert",
+ Inputs: []shader.InputLocation{{Name: "corner", Location: 0, Semantic: "TEXCOORD", SemanticIndex: 0, Type: 0x0, Size: 1}, {Name: "maxy", Location: 1, Semantic: "TEXCOORD", SemanticIndex: 1, Type: 0x0, Size: 1}, {Name: "from", Location: 2, Semantic: "TEXCOORD", SemanticIndex: 2, Type: 0x0, Size: 2}, {Name: "ctrl", Location: 3, Semantic: "TEXCOORD", SemanticIndex: 3, Type: 0x0, Size: 2}, {Name: "to", Location: 4, Semantic: "TEXCOORD", SemanticIndex: 4, Type: 0x0, Size: 2}},
+ Uniforms: shader.UniformsReflection{
+ Locations: []shader.UniformLocation{{Name: "_block.transform", Type: 0x0, Size: 4, Offset: 0}, {Name: "_block.pathOffset", Type: 0x0, Size: 2, Offset: 16}},
+ Size: 24,
+ },
+ }
+ //go:embed zstencil.vert.0.spirv
+ zstencil_vert_0_spirv string
+ //go:embed zstencil.vert.0.glsl100es
+ zstencil_vert_0_glsl100es string
+ //go:embed zstencil.vert.0.glsl150
+ zstencil_vert_0_glsl150 string
+ //go:embed zstencil.vert.0.dxbc
+ zstencil_vert_0_dxbc string
+ //go:embed zstencil.vert.0.metallibmacos
+ zstencil_vert_0_metallibmacos string
+ //go:embed zstencil.vert.0.metallibios
+ zstencil_vert_0_metallibios string
+ //go:embed zstencil.vert.0.metallibiossimulator
+ zstencil_vert_0_metallibiossimulator string
+)
+
+func init() {
+ const (
+ opengles = runtime.GOOS == "linux" || runtime.GOOS == "freebsd" || runtime.GOOS == "openbsd" || runtime.GOOS == "windows" || runtime.GOOS == "js" || runtime.GOOS == "android" || runtime.GOOS == "darwin" || runtime.GOOS == "ios"
+ opengl = runtime.GOOS == "darwin"
+ d3d11 = runtime.GOOS == "windows"
+ vulkan = runtime.GOOS == "linux" || runtime.GOOS == "android"
+ )
+ if vulkan {
+ Shader_blit_frag[0].SPIRV = zblit_frag_0_spirv
+ }
+ if opengles {
+ Shader_blit_frag[0].GLSL100ES = zblit_frag_0_glsl100es
+ }
+ if opengl {
+ Shader_blit_frag[0].GLSL150 = zblit_frag_0_glsl150
+ }
+ if d3d11 {
+ Shader_blit_frag[0].DXBC = zblit_frag_0_dxbc
+ }
+ if runtime.GOOS == "darwin" {
+ Shader_blit_frag[0].MetalLib = zblit_frag_0_metallibmacos
+ }
+ if runtime.GOOS == "ios" {
+ if runtime.GOARCH == "amd64" {
+ Shader_blit_frag[0].MetalLib = zblit_frag_0_metallibiossimulator
+ } else {
+ Shader_blit_frag[0].MetalLib = zblit_frag_0_metallibios
+ }
+ }
+ if vulkan {
+ Shader_blit_frag[1].SPIRV = zblit_frag_1_spirv
+ }
+ if opengles {
+ Shader_blit_frag[1].GLSL100ES = zblit_frag_1_glsl100es
+ }
+ if opengl {
+ Shader_blit_frag[1].GLSL150 = zblit_frag_1_glsl150
+ }
+ if d3d11 {
+ Shader_blit_frag[1].DXBC = zblit_frag_1_dxbc
+ }
+ if runtime.GOOS == "darwin" {
+ Shader_blit_frag[1].MetalLib = zblit_frag_1_metallibmacos
+ }
+ if runtime.GOOS == "ios" {
+ if runtime.GOARCH == "amd64" {
+ Shader_blit_frag[1].MetalLib = zblit_frag_1_metallibiossimulator
+ } else {
+ Shader_blit_frag[1].MetalLib = zblit_frag_1_metallibios
+ }
+ }
+ if vulkan {
+ Shader_blit_frag[2].SPIRV = zblit_frag_2_spirv
+ }
+ if opengles {
+ Shader_blit_frag[2].GLSL100ES = zblit_frag_2_glsl100es
+ }
+ if opengl {
+ Shader_blit_frag[2].GLSL150 = zblit_frag_2_glsl150
+ }
+ if d3d11 {
+ Shader_blit_frag[2].DXBC = zblit_frag_2_dxbc
+ }
+ if runtime.GOOS == "darwin" {
+ Shader_blit_frag[2].MetalLib = zblit_frag_2_metallibmacos
+ }
+ if runtime.GOOS == "ios" {
+ if runtime.GOARCH == "amd64" {
+ Shader_blit_frag[2].MetalLib = zblit_frag_2_metallibiossimulator
+ } else {
+ Shader_blit_frag[2].MetalLib = zblit_frag_2_metallibios
+ }
+ }
+ if vulkan {
+ Shader_blit_vert.SPIRV = zblit_vert_0_spirv
+ }
+ if opengles {
+ Shader_blit_vert.GLSL100ES = zblit_vert_0_glsl100es
+ }
+ if opengl {
+ Shader_blit_vert.GLSL150 = zblit_vert_0_glsl150
+ }
+ if d3d11 {
+ Shader_blit_vert.DXBC = zblit_vert_0_dxbc
+ }
+ if runtime.GOOS == "darwin" {
+ Shader_blit_vert.MetalLib = zblit_vert_0_metallibmacos
+ }
+ if runtime.GOOS == "ios" {
+ if runtime.GOARCH == "amd64" {
+ Shader_blit_vert.MetalLib = zblit_vert_0_metallibiossimulator
+ } else {
+ Shader_blit_vert.MetalLib = zblit_vert_0_metallibios
+ }
+ }
+ if vulkan {
+ Shader_copy_frag.SPIRV = zcopy_frag_0_spirv
+ }
+ if opengles {
+ Shader_copy_frag.GLSL100ES = zcopy_frag_0_glsl100es
+ }
+ if opengl {
+ Shader_copy_frag.GLSL150 = zcopy_frag_0_glsl150
+ }
+ if d3d11 {
+ Shader_copy_frag.DXBC = zcopy_frag_0_dxbc
+ }
+ if runtime.GOOS == "darwin" {
+ Shader_copy_frag.MetalLib = zcopy_frag_0_metallibmacos
+ }
+ if runtime.GOOS == "ios" {
+ if runtime.GOARCH == "amd64" {
+ Shader_copy_frag.MetalLib = zcopy_frag_0_metallibiossimulator
+ } else {
+ Shader_copy_frag.MetalLib = zcopy_frag_0_metallibios
+ }
+ }
+ if vulkan {
+ Shader_copy_vert.SPIRV = zcopy_vert_0_spirv
+ }
+ if opengles {
+ Shader_copy_vert.GLSL100ES = zcopy_vert_0_glsl100es
+ }
+ if opengl {
+ Shader_copy_vert.GLSL150 = zcopy_vert_0_glsl150
+ }
+ if d3d11 {
+ Shader_copy_vert.DXBC = zcopy_vert_0_dxbc
+ }
+ if runtime.GOOS == "darwin" {
+ Shader_copy_vert.MetalLib = zcopy_vert_0_metallibmacos
+ }
+ if runtime.GOOS == "ios" {
+ if runtime.GOARCH == "amd64" {
+ Shader_copy_vert.MetalLib = zcopy_vert_0_metallibiossimulator
+ } else {
+ Shader_copy_vert.MetalLib = zcopy_vert_0_metallibios
+ }
+ }
+ if vulkan {
+ Shader_cover_frag[0].SPIRV = zcover_frag_0_spirv
+ }
+ if opengles {
+ Shader_cover_frag[0].GLSL100ES = zcover_frag_0_glsl100es
+ }
+ if opengl {
+ Shader_cover_frag[0].GLSL150 = zcover_frag_0_glsl150
+ }
+ if d3d11 {
+ Shader_cover_frag[0].DXBC = zcover_frag_0_dxbc
+ }
+ if runtime.GOOS == "darwin" {
+ Shader_cover_frag[0].MetalLib = zcover_frag_0_metallibmacos
+ }
+ if runtime.GOOS == "ios" {
+ if runtime.GOARCH == "amd64" {
+ Shader_cover_frag[0].MetalLib = zcover_frag_0_metallibiossimulator
+ } else {
+ Shader_cover_frag[0].MetalLib = zcover_frag_0_metallibios
+ }
+ }
+ if vulkan {
+ Shader_cover_frag[1].SPIRV = zcover_frag_1_spirv
+ }
+ if opengles {
+ Shader_cover_frag[1].GLSL100ES = zcover_frag_1_glsl100es
+ }
+ if opengl {
+ Shader_cover_frag[1].GLSL150 = zcover_frag_1_glsl150
+ }
+ if d3d11 {
+ Shader_cover_frag[1].DXBC = zcover_frag_1_dxbc
+ }
+ if runtime.GOOS == "darwin" {
+ Shader_cover_frag[1].MetalLib = zcover_frag_1_metallibmacos
+ }
+ if runtime.GOOS == "ios" {
+ if runtime.GOARCH == "amd64" {
+ Shader_cover_frag[1].MetalLib = zcover_frag_1_metallibiossimulator
+ } else {
+ Shader_cover_frag[1].MetalLib = zcover_frag_1_metallibios
+ }
+ }
+ if vulkan {
+ Shader_cover_frag[2].SPIRV = zcover_frag_2_spirv
+ }
+ if opengles {
+ Shader_cover_frag[2].GLSL100ES = zcover_frag_2_glsl100es
+ }
+ if opengl {
+ Shader_cover_frag[2].GLSL150 = zcover_frag_2_glsl150
+ }
+ if d3d11 {
+ Shader_cover_frag[2].DXBC = zcover_frag_2_dxbc
+ }
+ if runtime.GOOS == "darwin" {
+ Shader_cover_frag[2].MetalLib = zcover_frag_2_metallibmacos
+ }
+ if runtime.GOOS == "ios" {
+ if runtime.GOARCH == "amd64" {
+ Shader_cover_frag[2].MetalLib = zcover_frag_2_metallibiossimulator
+ } else {
+ Shader_cover_frag[2].MetalLib = zcover_frag_2_metallibios
+ }
+ }
+ if vulkan {
+ Shader_cover_vert.SPIRV = zcover_vert_0_spirv
+ }
+ if opengles {
+ Shader_cover_vert.GLSL100ES = zcover_vert_0_glsl100es
+ }
+ if opengl {
+ Shader_cover_vert.GLSL150 = zcover_vert_0_glsl150
+ }
+ if d3d11 {
+ Shader_cover_vert.DXBC = zcover_vert_0_dxbc
+ }
+ if runtime.GOOS == "darwin" {
+ Shader_cover_vert.MetalLib = zcover_vert_0_metallibmacos
+ }
+ if runtime.GOOS == "ios" {
+ if runtime.GOARCH == "amd64" {
+ Shader_cover_vert.MetalLib = zcover_vert_0_metallibiossimulator
+ } else {
+ Shader_cover_vert.MetalLib = zcover_vert_0_metallibios
+ }
+ }
+ if vulkan {
+ Shader_input_vert.SPIRV = zinput_vert_0_spirv
+ }
+ if opengles {
+ Shader_input_vert.GLSL100ES = zinput_vert_0_glsl100es
+ }
+ if opengl {
+ Shader_input_vert.GLSL150 = zinput_vert_0_glsl150
+ }
+ if d3d11 {
+ Shader_input_vert.DXBC = zinput_vert_0_dxbc
+ }
+ if runtime.GOOS == "darwin" {
+ Shader_input_vert.MetalLib = zinput_vert_0_metallibmacos
+ }
+ if runtime.GOOS == "ios" {
+ if runtime.GOARCH == "amd64" {
+ Shader_input_vert.MetalLib = zinput_vert_0_metallibiossimulator
+ } else {
+ Shader_input_vert.MetalLib = zinput_vert_0_metallibios
+ }
+ }
+ if vulkan {
+ Shader_intersect_frag.SPIRV = zintersect_frag_0_spirv
+ }
+ if opengles {
+ Shader_intersect_frag.GLSL100ES = zintersect_frag_0_glsl100es
+ }
+ if opengl {
+ Shader_intersect_frag.GLSL150 = zintersect_frag_0_glsl150
+ }
+ if d3d11 {
+ Shader_intersect_frag.DXBC = zintersect_frag_0_dxbc
+ }
+ if runtime.GOOS == "darwin" {
+ Shader_intersect_frag.MetalLib = zintersect_frag_0_metallibmacos
+ }
+ if runtime.GOOS == "ios" {
+ if runtime.GOARCH == "amd64" {
+ Shader_intersect_frag.MetalLib = zintersect_frag_0_metallibiossimulator
+ } else {
+ Shader_intersect_frag.MetalLib = zintersect_frag_0_metallibios
+ }
+ }
+ if vulkan {
+ Shader_intersect_vert.SPIRV = zintersect_vert_0_spirv
+ }
+ if opengles {
+ Shader_intersect_vert.GLSL100ES = zintersect_vert_0_glsl100es
+ }
+ if opengl {
+ Shader_intersect_vert.GLSL150 = zintersect_vert_0_glsl150
+ }
+ if d3d11 {
+ Shader_intersect_vert.DXBC = zintersect_vert_0_dxbc
+ }
+ if runtime.GOOS == "darwin" {
+ Shader_intersect_vert.MetalLib = zintersect_vert_0_metallibmacos
+ }
+ if runtime.GOOS == "ios" {
+ if runtime.GOARCH == "amd64" {
+ Shader_intersect_vert.MetalLib = zintersect_vert_0_metallibiossimulator
+ } else {
+ Shader_intersect_vert.MetalLib = zintersect_vert_0_metallibios
+ }
+ }
+ if vulkan {
+ Shader_material_frag.SPIRV = zmaterial_frag_0_spirv
+ }
+ if opengles {
+ Shader_material_frag.GLSL100ES = zmaterial_frag_0_glsl100es
+ }
+ if opengl {
+ Shader_material_frag.GLSL150 = zmaterial_frag_0_glsl150
+ }
+ if d3d11 {
+ Shader_material_frag.DXBC = zmaterial_frag_0_dxbc
+ }
+ if runtime.GOOS == "darwin" {
+ Shader_material_frag.MetalLib = zmaterial_frag_0_metallibmacos
+ }
+ if runtime.GOOS == "ios" {
+ if runtime.GOARCH == "amd64" {
+ Shader_material_frag.MetalLib = zmaterial_frag_0_metallibiossimulator
+ } else {
+ Shader_material_frag.MetalLib = zmaterial_frag_0_metallibios
+ }
+ }
+ if vulkan {
+ Shader_material_vert.SPIRV = zmaterial_vert_0_spirv
+ }
+ if opengles {
+ Shader_material_vert.GLSL100ES = zmaterial_vert_0_glsl100es
+ }
+ if opengl {
+ Shader_material_vert.GLSL150 = zmaterial_vert_0_glsl150
+ }
+ if d3d11 {
+ Shader_material_vert.DXBC = zmaterial_vert_0_dxbc
+ }
+ if runtime.GOOS == "darwin" {
+ Shader_material_vert.MetalLib = zmaterial_vert_0_metallibmacos
+ }
+ if runtime.GOOS == "ios" {
+ if runtime.GOARCH == "amd64" {
+ Shader_material_vert.MetalLib = zmaterial_vert_0_metallibiossimulator
+ } else {
+ Shader_material_vert.MetalLib = zmaterial_vert_0_metallibios
+ }
+ }
+ if vulkan {
+ Shader_simple_frag.SPIRV = zsimple_frag_0_spirv
+ }
+ if opengles {
+ Shader_simple_frag.GLSL100ES = zsimple_frag_0_glsl100es
+ }
+ if opengl {
+ Shader_simple_frag.GLSL150 = zsimple_frag_0_glsl150
+ }
+ if d3d11 {
+ Shader_simple_frag.DXBC = zsimple_frag_0_dxbc
+ }
+ if runtime.GOOS == "darwin" {
+ Shader_simple_frag.MetalLib = zsimple_frag_0_metallibmacos
+ }
+ if runtime.GOOS == "ios" {
+ if runtime.GOARCH == "amd64" {
+ Shader_simple_frag.MetalLib = zsimple_frag_0_metallibiossimulator
+ } else {
+ Shader_simple_frag.MetalLib = zsimple_frag_0_metallibios
+ }
+ }
+ if vulkan {
+ Shader_stencil_frag.SPIRV = zstencil_frag_0_spirv
+ }
+ if opengles {
+ Shader_stencil_frag.GLSL100ES = zstencil_frag_0_glsl100es
+ }
+ if opengl {
+ Shader_stencil_frag.GLSL150 = zstencil_frag_0_glsl150
+ }
+ if d3d11 {
+ Shader_stencil_frag.DXBC = zstencil_frag_0_dxbc
+ }
+ if runtime.GOOS == "darwin" {
+ Shader_stencil_frag.MetalLib = zstencil_frag_0_metallibmacos
+ }
+ if runtime.GOOS == "ios" {
+ if runtime.GOARCH == "amd64" {
+ Shader_stencil_frag.MetalLib = zstencil_frag_0_metallibiossimulator
+ } else {
+ Shader_stencil_frag.MetalLib = zstencil_frag_0_metallibios
+ }
+ }
+ if vulkan {
+ Shader_stencil_vert.SPIRV = zstencil_vert_0_spirv
+ }
+ if opengles {
+ Shader_stencil_vert.GLSL100ES = zstencil_vert_0_glsl100es
+ }
+ if opengl {
+ Shader_stencil_vert.GLSL150 = zstencil_vert_0_glsl150
+ }
+ if d3d11 {
+ Shader_stencil_vert.DXBC = zstencil_vert_0_dxbc
+ }
+ if runtime.GOOS == "darwin" {
+ Shader_stencil_vert.MetalLib = zstencil_vert_0_metallibmacos
+ }
+ if runtime.GOOS == "ios" {
+ if runtime.GOARCH == "amd64" {
+ Shader_stencil_vert.MetalLib = zstencil_vert_0_metallibiossimulator
+ } else {
+ Shader_stencil_vert.MetalLib = zstencil_vert_0_metallibios
+ }
+ }
+}
diff --git a/vendor/gioui.org/shader/gio/simple.frag b/vendor/gioui.org/shader/gio/simple.frag
new file mode 100644
index 0000000..4614f33
--- /dev/null
+++ b/vendor/gioui.org/shader/gio/simple.frag
@@ -0,0 +1,11 @@
+#version 310 es
+
+// SPDX-License-Identifier: Unlicense OR MIT
+
+precision mediump float;
+
+layout(location = 0) out vec4 fragColor;
+
+void main() {
+ fragColor = vec4(.25, .55, .75, 1.0);
+}
diff --git a/vendor/gioui.org/shader/gio/stencil.frag b/vendor/gioui.org/shader/gio/stencil.frag
new file mode 100644
index 0000000..956dae8
--- /dev/null
+++ b/vendor/gioui.org/shader/gio/stencil.frag
@@ -0,0 +1,81 @@
+#version 310 es
+
+// SPDX-License-Identifier: Unlicense OR MIT
+
+precision mediump float;
+
+layout(location=0) in highp vec2 vFrom;
+layout(location=1) in highp vec2 vCtrl;
+layout(location=2) in highp vec2 vTo;
+
+layout(location = 0) out vec4 fragCover;
+
+void main() {
+ float dx = vTo.x - vFrom.x;
+ // Sort from and to in increasing order so the root below
+ // is always the positive square root, if any.
+ // We need the direction of the curve below, so this can't be
+ // done from the vertex shader.
+ bool increasing = vTo.x >= vFrom.x;
+ vec2 left = increasing ? vFrom : vTo;
+ vec2 right = increasing ? vTo : vFrom;
+
+ // The signed horizontal extent of the fragment.
+ vec2 extent = clamp(vec2(vFrom.x, vTo.x), -0.5, 0.5);
+ // Find the t where the curve crosses the middle of the
+ // extent, x₀.
+ // Given the Bézier curve with x coordinates P₀, P₁, P₂
+ // where P₀ is at the origin, its x coordinate in t
+ // is given by:
+ //
+ // x(t) = 2(1-t)tP₁ + t²P₂
+ //
+ // Rearranging:
+ //
+ // x(t) = (P₂ - 2P₁)t² + 2P₁t
+ //
+ // Setting x(t) = x₀ and using Muller's quadratic formula ("Citardauq")
+ // for robustnesss,
+ //
+ // t = 2x₀/(2P₁±√(4P₁²+4(P₂-2P₁)x₀))
+ //
+ // which simplifies to
+ //
+ // t = x₀/(P₁±√(P₁²+(P₂-2P₁)x₀))
+ //
+ // Setting v = P₂-P₁,
+ //
+ // t = x₀/(P₁±√(P₁²+(v-P₁)x₀))
+ //
+ // t lie in [0; 1]; P₂ ≥ P₁ and P₁ ≥ 0 since we split curves where
+ // the control point lies before the start point or after the end point.
+ // It can then be shown that only the positive square root is valid.
+ float midx = mix(extent.x, extent.y, 0.5);
+ float x0 = midx - left.x;
+ vec2 p1 = vCtrl - left;
+ vec2 v = right - vCtrl;
+ float t = x0/(p1.x+sqrt(p1.x*p1.x+(v.x-p1.x)*x0));
+ // Find y(t) on the curve.
+ float y = mix(mix(left.y, vCtrl.y, t), mix(vCtrl.y, right.y, t), t);
+ // And the slope.
+ vec2 d_half = mix(p1, v, t);
+ float dy = d_half.y/d_half.x;
+ // Together, y and dy form a line approximation.
+
+ // Compute the fragment area above the line.
+ // The area is symmetric around dy = 0. Scale slope with extent width.
+ float width = extent.y - extent.x;
+ dy = abs(dy*width);
+
+ vec4 sides = vec4(dy*+0.5 + y, dy*-0.5 + y, (+0.5-y)/dy, (-0.5-y)/dy);
+ sides = clamp(sides+0.5, 0.0, 1.0);
+
+ float area = 0.5*(sides.z - sides.z*sides.y + 1.0 - sides.x+sides.x*sides.w);
+ area *= width;
+
+ // Work around issue #13.
+ if (width == 0.0)
+ area = 0.0;
+
+ fragCover.r = area;
+}
diff --git a/vendor/gioui.org/shader/gio/stencil.vert b/vendor/gioui.org/shader/gio/stencil.vert
new file mode 100644
index 0000000..fd6f28b
--- /dev/null
+++ b/vendor/gioui.org/shader/gio/stencil.vert
@@ -0,0 +1,57 @@
+#version 310 es
+
+// SPDX-License-Identifier: Unlicense OR MIT
+
+#extension GL_GOOGLE_include_directive : enable
+
+precision highp float;
+
+#include "common.h"
+
+layout(push_constant) uniform Block {
+ vec4 transform;
+ vec2 pathOffset;
+} _block;
+
+layout(location=0) in float corner;
+layout(location=1) in float maxy;
+layout(location=2) in vec2 from;
+layout(location=3) in vec2 ctrl;
+layout(location=4) in vec2 to;
+
+layout(location=0) out vec2 vFrom;
+layout(location=1) out vec2 vCtrl;
+layout(location=2) out vec2 vTo;
+
+void main() {
+ // Add a one pixel overlap so curve quads cover their
+ // entire curves. Could use conservative rasterization
+ // if available.
+ vec2 from = from + _block.pathOffset;
+ vec2 ctrl = ctrl + _block.pathOffset;
+ vec2 to = to + _block.pathOffset;
+ float maxy = maxy + _block.pathOffset.y;
+ vec2 pos;
+ float c = corner;
+ if (c >= 0.375) {
+ // North.
+ c -= 0.5;
+ pos.y = maxy + 1.0;
+ } else {
+ // South.
+ pos.y = min(min(from.y, ctrl.y), to.y) - 1.0;
+ }
+ if (c >= 0.125) {
+ // East.
+ pos.x = max(max(from.x, ctrl.x), to.x)+1.0;
+ } else {
+ // West.
+ pos.x = min(min(from.x, ctrl.x), to.x)-1.0;
+ }
+ vFrom = from-pos;
+ vCtrl = ctrl-pos;
+ vTo = to-pos;
+ pos = pos*_block.transform.xy + _block.transform.zw;
+ gl_Position = vec4(transform3x2(fboTransform, vec3(pos, 0)), 1);
+}
+
diff --git a/vendor/gioui.org/shader/gio/zblit.frag.0.dxbc b/vendor/gioui.org/shader/gio/zblit.frag.0.dxbc
new file mode 100644
index 0000000..45e8355
Binary files /dev/null and b/vendor/gioui.org/shader/gio/zblit.frag.0.dxbc differ
diff --git a/vendor/gioui.org/shader/gio/zblit.frag.0.glsl100es b/vendor/gioui.org/shader/gio/zblit.frag.0.glsl100es
new file mode 100644
index 0000000..d7ca41a
--- /dev/null
+++ b/vendor/gioui.org/shader/gio/zblit.frag.0.glsl100es
@@ -0,0 +1,18 @@
+#version 100
+precision mediump float;
+precision highp int;
+
+struct Color
+{
+ vec4 color;
+};
+
+uniform Color _color;
+
+varying highp vec2 vUV;
+
+void main()
+{
+ gl_FragData[0] = _color.color;
+}
+
diff --git a/vendor/gioui.org/shader/gio/zblit.frag.0.glsl150 b/vendor/gioui.org/shader/gio/zblit.frag.0.glsl150
new file mode 100644
index 0000000..9559c85
--- /dev/null
+++ b/vendor/gioui.org/shader/gio/zblit.frag.0.glsl150
@@ -0,0 +1,17 @@
+#version 150
+
+struct Color
+{
+ vec4 color;
+};
+
+uniform Color _color;
+
+out vec4 fragColor;
+in vec2 vUV;
+
+void main()
+{
+ fragColor = _color.color;
+}
+
diff --git a/vendor/gioui.org/shader/gio/zblit.frag.0.metallibios b/vendor/gioui.org/shader/gio/zblit.frag.0.metallibios
new file mode 100644
index 0000000..63e606f
Binary files /dev/null and b/vendor/gioui.org/shader/gio/zblit.frag.0.metallibios differ
diff --git a/vendor/gioui.org/shader/gio/zblit.frag.0.metallibiossimulator b/vendor/gioui.org/shader/gio/zblit.frag.0.metallibiossimulator
new file mode 100644
index 0000000..d8d2589
Binary files /dev/null and b/vendor/gioui.org/shader/gio/zblit.frag.0.metallibiossimulator differ
diff --git a/vendor/gioui.org/shader/gio/zblit.frag.0.metallibmacos b/vendor/gioui.org/shader/gio/zblit.frag.0.metallibmacos
new file mode 100644
index 0000000..85946aa
Binary files /dev/null and b/vendor/gioui.org/shader/gio/zblit.frag.0.metallibmacos differ
diff --git a/vendor/gioui.org/shader/gio/zblit.frag.0.spirv b/vendor/gioui.org/shader/gio/zblit.frag.0.spirv
new file mode 100644
index 0000000..c2a88d2
Binary files /dev/null and b/vendor/gioui.org/shader/gio/zblit.frag.0.spirv differ
diff --git a/vendor/gioui.org/shader/gio/zblit.frag.1.dxbc b/vendor/gioui.org/shader/gio/zblit.frag.1.dxbc
new file mode 100644
index 0000000..ddb8dad
Binary files /dev/null and b/vendor/gioui.org/shader/gio/zblit.frag.1.dxbc differ
diff --git a/vendor/gioui.org/shader/gio/zblit.frag.1.glsl100es b/vendor/gioui.org/shader/gio/zblit.frag.1.glsl100es
new file mode 100644
index 0000000..ccc6d2a
--- /dev/null
+++ b/vendor/gioui.org/shader/gio/zblit.frag.1.glsl100es
@@ -0,0 +1,19 @@
+#version 100
+precision mediump float;
+precision highp int;
+
+struct Gradient
+{
+ vec4 color1;
+ vec4 color2;
+};
+
+uniform Gradient _gradient;
+
+varying highp vec2 vUV;
+
+void main()
+{
+ gl_FragData[0] = mix(_gradient.color1, _gradient.color2, vec4(clamp(vUV.x, 0.0, 1.0)));
+}
+
diff --git a/vendor/gioui.org/shader/gio/zblit.frag.1.glsl150 b/vendor/gioui.org/shader/gio/zblit.frag.1.glsl150
new file mode 100644
index 0000000..a55f29f
--- /dev/null
+++ b/vendor/gioui.org/shader/gio/zblit.frag.1.glsl150
@@ -0,0 +1,18 @@
+#version 150
+
+struct Gradient
+{
+ vec4 color1;
+ vec4 color2;
+};
+
+uniform Gradient _gradient;
+
+out vec4 fragColor;
+in vec2 vUV;
+
+void main()
+{
+ fragColor = mix(_gradient.color1, _gradient.color2, vec4(clamp(vUV.x, 0.0, 1.0)));
+}
+
diff --git a/vendor/gioui.org/shader/gio/zblit.frag.1.metallibios b/vendor/gioui.org/shader/gio/zblit.frag.1.metallibios
new file mode 100644
index 0000000..680c495
Binary files /dev/null and b/vendor/gioui.org/shader/gio/zblit.frag.1.metallibios differ
diff --git a/vendor/gioui.org/shader/gio/zblit.frag.1.metallibiossimulator b/vendor/gioui.org/shader/gio/zblit.frag.1.metallibiossimulator
new file mode 100644
index 0000000..ef5ac86
Binary files /dev/null and b/vendor/gioui.org/shader/gio/zblit.frag.1.metallibiossimulator differ
diff --git a/vendor/gioui.org/shader/gio/zblit.frag.1.metallibmacos b/vendor/gioui.org/shader/gio/zblit.frag.1.metallibmacos
new file mode 100644
index 0000000..5fbd2c2
Binary files /dev/null and b/vendor/gioui.org/shader/gio/zblit.frag.1.metallibmacos differ
diff --git a/vendor/gioui.org/shader/gio/zblit.frag.1.spirv b/vendor/gioui.org/shader/gio/zblit.frag.1.spirv
new file mode 100644
index 0000000..88128fc
Binary files /dev/null and b/vendor/gioui.org/shader/gio/zblit.frag.1.spirv differ
diff --git a/vendor/gioui.org/shader/gio/zblit.frag.2.dxbc b/vendor/gioui.org/shader/gio/zblit.frag.2.dxbc
new file mode 100644
index 0000000..fd95db8
Binary files /dev/null and b/vendor/gioui.org/shader/gio/zblit.frag.2.dxbc differ
diff --git a/vendor/gioui.org/shader/gio/zblit.frag.2.glsl100es b/vendor/gioui.org/shader/gio/zblit.frag.2.glsl100es
new file mode 100644
index 0000000..5c2d832
--- /dev/null
+++ b/vendor/gioui.org/shader/gio/zblit.frag.2.glsl100es
@@ -0,0 +1,13 @@
+#version 100
+precision mediump float;
+precision highp int;
+
+uniform mediump sampler2D tex;
+
+varying highp vec2 vUV;
+
+void main()
+{
+ gl_FragData[0] = texture2D(tex, vUV);
+}
+
diff --git a/vendor/gioui.org/shader/gio/zblit.frag.2.glsl150 b/vendor/gioui.org/shader/gio/zblit.frag.2.glsl150
new file mode 100644
index 0000000..f5df869
--- /dev/null
+++ b/vendor/gioui.org/shader/gio/zblit.frag.2.glsl150
@@ -0,0 +1,12 @@
+#version 150
+
+uniform sampler2D tex;
+
+out vec4 fragColor;
+in vec2 vUV;
+
+void main()
+{
+ fragColor = texture(tex, vUV);
+}
+
diff --git a/vendor/gioui.org/shader/gio/zblit.frag.2.metallibios b/vendor/gioui.org/shader/gio/zblit.frag.2.metallibios
new file mode 100644
index 0000000..c4eb922
Binary files /dev/null and b/vendor/gioui.org/shader/gio/zblit.frag.2.metallibios differ
diff --git a/vendor/gioui.org/shader/gio/zblit.frag.2.metallibiossimulator b/vendor/gioui.org/shader/gio/zblit.frag.2.metallibiossimulator
new file mode 100644
index 0000000..81f7aa3
Binary files /dev/null and b/vendor/gioui.org/shader/gio/zblit.frag.2.metallibiossimulator differ
diff --git a/vendor/gioui.org/shader/gio/zblit.frag.2.metallibmacos b/vendor/gioui.org/shader/gio/zblit.frag.2.metallibmacos
new file mode 100644
index 0000000..70b3da8
Binary files /dev/null and b/vendor/gioui.org/shader/gio/zblit.frag.2.metallibmacos differ
diff --git a/vendor/gioui.org/shader/gio/zblit.frag.2.spirv b/vendor/gioui.org/shader/gio/zblit.frag.2.spirv
new file mode 100644
index 0000000..efa380b
Binary files /dev/null and b/vendor/gioui.org/shader/gio/zblit.frag.2.spirv differ
diff --git a/vendor/gioui.org/shader/gio/zblit.vert.0.dxbc b/vendor/gioui.org/shader/gio/zblit.vert.0.dxbc
new file mode 100644
index 0000000..5ad9601
Binary files /dev/null and b/vendor/gioui.org/shader/gio/zblit.vert.0.dxbc differ
diff --git a/vendor/gioui.org/shader/gio/zblit.vert.0.glsl100es b/vendor/gioui.org/shader/gio/zblit.vert.0.glsl100es
new file mode 100644
index 0000000..61dee04
--- /dev/null
+++ b/vendor/gioui.org/shader/gio/zblit.vert.0.glsl100es
@@ -0,0 +1,37 @@
+#version 100
+
+struct m3x2
+{
+ vec3 r0;
+ vec3 r1;
+};
+
+struct Block
+{
+ vec4 transform;
+ vec4 uvTransformR1;
+ vec4 uvTransformR2;
+};
+
+uniform Block _block;
+
+attribute vec2 pos;
+varying vec2 vUV;
+attribute vec2 uv;
+
+vec3 transform3x2(m3x2 t, vec3 v)
+{
+ return vec3(dot(t.r0, v), dot(t.r1, v), dot(vec3(0.0, 0.0, 1.0), v));
+}
+
+void main()
+{
+ vec2 p = (pos * _block.transform.xy) + _block.transform.zw;
+ m3x2 param = m3x2(vec3(1.0, 0.0, 0.0), vec3(0.0, -1.0, 0.0));
+ vec3 param_1 = vec3(p, 0.0);
+ gl_Position = vec4(transform3x2(param, param_1), 1.0);
+ m3x2 param_2 = m3x2(_block.uvTransformR1.xyz, _block.uvTransformR2.xyz);
+ vec3 param_3 = vec3(uv, 1.0);
+ vUV = transform3x2(param_2, param_3).xy;
+}
+
diff --git a/vendor/gioui.org/shader/gio/zblit.vert.0.glsl150 b/vendor/gioui.org/shader/gio/zblit.vert.0.glsl150
new file mode 100644
index 0000000..eba9f1a
--- /dev/null
+++ b/vendor/gioui.org/shader/gio/zblit.vert.0.glsl150
@@ -0,0 +1,37 @@
+#version 150
+
+struct m3x2
+{
+ vec3 r0;
+ vec3 r1;
+};
+
+struct Block
+{
+ vec4 transform;
+ vec4 uvTransformR1;
+ vec4 uvTransformR2;
+};
+
+uniform Block _block;
+
+in vec2 pos;
+out vec2 vUV;
+in vec2 uv;
+
+vec3 transform3x2(m3x2 t, vec3 v)
+{
+ return vec3(dot(t.r0, v), dot(t.r1, v), dot(vec3(0.0, 0.0, 1.0), v));
+}
+
+void main()
+{
+ vec2 p = (pos * _block.transform.xy) + _block.transform.zw;
+ m3x2 param = m3x2(vec3(1.0, 0.0, 0.0), vec3(0.0, -1.0, 0.0));
+ vec3 param_1 = vec3(p, 0.0);
+ gl_Position = vec4(transform3x2(param, param_1), 1.0);
+ m3x2 param_2 = m3x2(_block.uvTransformR1.xyz, _block.uvTransformR2.xyz);
+ vec3 param_3 = vec3(uv, 1.0);
+ vUV = transform3x2(param_2, param_3).xy;
+}
+
diff --git a/vendor/gioui.org/shader/gio/zblit.vert.0.metallibios b/vendor/gioui.org/shader/gio/zblit.vert.0.metallibios
new file mode 100644
index 0000000..2450ec9
Binary files /dev/null and b/vendor/gioui.org/shader/gio/zblit.vert.0.metallibios differ
diff --git a/vendor/gioui.org/shader/gio/zblit.vert.0.metallibiossimulator b/vendor/gioui.org/shader/gio/zblit.vert.0.metallibiossimulator
new file mode 100644
index 0000000..6b4960b
Binary files /dev/null and b/vendor/gioui.org/shader/gio/zblit.vert.0.metallibiossimulator differ
diff --git a/vendor/gioui.org/shader/gio/zblit.vert.0.metallibmacos b/vendor/gioui.org/shader/gio/zblit.vert.0.metallibmacos
new file mode 100644
index 0000000..d902b36
Binary files /dev/null and b/vendor/gioui.org/shader/gio/zblit.vert.0.metallibmacos differ
diff --git a/vendor/gioui.org/shader/gio/zblit.vert.0.spirv b/vendor/gioui.org/shader/gio/zblit.vert.0.spirv
new file mode 100644
index 0000000..4f762bb
Binary files /dev/null and b/vendor/gioui.org/shader/gio/zblit.vert.0.spirv differ
diff --git a/vendor/gioui.org/shader/gio/zcopy.frag.0.dxbc b/vendor/gioui.org/shader/gio/zcopy.frag.0.dxbc
new file mode 100644
index 0000000..a521803
Binary files /dev/null and b/vendor/gioui.org/shader/gio/zcopy.frag.0.dxbc differ
diff --git a/vendor/gioui.org/shader/gio/zcopy.frag.0.glsl100es b/vendor/gioui.org/shader/gio/zcopy.frag.0.glsl100es
new file mode 100644
index 0000000..b2dd95a
--- /dev/null
+++ b/vendor/gioui.org/shader/gio/zcopy.frag.0.glsl100es
@@ -0,0 +1,27 @@
+#version 100
+precision mediump float;
+precision highp int;
+
+uniform mediump sampler2D tex;
+
+varying highp vec2 vUV;
+
+vec3 sRGBtoRGB(vec3 rgb)
+{
+ bvec3 cutoff = greaterThanEqual(rgb, vec3(0.040449999272823333740234375));
+ vec3 below = rgb / vec3(12.9200000762939453125);
+ vec3 above = pow((rgb + vec3(0.054999999701976776123046875)) / vec3(1.05499994754791259765625), vec3(2.400000095367431640625));
+ return vec3(cutoff.x ? above.x : below.x, cutoff.y ? above.y : below.y, cutoff.z ? above.z : below.z);
+}
+
+void main()
+{
+ vec4 texel = texture2D(tex, vUV);
+ vec3 param = texel.xyz;
+ vec3 _59 = sRGBtoRGB(param);
+ texel.x = _59.x;
+ texel.y = _59.y;
+ texel.z = _59.z;
+ gl_FragData[0] = texel;
+}
+
diff --git a/vendor/gioui.org/shader/gio/zcopy.frag.0.glsl150 b/vendor/gioui.org/shader/gio/zcopy.frag.0.glsl150
new file mode 100644
index 0000000..799add1
--- /dev/null
+++ b/vendor/gioui.org/shader/gio/zcopy.frag.0.glsl150
@@ -0,0 +1,26 @@
+#version 150
+
+uniform sampler2D tex;
+
+in vec2 vUV;
+out vec4 fragColor;
+
+vec3 sRGBtoRGB(vec3 rgb)
+{
+ bvec3 cutoff = greaterThanEqual(rgb, vec3(0.040449999272823333740234375));
+ vec3 below = rgb / vec3(12.9200000762939453125);
+ vec3 above = pow((rgb + vec3(0.054999999701976776123046875)) / vec3(1.05499994754791259765625), vec3(2.400000095367431640625));
+ return vec3(cutoff.x ? above.x : below.x, cutoff.y ? above.y : below.y, cutoff.z ? above.z : below.z);
+}
+
+void main()
+{
+ vec4 texel = texture(tex, vUV);
+ vec3 param = texel.xyz;
+ vec3 _59 = sRGBtoRGB(param);
+ texel.x = _59.x;
+ texel.y = _59.y;
+ texel.z = _59.z;
+ fragColor = texel;
+}
+
diff --git a/vendor/gioui.org/shader/gio/zcopy.frag.0.metallibios b/vendor/gioui.org/shader/gio/zcopy.frag.0.metallibios
new file mode 100644
index 0000000..f143069
Binary files /dev/null and b/vendor/gioui.org/shader/gio/zcopy.frag.0.metallibios differ
diff --git a/vendor/gioui.org/shader/gio/zcopy.frag.0.metallibiossimulator b/vendor/gioui.org/shader/gio/zcopy.frag.0.metallibiossimulator
new file mode 100644
index 0000000..8fe4c74
Binary files /dev/null and b/vendor/gioui.org/shader/gio/zcopy.frag.0.metallibiossimulator differ
diff --git a/vendor/gioui.org/shader/gio/zcopy.frag.0.metallibmacos b/vendor/gioui.org/shader/gio/zcopy.frag.0.metallibmacos
new file mode 100644
index 0000000..2e43803
Binary files /dev/null and b/vendor/gioui.org/shader/gio/zcopy.frag.0.metallibmacos differ
diff --git a/vendor/gioui.org/shader/gio/zcopy.frag.0.spirv b/vendor/gioui.org/shader/gio/zcopy.frag.0.spirv
new file mode 100644
index 0000000..3299552
Binary files /dev/null and b/vendor/gioui.org/shader/gio/zcopy.frag.0.spirv differ
diff --git a/vendor/gioui.org/shader/gio/zcopy.vert.0.dxbc b/vendor/gioui.org/shader/gio/zcopy.vert.0.dxbc
new file mode 100644
index 0000000..acce7ab
Binary files /dev/null and b/vendor/gioui.org/shader/gio/zcopy.vert.0.dxbc differ
diff --git a/vendor/gioui.org/shader/gio/zcopy.vert.0.glsl100es b/vendor/gioui.org/shader/gio/zcopy.vert.0.glsl100es
new file mode 100644
index 0000000..ff0be47
--- /dev/null
+++ b/vendor/gioui.org/shader/gio/zcopy.vert.0.glsl100es
@@ -0,0 +1,35 @@
+#version 100
+
+struct m3x2
+{
+ vec3 r0;
+ vec3 r1;
+};
+
+struct Block
+{
+ vec2 scale;
+ vec2 pos;
+ vec2 uvScale;
+};
+
+uniform Block _block;
+
+varying vec2 vUV;
+attribute vec2 uv;
+attribute vec2 pos;
+
+vec3 transform3x2(m3x2 t, vec3 v)
+{
+ return vec3(dot(t.r0, v), dot(t.r1, v), dot(vec3(0.0, 0.0, 1.0), v));
+}
+
+void main()
+{
+ vUV = vec2(uv * _block.uvScale);
+ vec2 p = vec2((pos * _block.scale) + _block.pos);
+ m3x2 param = m3x2(vec3(1.0, 0.0, 0.0), vec3(0.0, -1.0, 0.0));
+ vec3 param_1 = vec3(p, 0.0);
+ gl_Position = vec4(transform3x2(param, param_1), 1.0);
+}
+
diff --git a/vendor/gioui.org/shader/gio/zcopy.vert.0.glsl150 b/vendor/gioui.org/shader/gio/zcopy.vert.0.glsl150
new file mode 100644
index 0000000..036cd39
--- /dev/null
+++ b/vendor/gioui.org/shader/gio/zcopy.vert.0.glsl150
@@ -0,0 +1,35 @@
+#version 150
+
+struct m3x2
+{
+ vec3 r0;
+ vec3 r1;
+};
+
+struct Block
+{
+ vec2 scale;
+ vec2 pos;
+ vec2 uvScale;
+};
+
+uniform Block _block;
+
+out vec2 vUV;
+in vec2 uv;
+in vec2 pos;
+
+vec3 transform3x2(m3x2 t, vec3 v)
+{
+ return vec3(dot(t.r0, v), dot(t.r1, v), dot(vec3(0.0, 0.0, 1.0), v));
+}
+
+void main()
+{
+ vUV = vec2(uv * _block.uvScale);
+ vec2 p = vec2((pos * _block.scale) + _block.pos);
+ m3x2 param = m3x2(vec3(1.0, 0.0, 0.0), vec3(0.0, -1.0, 0.0));
+ vec3 param_1 = vec3(p, 0.0);
+ gl_Position = vec4(transform3x2(param, param_1), 1.0);
+}
+
diff --git a/vendor/gioui.org/shader/gio/zcopy.vert.0.metallibios b/vendor/gioui.org/shader/gio/zcopy.vert.0.metallibios
new file mode 100644
index 0000000..25f5e56
Binary files /dev/null and b/vendor/gioui.org/shader/gio/zcopy.vert.0.metallibios differ
diff --git a/vendor/gioui.org/shader/gio/zcopy.vert.0.metallibiossimulator b/vendor/gioui.org/shader/gio/zcopy.vert.0.metallibiossimulator
new file mode 100644
index 0000000..f696caf
Binary files /dev/null and b/vendor/gioui.org/shader/gio/zcopy.vert.0.metallibiossimulator differ
diff --git a/vendor/gioui.org/shader/gio/zcopy.vert.0.metallibmacos b/vendor/gioui.org/shader/gio/zcopy.vert.0.metallibmacos
new file mode 100644
index 0000000..d4a5777
Binary files /dev/null and b/vendor/gioui.org/shader/gio/zcopy.vert.0.metallibmacos differ
diff --git a/vendor/gioui.org/shader/gio/zcopy.vert.0.spirv b/vendor/gioui.org/shader/gio/zcopy.vert.0.spirv
new file mode 100644
index 0000000..7251754
Binary files /dev/null and b/vendor/gioui.org/shader/gio/zcopy.vert.0.spirv differ
diff --git a/vendor/gioui.org/shader/gio/zcover.frag.0.dxbc b/vendor/gioui.org/shader/gio/zcover.frag.0.dxbc
new file mode 100644
index 0000000..8b77ae4
Binary files /dev/null and b/vendor/gioui.org/shader/gio/zcover.frag.0.dxbc differ
diff --git a/vendor/gioui.org/shader/gio/zcover.frag.0.glsl100es b/vendor/gioui.org/shader/gio/zcover.frag.0.glsl100es
new file mode 100644
index 0000000..2399901
--- /dev/null
+++ b/vendor/gioui.org/shader/gio/zcover.frag.0.glsl100es
@@ -0,0 +1,23 @@
+#version 100
+precision mediump float;
+precision highp int;
+
+struct Color
+{
+ vec4 color;
+};
+
+uniform Color _color;
+
+uniform mediump sampler2D cover;
+
+varying highp vec2 vCoverUV;
+varying highp vec2 vUV;
+
+void main()
+{
+ gl_FragData[0] = _color.color;
+ float c = min(abs(texture2D(cover, vCoverUV).x), 1.0);
+ gl_FragData[0] *= c;
+}
+
diff --git a/vendor/gioui.org/shader/gio/zcover.frag.0.glsl150 b/vendor/gioui.org/shader/gio/zcover.frag.0.glsl150
new file mode 100644
index 0000000..ce1beaa
--- /dev/null
+++ b/vendor/gioui.org/shader/gio/zcover.frag.0.glsl150
@@ -0,0 +1,22 @@
+#version 150
+
+struct Color
+{
+ vec4 color;
+};
+
+uniform Color _color;
+
+uniform sampler2D cover;
+
+out vec4 fragColor;
+in vec2 vCoverUV;
+in vec2 vUV;
+
+void main()
+{
+ fragColor = _color.color;
+ float c = min(abs(texture(cover, vCoverUV).x), 1.0);
+ fragColor *= c;
+}
+
diff --git a/vendor/gioui.org/shader/gio/zcover.frag.0.metallibios b/vendor/gioui.org/shader/gio/zcover.frag.0.metallibios
new file mode 100644
index 0000000..22256c8
Binary files /dev/null and b/vendor/gioui.org/shader/gio/zcover.frag.0.metallibios differ
diff --git a/vendor/gioui.org/shader/gio/zcover.frag.0.metallibiossimulator b/vendor/gioui.org/shader/gio/zcover.frag.0.metallibiossimulator
new file mode 100644
index 0000000..c8040ad
Binary files /dev/null and b/vendor/gioui.org/shader/gio/zcover.frag.0.metallibiossimulator differ
diff --git a/vendor/gioui.org/shader/gio/zcover.frag.0.metallibmacos b/vendor/gioui.org/shader/gio/zcover.frag.0.metallibmacos
new file mode 100644
index 0000000..fe25160
Binary files /dev/null and b/vendor/gioui.org/shader/gio/zcover.frag.0.metallibmacos differ
diff --git a/vendor/gioui.org/shader/gio/zcover.frag.0.spirv b/vendor/gioui.org/shader/gio/zcover.frag.0.spirv
new file mode 100644
index 0000000..2061ff1
Binary files /dev/null and b/vendor/gioui.org/shader/gio/zcover.frag.0.spirv differ
diff --git a/vendor/gioui.org/shader/gio/zcover.frag.1.dxbc b/vendor/gioui.org/shader/gio/zcover.frag.1.dxbc
new file mode 100644
index 0000000..c9a44cf
Binary files /dev/null and b/vendor/gioui.org/shader/gio/zcover.frag.1.dxbc differ
diff --git a/vendor/gioui.org/shader/gio/zcover.frag.1.glsl100es b/vendor/gioui.org/shader/gio/zcover.frag.1.glsl100es
new file mode 100644
index 0000000..01cc88e
--- /dev/null
+++ b/vendor/gioui.org/shader/gio/zcover.frag.1.glsl100es
@@ -0,0 +1,24 @@
+#version 100
+precision mediump float;
+precision highp int;
+
+struct Gradient
+{
+ vec4 color1;
+ vec4 color2;
+};
+
+uniform Gradient _gradient;
+
+uniform mediump sampler2D cover;
+
+varying highp vec2 vUV;
+varying highp vec2 vCoverUV;
+
+void main()
+{
+ gl_FragData[0] = mix(_gradient.color1, _gradient.color2, vec4(clamp(vUV.x, 0.0, 1.0)));
+ float c = min(abs(texture2D(cover, vCoverUV).x), 1.0);
+ gl_FragData[0] *= c;
+}
+
diff --git a/vendor/gioui.org/shader/gio/zcover.frag.1.glsl150 b/vendor/gioui.org/shader/gio/zcover.frag.1.glsl150
new file mode 100644
index 0000000..3f832c3
--- /dev/null
+++ b/vendor/gioui.org/shader/gio/zcover.frag.1.glsl150
@@ -0,0 +1,23 @@
+#version 150
+
+struct Gradient
+{
+ vec4 color1;
+ vec4 color2;
+};
+
+uniform Gradient _gradient;
+
+uniform sampler2D cover;
+
+out vec4 fragColor;
+in vec2 vUV;
+in vec2 vCoverUV;
+
+void main()
+{
+ fragColor = mix(_gradient.color1, _gradient.color2, vec4(clamp(vUV.x, 0.0, 1.0)));
+ float c = min(abs(texture(cover, vCoverUV).x), 1.0);
+ fragColor *= c;
+}
+
diff --git a/vendor/gioui.org/shader/gio/zcover.frag.1.metallibios b/vendor/gioui.org/shader/gio/zcover.frag.1.metallibios
new file mode 100644
index 0000000..228996a
Binary files /dev/null and b/vendor/gioui.org/shader/gio/zcover.frag.1.metallibios differ
diff --git a/vendor/gioui.org/shader/gio/zcover.frag.1.metallibiossimulator b/vendor/gioui.org/shader/gio/zcover.frag.1.metallibiossimulator
new file mode 100644
index 0000000..1c99553
Binary files /dev/null and b/vendor/gioui.org/shader/gio/zcover.frag.1.metallibiossimulator differ
diff --git a/vendor/gioui.org/shader/gio/zcover.frag.1.metallibmacos b/vendor/gioui.org/shader/gio/zcover.frag.1.metallibmacos
new file mode 100644
index 0000000..889e249
Binary files /dev/null and b/vendor/gioui.org/shader/gio/zcover.frag.1.metallibmacos differ
diff --git a/vendor/gioui.org/shader/gio/zcover.frag.1.spirv b/vendor/gioui.org/shader/gio/zcover.frag.1.spirv
new file mode 100644
index 0000000..4c48d20
Binary files /dev/null and b/vendor/gioui.org/shader/gio/zcover.frag.1.spirv differ
diff --git a/vendor/gioui.org/shader/gio/zcover.frag.2.dxbc b/vendor/gioui.org/shader/gio/zcover.frag.2.dxbc
new file mode 100644
index 0000000..d8c6a80
Binary files /dev/null and b/vendor/gioui.org/shader/gio/zcover.frag.2.dxbc differ
diff --git a/vendor/gioui.org/shader/gio/zcover.frag.2.glsl100es b/vendor/gioui.org/shader/gio/zcover.frag.2.glsl100es
new file mode 100644
index 0000000..7cfde4b
--- /dev/null
+++ b/vendor/gioui.org/shader/gio/zcover.frag.2.glsl100es
@@ -0,0 +1,17 @@
+#version 100
+precision mediump float;
+precision highp int;
+
+uniform mediump sampler2D tex;
+uniform mediump sampler2D cover;
+
+varying highp vec2 vUV;
+varying highp vec2 vCoverUV;
+
+void main()
+{
+ gl_FragData[0] = texture2D(tex, vUV);
+ float c = min(abs(texture2D(cover, vCoverUV).x), 1.0);
+ gl_FragData[0] *= c;
+}
+
diff --git a/vendor/gioui.org/shader/gio/zcover.frag.2.glsl150 b/vendor/gioui.org/shader/gio/zcover.frag.2.glsl150
new file mode 100644
index 0000000..939baee
--- /dev/null
+++ b/vendor/gioui.org/shader/gio/zcover.frag.2.glsl150
@@ -0,0 +1,16 @@
+#version 150
+
+uniform sampler2D tex;
+uniform sampler2D cover;
+
+out vec4 fragColor;
+in vec2 vUV;
+in vec2 vCoverUV;
+
+void main()
+{
+ fragColor = texture(tex, vUV);
+ float c = min(abs(texture(cover, vCoverUV).x), 1.0);
+ fragColor *= c;
+}
+
diff --git a/vendor/gioui.org/shader/gio/zcover.frag.2.metallibios b/vendor/gioui.org/shader/gio/zcover.frag.2.metallibios
new file mode 100644
index 0000000..6eba400
Binary files /dev/null and b/vendor/gioui.org/shader/gio/zcover.frag.2.metallibios differ
diff --git a/vendor/gioui.org/shader/gio/zcover.frag.2.metallibiossimulator b/vendor/gioui.org/shader/gio/zcover.frag.2.metallibiossimulator
new file mode 100644
index 0000000..09ddb1f
Binary files /dev/null and b/vendor/gioui.org/shader/gio/zcover.frag.2.metallibiossimulator differ
diff --git a/vendor/gioui.org/shader/gio/zcover.frag.2.metallibmacos b/vendor/gioui.org/shader/gio/zcover.frag.2.metallibmacos
new file mode 100644
index 0000000..3ba9105
Binary files /dev/null and b/vendor/gioui.org/shader/gio/zcover.frag.2.metallibmacos differ
diff --git a/vendor/gioui.org/shader/gio/zcover.frag.2.spirv b/vendor/gioui.org/shader/gio/zcover.frag.2.spirv
new file mode 100644
index 0000000..cd618ce
Binary files /dev/null and b/vendor/gioui.org/shader/gio/zcover.frag.2.spirv differ
diff --git a/vendor/gioui.org/shader/gio/zcover.vert.0.dxbc b/vendor/gioui.org/shader/gio/zcover.vert.0.dxbc
new file mode 100644
index 0000000..3b6cf6b
Binary files /dev/null and b/vendor/gioui.org/shader/gio/zcover.vert.0.dxbc differ
diff --git a/vendor/gioui.org/shader/gio/zcover.vert.0.glsl100es b/vendor/gioui.org/shader/gio/zcover.vert.0.glsl100es
new file mode 100644
index 0000000..423ca60
--- /dev/null
+++ b/vendor/gioui.org/shader/gio/zcover.vert.0.glsl100es
@@ -0,0 +1,41 @@
+#version 100
+
+struct m3x2
+{
+ vec3 r0;
+ vec3 r1;
+};
+
+struct Block
+{
+ vec4 transform;
+ vec4 uvCoverTransform;
+ vec4 uvTransformR1;
+ vec4 uvTransformR2;
+};
+
+uniform Block _block;
+
+attribute vec2 pos;
+varying vec2 vUV;
+attribute vec2 uv;
+varying vec2 vCoverUV;
+
+vec3 transform3x2(m3x2 t, vec3 v)
+{
+ return vec3(dot(t.r0, v), dot(t.r1, v), dot(vec3(0.0, 0.0, 1.0), v));
+}
+
+void main()
+{
+ vec2 p = vec2((pos * _block.transform.xy) + _block.transform.zw);
+ m3x2 param = m3x2(vec3(1.0, 0.0, 0.0), vec3(0.0, -1.0, 0.0));
+ vec3 param_1 = vec3(p, 0.0);
+ gl_Position = vec4(transform3x2(param, param_1), 1.0);
+ m3x2 param_2 = m3x2(_block.uvTransformR1.xyz, _block.uvTransformR2.xyz);
+ vec3 param_3 = vec3(uv, 1.0);
+ vUV = transform3x2(param_2, param_3).xy;
+ vec3 uv3 = vec3(uv, 1.0);
+ vCoverUV = ((uv3 * vec3(_block.uvCoverTransform.xy, 1.0)) + vec3(_block.uvCoverTransform.zw, 0.0)).xy;
+}
+
diff --git a/vendor/gioui.org/shader/gio/zcover.vert.0.glsl150 b/vendor/gioui.org/shader/gio/zcover.vert.0.glsl150
new file mode 100644
index 0000000..a5005d6
--- /dev/null
+++ b/vendor/gioui.org/shader/gio/zcover.vert.0.glsl150
@@ -0,0 +1,41 @@
+#version 150
+
+struct m3x2
+{
+ vec3 r0;
+ vec3 r1;
+};
+
+struct Block
+{
+ vec4 transform;
+ vec4 uvCoverTransform;
+ vec4 uvTransformR1;
+ vec4 uvTransformR2;
+};
+
+uniform Block _block;
+
+in vec2 pos;
+out vec2 vUV;
+in vec2 uv;
+out vec2 vCoverUV;
+
+vec3 transform3x2(m3x2 t, vec3 v)
+{
+ return vec3(dot(t.r0, v), dot(t.r1, v), dot(vec3(0.0, 0.0, 1.0), v));
+}
+
+void main()
+{
+ vec2 p = vec2((pos * _block.transform.xy) + _block.transform.zw);
+ m3x2 param = m3x2(vec3(1.0, 0.0, 0.0), vec3(0.0, -1.0, 0.0));
+ vec3 param_1 = vec3(p, 0.0);
+ gl_Position = vec4(transform3x2(param, param_1), 1.0);
+ m3x2 param_2 = m3x2(_block.uvTransformR1.xyz, _block.uvTransformR2.xyz);
+ vec3 param_3 = vec3(uv, 1.0);
+ vUV = transform3x2(param_2, param_3).xy;
+ vec3 uv3 = vec3(uv, 1.0);
+ vCoverUV = ((uv3 * vec3(_block.uvCoverTransform.xy, 1.0)) + vec3(_block.uvCoverTransform.zw, 0.0)).xy;
+}
+
diff --git a/vendor/gioui.org/shader/gio/zcover.vert.0.metallibios b/vendor/gioui.org/shader/gio/zcover.vert.0.metallibios
new file mode 100644
index 0000000..335704b
Binary files /dev/null and b/vendor/gioui.org/shader/gio/zcover.vert.0.metallibios differ
diff --git a/vendor/gioui.org/shader/gio/zcover.vert.0.metallibiossimulator b/vendor/gioui.org/shader/gio/zcover.vert.0.metallibiossimulator
new file mode 100644
index 0000000..96470cd
Binary files /dev/null and b/vendor/gioui.org/shader/gio/zcover.vert.0.metallibiossimulator differ
diff --git a/vendor/gioui.org/shader/gio/zcover.vert.0.metallibmacos b/vendor/gioui.org/shader/gio/zcover.vert.0.metallibmacos
new file mode 100644
index 0000000..629e82b
Binary files /dev/null and b/vendor/gioui.org/shader/gio/zcover.vert.0.metallibmacos differ
diff --git a/vendor/gioui.org/shader/gio/zcover.vert.0.spirv b/vendor/gioui.org/shader/gio/zcover.vert.0.spirv
new file mode 100644
index 0000000..f9af8ab
Binary files /dev/null and b/vendor/gioui.org/shader/gio/zcover.vert.0.spirv differ
diff --git a/vendor/gioui.org/shader/gio/zinput.vert.0.dxbc b/vendor/gioui.org/shader/gio/zinput.vert.0.dxbc
new file mode 100644
index 0000000..096bd01
Binary files /dev/null and b/vendor/gioui.org/shader/gio/zinput.vert.0.dxbc differ
diff --git a/vendor/gioui.org/shader/gio/zinput.vert.0.glsl100es b/vendor/gioui.org/shader/gio/zinput.vert.0.glsl100es
new file mode 100644
index 0000000..d1d4c8d
--- /dev/null
+++ b/vendor/gioui.org/shader/gio/zinput.vert.0.glsl100es
@@ -0,0 +1,22 @@
+#version 100
+
+struct m3x2
+{
+ vec3 r0;
+ vec3 r1;
+};
+
+attribute vec4 position;
+
+vec3 transform3x2(m3x2 t, vec3 v)
+{
+ return vec3(dot(t.r0, v), dot(t.r1, v), dot(vec3(0.0, 0.0, 1.0), v));
+}
+
+void main()
+{
+ m3x2 param = m3x2(vec3(1.0, 0.0, 0.0), vec3(0.0, -1.0, 0.0));
+ vec3 param_1 = position.xyz;
+ gl_Position = vec4(transform3x2(param, param_1), position.w);
+}
+
diff --git a/vendor/gioui.org/shader/gio/zinput.vert.0.glsl150 b/vendor/gioui.org/shader/gio/zinput.vert.0.glsl150
new file mode 100644
index 0000000..ab28308
--- /dev/null
+++ b/vendor/gioui.org/shader/gio/zinput.vert.0.glsl150
@@ -0,0 +1,22 @@
+#version 150
+
+struct m3x2
+{
+ vec3 r0;
+ vec3 r1;
+};
+
+in vec4 position;
+
+vec3 transform3x2(m3x2 t, vec3 v)
+{
+ return vec3(dot(t.r0, v), dot(t.r1, v), dot(vec3(0.0, 0.0, 1.0), v));
+}
+
+void main()
+{
+ m3x2 param = m3x2(vec3(1.0, 0.0, 0.0), vec3(0.0, -1.0, 0.0));
+ vec3 param_1 = position.xyz;
+ gl_Position = vec4(transform3x2(param, param_1), position.w);
+}
+
diff --git a/vendor/gioui.org/shader/gio/zinput.vert.0.metallibios b/vendor/gioui.org/shader/gio/zinput.vert.0.metallibios
new file mode 100644
index 0000000..6fdc427
Binary files /dev/null and b/vendor/gioui.org/shader/gio/zinput.vert.0.metallibios differ
diff --git a/vendor/gioui.org/shader/gio/zinput.vert.0.metallibiossimulator b/vendor/gioui.org/shader/gio/zinput.vert.0.metallibiossimulator
new file mode 100644
index 0000000..da28890
Binary files /dev/null and b/vendor/gioui.org/shader/gio/zinput.vert.0.metallibiossimulator differ
diff --git a/vendor/gioui.org/shader/gio/zinput.vert.0.metallibmacos b/vendor/gioui.org/shader/gio/zinput.vert.0.metallibmacos
new file mode 100644
index 0000000..064c501
Binary files /dev/null and b/vendor/gioui.org/shader/gio/zinput.vert.0.metallibmacos differ
diff --git a/vendor/gioui.org/shader/gio/zinput.vert.0.spirv b/vendor/gioui.org/shader/gio/zinput.vert.0.spirv
new file mode 100644
index 0000000..eb999e5
Binary files /dev/null and b/vendor/gioui.org/shader/gio/zinput.vert.0.spirv differ
diff --git a/vendor/gioui.org/shader/gio/zintersect.frag.0.dxbc b/vendor/gioui.org/shader/gio/zintersect.frag.0.dxbc
new file mode 100644
index 0000000..f2081ee
Binary files /dev/null and b/vendor/gioui.org/shader/gio/zintersect.frag.0.dxbc differ
diff --git a/vendor/gioui.org/shader/gio/zintersect.frag.0.glsl100es b/vendor/gioui.org/shader/gio/zintersect.frag.0.glsl100es
new file mode 100644
index 0000000..90fe2f9
--- /dev/null
+++ b/vendor/gioui.org/shader/gio/zintersect.frag.0.glsl100es
@@ -0,0 +1,13 @@
+#version 100
+precision mediump float;
+precision highp int;
+
+uniform mediump sampler2D cover;
+
+varying highp vec2 vUV;
+
+void main()
+{
+ gl_FragData[0].x = abs(texture2D(cover, vUV).x);
+}
+
diff --git a/vendor/gioui.org/shader/gio/zintersect.frag.0.glsl150 b/vendor/gioui.org/shader/gio/zintersect.frag.0.glsl150
new file mode 100644
index 0000000..f574aae
--- /dev/null
+++ b/vendor/gioui.org/shader/gio/zintersect.frag.0.glsl150
@@ -0,0 +1,12 @@
+#version 150
+
+uniform sampler2D cover;
+
+out vec4 fragColor;
+in vec2 vUV;
+
+void main()
+{
+ fragColor.x = abs(texture(cover, vUV).x);
+}
+
diff --git a/vendor/gioui.org/shader/gio/zintersect.frag.0.metallibios b/vendor/gioui.org/shader/gio/zintersect.frag.0.metallibios
new file mode 100644
index 0000000..f48da4e
Binary files /dev/null and b/vendor/gioui.org/shader/gio/zintersect.frag.0.metallibios differ
diff --git a/vendor/gioui.org/shader/gio/zintersect.frag.0.metallibiossimulator b/vendor/gioui.org/shader/gio/zintersect.frag.0.metallibiossimulator
new file mode 100644
index 0000000..b1a40c4
Binary files /dev/null and b/vendor/gioui.org/shader/gio/zintersect.frag.0.metallibiossimulator differ
diff --git a/vendor/gioui.org/shader/gio/zintersect.frag.0.metallibmacos b/vendor/gioui.org/shader/gio/zintersect.frag.0.metallibmacos
new file mode 100644
index 0000000..f16d190
Binary files /dev/null and b/vendor/gioui.org/shader/gio/zintersect.frag.0.metallibmacos differ
diff --git a/vendor/gioui.org/shader/gio/zintersect.frag.0.spirv b/vendor/gioui.org/shader/gio/zintersect.frag.0.spirv
new file mode 100644
index 0000000..6c28644
Binary files /dev/null and b/vendor/gioui.org/shader/gio/zintersect.frag.0.spirv differ
diff --git a/vendor/gioui.org/shader/gio/zintersect.vert.0.dxbc b/vendor/gioui.org/shader/gio/zintersect.vert.0.dxbc
new file mode 100644
index 0000000..03005ea
Binary files /dev/null and b/vendor/gioui.org/shader/gio/zintersect.vert.0.dxbc differ
diff --git a/vendor/gioui.org/shader/gio/zintersect.vert.0.glsl100es b/vendor/gioui.org/shader/gio/zintersect.vert.0.glsl100es
new file mode 100644
index 0000000..4441763
--- /dev/null
+++ b/vendor/gioui.org/shader/gio/zintersect.vert.0.glsl100es
@@ -0,0 +1,35 @@
+#version 100
+
+struct m3x2
+{
+ vec3 r0;
+ vec3 r1;
+};
+
+struct Block
+{
+ vec4 uvTransform;
+ vec4 subUVTransform;
+};
+
+uniform Block _block;
+
+attribute vec2 pos;
+varying vec2 vUV;
+attribute vec2 uv;
+
+vec3 transform3x2(m3x2 t, vec3 v)
+{
+ return vec3(dot(t.r0, v), dot(t.r1, v), dot(vec3(0.0, 0.0, 1.0), v));
+}
+
+void main()
+{
+ m3x2 param = m3x2(vec3(1.0, 0.0, 0.0), vec3(0.0, 1.0, 0.0));
+ vec3 param_1 = vec3(pos, 1.0);
+ vec3 p = transform3x2(param, param_1);
+ gl_Position = vec4(p, 1.0);
+ vUV = (uv * _block.subUVTransform.xy) + _block.subUVTransform.zw;
+ vUV = (vUV * _block.uvTransform.xy) + _block.uvTransform.zw;
+}
+
diff --git a/vendor/gioui.org/shader/gio/zintersect.vert.0.glsl150 b/vendor/gioui.org/shader/gio/zintersect.vert.0.glsl150
new file mode 100644
index 0000000..656b4c9
--- /dev/null
+++ b/vendor/gioui.org/shader/gio/zintersect.vert.0.glsl150
@@ -0,0 +1,35 @@
+#version 150
+
+struct m3x2
+{
+ vec3 r0;
+ vec3 r1;
+};
+
+struct Block
+{
+ vec4 uvTransform;
+ vec4 subUVTransform;
+};
+
+uniform Block _block;
+
+in vec2 pos;
+out vec2 vUV;
+in vec2 uv;
+
+vec3 transform3x2(m3x2 t, vec3 v)
+{
+ return vec3(dot(t.r0, v), dot(t.r1, v), dot(vec3(0.0, 0.0, 1.0), v));
+}
+
+void main()
+{
+ m3x2 param = m3x2(vec3(1.0, 0.0, 0.0), vec3(0.0, 1.0, 0.0));
+ vec3 param_1 = vec3(pos, 1.0);
+ vec3 p = transform3x2(param, param_1);
+ gl_Position = vec4(p, 1.0);
+ vUV = (uv * _block.subUVTransform.xy) + _block.subUVTransform.zw;
+ vUV = (vUV * _block.uvTransform.xy) + _block.uvTransform.zw;
+}
+
diff --git a/vendor/gioui.org/shader/gio/zintersect.vert.0.metallibios b/vendor/gioui.org/shader/gio/zintersect.vert.0.metallibios
new file mode 100644
index 0000000..975fbb8
Binary files /dev/null and b/vendor/gioui.org/shader/gio/zintersect.vert.0.metallibios differ
diff --git a/vendor/gioui.org/shader/gio/zintersect.vert.0.metallibiossimulator b/vendor/gioui.org/shader/gio/zintersect.vert.0.metallibiossimulator
new file mode 100644
index 0000000..682bad1
Binary files /dev/null and b/vendor/gioui.org/shader/gio/zintersect.vert.0.metallibiossimulator differ
diff --git a/vendor/gioui.org/shader/gio/zintersect.vert.0.metallibmacos b/vendor/gioui.org/shader/gio/zintersect.vert.0.metallibmacos
new file mode 100644
index 0000000..a43236d
Binary files /dev/null and b/vendor/gioui.org/shader/gio/zintersect.vert.0.metallibmacos differ
diff --git a/vendor/gioui.org/shader/gio/zintersect.vert.0.spirv b/vendor/gioui.org/shader/gio/zintersect.vert.0.spirv
new file mode 100644
index 0000000..13e7335
Binary files /dev/null and b/vendor/gioui.org/shader/gio/zintersect.vert.0.spirv differ
diff --git a/vendor/gioui.org/shader/gio/zmaterial.frag.0.dxbc b/vendor/gioui.org/shader/gio/zmaterial.frag.0.dxbc
new file mode 100644
index 0000000..e250378
Binary files /dev/null and b/vendor/gioui.org/shader/gio/zmaterial.frag.0.dxbc differ
diff --git a/vendor/gioui.org/shader/gio/zmaterial.frag.0.glsl100es b/vendor/gioui.org/shader/gio/zmaterial.frag.0.glsl100es
new file mode 100644
index 0000000..f2f2a30
--- /dev/null
+++ b/vendor/gioui.org/shader/gio/zmaterial.frag.0.glsl100es
@@ -0,0 +1,37 @@
+#version 100
+precision mediump float;
+precision highp int;
+
+struct Color
+{
+ float emulateSRGB;
+};
+
+uniform Color _color;
+
+uniform mediump sampler2D tex;
+
+varying highp vec2 vUV;
+
+vec3 RGBtosRGB(vec3 rgb)
+{
+ bvec3 cutoff = greaterThanEqual(rgb, vec3(0.003130800090730190277099609375));
+ vec3 below = vec3(12.9200000762939453125) * rgb;
+ vec3 above = (vec3(1.05499994754791259765625) * pow(rgb, vec3(0.416660010814666748046875))) - vec3(0.054999999701976776123046875);
+ return vec3(cutoff.x ? above.x : below.x, cutoff.y ? above.y : below.y, cutoff.z ? above.z : below.z);
+}
+
+void main()
+{
+ vec4 texel = texture2D(tex, vUV);
+ if (_color.emulateSRGB == 0.0)
+ {
+ vec3 param = texel.xyz;
+ vec3 _71 = RGBtosRGB(param);
+ texel.x = _71.x;
+ texel.y = _71.y;
+ texel.z = _71.z;
+ }
+ gl_FragData[0] = texel;
+}
+
diff --git a/vendor/gioui.org/shader/gio/zmaterial.frag.0.glsl150 b/vendor/gioui.org/shader/gio/zmaterial.frag.0.glsl150
new file mode 100644
index 0000000..6b8bed3
--- /dev/null
+++ b/vendor/gioui.org/shader/gio/zmaterial.frag.0.glsl150
@@ -0,0 +1,36 @@
+#version 150
+
+struct Color
+{
+ float emulateSRGB;
+};
+
+uniform Color _color;
+
+uniform sampler2D tex;
+
+in vec2 vUV;
+out vec4 fragColor;
+
+vec3 RGBtosRGB(vec3 rgb)
+{
+ bvec3 cutoff = greaterThanEqual(rgb, vec3(0.003130800090730190277099609375));
+ vec3 below = vec3(12.9200000762939453125) * rgb;
+ vec3 above = (vec3(1.05499994754791259765625) * pow(rgb, vec3(0.416660010814666748046875))) - vec3(0.054999999701976776123046875);
+ return vec3(cutoff.x ? above.x : below.x, cutoff.y ? above.y : below.y, cutoff.z ? above.z : below.z);
+}
+
+void main()
+{
+ vec4 texel = texture(tex, vUV);
+ if (_color.emulateSRGB == 0.0)
+ {
+ vec3 param = texel.xyz;
+ vec3 _71 = RGBtosRGB(param);
+ texel.x = _71.x;
+ texel.y = _71.y;
+ texel.z = _71.z;
+ }
+ fragColor = texel;
+}
+
diff --git a/vendor/gioui.org/shader/gio/zmaterial.frag.0.metallibios b/vendor/gioui.org/shader/gio/zmaterial.frag.0.metallibios
new file mode 100644
index 0000000..8679f2a
Binary files /dev/null and b/vendor/gioui.org/shader/gio/zmaterial.frag.0.metallibios differ
diff --git a/vendor/gioui.org/shader/gio/zmaterial.frag.0.metallibiossimulator b/vendor/gioui.org/shader/gio/zmaterial.frag.0.metallibiossimulator
new file mode 100644
index 0000000..6306d9a
Binary files /dev/null and b/vendor/gioui.org/shader/gio/zmaterial.frag.0.metallibiossimulator differ
diff --git a/vendor/gioui.org/shader/gio/zmaterial.frag.0.metallibmacos b/vendor/gioui.org/shader/gio/zmaterial.frag.0.metallibmacos
new file mode 100644
index 0000000..911948e
Binary files /dev/null and b/vendor/gioui.org/shader/gio/zmaterial.frag.0.metallibmacos differ
diff --git a/vendor/gioui.org/shader/gio/zmaterial.frag.0.spirv b/vendor/gioui.org/shader/gio/zmaterial.frag.0.spirv
new file mode 100644
index 0000000..475cfb8
Binary files /dev/null and b/vendor/gioui.org/shader/gio/zmaterial.frag.0.spirv differ
diff --git a/vendor/gioui.org/shader/gio/zmaterial.vert.0.dxbc b/vendor/gioui.org/shader/gio/zmaterial.vert.0.dxbc
new file mode 100644
index 0000000..a5056cc
Binary files /dev/null and b/vendor/gioui.org/shader/gio/zmaterial.vert.0.dxbc differ
diff --git a/vendor/gioui.org/shader/gio/zmaterial.vert.0.glsl100es b/vendor/gioui.org/shader/gio/zmaterial.vert.0.glsl100es
new file mode 100644
index 0000000..39a7b50
--- /dev/null
+++ b/vendor/gioui.org/shader/gio/zmaterial.vert.0.glsl100es
@@ -0,0 +1,34 @@
+#version 100
+
+struct m3x2
+{
+ vec3 r0;
+ vec3 r1;
+};
+
+struct Block
+{
+ vec2 scale;
+ vec2 pos;
+};
+
+uniform Block _block;
+
+varying vec2 vUV;
+attribute vec2 uv;
+attribute vec2 pos;
+
+vec3 transform3x2(m3x2 t, vec3 v)
+{
+ return vec3(dot(t.r0, v), dot(t.r1, v), dot(vec3(0.0, 0.0, 1.0), v));
+}
+
+void main()
+{
+ vUV = uv;
+ vec2 p = vec2((pos * _block.scale) + _block.pos);
+ m3x2 param = m3x2(vec3(1.0, 0.0, 0.0), vec3(0.0, 1.0, 0.0));
+ vec3 param_1 = vec3(p, 0.0);
+ gl_Position = vec4(transform3x2(param, param_1), 1.0);
+}
+
diff --git a/vendor/gioui.org/shader/gio/zmaterial.vert.0.glsl150 b/vendor/gioui.org/shader/gio/zmaterial.vert.0.glsl150
new file mode 100644
index 0000000..be02b44
--- /dev/null
+++ b/vendor/gioui.org/shader/gio/zmaterial.vert.0.glsl150
@@ -0,0 +1,34 @@
+#version 150
+
+struct m3x2
+{
+ vec3 r0;
+ vec3 r1;
+};
+
+struct Block
+{
+ vec2 scale;
+ vec2 pos;
+};
+
+uniform Block _block;
+
+out vec2 vUV;
+in vec2 uv;
+in vec2 pos;
+
+vec3 transform3x2(m3x2 t, vec3 v)
+{
+ return vec3(dot(t.r0, v), dot(t.r1, v), dot(vec3(0.0, 0.0, 1.0), v));
+}
+
+void main()
+{
+ vUV = uv;
+ vec2 p = vec2((pos * _block.scale) + _block.pos);
+ m3x2 param = m3x2(vec3(1.0, 0.0, 0.0), vec3(0.0, 1.0, 0.0));
+ vec3 param_1 = vec3(p, 0.0);
+ gl_Position = vec4(transform3x2(param, param_1), 1.0);
+}
+
diff --git a/vendor/gioui.org/shader/gio/zmaterial.vert.0.metallibios b/vendor/gioui.org/shader/gio/zmaterial.vert.0.metallibios
new file mode 100644
index 0000000..19cf6d8
Binary files /dev/null and b/vendor/gioui.org/shader/gio/zmaterial.vert.0.metallibios differ
diff --git a/vendor/gioui.org/shader/gio/zmaterial.vert.0.metallibiossimulator b/vendor/gioui.org/shader/gio/zmaterial.vert.0.metallibiossimulator
new file mode 100644
index 0000000..c710ca9
Binary files /dev/null and b/vendor/gioui.org/shader/gio/zmaterial.vert.0.metallibiossimulator differ
diff --git a/vendor/gioui.org/shader/gio/zmaterial.vert.0.metallibmacos b/vendor/gioui.org/shader/gio/zmaterial.vert.0.metallibmacos
new file mode 100644
index 0000000..cfbdfbc
Binary files /dev/null and b/vendor/gioui.org/shader/gio/zmaterial.vert.0.metallibmacos differ
diff --git a/vendor/gioui.org/shader/gio/zmaterial.vert.0.spirv b/vendor/gioui.org/shader/gio/zmaterial.vert.0.spirv
new file mode 100644
index 0000000..18e731f
Binary files /dev/null and b/vendor/gioui.org/shader/gio/zmaterial.vert.0.spirv differ
diff --git a/vendor/gioui.org/shader/gio/zsimple.frag.0.dxbc b/vendor/gioui.org/shader/gio/zsimple.frag.0.dxbc
new file mode 100644
index 0000000..f4f8894
Binary files /dev/null and b/vendor/gioui.org/shader/gio/zsimple.frag.0.dxbc differ
diff --git a/vendor/gioui.org/shader/gio/zsimple.frag.0.glsl100es b/vendor/gioui.org/shader/gio/zsimple.frag.0.glsl100es
new file mode 100644
index 0000000..0f86af5
--- /dev/null
+++ b/vendor/gioui.org/shader/gio/zsimple.frag.0.glsl100es
@@ -0,0 +1,9 @@
+#version 100
+precision mediump float;
+precision highp int;
+
+void main()
+{
+ gl_FragData[0] = vec4(0.25, 0.550000011920928955078125, 0.75, 1.0);
+}
+
diff --git a/vendor/gioui.org/shader/gio/zsimple.frag.0.glsl150 b/vendor/gioui.org/shader/gio/zsimple.frag.0.glsl150
new file mode 100644
index 0000000..db4c060
--- /dev/null
+++ b/vendor/gioui.org/shader/gio/zsimple.frag.0.glsl150
@@ -0,0 +1,9 @@
+#version 150
+
+out vec4 fragColor;
+
+void main()
+{
+ fragColor = vec4(0.25, 0.550000011920928955078125, 0.75, 1.0);
+}
+
diff --git a/vendor/gioui.org/shader/gio/zsimple.frag.0.metallibios b/vendor/gioui.org/shader/gio/zsimple.frag.0.metallibios
new file mode 100644
index 0000000..05e6329
Binary files /dev/null and b/vendor/gioui.org/shader/gio/zsimple.frag.0.metallibios differ
diff --git a/vendor/gioui.org/shader/gio/zsimple.frag.0.metallibiossimulator b/vendor/gioui.org/shader/gio/zsimple.frag.0.metallibiossimulator
new file mode 100644
index 0000000..2c87af9
Binary files /dev/null and b/vendor/gioui.org/shader/gio/zsimple.frag.0.metallibiossimulator differ
diff --git a/vendor/gioui.org/shader/gio/zsimple.frag.0.metallibmacos b/vendor/gioui.org/shader/gio/zsimple.frag.0.metallibmacos
new file mode 100644
index 0000000..407d7ea
Binary files /dev/null and b/vendor/gioui.org/shader/gio/zsimple.frag.0.metallibmacos differ
diff --git a/vendor/gioui.org/shader/gio/zsimple.frag.0.spirv b/vendor/gioui.org/shader/gio/zsimple.frag.0.spirv
new file mode 100644
index 0000000..2b2aba3
Binary files /dev/null and b/vendor/gioui.org/shader/gio/zsimple.frag.0.spirv differ
diff --git a/vendor/gioui.org/shader/gio/zstencil.frag.0.dxbc b/vendor/gioui.org/shader/gio/zstencil.frag.0.dxbc
new file mode 100644
index 0000000..ded34dc
Binary files /dev/null and b/vendor/gioui.org/shader/gio/zstencil.frag.0.dxbc differ
diff --git a/vendor/gioui.org/shader/gio/zstencil.frag.0.glsl100es b/vendor/gioui.org/shader/gio/zstencil.frag.0.glsl100es
new file mode 100644
index 0000000..15530e3
--- /dev/null
+++ b/vendor/gioui.org/shader/gio/zstencil.frag.0.glsl100es
@@ -0,0 +1,38 @@
+#version 100
+precision mediump float;
+precision highp int;
+
+varying highp vec2 vTo;
+varying highp vec2 vFrom;
+varying highp vec2 vCtrl;
+
+void main()
+{
+ float dx = vTo.x - vFrom.x;
+ bool increasing = vTo.x >= vFrom.x;
+ bvec2 _35 = bvec2(increasing);
+ vec2 left = vec2(_35.x ? vFrom.x : vTo.x, _35.y ? vFrom.y : vTo.y);
+ bvec2 _41 = bvec2(increasing);
+ vec2 right = vec2(_41.x ? vTo.x : vFrom.x, _41.y ? vTo.y : vFrom.y);
+ vec2 extent = clamp(vec2(vFrom.x, vTo.x), vec2(-0.5), vec2(0.5));
+ float midx = mix(extent.x, extent.y, 0.5);
+ float x0 = midx - left.x;
+ vec2 p1 = vCtrl - left;
+ vec2 v = right - vCtrl;
+ float t = x0 / (p1.x + sqrt((p1.x * p1.x) + ((v.x - p1.x) * x0)));
+ float y = mix(mix(left.y, vCtrl.y, t), mix(vCtrl.y, right.y, t), t);
+ vec2 d_half = mix(p1, v, vec2(t));
+ float dy = d_half.y / d_half.x;
+ float width = extent.y - extent.x;
+ dy = abs(dy * width);
+ vec4 sides = vec4((dy * 0.5) + y, (dy * (-0.5)) + y, (0.5 - y) / dy, ((-0.5) - y) / dy);
+ sides = clamp(sides + vec4(0.5), vec4(0.0), vec4(1.0));
+ float area = 0.5 * ((((sides.z - (sides.z * sides.y)) + 1.0) - sides.x) + (sides.x * sides.w));
+ area *= width;
+ if (width == 0.0)
+ {
+ area = 0.0;
+ }
+ gl_FragData[0].x = area;
+}
+
diff --git a/vendor/gioui.org/shader/gio/zstencil.frag.0.glsl150 b/vendor/gioui.org/shader/gio/zstencil.frag.0.glsl150
new file mode 100644
index 0000000..8e2abd8
--- /dev/null
+++ b/vendor/gioui.org/shader/gio/zstencil.frag.0.glsl150
@@ -0,0 +1,37 @@
+#version 150
+
+in vec2 vTo;
+in vec2 vFrom;
+in vec2 vCtrl;
+out vec4 fragCover;
+
+void main()
+{
+ float dx = vTo.x - vFrom.x;
+ bool increasing = vTo.x >= vFrom.x;
+ bvec2 _35 = bvec2(increasing);
+ vec2 left = vec2(_35.x ? vFrom.x : vTo.x, _35.y ? vFrom.y : vTo.y);
+ bvec2 _41 = bvec2(increasing);
+ vec2 right = vec2(_41.x ? vTo.x : vFrom.x, _41.y ? vTo.y : vFrom.y);
+ vec2 extent = clamp(vec2(vFrom.x, vTo.x), vec2(-0.5), vec2(0.5));
+ float midx = mix(extent.x, extent.y, 0.5);
+ float x0 = midx - left.x;
+ vec2 p1 = vCtrl - left;
+ vec2 v = right - vCtrl;
+ float t = x0 / (p1.x + sqrt((p1.x * p1.x) + ((v.x - p1.x) * x0)));
+ float y = mix(mix(left.y, vCtrl.y, t), mix(vCtrl.y, right.y, t), t);
+ vec2 d_half = mix(p1, v, vec2(t));
+ float dy = d_half.y / d_half.x;
+ float width = extent.y - extent.x;
+ dy = abs(dy * width);
+ vec4 sides = vec4((dy * 0.5) + y, (dy * (-0.5)) + y, (0.5 - y) / dy, ((-0.5) - y) / dy);
+ sides = clamp(sides + vec4(0.5), vec4(0.0), vec4(1.0));
+ float area = 0.5 * ((((sides.z - (sides.z * sides.y)) + 1.0) - sides.x) + (sides.x * sides.w));
+ area *= width;
+ if (width == 0.0)
+ {
+ area = 0.0;
+ }
+ fragCover.x = area;
+}
+
diff --git a/vendor/gioui.org/shader/gio/zstencil.frag.0.metallibios b/vendor/gioui.org/shader/gio/zstencil.frag.0.metallibios
new file mode 100644
index 0000000..52f668f
Binary files /dev/null and b/vendor/gioui.org/shader/gio/zstencil.frag.0.metallibios differ
diff --git a/vendor/gioui.org/shader/gio/zstencil.frag.0.metallibiossimulator b/vendor/gioui.org/shader/gio/zstencil.frag.0.metallibiossimulator
new file mode 100644
index 0000000..3fbfa8f
Binary files /dev/null and b/vendor/gioui.org/shader/gio/zstencil.frag.0.metallibiossimulator differ
diff --git a/vendor/gioui.org/shader/gio/zstencil.frag.0.metallibmacos b/vendor/gioui.org/shader/gio/zstencil.frag.0.metallibmacos
new file mode 100644
index 0000000..358a02a
Binary files /dev/null and b/vendor/gioui.org/shader/gio/zstencil.frag.0.metallibmacos differ
diff --git a/vendor/gioui.org/shader/gio/zstencil.frag.0.spirv b/vendor/gioui.org/shader/gio/zstencil.frag.0.spirv
new file mode 100644
index 0000000..9a22d9e
Binary files /dev/null and b/vendor/gioui.org/shader/gio/zstencil.frag.0.spirv differ
diff --git a/vendor/gioui.org/shader/gio/zstencil.vert.0.dxbc b/vendor/gioui.org/shader/gio/zstencil.vert.0.dxbc
new file mode 100644
index 0000000..505672f
Binary files /dev/null and b/vendor/gioui.org/shader/gio/zstencil.vert.0.dxbc differ
diff --git a/vendor/gioui.org/shader/gio/zstencil.vert.0.glsl100es b/vendor/gioui.org/shader/gio/zstencil.vert.0.glsl100es
new file mode 100644
index 0000000..c8e1261
--- /dev/null
+++ b/vendor/gioui.org/shader/gio/zstencil.vert.0.glsl100es
@@ -0,0 +1,64 @@
+#version 100
+
+struct m3x2
+{
+ vec3 r0;
+ vec3 r1;
+};
+
+struct Block
+{
+ vec4 transform;
+ vec2 pathOffset;
+};
+
+uniform Block _block;
+
+attribute vec2 from;
+attribute vec2 ctrl;
+attribute vec2 to;
+attribute float maxy;
+attribute float corner;
+varying vec2 vFrom;
+varying vec2 vCtrl;
+varying vec2 vTo;
+
+vec3 transform3x2(m3x2 t, vec3 v)
+{
+ return vec3(dot(t.r0, v), dot(t.r1, v), dot(vec3(0.0, 0.0, 1.0), v));
+}
+
+void main()
+{
+ vec2 from_1 = from + _block.pathOffset;
+ vec2 ctrl_1 = ctrl + _block.pathOffset;
+ vec2 to_1 = to + _block.pathOffset;
+ float maxy_1 = maxy + _block.pathOffset.y;
+ float c = corner;
+ vec2 pos;
+ if (c >= 0.375)
+ {
+ c -= 0.5;
+ pos.y = maxy_1 + 1.0;
+ }
+ else
+ {
+ pos.y = min(min(from_1.y, ctrl_1.y), to_1.y) - 1.0;
+ }
+ if (c >= 0.125)
+ {
+ pos.x = max(max(from_1.x, ctrl_1.x), to_1.x) + 1.0;
+ }
+ else
+ {
+ pos.x = min(min(from_1.x, ctrl_1.x), to_1.x) - 1.0;
+ }
+ vFrom = from_1 - pos;
+ vCtrl = ctrl_1 - pos;
+ vTo = to_1 - pos;
+ pos = (pos * _block.transform.xy) + _block.transform.zw;
+ m3x2 param = m3x2(vec3(1.0, 0.0, 0.0), vec3(0.0, 1.0, 0.0));
+ vec3 param_1 = vec3(pos, 0.0);
+ gl_Position = vec4(transform3x2(param, param_1), 1.0);
+}
+
diff --git a/vendor/gioui.org/shader/gio/zstencil.vert.0.glsl150 b/vendor/gioui.org/shader/gio/zstencil.vert.0.glsl150
new file mode 100644
index 0000000..9ae6c9b
--- /dev/null
+++ b/vendor/gioui.org/shader/gio/zstencil.vert.0.glsl150
@@ -0,0 +1,64 @@
+#version 150
+
+struct m3x2
+{
+ vec3 r0;
+ vec3 r1;
+};
+
+struct Block
+{
+ vec4 transform;
+ vec2 pathOffset;
+};
+
+uniform Block _block;
+
+in vec2 from;
+in vec2 ctrl;
+in vec2 to;
+in float maxy;
+in float corner;
+out vec2 vFrom;
+out vec2 vCtrl;
+out vec2 vTo;
+
+vec3 transform3x2(m3x2 t, vec3 v)
+{
+ return vec3(dot(t.r0, v), dot(t.r1, v), dot(vec3(0.0, 0.0, 1.0), v));
+}
+
+void main()
+{
+ vec2 from_1 = from + _block.pathOffset;
+ vec2 ctrl_1 = ctrl + _block.pathOffset;
+ vec2 to_1 = to + _block.pathOffset;
+ float maxy_1 = maxy + _block.pathOffset.y;
+ float c = corner;
+ vec2 pos;
+ if (c >= 0.375)
+ {
+ c -= 0.5;
+ pos.y = maxy_1 + 1.0;
+ }
+ else
+ {
+ pos.y = min(min(from_1.y, ctrl_1.y), to_1.y) - 1.0;
+ }
+ if (c >= 0.125)
+ {
+ pos.x = max(max(from_1.x, ctrl_1.x), to_1.x) + 1.0;
+ }
+ else
+ {
+ pos.x = min(min(from_1.x, ctrl_1.x), to_1.x) - 1.0;
+ }
+ vFrom = from_1 - pos;
+ vCtrl = ctrl_1 - pos;
+ vTo = to_1 - pos;
+ pos = (pos * _block.transform.xy) + _block.transform.zw;
+ m3x2 param = m3x2(vec3(1.0, 0.0, 0.0), vec3(0.0, 1.0, 0.0));
+ vec3 param_1 = vec3(pos, 0.0);
+ gl_Position = vec4(transform3x2(param, param_1), 1.0);
+}
+
diff --git a/vendor/gioui.org/shader/gio/zstencil.vert.0.metallibios b/vendor/gioui.org/shader/gio/zstencil.vert.0.metallibios
new file mode 100644
index 0000000..75ac068
Binary files /dev/null and b/vendor/gioui.org/shader/gio/zstencil.vert.0.metallibios differ
diff --git a/vendor/gioui.org/shader/gio/zstencil.vert.0.metallibiossimulator b/vendor/gioui.org/shader/gio/zstencil.vert.0.metallibiossimulator
new file mode 100644
index 0000000..9ba5f58
Binary files /dev/null and b/vendor/gioui.org/shader/gio/zstencil.vert.0.metallibiossimulator differ
diff --git a/vendor/gioui.org/shader/gio/zstencil.vert.0.metallibmacos b/vendor/gioui.org/shader/gio/zstencil.vert.0.metallibmacos
new file mode 100644
index 0000000..4de454d
Binary files /dev/null and b/vendor/gioui.org/shader/gio/zstencil.vert.0.metallibmacos differ
diff --git a/vendor/gioui.org/shader/gio/zstencil.vert.0.spirv b/vendor/gioui.org/shader/gio/zstencil.vert.0.spirv
new file mode 100644
index 0000000..b45803f
Binary files /dev/null and b/vendor/gioui.org/shader/gio/zstencil.vert.0.spirv differ
diff --git a/vendor/gioui.org/shader/go.mod b/vendor/gioui.org/shader/go.mod
new file mode 100644
index 0000000..4c9167c
--- /dev/null
+++ b/vendor/gioui.org/shader/go.mod
@@ -0,0 +1,5 @@
+module gioui.org/shader
+
+go 1.16
+
+require gioui.org/cpu v0.0.0-20210808092351-bfe733dd3334
diff --git a/vendor/gioui.org/shader/go.sum b/vendor/gioui.org/shader/go.sum
new file mode 100644
index 0000000..ec5a47a
--- /dev/null
+++ b/vendor/gioui.org/shader/go.sum
@@ -0,0 +1,2 @@
+gioui.org/cpu v0.0.0-20210808092351-bfe733dd3334 h1:1xK224B5DnjlPKCfVDTl7+olrzgAXn4ym6dum3l34rs=
+gioui.org/cpu v0.0.0-20210808092351-bfe733dd3334/go.mod h1:A8M0Cn5o+vY5LTMlnRoK3O5kG+rH0kWfJjeKd9QpBmQ=
diff --git a/vendor/gioui.org/shader/piet/abi.h b/vendor/gioui.org/shader/piet/abi.h
new file mode 100644
index 0000000..365d936
--- /dev/null
+++ b/vendor/gioui.org/shader/piet/abi.h
@@ -0,0 +1,91 @@
+// SPDX-License-Identifier: Unlicense OR MIT
+
+#define ALIGN(bytes, type) type __attribute__((aligned(bytes)))
+
+typedef ALIGN(8, uint8_t) byte8[8];
+typedef ALIGN(8, uint16_t) word4[4];
+typedef ALIGN(4, uint32_t) dword;
+typedef ALIGN(16, uint32_t) dword4[4];
+typedef ALIGN(8, uint64_t) qword;
+typedef ALIGN(16, uint64_t) qword2[2];
+typedef ALIGN(16, unsigned int) uint4[4];
+typedef ALIGN(8, uint32_t) dword2[2];
+typedef ALIGN(8, unsigned short) ushort4[4];
+typedef ALIGN(16, float) float4[4];
+typedef ALIGN(16, int) int4[4];
+
+typedef unsigned short half;
+
+typedef unsigned char bool;
+
+enum {
+ MAX_BOUND_DESCRIPTOR_SETS = 4,
+ MAX_DESCRIPTOR_SET_UNIFORM_BUFFERS_DYNAMIC = 8,
+ MAX_DESCRIPTOR_SET_STORAGE_BUFFERS_DYNAMIC = 4,
+ MAX_DESCRIPTOR_SET_COMBINED_BUFFERS_DYNAMIC =
+ MAX_DESCRIPTOR_SET_UNIFORM_BUFFERS_DYNAMIC +
+ MAX_DESCRIPTOR_SET_STORAGE_BUFFERS_DYNAMIC,
+ MAX_PUSH_CONSTANT_SIZE = 128,
+
+ MIN_STORAGE_BUFFER_OFFSET_ALIGNMENT = 256,
+
+ REQUIRED_MEMORY_ALIGNMENT = 16,
+
+ SIMD_WIDTH = 4,
+};
+
+struct image_descriptor {
+ ALIGN(16, void *ptr);
+ int width;
+ int height;
+ int depth;
+ int row_pitch_bytes;
+ int slice_pitch_bytes;
+ int sample_pitch_bytes;
+ int sample_count;
+ int size_in_bytes;
+
+ void *stencil_ptr;
+ int stencil_row_pitch_bytes;
+ int stencil_slice_pitch_bytes;
+ int stencil_sample_pitch_bytes;
+
+ // TODO: unused?
+ void *memoryOwner;
+};
+
+struct buffer_descriptor {
+ ALIGN(16, void *ptr);
+ int size_in_bytes;
+ int robustness_size;
+};
+
+struct program_data {
+ uint8_t *descriptor_sets[MAX_BOUND_DESCRIPTOR_SETS];
+ uint32_t descriptor_dynamic_offsets[MAX_DESCRIPTOR_SET_COMBINED_BUFFERS_DYNAMIC];
+ uint4 num_workgroups;
+ uint4 workgroup_size;
+ uint32_t invocations_per_subgroup;
+ uint32_t subgroups_per_workgroup;
+ uint32_t invocations_per_workgroup;
+ unsigned char push_constants[MAX_PUSH_CONSTANT_SIZE];
+ // Unused.
+ void *constants;
+};
+
+typedef int32_t yield_result;
+
+typedef void * coroutine;
+
+typedef coroutine (*routine_begin)(struct program_data *data,
+ int32_t workgroupX,
+ int32_t workgroupY,
+ int32_t workgroupZ,
+ void *workgroupMemory,
+ int32_t firstSubgroup,
+ int32_t subgroupCount);
+
+typedef bool (*routine_await)(coroutine r, yield_result *res);
+
+typedef void (*routine_destroy)(coroutine r);
+
diff --git a/vendor/gioui.org/shader/piet/annotated.h b/vendor/gioui.org/shader/piet/annotated.h
new file mode 100644
index 0000000..6b18155
--- /dev/null
+++ b/vendor/gioui.org/shader/piet/annotated.h
@@ -0,0 +1,225 @@
+// SPDX-License-Identifier: Apache-2.0 OR MIT OR Unlicense
+
+// Code auto-generated by piet-gpu-derive
+
+struct AnnoImageRef {
+ uint offset;
+};
+
+struct AnnoColorRef {
+ uint offset;
+};
+
+struct AnnoBeginClipRef {
+ uint offset;
+};
+
+struct AnnoEndClipRef {
+ uint offset;
+};
+
+struct AnnotatedRef {
+ uint offset;
+};
+
+struct AnnoImage {
+ vec4 bbox;
+ float linewidth;
+ uint index;
+ ivec2 offset;
+};
+
+#define AnnoImage_size 28
+
+AnnoImageRef AnnoImage_index(AnnoImageRef ref, uint index) {
+ return AnnoImageRef(ref.offset + index * AnnoImage_size);
+}
+
+struct AnnoColor {
+ vec4 bbox;
+ float linewidth;
+ uint rgba_color;
+};
+
+#define AnnoColor_size 24
+
+AnnoColorRef AnnoColor_index(AnnoColorRef ref, uint index) {
+ return AnnoColorRef(ref.offset + index * AnnoColor_size);
+}
+
+struct AnnoBeginClip {
+ vec4 bbox;
+ float linewidth;
+};
+
+#define AnnoBeginClip_size 20
+
+AnnoBeginClipRef AnnoBeginClip_index(AnnoBeginClipRef ref, uint index) {
+ return AnnoBeginClipRef(ref.offset + index * AnnoBeginClip_size);
+}
+
+struct AnnoEndClip {
+ vec4 bbox;
+};
+
+#define AnnoEndClip_size 16
+
+AnnoEndClipRef AnnoEndClip_index(AnnoEndClipRef ref, uint index) {
+ return AnnoEndClipRef(ref.offset + index * AnnoEndClip_size);
+}
+
+#define Annotated_Nop 0
+#define Annotated_Color 1
+#define Annotated_Image 2
+#define Annotated_BeginClip 3
+#define Annotated_EndClip 4
+#define Annotated_size 32
+
+AnnotatedRef Annotated_index(AnnotatedRef ref, uint index) {
+ return AnnotatedRef(ref.offset + index * Annotated_size);
+}
+
+struct AnnotatedTag {
+ uint tag;
+ uint flags;
+};
+
+AnnoImage AnnoImage_read(Alloc a, AnnoImageRef ref) {
+ uint ix = ref.offset >> 2;
+ uint raw0 = read_mem(a, ix + 0);
+ uint raw1 = read_mem(a, ix + 1);
+ uint raw2 = read_mem(a, ix + 2);
+ uint raw3 = read_mem(a, ix + 3);
+ uint raw4 = read_mem(a, ix + 4);
+ uint raw5 = read_mem(a, ix + 5);
+ uint raw6 = read_mem(a, ix + 6);
+ AnnoImage s;
+ s.bbox = vec4(uintBitsToFloat(raw0), uintBitsToFloat(raw1), uintBitsToFloat(raw2), uintBitsToFloat(raw3));
+ s.linewidth = uintBitsToFloat(raw4);
+ s.index = raw5;
+ s.offset = ivec2(int(raw6 << 16) >> 16, int(raw6) >> 16);
+ return s;
+}
+
+void AnnoImage_write(Alloc a, AnnoImageRef ref, AnnoImage s) {
+ uint ix = ref.offset >> 2;
+ write_mem(a, ix + 0, floatBitsToUint(s.bbox.x));
+ write_mem(a, ix + 1, floatBitsToUint(s.bbox.y));
+ write_mem(a, ix + 2, floatBitsToUint(s.bbox.z));
+ write_mem(a, ix + 3, floatBitsToUint(s.bbox.w));
+ write_mem(a, ix + 4, floatBitsToUint(s.linewidth));
+ write_mem(a, ix + 5, s.index);
+ write_mem(a, ix + 6, (uint(s.offset.x) & 0xffff) | (uint(s.offset.y) << 16));
+}
+
+AnnoColor AnnoColor_read(Alloc a, AnnoColorRef ref) {
+ uint ix = ref.offset >> 2;
+ uint raw0 = read_mem(a, ix + 0);
+ uint raw1 = read_mem(a, ix + 1);
+ uint raw2 = read_mem(a, ix + 2);
+ uint raw3 = read_mem(a, ix + 3);
+ uint raw4 = read_mem(a, ix + 4);
+ uint raw5 = read_mem(a, ix + 5);
+ AnnoColor s;
+ s.bbox = vec4(uintBitsToFloat(raw0), uintBitsToFloat(raw1), uintBitsToFloat(raw2), uintBitsToFloat(raw3));
+ s.linewidth = uintBitsToFloat(raw4);
+ s.rgba_color = raw5;
+ return s;
+}
+
+void AnnoColor_write(Alloc a, AnnoColorRef ref, AnnoColor s) {
+ uint ix = ref.offset >> 2;
+ write_mem(a, ix + 0, floatBitsToUint(s.bbox.x));
+ write_mem(a, ix + 1, floatBitsToUint(s.bbox.y));
+ write_mem(a, ix + 2, floatBitsToUint(s.bbox.z));
+ write_mem(a, ix + 3, floatBitsToUint(s.bbox.w));
+ write_mem(a, ix + 4, floatBitsToUint(s.linewidth));
+ write_mem(a, ix + 5, s.rgba_color);
+}
+
+AnnoBeginClip AnnoBeginClip_read(Alloc a, AnnoBeginClipRef ref) {
+ uint ix = ref.offset >> 2;
+ uint raw0 = read_mem(a, ix + 0);
+ uint raw1 = read_mem(a, ix + 1);
+ uint raw2 = read_mem(a, ix + 2);
+ uint raw3 = read_mem(a, ix + 3);
+ uint raw4 = read_mem(a, ix + 4);
+ AnnoBeginClip s;
+ s.bbox = vec4(uintBitsToFloat(raw0), uintBitsToFloat(raw1), uintBitsToFloat(raw2), uintBitsToFloat(raw3));
+ s.linewidth = uintBitsToFloat(raw4);
+ return s;
+}
+
+void AnnoBeginClip_write(Alloc a, AnnoBeginClipRef ref, AnnoBeginClip s) {
+ uint ix = ref.offset >> 2;
+ write_mem(a, ix + 0, floatBitsToUint(s.bbox.x));
+ write_mem(a, ix + 1, floatBitsToUint(s.bbox.y));
+ write_mem(a, ix + 2, floatBitsToUint(s.bbox.z));
+ write_mem(a, ix + 3, floatBitsToUint(s.bbox.w));
+ write_mem(a, ix + 4, floatBitsToUint(s.linewidth));
+}
+
+AnnoEndClip AnnoEndClip_read(Alloc a, AnnoEndClipRef ref) {
+ uint ix = ref.offset >> 2;
+ uint raw0 = read_mem(a, ix + 0);
+ uint raw1 = read_mem(a, ix + 1);
+ uint raw2 = read_mem(a, ix + 2);
+ uint raw3 = read_mem(a, ix + 3);
+ AnnoEndClip s;
+ s.bbox = vec4(uintBitsToFloat(raw0), uintBitsToFloat(raw1), uintBitsToFloat(raw2), uintBitsToFloat(raw3));
+ return s;
+}
+
+void AnnoEndClip_write(Alloc a, AnnoEndClipRef ref, AnnoEndClip s) {
+ uint ix = ref.offset >> 2;
+ write_mem(a, ix + 0, floatBitsToUint(s.bbox.x));
+ write_mem(a, ix + 1, floatBitsToUint(s.bbox.y));
+ write_mem(a, ix + 2, floatBitsToUint(s.bbox.z));
+ write_mem(a, ix + 3, floatBitsToUint(s.bbox.w));
+}
+
+AnnotatedTag Annotated_tag(Alloc a, AnnotatedRef ref) {
+ uint tag_and_flags = read_mem(a, ref.offset >> 2);
+ return AnnotatedTag(tag_and_flags & 0xffff, tag_and_flags >> 16);
+}
+
+AnnoColor Annotated_Color_read(Alloc a, AnnotatedRef ref) {
+ return AnnoColor_read(a, AnnoColorRef(ref.offset + 4));
+}
+
+AnnoImage Annotated_Image_read(Alloc a, AnnotatedRef ref) {
+ return AnnoImage_read(a, AnnoImageRef(ref.offset + 4));
+}
+
+AnnoBeginClip Annotated_BeginClip_read(Alloc a, AnnotatedRef ref) {
+ return AnnoBeginClip_read(a, AnnoBeginClipRef(ref.offset + 4));
+}
+
+AnnoEndClip Annotated_EndClip_read(Alloc a, AnnotatedRef ref) {
+ return AnnoEndClip_read(a, AnnoEndClipRef(ref.offset + 4));
+}
+
+void Annotated_Nop_write(Alloc a, AnnotatedRef ref) {
+ write_mem(a, ref.offset >> 2, Annotated_Nop);
+}
+
+void Annotated_Color_write(Alloc a, AnnotatedRef ref, uint flags, AnnoColor s) {
+ write_mem(a, ref.offset >> 2, (flags << 16) | Annotated_Color);
+ AnnoColor_write(a, AnnoColorRef(ref.offset + 4), s);
+}
+
+void Annotated_Image_write(Alloc a, AnnotatedRef ref, uint flags, AnnoImage s) {
+ write_mem(a, ref.offset >> 2, (flags << 16) | Annotated_Image);
+ AnnoImage_write(a, AnnoImageRef(ref.offset + 4), s);
+}
+
+void Annotated_BeginClip_write(Alloc a, AnnotatedRef ref, uint flags, AnnoBeginClip s) {
+ write_mem(a, ref.offset >> 2, (flags << 16) | Annotated_BeginClip);
+ AnnoBeginClip_write(a, AnnoBeginClipRef(ref.offset + 4), s);
+}
+
+void Annotated_EndClip_write(Alloc a, AnnotatedRef ref, AnnoEndClip s) {
+ write_mem(a, ref.offset >> 2, Annotated_EndClip);
+ AnnoEndClip_write(a, AnnoEndClipRef(ref.offset + 4), s);
+}
+
diff --git a/vendor/gioui.org/shader/piet/backdrop.comp b/vendor/gioui.org/shader/piet/backdrop.comp
new file mode 100644
index 0000000..12ae5b1
--- /dev/null
+++ b/vendor/gioui.org/shader/piet/backdrop.comp
@@ -0,0 +1,109 @@
+// SPDX-License-Identifier: Apache-2.0 OR MIT OR Unlicense
+
+// Propagation of tile backdrop for filling.
+//
+// Each thread reads one path element and calculates the number of spanned tiles
+// based on the bounding box.
+// In a further compaction step, the workgroup loops over the corresponding tile rows per element in parallel.
+// For each row the per tile backdrop will be read, as calculated in the previous coarse path segment kernel,
+// and propagated from the left to the right (prefix summed).
+//
+// Output state:
+// - Each path element has an array of tiles covering the whole path based on boundig box
+// - Each tile per path element contains the 'backdrop' and a list of subdivided path segments
+
+#version 450
+#extension GL_GOOGLE_include_directive : enable
+
+#include "mem.h"
+#include "setup.h"
+
+#define LG_BACKDROP_WG (7 + LG_WG_FACTOR)
+#define BACKDROP_WG (1 << LG_BACKDROP_WG)
+
+layout(local_size_x = BACKDROP_WG, local_size_y = 1) in;
+
+layout(set = 0, binding = 1) readonly buffer ConfigBuf {
+ Config conf;
+};
+
+#include "annotated.h"
+#include "tile.h"
+
+shared uint sh_row_count[BACKDROP_WG];
+shared Alloc sh_row_alloc[BACKDROP_WG];
+shared uint sh_row_width[BACKDROP_WG];
+
+void main() {
+ uint th_ix = gl_LocalInvocationID.x;
+ uint element_ix = gl_GlobalInvocationID.x;
+ AnnotatedRef ref = AnnotatedRef(conf.anno_alloc.offset + element_ix * Annotated_size);
+
+ // Work assignment: 1 thread : 1 path element
+ uint row_count = 0;
+ bool mem_ok = mem_error == NO_ERROR;
+ if (element_ix < conf.n_elements) {
+ AnnotatedTag tag = Annotated_tag(conf.anno_alloc, ref);
+ switch (tag.tag) {
+ case Annotated_Image:
+ case Annotated_BeginClip:
+ case Annotated_Color:
+ if (fill_mode_from_flags(tag.flags) != MODE_NONZERO) {
+ break;
+ }
+ // Fall through.
+ PathRef path_ref = PathRef(conf.tile_alloc.offset + element_ix * Path_size);
+ Path path = Path_read(conf.tile_alloc, path_ref);
+ sh_row_width[th_ix] = path.bbox.z - path.bbox.x;
+ row_count = path.bbox.w - path.bbox.y;
+ // Paths that don't cross tile top edges don't have backdrops.
+ // Don't apply the optimization to paths that may cross the y = 0
+ // top edge, but clipped to 1 row.
+ if (row_count == 1 && path.bbox.y > 0) {
+ // Note: this can probably be expanded to width = 2 as
+ // long as it doesn't cross the left edge.
+ row_count = 0;
+ }
+ Alloc path_alloc = new_alloc(path.tiles.offset, (path.bbox.z - path.bbox.x) * (path.bbox.w - path.bbox.y) * Tile_size, mem_ok);
+ sh_row_alloc[th_ix] = path_alloc;
+ }
+ }
+
+ sh_row_count[th_ix] = row_count;
+ // Prefix sum of sh_row_count
+ for (uint i = 0; i < LG_BACKDROP_WG; i++) {
+ barrier();
+ if (th_ix >= (1 << i)) {
+ row_count += sh_row_count[th_ix - (1 << i)];
+ }
+ barrier();
+ sh_row_count[th_ix] = row_count;
+ }
+ barrier();
+ // Work assignment: 1 thread : 1 path element row
+ uint total_rows = sh_row_count[BACKDROP_WG - 1];
+ for (uint row = th_ix; row < total_rows; row += BACKDROP_WG) {
+ // Binary search to find element
+ uint el_ix = 0;
+ for (uint i = 0; i < LG_BACKDROP_WG; i++) {
+ uint probe = el_ix + ((BACKDROP_WG / 2) >> i);
+ if (row >= sh_row_count[probe - 1]) {
+ el_ix = probe;
+ }
+ }
+ uint width = sh_row_width[el_ix];
+ if (width > 0 && mem_ok) {
+ // Process one row sequentially
+ // Read backdrop value per tile and prefix sum it
+ Alloc tiles_alloc = sh_row_alloc[el_ix];
+ uint seq_ix = row - (el_ix > 0 ? sh_row_count[el_ix - 1] : 0);
+ uint tile_el_ix = (tiles_alloc.offset >> 2) + 1 + seq_ix * 2 * width;
+ uint sum = read_mem(tiles_alloc, tile_el_ix);
+ for (uint x = 1; x < width; x++) {
+ tile_el_ix += 2;
+ sum += read_mem(tiles_alloc, tile_el_ix);
+ write_mem(tiles_alloc, tile_el_ix, sum);
+ }
+ }
+ }
+}
diff --git a/vendor/gioui.org/shader/piet/backdrop_abi.c b/vendor/gioui.org/shader/piet/backdrop_abi.c
new file mode 100644
index 0000000..48a4a30
--- /dev/null
+++ b/vendor/gioui.org/shader/piet/backdrop_abi.c
@@ -0,0 +1,23 @@
+// Code generated by gioui.org/cpu/cmd/compile DO NOT EDIT.
+
+//go:build linux && (arm64 || arm || amd64)
+// +build linux
+// +build arm64 arm amd64
+
+#include
+#include
+#include "abi.h"
+#include "runtime.h"
+#include "backdrop_abi.h"
+
+const struct program_info backdrop_program_info = {
+ .has_cbarriers = 1,
+ .min_memory_size = 100000,
+ .desc_set_size = sizeof(struct backdrop_descriptor_set_layout),
+ .workgroup_size_x = 128,
+ .workgroup_size_y = 1,
+ .workgroup_size_z = 1,
+ .begin = backdrop_coroutine_begin,
+ .await = backdrop_coroutine_await,
+ .destroy = backdrop_coroutine_destroy,
+};
diff --git a/vendor/gioui.org/shader/piet/backdrop_abi.go b/vendor/gioui.org/shader/piet/backdrop_abi.go
new file mode 100644
index 0000000..a40a37d
--- /dev/null
+++ b/vendor/gioui.org/shader/piet/backdrop_abi.go
@@ -0,0 +1,35 @@
+// Code generated by gioui.org/cpu/cmd/compile DO NOT EDIT.
+
+//go:build linux && (arm64 || arm || amd64)
+// +build linux
+// +build arm64 arm amd64
+
+package piet
+
+import "gioui.org/cpu"
+import "unsafe"
+
+/*
+#cgo LDFLAGS: -lm
+
+#include
+#include
+#include "abi.h"
+#include "runtime.h"
+#include "backdrop_abi.h"
+*/
+import "C"
+
+var BackdropProgramInfo = (*cpu.ProgramInfo)(unsafe.Pointer(&C.backdrop_program_info))
+
+type BackdropDescriptorSetLayout = C.struct_backdrop_descriptor_set_layout
+
+const BackdropHash = "6862eaf623d89da635e9d5bc981c77aae4e39aa44047ab47c62d243cf5fe7e73"
+
+func (l *BackdropDescriptorSetLayout) Binding0() *cpu.BufferDescriptor {
+ return (*cpu.BufferDescriptor)(unsafe.Pointer(&l.binding0))
+}
+
+func (l *BackdropDescriptorSetLayout) Binding1() *cpu.BufferDescriptor {
+ return (*cpu.BufferDescriptor)(unsafe.Pointer(&l.binding1))
+}
diff --git a/vendor/gioui.org/shader/piet/backdrop_abi.h b/vendor/gioui.org/shader/piet/backdrop_abi.h
new file mode 100644
index 0000000..f5c0303
--- /dev/null
+++ b/vendor/gioui.org/shader/piet/backdrop_abi.h
@@ -0,0 +1,17 @@
+// Code generated by gioui.org/cpu/cmd/compile DO NOT EDIT.
+
+struct backdrop_descriptor_set_layout {
+ struct buffer_descriptor binding0;
+ struct buffer_descriptor binding1;
+};
+
+extern coroutine backdrop_coroutine_begin(struct program_data *data,
+ int32_t workgroupX, int32_t workgroupY, int32_t workgroupZ,
+ void *workgroupMemory,
+ int32_t firstSubgroup,
+ int32_t subgroupCount) ATTR_HIDDEN;
+
+extern bool backdrop_coroutine_await(coroutine r, yield_result *res) ATTR_HIDDEN;
+extern void backdrop_coroutine_destroy(coroutine r) ATTR_HIDDEN;
+
+extern const struct program_info backdrop_program_info ATTR_HIDDEN;
diff --git a/vendor/gioui.org/shader/piet/backdrop_abi_nosupport.go b/vendor/gioui.org/shader/piet/backdrop_abi_nosupport.go
new file mode 100644
index 0000000..c60119b
--- /dev/null
+++ b/vendor/gioui.org/shader/piet/backdrop_abi_nosupport.go
@@ -0,0 +1,22 @@
+// Code generated by gioui.org/cpu/cmd/compile DO NOT EDIT.
+
+//go:build !(linux && (arm64 || arm || amd64))
+// +build !linux !arm64,!arm,!amd64
+
+package piet
+
+import "gioui.org/cpu"
+
+var BackdropProgramInfo *cpu.ProgramInfo
+
+type BackdropDescriptorSetLayout struct{}
+
+const BackdropHash = ""
+
+func (l *BackdropDescriptorSetLayout) Binding0() *cpu.BufferDescriptor {
+ panic("unsupported")
+}
+
+func (l *BackdropDescriptorSetLayout) Binding1() *cpu.BufferDescriptor {
+ panic("unsupported")
+}
diff --git a/vendor/gioui.org/shader/piet/backdrop_linux_amd64.syso b/vendor/gioui.org/shader/piet/backdrop_linux_amd64.syso
new file mode 100644
index 0000000..2a71e8a
Binary files /dev/null and b/vendor/gioui.org/shader/piet/backdrop_linux_amd64.syso differ
diff --git a/vendor/gioui.org/shader/piet/backdrop_linux_arm.syso b/vendor/gioui.org/shader/piet/backdrop_linux_arm.syso
new file mode 100644
index 0000000..c86dada
Binary files /dev/null and b/vendor/gioui.org/shader/piet/backdrop_linux_arm.syso differ
diff --git a/vendor/gioui.org/shader/piet/backdrop_linux_arm64.syso b/vendor/gioui.org/shader/piet/backdrop_linux_arm64.syso
new file mode 100644
index 0000000..4608a7e
Binary files /dev/null and b/vendor/gioui.org/shader/piet/backdrop_linux_arm64.syso differ
diff --git a/vendor/gioui.org/shader/piet/binning.comp b/vendor/gioui.org/shader/piet/binning.comp
new file mode 100644
index 0000000..acda83c
--- /dev/null
+++ b/vendor/gioui.org/shader/piet/binning.comp
@@ -0,0 +1,148 @@
+// SPDX-License-Identifier: Apache-2.0 OR MIT OR Unlicense
+
+// The binning stage of the pipeline.
+//
+// Each workgroup processes N_TILE paths.
+// Each thread processes one path and calculates a N_TILE_X x N_TILE_Y coverage mask
+// based on the path bounding box to bin the paths.
+
+#version 450
+#extension GL_GOOGLE_include_directive : enable
+
+#include "mem.h"
+#include "setup.h"
+
+layout(local_size_x = N_TILE, local_size_y = 1) in;
+
+layout(set = 0, binding = 1) readonly buffer ConfigBuf {
+ Config conf;
+};
+
+#include "annotated.h"
+#include "bins.h"
+
+// scale factors useful for converting coordinates to bins
+#define SX (1.0 / float(N_TILE_X * TILE_WIDTH_PX))
+#define SY (1.0 / float(N_TILE_Y * TILE_HEIGHT_PX))
+
+// Constant not available in GLSL. Also consider uintBitsToFloat(0x7f800000)
+#define INFINITY (1.0 / 0.0)
+
+// Note: cudaraster has N_TILE + 1 to cut down on bank conflicts.
+// Bitmaps are sliced (256bit into 8 (N_SLICE) 32bit submaps)
+shared uint bitmaps[N_SLICE][N_TILE];
+shared uint count[N_SLICE][N_TILE];
+shared Alloc sh_chunk_alloc[N_TILE];
+// Really a bool, but some Metal devices don't accept shared bools.
+shared uint sh_alloc_failed;
+
+void main() {
+ uint my_n_elements = conf.n_elements;
+ uint my_partition = gl_WorkGroupID.x;
+
+ for (uint i = 0; i < N_SLICE; i++) {
+ bitmaps[i][gl_LocalInvocationID.x] = 0;
+ }
+ if (gl_LocalInvocationID.x == 0) {
+ sh_alloc_failed = 0;
+ }
+ barrier();
+
+ // Read inputs and determine coverage of bins
+ uint element_ix = my_partition * N_TILE + gl_LocalInvocationID.x;
+ AnnotatedRef ref = AnnotatedRef(conf.anno_alloc.offset + element_ix * Annotated_size);
+ uint tag = Annotated_Nop;
+ if (element_ix < my_n_elements) {
+ tag = Annotated_tag(conf.anno_alloc, ref).tag;
+ }
+ int x0 = 0, y0 = 0, x1 = 0, y1 = 0;
+ switch (tag) {
+ case Annotated_Color:
+ case Annotated_Image:
+ case Annotated_BeginClip:
+ case Annotated_EndClip:
+ // Note: we take advantage of the fact that these drawing elements
+ // have the bbox at the same place in their layout.
+ AnnoEndClip clip = Annotated_EndClip_read(conf.anno_alloc, ref);
+ x0 = int(floor(clip.bbox.x * SX));
+ y0 = int(floor(clip.bbox.y * SY));
+ x1 = int(ceil(clip.bbox.z * SX));
+ y1 = int(ceil(clip.bbox.w * SY));
+ break;
+ }
+
+ // At this point, we run an iterator over the coverage area,
+ // trying to keep divergence low.
+ // Right now, it's just a bbox, but we'll get finer with
+ // segments.
+ uint width_in_bins = (conf.width_in_tiles + N_TILE_X - 1)/N_TILE_X;
+ uint height_in_bins = (conf.height_in_tiles + N_TILE_Y - 1)/N_TILE_Y;
+ x0 = clamp(x0, 0, int(width_in_bins));
+ x1 = clamp(x1, x0, int(width_in_bins));
+ y0 = clamp(y0, 0, int(height_in_bins));
+ y1 = clamp(y1, y0, int(height_in_bins));
+ if (x0 == x1) y1 = y0;
+ int x = x0, y = y0;
+ uint my_slice = gl_LocalInvocationID.x / 32;
+ uint my_mask = 1 << (gl_LocalInvocationID.x & 31);
+ while (y < y1) {
+ atomicOr(bitmaps[my_slice][y * width_in_bins + x], my_mask);
+ x++;
+ if (x == x1) {
+ x = x0;
+ y++;
+ }
+ }
+
+ barrier();
+ // Allocate output segments.
+ uint element_count = 0;
+ for (uint i = 0; i < N_SLICE; i++) {
+ element_count += bitCount(bitmaps[i][gl_LocalInvocationID.x]);
+ count[i][gl_LocalInvocationID.x] = element_count;
+ }
+ // element_count is number of elements covering bin for this invocation.
+ Alloc chunk_alloc = new_alloc(0, 0, true);
+ if (element_count != 0) {
+ // TODO: aggregate atomic adds (subgroup is probably fastest)
+ MallocResult chunk = malloc(element_count * BinInstance_size);
+ chunk_alloc = chunk.alloc;
+ sh_chunk_alloc[gl_LocalInvocationID.x] = chunk_alloc;
+ if (chunk.failed) {
+ sh_alloc_failed = 1;
+ }
+ }
+ // Note: it might be more efficient for reading to do this in the
+ // other order (each bin is a contiguous sequence of partitions)
+ uint out_ix = (conf.bin_alloc.offset >> 2) + (my_partition * N_TILE + gl_LocalInvocationID.x) * 2;
+ write_mem(conf.bin_alloc, out_ix, element_count);
+ write_mem(conf.bin_alloc, out_ix + 1, chunk_alloc.offset);
+
+ barrier();
+ if (sh_alloc_failed != 0 || mem_error != NO_ERROR) {
+ return;
+ }
+
+ // Use similar strategy as Laine & Karras paper; loop over bbox of bins
+ // touched by this element
+ x = x0;
+ y = y0;
+ while (y < y1) {
+ uint bin_ix = y * width_in_bins + x;
+ uint out_mask = bitmaps[my_slice][bin_ix];
+ if ((out_mask & my_mask) != 0) {
+ uint idx = bitCount(out_mask & (my_mask - 1));
+ if (my_slice > 0) {
+ idx += count[my_slice - 1][bin_ix];
+ }
+ Alloc out_alloc = sh_chunk_alloc[bin_ix];
+ uint out_offset = out_alloc.offset + idx * BinInstance_size;
+ BinInstance_write(out_alloc, BinInstanceRef(out_offset), BinInstance(element_ix));
+ }
+ x++;
+ if (x == x1) {
+ x = x0;
+ y++;
+ }
+ }
+}
diff --git a/vendor/gioui.org/shader/piet/binning_abi.c b/vendor/gioui.org/shader/piet/binning_abi.c
new file mode 100644
index 0000000..b28be33
--- /dev/null
+++ b/vendor/gioui.org/shader/piet/binning_abi.c
@@ -0,0 +1,23 @@
+// Code generated by gioui.org/cpu/cmd/compile DO NOT EDIT.
+
+//go:build linux && (arm64 || arm || amd64)
+// +build linux
+// +build arm64 arm amd64
+
+#include
+#include
+#include "abi.h"
+#include "runtime.h"
+#include "binning_abi.h"
+
+const struct program_info binning_program_info = {
+ .has_cbarriers = 1,
+ .min_memory_size = 100000,
+ .desc_set_size = sizeof(struct binning_descriptor_set_layout),
+ .workgroup_size_x = 128,
+ .workgroup_size_y = 1,
+ .workgroup_size_z = 1,
+ .begin = binning_coroutine_begin,
+ .await = binning_coroutine_await,
+ .destroy = binning_coroutine_destroy,
+};
diff --git a/vendor/gioui.org/shader/piet/binning_abi.go b/vendor/gioui.org/shader/piet/binning_abi.go
new file mode 100644
index 0000000..7ee361f
--- /dev/null
+++ b/vendor/gioui.org/shader/piet/binning_abi.go
@@ -0,0 +1,35 @@
+// Code generated by gioui.org/cpu/cmd/compile DO NOT EDIT.
+
+//go:build linux && (arm64 || arm || amd64)
+// +build linux
+// +build arm64 arm amd64
+
+package piet
+
+import "gioui.org/cpu"
+import "unsafe"
+
+/*
+#cgo LDFLAGS: -lm
+
+#include
+#include
+#include "abi.h"
+#include "runtime.h"
+#include "binning_abi.h"
+*/
+import "C"
+
+var BinningProgramInfo = (*cpu.ProgramInfo)(unsafe.Pointer(&C.binning_program_info))
+
+type BinningDescriptorSetLayout = C.struct_binning_descriptor_set_layout
+
+const BinningHash = "84177b6dfb90309a6c054ab9fea42293bd49033c221651c756eb40188f4d6ce8"
+
+func (l *BinningDescriptorSetLayout) Binding0() *cpu.BufferDescriptor {
+ return (*cpu.BufferDescriptor)(unsafe.Pointer(&l.binding0))
+}
+
+func (l *BinningDescriptorSetLayout) Binding1() *cpu.BufferDescriptor {
+ return (*cpu.BufferDescriptor)(unsafe.Pointer(&l.binding1))
+}
diff --git a/vendor/gioui.org/shader/piet/binning_abi.h b/vendor/gioui.org/shader/piet/binning_abi.h
new file mode 100644
index 0000000..0152f34
--- /dev/null
+++ b/vendor/gioui.org/shader/piet/binning_abi.h
@@ -0,0 +1,17 @@
+// Code generated by gioui.org/cpu/cmd/compile DO NOT EDIT.
+
+struct binning_descriptor_set_layout {
+ struct buffer_descriptor binding0;
+ struct buffer_descriptor binding1;
+};
+
+extern coroutine binning_coroutine_begin(struct program_data *data,
+ int32_t workgroupX, int32_t workgroupY, int32_t workgroupZ,
+ void *workgroupMemory,
+ int32_t firstSubgroup,
+ int32_t subgroupCount) ATTR_HIDDEN;
+
+extern bool binning_coroutine_await(coroutine r, yield_result *res) ATTR_HIDDEN;
+extern void binning_coroutine_destroy(coroutine r) ATTR_HIDDEN;
+
+extern const struct program_info binning_program_info ATTR_HIDDEN;
diff --git a/vendor/gioui.org/shader/piet/binning_abi_nosupport.go b/vendor/gioui.org/shader/piet/binning_abi_nosupport.go
new file mode 100644
index 0000000..81b9b56
--- /dev/null
+++ b/vendor/gioui.org/shader/piet/binning_abi_nosupport.go
@@ -0,0 +1,22 @@
+// Code generated by gioui.org/cpu/cmd/compile DO NOT EDIT.
+
+//go:build !(linux && (arm64 || arm || amd64))
+// +build !linux !arm64,!arm,!amd64
+
+package piet
+
+import "gioui.org/cpu"
+
+var BinningProgramInfo *cpu.ProgramInfo
+
+type BinningDescriptorSetLayout struct{}
+
+const BinningHash = ""
+
+func (l *BinningDescriptorSetLayout) Binding0() *cpu.BufferDescriptor {
+ panic("unsupported")
+}
+
+func (l *BinningDescriptorSetLayout) Binding1() *cpu.BufferDescriptor {
+ panic("unsupported")
+}
diff --git a/vendor/gioui.org/shader/piet/binning_linux_amd64.syso b/vendor/gioui.org/shader/piet/binning_linux_amd64.syso
new file mode 100644
index 0000000..329d95d
Binary files /dev/null and b/vendor/gioui.org/shader/piet/binning_linux_amd64.syso differ
diff --git a/vendor/gioui.org/shader/piet/binning_linux_arm.syso b/vendor/gioui.org/shader/piet/binning_linux_arm.syso
new file mode 100644
index 0000000..a8f21a7
Binary files /dev/null and b/vendor/gioui.org/shader/piet/binning_linux_arm.syso differ
diff --git a/vendor/gioui.org/shader/piet/binning_linux_arm64.syso b/vendor/gioui.org/shader/piet/binning_linux_arm64.syso
new file mode 100644
index 0000000..02938c5
Binary files /dev/null and b/vendor/gioui.org/shader/piet/binning_linux_arm64.syso differ
diff --git a/vendor/gioui.org/shader/piet/bins.h b/vendor/gioui.org/shader/piet/bins.h
new file mode 100644
index 0000000..853adab
--- /dev/null
+++ b/vendor/gioui.org/shader/piet/bins.h
@@ -0,0 +1,31 @@
+// SPDX-License-Identifier: Apache-2.0 OR MIT OR Unlicense
+
+// Code auto-generated by piet-gpu-derive
+
+struct BinInstanceRef {
+ uint offset;
+};
+
+struct BinInstance {
+ uint element_ix;
+};
+
+#define BinInstance_size 4
+
+BinInstanceRef BinInstance_index(BinInstanceRef ref, uint index) {
+ return BinInstanceRef(ref.offset + index * BinInstance_size);
+}
+
+BinInstance BinInstance_read(Alloc a, BinInstanceRef ref) {
+ uint ix = ref.offset >> 2;
+ uint raw0 = read_mem(a, ix + 0);
+ BinInstance s;
+ s.element_ix = raw0;
+ return s;
+}
+
+void BinInstance_write(Alloc a, BinInstanceRef ref, BinInstance s) {
+ uint ix = ref.offset >> 2;
+ write_mem(a, ix + 0, s.element_ix);
+}
+
diff --git a/vendor/gioui.org/shader/piet/coarse.comp b/vendor/gioui.org/shader/piet/coarse.comp
new file mode 100644
index 0000000..731da97
--- /dev/null
+++ b/vendor/gioui.org/shader/piet/coarse.comp
@@ -0,0 +1,426 @@
+// SPDX-License-Identifier: Apache-2.0 OR MIT OR Unlicense
+
+// The coarse rasterizer stage of the pipeline.
+//
+// As input we have the ordered partitions of paths from the binning phase and
+// the annotated tile list of segments and backdrop per path.
+//
+// Each workgroup operating on one bin by stream compacting
+// the elements corresponding to the bin.
+//
+// As output we have an ordered command stream per tile. Every tile from a path (backdrop + segment list) will be encoded.
+
+#version 450
+#extension GL_GOOGLE_include_directive : enable
+
+#include "mem.h"
+#include "setup.h"
+
+layout(local_size_x = N_TILE, local_size_y = 1) in;
+
+layout(set = 0, binding = 1) readonly buffer ConfigBuf {
+ Config conf;
+};
+
+#include "annotated.h"
+#include "bins.h"
+#include "tile.h"
+#include "ptcl.h"
+
+#define LG_N_PART_READ (7 + LG_WG_FACTOR)
+#define N_PART_READ (1 << LG_N_PART_READ)
+
+shared uint sh_elements[N_TILE];
+
+// Number of elements in the partition; prefix sum.
+shared uint sh_part_count[N_PART_READ];
+shared Alloc sh_part_elements[N_PART_READ];
+
+shared uint sh_bitmaps[N_SLICE][N_TILE];
+
+shared uint sh_tile_count[N_TILE];
+// The width of the tile rect for the element, intersected with this bin
+shared uint sh_tile_width[N_TILE];
+shared uint sh_tile_x0[N_TILE];
+shared uint sh_tile_y0[N_TILE];
+
+// These are set up so base + tile_y * stride + tile_x points to a Tile.
+shared uint sh_tile_base[N_TILE];
+shared uint sh_tile_stride[N_TILE];
+
+#ifdef MEM_DEBUG
+// Store allocs only when MEM_DEBUG to save shared memory traffic.
+shared Alloc sh_tile_alloc[N_TILE];
+
+void write_tile_alloc(uint el_ix, Alloc a) {
+ sh_tile_alloc[el_ix] = a;
+}
+
+Alloc read_tile_alloc(uint el_ix, bool mem_ok) {
+ return sh_tile_alloc[el_ix];
+}
+#else
+void write_tile_alloc(uint el_ix, Alloc a) {
+ // No-op
+}
+
+Alloc read_tile_alloc(uint el_ix, bool mem_ok) {
+ // All memory.
+ return new_alloc(0, memory.length()*4, mem_ok);
+}
+#endif
+
+// The maximum number of commands per annotated element.
+#define ANNO_COMMANDS 2
+
+// Perhaps cmd_alloc should be a global? This is a style question.
+bool alloc_cmd(inout Alloc cmd_alloc, inout CmdRef cmd_ref, inout uint cmd_limit) {
+ if (cmd_ref.offset < cmd_limit) {
+ return true;
+ }
+ MallocResult new_cmd = malloc(PTCL_INITIAL_ALLOC);
+ if (new_cmd.failed) {
+ return false;
+ }
+ CmdJump jump = CmdJump(new_cmd.alloc.offset);
+ Cmd_Jump_write(cmd_alloc, cmd_ref, jump);
+ cmd_alloc = new_cmd.alloc;
+ cmd_ref = CmdRef(cmd_alloc.offset);
+ // Reserve space for the maximum number of commands and a potential jump.
+ cmd_limit = cmd_alloc.offset + PTCL_INITIAL_ALLOC - (ANNO_COMMANDS + 1) * Cmd_size;
+ return true;
+}
+
+void write_fill(Alloc alloc, inout CmdRef cmd_ref, uint flags, Tile tile, float linewidth) {
+ if (fill_mode_from_flags(flags) == MODE_NONZERO) {
+ if (tile.tile.offset != 0) {
+ CmdFill cmd_fill = CmdFill(tile.tile.offset, tile.backdrop);
+ Cmd_Fill_write(alloc, cmd_ref, cmd_fill);
+ cmd_ref.offset += 4 + CmdFill_size;
+ } else {
+ Cmd_Solid_write(alloc, cmd_ref);
+ cmd_ref.offset += 4;
+ }
+ } else {
+ CmdStroke cmd_stroke = CmdStroke(tile.tile.offset, 0.5 * linewidth);
+ Cmd_Stroke_write(alloc, cmd_ref, cmd_stroke);
+ cmd_ref.offset += 4 + CmdStroke_size;
+ }
+}
+
+void main() {
+ // Could use either linear or 2d layouts for both dispatch and
+ // invocations within the workgroup. We'll use variables to abstract.
+ uint width_in_bins = (conf.width_in_tiles + N_TILE_X - 1)/N_TILE_X;
+ uint bin_ix = width_in_bins * gl_WorkGroupID.y + gl_WorkGroupID.x;
+ uint partition_ix = 0;
+ uint n_partitions = (conf.n_elements + N_TILE - 1) / N_TILE;
+ uint th_ix = gl_LocalInvocationID.x;
+
+ // Coordinates of top left of bin, in tiles.
+ uint bin_tile_x = N_TILE_X * gl_WorkGroupID.x;
+ uint bin_tile_y = N_TILE_Y * gl_WorkGroupID.y;
+
+ // Per-tile state
+ uint tile_x = gl_LocalInvocationID.x % N_TILE_X;
+ uint tile_y = gl_LocalInvocationID.x / N_TILE_X;
+ uint this_tile_ix = (bin_tile_y + tile_y) * conf.width_in_tiles + bin_tile_x + tile_x;
+ Alloc cmd_alloc = slice_mem(conf.ptcl_alloc, this_tile_ix * PTCL_INITIAL_ALLOC, PTCL_INITIAL_ALLOC);
+ CmdRef cmd_ref = CmdRef(cmd_alloc.offset);
+ // Reserve space for the maximum number of commands and a potential jump.
+ uint cmd_limit = cmd_ref.offset + PTCL_INITIAL_ALLOC - (ANNO_COMMANDS + 1) * Cmd_size;
+ // The nesting depth of the clip stack
+ uint clip_depth = 0;
+ // State for the "clip zero" optimization. If it's nonzero, then we are
+ // currently in a clip for which the entire tile has an alpha of zero, and
+ // the value is the depth after the "begin clip" of that element.
+ uint clip_zero_depth = 0;
+ // State for the "clip one" optimization. If bit `i` is set, then that means
+ // that the clip pushed at depth `i` has an alpha of all one.
+ uint clip_one_mask = 0;
+
+ // I'm sure we can figure out how to do this with at least one fewer register...
+ // Items up to rd_ix have been read from sh_elements
+ uint rd_ix = 0;
+ // Items up to wr_ix have been written into sh_elements
+ uint wr_ix = 0;
+ // Items between part_start_ix and ready_ix are ready to be transferred from sh_part_elements
+ uint part_start_ix = 0;
+ uint ready_ix = 0;
+
+ // Leave room for the fine rasterizer scratch allocation.
+ Alloc scratch_alloc = slice_mem(cmd_alloc, 0, Alloc_size);
+ cmd_ref.offset += Alloc_size;
+
+ uint num_begin_slots = 0;
+ uint begin_slot = 0;
+ bool mem_ok = mem_error == NO_ERROR;
+ while (true) {
+ for (uint i = 0; i < N_SLICE; i++) {
+ sh_bitmaps[i][th_ix] = 0;
+ }
+
+ // parallel read of input partitions
+ do {
+ if (ready_ix == wr_ix && partition_ix < n_partitions) {
+ part_start_ix = ready_ix;
+ uint count = 0;
+ if (th_ix < N_PART_READ && partition_ix + th_ix < n_partitions) {
+ uint in_ix = (conf.bin_alloc.offset >> 2) + ((partition_ix + th_ix) * N_TILE + bin_ix) * 2;
+ count = read_mem(conf.bin_alloc, in_ix);
+ uint offset = read_mem(conf.bin_alloc, in_ix + 1);
+ sh_part_elements[th_ix] = new_alloc(offset, count*BinInstance_size, mem_ok);
+ }
+ // prefix sum of counts
+ for (uint i = 0; i < LG_N_PART_READ; i++) {
+ if (th_ix < N_PART_READ) {
+ sh_part_count[th_ix] = count;
+ }
+ barrier();
+ if (th_ix < N_PART_READ) {
+ if (th_ix >= (1 << i)) {
+ count += sh_part_count[th_ix - (1 << i)];
+ }
+ }
+ barrier();
+ }
+ if (th_ix < N_PART_READ) {
+ sh_part_count[th_ix] = part_start_ix + count;
+ }
+ barrier();
+ ready_ix = sh_part_count[N_PART_READ - 1];
+ partition_ix += N_PART_READ;
+ }
+ // use binary search to find element to read
+ uint ix = rd_ix + th_ix;
+ if (ix >= wr_ix && ix < ready_ix && mem_ok) {
+ uint part_ix = 0;
+ for (uint i = 0; i < LG_N_PART_READ; i++) {
+ uint probe = part_ix + ((N_PART_READ / 2) >> i);
+ if (ix >= sh_part_count[probe - 1]) {
+ part_ix = probe;
+ }
+ }
+ ix -= part_ix > 0 ? sh_part_count[part_ix - 1] : part_start_ix;
+ Alloc bin_alloc = sh_part_elements[part_ix];
+ BinInstanceRef inst_ref = BinInstanceRef(bin_alloc.offset);
+ BinInstance inst = BinInstance_read(bin_alloc, BinInstance_index(inst_ref, ix));
+ sh_elements[th_ix] = inst.element_ix;
+ }
+ barrier();
+
+ wr_ix = min(rd_ix + N_TILE, ready_ix);
+ } while (wr_ix - rd_ix < N_TILE && (wr_ix < ready_ix || partition_ix < n_partitions));
+
+ // We've done the merge and filled the buffer.
+
+ // Read one element, compute coverage.
+ uint tag = Annotated_Nop;
+ uint element_ix;
+ AnnotatedRef ref;
+ if (th_ix + rd_ix < wr_ix) {
+ element_ix = sh_elements[th_ix];
+ ref = AnnotatedRef(conf.anno_alloc.offset + element_ix * Annotated_size);
+ tag = Annotated_tag(conf.anno_alloc, ref).tag;
+ }
+
+ // Bounding box of element in pixel coordinates.
+ uint tile_count;
+ switch (tag) {
+ case Annotated_Color:
+ case Annotated_Image:
+ case Annotated_BeginClip:
+ case Annotated_EndClip:
+ // We have one "path" for each element, even if the element isn't
+ // actually a path (currently EndClip, but images etc in the future).
+ uint path_ix = element_ix;
+ Path path = Path_read(conf.tile_alloc, PathRef(conf.tile_alloc.offset + path_ix * Path_size));
+ uint stride = path.bbox.z - path.bbox.x;
+ sh_tile_stride[th_ix] = stride;
+ int dx = int(path.bbox.x) - int(bin_tile_x);
+ int dy = int(path.bbox.y) - int(bin_tile_y);
+ int x0 = clamp(dx, 0, N_TILE_X);
+ int y0 = clamp(dy, 0, N_TILE_Y);
+ int x1 = clamp(int(path.bbox.z) - int(bin_tile_x), 0, N_TILE_X);
+ int y1 = clamp(int(path.bbox.w) - int(bin_tile_y), 0, N_TILE_Y);
+ sh_tile_width[th_ix] = uint(x1 - x0);
+ sh_tile_x0[th_ix] = x0;
+ sh_tile_y0[th_ix] = y0;
+ tile_count = uint(x1 - x0) * uint(y1 - y0);
+ // base relative to bin
+ uint base = path.tiles.offset - uint(dy * stride + dx) * Tile_size;
+ sh_tile_base[th_ix] = base;
+ Alloc path_alloc = new_alloc(path.tiles.offset, (path.bbox.z - path.bbox.x) * (path.bbox.w - path.bbox.y) * Tile_size, mem_ok);
+ write_tile_alloc(th_ix, path_alloc);
+ break;
+ default:
+ tile_count = 0;
+ break;
+ }
+
+ // Prefix sum of sh_tile_count
+ sh_tile_count[th_ix] = tile_count;
+ for (uint i = 0; i < LG_N_TILE; i++) {
+ barrier();
+ if (th_ix >= (1 << i)) {
+ tile_count += sh_tile_count[th_ix - (1 << i)];
+ }
+ barrier();
+ sh_tile_count[th_ix] = tile_count;
+ }
+ barrier();
+ uint total_tile_count = sh_tile_count[N_TILE - 1];
+ for (uint ix = th_ix; ix < total_tile_count; ix += N_TILE) {
+ // Binary search to find element
+ uint el_ix = 0;
+ for (uint i = 0; i < LG_N_TILE; i++) {
+ uint probe = el_ix + ((N_TILE / 2) >> i);
+ if (ix >= sh_tile_count[probe - 1]) {
+ el_ix = probe;
+ }
+ }
+ AnnotatedRef ref = AnnotatedRef(conf.anno_alloc.offset + sh_elements[el_ix] * Annotated_size);
+ uint tag = Annotated_tag(conf.anno_alloc, ref).tag;
+ uint seq_ix = ix - (el_ix > 0 ? sh_tile_count[el_ix - 1] : 0);
+ uint width = sh_tile_width[el_ix];
+ uint x = sh_tile_x0[el_ix] + seq_ix % width;
+ uint y = sh_tile_y0[el_ix] + seq_ix / width;
+ bool include_tile = false;
+ if (tag == Annotated_BeginClip || tag == Annotated_EndClip) {
+ include_tile = true;
+ } else if (mem_ok) {
+ Tile tile = Tile_read(read_tile_alloc(el_ix, mem_ok), TileRef(sh_tile_base[el_ix] + (sh_tile_stride[el_ix] * y + x) * Tile_size));
+ // Include the path in the tile if
+ // - the tile contains at least a segment (tile offset non-zero)
+ // - the tile is completely covered (backdrop non-zero)
+ include_tile = tile.tile.offset != 0 || tile.backdrop != 0;
+ }
+ if (include_tile) {
+ uint el_slice = el_ix / 32;
+ uint el_mask = 1 << (el_ix & 31);
+ atomicOr(sh_bitmaps[el_slice][y * N_TILE_X + x], el_mask);
+ }
+ }
+
+ barrier();
+
+ // Output non-segment elements for this tile. The thread does a sequential walk
+ // through the non-segment elements.
+ uint slice_ix = 0;
+ uint bitmap = sh_bitmaps[0][th_ix];
+ while (mem_ok) {
+ if (bitmap == 0) {
+ slice_ix++;
+ if (slice_ix == N_SLICE) {
+ break;
+ }
+ bitmap = sh_bitmaps[slice_ix][th_ix];
+ if (bitmap == 0) {
+ continue;
+ }
+ }
+ uint element_ref_ix = slice_ix * 32 + findLSB(bitmap);
+ uint element_ix = sh_elements[element_ref_ix];
+
+ // Clear LSB
+ bitmap &= bitmap - 1;
+
+ // At this point, we read the element again from global memory.
+ // If that turns out to be expensive, maybe we can pack it into
+ // shared memory (or perhaps just the tag).
+ ref = AnnotatedRef(conf.anno_alloc.offset + element_ix * Annotated_size);
+ AnnotatedTag tag = Annotated_tag(conf.anno_alloc, ref);
+
+ if (clip_zero_depth == 0) {
+ switch (tag.tag) {
+ case Annotated_Color:
+ Tile tile = Tile_read(read_tile_alloc(element_ref_ix, mem_ok), TileRef(sh_tile_base[element_ref_ix]
+ + (sh_tile_stride[element_ref_ix] * tile_y + tile_x) * Tile_size));
+ AnnoColor fill = Annotated_Color_read(conf.anno_alloc, ref);
+ if (!alloc_cmd(cmd_alloc, cmd_ref, cmd_limit)) {
+ break;
+ }
+ write_fill(cmd_alloc, cmd_ref, tag.flags, tile, fill.linewidth);
+ Cmd_Color_write(cmd_alloc, cmd_ref, CmdColor(fill.rgba_color));
+ cmd_ref.offset += 4 + CmdColor_size;
+ break;
+ case Annotated_Image:
+ tile = Tile_read(read_tile_alloc(element_ref_ix, mem_ok), TileRef(sh_tile_base[element_ref_ix]
+ + (sh_tile_stride[element_ref_ix] * tile_y + tile_x) * Tile_size));
+ AnnoImage fill_img = Annotated_Image_read(conf.anno_alloc, ref);
+ if (!alloc_cmd(cmd_alloc, cmd_ref, cmd_limit)) {
+ break;
+ }
+ write_fill(cmd_alloc, cmd_ref, tag.flags, tile, fill_img.linewidth);
+ Cmd_Image_write(cmd_alloc, cmd_ref, CmdImage(fill_img.index, fill_img.offset));
+ cmd_ref.offset += 4 + CmdImage_size;
+ break;
+ case Annotated_BeginClip:
+ tile = Tile_read(read_tile_alloc(element_ref_ix, mem_ok), TileRef(sh_tile_base[element_ref_ix]
+ + (sh_tile_stride[element_ref_ix] * tile_y + tile_x) * Tile_size));
+ if (tile.tile.offset == 0 && tile.backdrop == 0) {
+ clip_zero_depth = clip_depth + 1;
+ } else if (tile.tile.offset == 0 && clip_depth < 32) {
+ clip_one_mask |= (1 << clip_depth);
+ } else {
+ AnnoBeginClip begin_clip = Annotated_BeginClip_read(conf.anno_alloc, ref);
+ if (!alloc_cmd(cmd_alloc, cmd_ref, cmd_limit)) {
+ break;
+ }
+ write_fill(cmd_alloc, cmd_ref, tag.flags, tile, begin_clip.linewidth);
+ Cmd_BeginClip_write(cmd_alloc, cmd_ref);
+ cmd_ref.offset += 4;
+ if (clip_depth < 32) {
+ clip_one_mask &= ~(1 << clip_depth);
+ }
+ begin_slot++;
+ num_begin_slots = max(num_begin_slots, begin_slot);
+ }
+ clip_depth++;
+ break;
+ case Annotated_EndClip:
+ clip_depth--;
+ if (clip_depth >= 32 || (clip_one_mask & (1 << clip_depth)) == 0) {
+ if (!alloc_cmd(cmd_alloc, cmd_ref, cmd_limit)) {
+ break;
+ }
+ Cmd_Solid_write(cmd_alloc, cmd_ref);
+ cmd_ref.offset += 4;
+ begin_slot--;
+ Cmd_EndClip_write(cmd_alloc, cmd_ref);
+ cmd_ref.offset += 4;
+ }
+ break;
+ }
+ } else {
+ // In "clip zero" state, suppress all drawing
+ switch (tag.tag) {
+ case Annotated_BeginClip:
+ clip_depth++;
+ break;
+ case Annotated_EndClip:
+ if (clip_depth == clip_zero_depth) {
+ clip_zero_depth = 0;
+ }
+ clip_depth--;
+ break;
+ }
+ }
+ }
+ barrier();
+
+ rd_ix += N_TILE;
+ if (rd_ix >= ready_ix && partition_ix >= n_partitions) break;
+ }
+ if (bin_tile_x + tile_x < conf.width_in_tiles && bin_tile_y + tile_y < conf.height_in_tiles) {
+ Cmd_End_write(cmd_alloc, cmd_ref);
+ if (num_begin_slots > 0) {
+ // Write scratch allocation: one state per BeginClip per rasterizer chunk.
+ uint scratch_size = num_begin_slots * TILE_WIDTH_PX * TILE_HEIGHT_PX * CLIP_STATE_SIZE * 4;
+ MallocResult scratch = malloc(scratch_size);
+ // Ignore scratch.failed; we don't use the allocation and kernel4
+ // checks for memory overflow before using it.
+ alloc_write(scratch_alloc, scratch_alloc.offset, scratch.alloc);
+ }
+ }
+}
diff --git a/vendor/gioui.org/shader/piet/coarse_abi.c b/vendor/gioui.org/shader/piet/coarse_abi.c
new file mode 100644
index 0000000..dbecf9f
--- /dev/null
+++ b/vendor/gioui.org/shader/piet/coarse_abi.c
@@ -0,0 +1,23 @@
+// Code generated by gioui.org/cpu/cmd/compile DO NOT EDIT.
+
+//go:build linux && (arm64 || arm || amd64)
+// +build linux
+// +build arm64 arm amd64
+
+#include
+#include
+#include "abi.h"
+#include "runtime.h"
+#include "coarse_abi.h"
+
+const struct program_info coarse_program_info = {
+ .has_cbarriers = 1,
+ .min_memory_size = 100000,
+ .desc_set_size = sizeof(struct coarse_descriptor_set_layout),
+ .workgroup_size_x = 128,
+ .workgroup_size_y = 1,
+ .workgroup_size_z = 1,
+ .begin = coarse_coroutine_begin,
+ .await = coarse_coroutine_await,
+ .destroy = coarse_coroutine_destroy,
+};
diff --git a/vendor/gioui.org/shader/piet/coarse_abi.go b/vendor/gioui.org/shader/piet/coarse_abi.go
new file mode 100644
index 0000000..dfd977d
--- /dev/null
+++ b/vendor/gioui.org/shader/piet/coarse_abi.go
@@ -0,0 +1,35 @@
+// Code generated by gioui.org/cpu/cmd/compile DO NOT EDIT.
+
+//go:build linux && (arm64 || arm || amd64)
+// +build linux
+// +build arm64 arm amd64
+
+package piet
+
+import "gioui.org/cpu"
+import "unsafe"
+
+/*
+#cgo LDFLAGS: -lm
+
+#include
+#include
+#include "abi.h"
+#include "runtime.h"
+#include "coarse_abi.h"
+*/
+import "C"
+
+var CoarseProgramInfo = (*cpu.ProgramInfo)(unsafe.Pointer(&C.coarse_program_info))
+
+type CoarseDescriptorSetLayout = C.struct_coarse_descriptor_set_layout
+
+const CoarseHash = "e7ef250c08701490aed979a889cca73943b988bdb5e4ca4b02735aebcf5e5505"
+
+func (l *CoarseDescriptorSetLayout) Binding0() *cpu.BufferDescriptor {
+ return (*cpu.BufferDescriptor)(unsafe.Pointer(&l.binding0))
+}
+
+func (l *CoarseDescriptorSetLayout) Binding1() *cpu.BufferDescriptor {
+ return (*cpu.BufferDescriptor)(unsafe.Pointer(&l.binding1))
+}
diff --git a/vendor/gioui.org/shader/piet/coarse_abi.h b/vendor/gioui.org/shader/piet/coarse_abi.h
new file mode 100644
index 0000000..24d874d
--- /dev/null
+++ b/vendor/gioui.org/shader/piet/coarse_abi.h
@@ -0,0 +1,17 @@
+// Code generated by gioui.org/cpu/cmd/compile DO NOT EDIT.
+
+struct coarse_descriptor_set_layout {
+ struct buffer_descriptor binding0;
+ struct buffer_descriptor binding1;
+};
+
+extern coroutine coarse_coroutine_begin(struct program_data *data,
+ int32_t workgroupX, int32_t workgroupY, int32_t workgroupZ,
+ void *workgroupMemory,
+ int32_t firstSubgroup,
+ int32_t subgroupCount) ATTR_HIDDEN;
+
+extern bool coarse_coroutine_await(coroutine r, yield_result *res) ATTR_HIDDEN;
+extern void coarse_coroutine_destroy(coroutine r) ATTR_HIDDEN;
+
+extern const struct program_info coarse_program_info ATTR_HIDDEN;
diff --git a/vendor/gioui.org/shader/piet/coarse_abi_nosupport.go b/vendor/gioui.org/shader/piet/coarse_abi_nosupport.go
new file mode 100644
index 0000000..00e2ad8
--- /dev/null
+++ b/vendor/gioui.org/shader/piet/coarse_abi_nosupport.go
@@ -0,0 +1,22 @@
+// Code generated by gioui.org/cpu/cmd/compile DO NOT EDIT.
+
+//go:build !(linux && (arm64 || arm || amd64))
+// +build !linux !arm64,!arm,!amd64
+
+package piet
+
+import "gioui.org/cpu"
+
+var CoarseProgramInfo *cpu.ProgramInfo
+
+type CoarseDescriptorSetLayout struct{}
+
+const CoarseHash = ""
+
+func (l *CoarseDescriptorSetLayout) Binding0() *cpu.BufferDescriptor {
+ panic("unsupported")
+}
+
+func (l *CoarseDescriptorSetLayout) Binding1() *cpu.BufferDescriptor {
+ panic("unsupported")
+}
diff --git a/vendor/gioui.org/shader/piet/coarse_linux_amd64.syso b/vendor/gioui.org/shader/piet/coarse_linux_amd64.syso
new file mode 100644
index 0000000..1ac8407
Binary files /dev/null and b/vendor/gioui.org/shader/piet/coarse_linux_amd64.syso differ
diff --git a/vendor/gioui.org/shader/piet/coarse_linux_arm.syso b/vendor/gioui.org/shader/piet/coarse_linux_arm.syso
new file mode 100644
index 0000000..3f5ee88
Binary files /dev/null and b/vendor/gioui.org/shader/piet/coarse_linux_arm.syso differ
diff --git a/vendor/gioui.org/shader/piet/coarse_linux_arm64.syso b/vendor/gioui.org/shader/piet/coarse_linux_arm64.syso
new file mode 100644
index 0000000..da3b563
Binary files /dev/null and b/vendor/gioui.org/shader/piet/coarse_linux_arm64.syso differ
diff --git a/vendor/gioui.org/shader/piet/elements.comp b/vendor/gioui.org/shader/piet/elements.comp
new file mode 100644
index 0000000..17ef1ee
--- /dev/null
+++ b/vendor/gioui.org/shader/piet/elements.comp
@@ -0,0 +1,413 @@
+// SPDX-License-Identifier: Apache-2.0 OR MIT OR Unlicense
+
+// The element processing stage, first in the pipeline.
+//
+// This stage is primarily about applying transforms and computing bounding
+// boxes. It is organized as a scan over the input elements, producing
+// annotated output elements.
+
+#version 450
+#extension GL_GOOGLE_include_directive : enable
+
+#include "mem.h"
+#include "setup.h"
+
+#define N_ROWS 4
+#define WG_SIZE 32
+#define LG_WG_SIZE 5
+#define PARTITION_SIZE (WG_SIZE * N_ROWS)
+
+layout(local_size_x = WG_SIZE, local_size_y = 1) in;
+
+layout(set = 0, binding = 1) readonly buffer ConfigBuf {
+ Config conf;
+};
+
+layout(set = 0, binding = 2) readonly buffer SceneBuf {
+ uint[] scene;
+};
+
+// It would be better to use the Vulkan memory model than
+// "volatile" but shooting for compatibility here rather
+// than doing things right.
+layout(set = 0, binding = 3) volatile buffer StateBuf {
+ uint part_counter;
+ uint[] state;
+};
+
+#include "scene.h"
+#include "state.h"
+#include "annotated.h"
+#include "pathseg.h"
+#include "tile.h"
+
+#define StateBuf_stride (4 + 2 * State_size)
+
+StateRef state_aggregate_ref(uint partition_ix) {
+ return StateRef(4 + partition_ix * StateBuf_stride);
+}
+
+StateRef state_prefix_ref(uint partition_ix) {
+ return StateRef(4 + partition_ix * StateBuf_stride + State_size);
+}
+
+uint state_flag_index(uint partition_ix) {
+ return partition_ix * (StateBuf_stride / 4);
+}
+
+// These correspond to X, A, P respectively in the prefix sum paper.
+#define FLAG_NOT_READY 0
+#define FLAG_AGGREGATE_READY 1
+#define FLAG_PREFIX_READY 2
+
+#define FLAG_SET_LINEWIDTH 1
+#define FLAG_SET_BBOX 2
+#define FLAG_RESET_BBOX 4
+#define FLAG_SET_FILL_MODE 8
+// Fill modes take up the next bit. Non-zero fill is 0, stroke is 1.
+#define LG_FILL_MODE 4
+#define FILL_MODE_BITS 1
+#define FILL_MODE_MASK (FILL_MODE_BITS << LG_FILL_MODE)
+
+// This is almost like a monoid (the interaction between transformation and
+// bounding boxes is approximate)
+State combine_state(State a, State b) {
+ State c;
+ c.bbox.x = min(a.mat.x * b.bbox.x, a.mat.x * b.bbox.z) + min(a.mat.z * b.bbox.y, a.mat.z * b.bbox.w) + a.translate.x;
+ c.bbox.y = min(a.mat.y * b.bbox.x, a.mat.y * b.bbox.z) + min(a.mat.w * b.bbox.y, a.mat.w * b.bbox.w) + a.translate.y;
+ c.bbox.z = max(a.mat.x * b.bbox.x, a.mat.x * b.bbox.z) + max(a.mat.z * b.bbox.y, a.mat.z * b.bbox.w) + a.translate.x;
+ c.bbox.w = max(a.mat.y * b.bbox.x, a.mat.y * b.bbox.z) + max(a.mat.w * b.bbox.y, a.mat.w * b.bbox.w) + a.translate.y;
+ if ((a.flags & FLAG_RESET_BBOX) == 0 && b.bbox.z <= b.bbox.x && b.bbox.w <= b.bbox.y) {
+ c.bbox = a.bbox;
+ } else if ((a.flags & FLAG_RESET_BBOX) == 0 && (b.flags & FLAG_SET_BBOX) == 0 &&
+ (a.bbox.z > a.bbox.x || a.bbox.w > a.bbox.y))
+ {
+ c.bbox.xy = min(a.bbox.xy, c.bbox.xy);
+ c.bbox.zw = max(a.bbox.zw, c.bbox.zw);
+ }
+ // It would be more concise to cast to matrix types; ah well.
+ c.mat.x = a.mat.x * b.mat.x + a.mat.z * b.mat.y;
+ c.mat.y = a.mat.y * b.mat.x + a.mat.w * b.mat.y;
+ c.mat.z = a.mat.x * b.mat.z + a.mat.z * b.mat.w;
+ c.mat.w = a.mat.y * b.mat.z + a.mat.w * b.mat.w;
+ c.translate.x = a.mat.x * b.translate.x + a.mat.z * b.translate.y + a.translate.x;
+ c.translate.y = a.mat.y * b.translate.x + a.mat.w * b.translate.y + a.translate.y;
+ c.linewidth = (b.flags & FLAG_SET_LINEWIDTH) == 0 ? a.linewidth : b.linewidth;
+ c.flags = (a.flags & (FLAG_SET_LINEWIDTH | FLAG_SET_BBOX | FLAG_SET_FILL_MODE)) | b.flags;
+ c.flags |= (a.flags & FLAG_RESET_BBOX) >> 1;
+ uint fill_mode = (b.flags & FLAG_SET_FILL_MODE) == 0 ? a.flags : b.flags;
+ fill_mode &= FILL_MODE_MASK;
+ c.flags = (c.flags & ~FILL_MODE_MASK) | fill_mode;
+ c.path_count = a.path_count + b.path_count;
+ c.pathseg_count = a.pathseg_count + b.pathseg_count;
+ c.trans_count = a.trans_count + b.trans_count;
+ return c;
+}
+
+State map_element(ElementRef ref) {
+ // TODO: it would *probably* be more efficient to make the memory read patterns less
+ // divergent, though it would be more wasted memory.
+ uint tag = Element_tag(ref).tag;
+ State c;
+ c.bbox = vec4(0.0, 0.0, 0.0, 0.0);
+ c.mat = vec4(1.0, 0.0, 0.0, 1.0);
+ c.translate = vec2(0.0, 0.0);
+ c.linewidth = 1.0; // TODO should be 0.0
+ c.flags = 0;
+ c.path_count = 0;
+ c.pathseg_count = 0;
+ c.trans_count = 0;
+ switch (tag) {
+ case Element_Line:
+ LineSeg line = Element_Line_read(ref);
+ c.bbox.xy = min(line.p0, line.p1);
+ c.bbox.zw = max(line.p0, line.p1);
+ c.pathseg_count = 1;
+ break;
+ case Element_Quad:
+ QuadSeg quad = Element_Quad_read(ref);
+ c.bbox.xy = min(min(quad.p0, quad.p1), quad.p2);
+ c.bbox.zw = max(max(quad.p0, quad.p1), quad.p2);
+ c.pathseg_count = 1;
+ break;
+ case Element_Cubic:
+ CubicSeg cubic = Element_Cubic_read(ref);
+ c.bbox.xy = min(min(cubic.p0, cubic.p1), min(cubic.p2, cubic.p3));
+ c.bbox.zw = max(max(cubic.p0, cubic.p1), max(cubic.p2, cubic.p3));
+ c.pathseg_count = 1;
+ break;
+ case Element_FillColor:
+ case Element_FillImage:
+ case Element_BeginClip:
+ c.flags = FLAG_RESET_BBOX;
+ c.path_count = 1;
+ break;
+ case Element_EndClip:
+ c.path_count = 1;
+ break;
+ case Element_SetLineWidth:
+ SetLineWidth lw = Element_SetLineWidth_read(ref);
+ c.linewidth = lw.width;
+ c.flags = FLAG_SET_LINEWIDTH;
+ break;
+ case Element_Transform:
+ Transform t = Element_Transform_read(ref);
+ c.mat = t.mat;
+ c.translate = t.translate;
+ c.trans_count = 1;
+ break;
+ case Element_SetFillMode:
+ SetFillMode fm = Element_SetFillMode_read(ref);
+ c.flags = FLAG_SET_FILL_MODE | (fm.fill_mode << LG_FILL_MODE);
+ break;
+ }
+ return c;
+}
+
+// Get the bounding box of a circle transformed by the matrix into an ellipse.
+vec2 get_linewidth(State st) {
+ // See https://www.iquilezles.org/www/articles/ellipses/ellipses.htm
+ return 0.5 * st.linewidth * vec2(length(st.mat.xz), length(st.mat.yw));
+}
+
+shared State sh_state[WG_SIZE];
+
+shared uint sh_part_ix;
+shared State sh_prefix;
+
+void main() {
+ State th_state[N_ROWS];
+ // Determine partition to process by atomic counter (described in Section
+ // 4.4 of prefix sum paper).
+ if (gl_LocalInvocationID.x == 0) {
+ sh_part_ix = atomicAdd(part_counter, 1);
+ }
+ barrier();
+ uint part_ix = sh_part_ix;
+
+ uint ix = part_ix * PARTITION_SIZE + gl_LocalInvocationID.x * N_ROWS;
+ ElementRef ref = ElementRef(ix * Element_size);
+
+ th_state[0] = map_element(ref);
+ for (uint i = 1; i < N_ROWS; i++) {
+ // discussion question: would it be faster to load using more coherent patterns
+ // into thread memory? This is kinda strided.
+ th_state[i] = combine_state(th_state[i - 1], map_element(Element_index(ref, i)));
+ }
+ State agg = th_state[N_ROWS - 1];
+ sh_state[gl_LocalInvocationID.x] = agg;
+ for (uint i = 0; i < LG_WG_SIZE; i++) {
+ barrier();
+ if (gl_LocalInvocationID.x >= (1 << i)) {
+ State other = sh_state[gl_LocalInvocationID.x - (1 << i)];
+ agg = combine_state(other, agg);
+ }
+ barrier();
+ sh_state[gl_LocalInvocationID.x] = agg;
+ }
+
+ State exclusive;
+ exclusive.bbox = vec4(0.0, 0.0, 0.0, 0.0);
+ exclusive.mat = vec4(1.0, 0.0, 0.0, 1.0);
+ exclusive.translate = vec2(0.0, 0.0);
+ exclusive.linewidth = 1.0; //TODO should be 0.0
+ exclusive.flags = 0;
+ exclusive.path_count = 0;
+ exclusive.pathseg_count = 0;
+ exclusive.trans_count = 0;
+
+ // Publish aggregate for this partition
+ if (gl_LocalInvocationID.x == WG_SIZE - 1) {
+ // Note: with memory model, we'd want to generate the atomic store version of this.
+ State_write(state_aggregate_ref(part_ix), agg);
+ }
+ memoryBarrierBuffer();
+ if (gl_LocalInvocationID.x == WG_SIZE - 1) {
+ uint flag = FLAG_AGGREGATE_READY;
+ if (part_ix == 0) {
+ State_write(state_prefix_ref(part_ix), agg);
+ flag = FLAG_PREFIX_READY;
+ }
+ state[state_flag_index(part_ix)] = flag;
+ if (part_ix != 0) {
+ // step 4 of paper: decoupled lookback
+ uint look_back_ix = part_ix - 1;
+
+ State their_agg;
+ uint their_ix = 0;
+ while (true) {
+ flag = state[state_flag_index(look_back_ix)];
+ if (flag == FLAG_PREFIX_READY) {
+ State their_prefix = State_read(state_prefix_ref(look_back_ix));
+ exclusive = combine_state(their_prefix, exclusive);
+ break;
+ } else if (flag == FLAG_AGGREGATE_READY) {
+ their_agg = State_read(state_aggregate_ref(look_back_ix));
+ exclusive = combine_state(their_agg, exclusive);
+ look_back_ix--;
+ their_ix = 0;
+ continue;
+ }
+ // else spin
+
+ // Unfortunately there's no guarantee of forward progress of other
+ // workgroups, so compute a bit of the aggregate before trying again.
+ // In the worst case, spinning stops when the aggregate is complete.
+ ElementRef ref = ElementRef((look_back_ix * PARTITION_SIZE + their_ix) * Element_size);
+ State s = map_element(ref);
+ if (their_ix == 0) {
+ their_agg = s;
+ } else {
+ their_agg = combine_state(their_agg, s);
+ }
+ their_ix++;
+ if (their_ix == PARTITION_SIZE) {
+ exclusive = combine_state(their_agg, exclusive);
+ if (look_back_ix == 0) {
+ break;
+ }
+ look_back_ix--;
+ their_ix = 0;
+ }
+ }
+
+ // step 5 of paper: compute inclusive prefix
+ State inclusive_prefix = combine_state(exclusive, agg);
+ sh_prefix = exclusive;
+ State_write(state_prefix_ref(part_ix), inclusive_prefix);
+ }
+ }
+ memoryBarrierBuffer();
+ if (gl_LocalInvocationID.x == WG_SIZE - 1 && part_ix != 0) {
+ state[state_flag_index(part_ix)] = FLAG_PREFIX_READY;
+ }
+ barrier();
+ if (part_ix != 0) {
+ exclusive = sh_prefix;
+ }
+
+ State row = exclusive;
+ if (gl_LocalInvocationID.x > 0) {
+ State other = sh_state[gl_LocalInvocationID.x - 1];
+ row = combine_state(row, other);
+ }
+ for (uint i = 0; i < N_ROWS; i++) {
+ State st = combine_state(row, th_state[i]);
+
+ // Here we read again from the original scene. There may be
+ // gains to be had from stashing in shared memory or possibly
+ // registers (though register pressure is an issue).
+ ElementRef this_ref = Element_index(ref, i);
+ ElementTag tag = Element_tag(this_ref);
+ uint fill_mode = fill_mode_from_flags(st.flags >> LG_FILL_MODE);
+ bool is_stroke = fill_mode == MODE_STROKE;
+ switch (tag.tag) {
+ case Element_Line:
+ LineSeg line = Element_Line_read(this_ref);
+ PathCubic path_cubic;
+ path_cubic.p0 = line.p0;
+ path_cubic.p1 = mix(line.p0, line.p1, 1.0 / 3.0);
+ path_cubic.p2 = mix(line.p1, line.p0, 1.0 / 3.0);
+ path_cubic.p3 = line.p1;
+ path_cubic.path_ix = st.path_count;
+ path_cubic.trans_ix = st.trans_count;
+ if (is_stroke) {
+ path_cubic.stroke = get_linewidth(st);
+ } else {
+ path_cubic.stroke = vec2(0.0);
+ }
+ PathSegRef path_out_ref = PathSegRef(conf.pathseg_alloc.offset + (st.pathseg_count - 1) * PathSeg_size);
+ PathSeg_Cubic_write(conf.pathseg_alloc, path_out_ref, fill_mode, path_cubic);
+ break;
+ case Element_Quad:
+ QuadSeg quad = Element_Quad_read(this_ref);
+ path_cubic.p0 = quad.p0;
+ path_cubic.p1 = mix(quad.p1, quad.p0, 1.0 / 3.0);
+ path_cubic.p2 = mix(quad.p1, quad.p2, 1.0 / 3.0);
+ path_cubic.p3 = quad.p2;
+ path_cubic.path_ix = st.path_count;
+ path_cubic.trans_ix = st.trans_count;
+ if (is_stroke) {
+ path_cubic.stroke = get_linewidth(st);
+ } else {
+ path_cubic.stroke = vec2(0.0);
+ }
+ path_out_ref = PathSegRef(conf.pathseg_alloc.offset + (st.pathseg_count - 1) * PathSeg_size);
+ PathSeg_Cubic_write(conf.pathseg_alloc, path_out_ref, fill_mode, path_cubic);
+ break;
+ case Element_Cubic:
+ CubicSeg cubic = Element_Cubic_read(this_ref);
+ path_cubic.p0 = cubic.p0;
+ path_cubic.p1 = cubic.p1;
+ path_cubic.p2 = cubic.p2;
+ path_cubic.p3 = cubic.p3;
+ path_cubic.path_ix = st.path_count;
+ path_cubic.trans_ix = st.trans_count;
+ if (is_stroke) {
+ path_cubic.stroke = get_linewidth(st);
+ } else {
+ path_cubic.stroke = vec2(0.0);
+ }
+ path_out_ref = PathSegRef(conf.pathseg_alloc.offset + (st.pathseg_count - 1) * PathSeg_size);
+ PathSeg_Cubic_write(conf.pathseg_alloc, path_out_ref, fill_mode, path_cubic);
+ break;
+ case Element_FillColor:
+ FillColor fill = Element_FillColor_read(this_ref);
+ AnnoColor anno_fill;
+ anno_fill.rgba_color = fill.rgba_color;
+ if (is_stroke) {
+ vec2 lw = get_linewidth(st);
+ anno_fill.bbox = st.bbox + vec4(-lw, lw);
+ anno_fill.linewidth = st.linewidth * sqrt(abs(st.mat.x * st.mat.w - st.mat.y * st.mat.z));
+ } else {
+ anno_fill.bbox = st.bbox;
+ anno_fill.linewidth = 0.0;
+ }
+ AnnotatedRef out_ref = AnnotatedRef(conf.anno_alloc.offset + (st.path_count - 1) * Annotated_size);
+ Annotated_Color_write(conf.anno_alloc, out_ref, fill_mode, anno_fill);
+ break;
+ case Element_FillImage:
+ FillImage fill_img = Element_FillImage_read(this_ref);
+ AnnoImage anno_img;
+ anno_img.index = fill_img.index;
+ anno_img.offset = fill_img.offset;
+ if (is_stroke) {
+ vec2 lw = get_linewidth(st);
+ anno_img.bbox = st.bbox + vec4(-lw, lw);
+ anno_img.linewidth = st.linewidth * sqrt(abs(st.mat.x * st.mat.w - st.mat.y * st.mat.z));
+ } else {
+ anno_img.bbox = st.bbox;
+ anno_img.linewidth = 0.0;
+ }
+ out_ref = AnnotatedRef(conf.anno_alloc.offset + (st.path_count - 1) * Annotated_size);
+ Annotated_Image_write(conf.anno_alloc, out_ref, fill_mode, anno_img);
+ break;
+ case Element_BeginClip:
+ Clip begin_clip = Element_BeginClip_read(this_ref);
+ AnnoBeginClip anno_begin_clip;
+ // This is the absolute bbox, it's been transformed during encoding.
+ anno_begin_clip.bbox = begin_clip.bbox;
+ if (is_stroke) {
+ vec2 lw = get_linewidth(st);
+ anno_begin_clip.linewidth = st.linewidth * sqrt(abs(st.mat.x * st.mat.w - st.mat.y * st.mat.z));
+ } else {
+ anno_fill.linewidth = 0.0;
+ }
+ out_ref = AnnotatedRef(conf.anno_alloc.offset + (st.path_count - 1) * Annotated_size);
+ Annotated_BeginClip_write(conf.anno_alloc, out_ref, fill_mode, anno_begin_clip);
+ break;
+ case Element_EndClip:
+ Clip end_clip = Element_EndClip_read(this_ref);
+ // This bbox is expected to be the same as the begin one.
+ AnnoEndClip anno_end_clip = AnnoEndClip(end_clip.bbox);
+ out_ref = AnnotatedRef(conf.anno_alloc.offset + (st.path_count - 1) * Annotated_size);
+ Annotated_EndClip_write(conf.anno_alloc, out_ref, anno_end_clip);
+ break;
+ case Element_Transform:
+ TransformSeg transform = TransformSeg(st.mat, st.translate);
+ TransformSegRef trans_ref = TransformSegRef(conf.trans_alloc.offset + (st.trans_count - 1) * TransformSeg_size);
+ TransformSeg_write(conf.trans_alloc, trans_ref, transform);
+ break;
+ }
+ }
+}
diff --git a/vendor/gioui.org/shader/piet/elements_abi.c b/vendor/gioui.org/shader/piet/elements_abi.c
new file mode 100644
index 0000000..bacf32d
--- /dev/null
+++ b/vendor/gioui.org/shader/piet/elements_abi.c
@@ -0,0 +1,23 @@
+// Code generated by gioui.org/cpu/cmd/compile DO NOT EDIT.
+
+//go:build linux && (arm64 || arm || amd64)
+// +build linux
+// +build arm64 arm amd64
+
+#include <stdint.h>
+#include <stdbool.h>
+#include "abi.h"
+#include "runtime.h"
+#include "elements_abi.h"
+
+const struct program_info elements_program_info = {
+ .has_cbarriers = 1,
+ .min_memory_size = 100000,
+ .desc_set_size = sizeof(struct elements_descriptor_set_layout),
+ .workgroup_size_x = 32,
+ .workgroup_size_y = 1,
+ .workgroup_size_z = 1,
+ .begin = elements_coroutine_begin,
+ .await = elements_coroutine_await,
+ .destroy = elements_coroutine_destroy,
+};
diff --git a/vendor/gioui.org/shader/piet/elements_abi.go b/vendor/gioui.org/shader/piet/elements_abi.go
new file mode 100644
index 0000000..a85eeca
--- /dev/null
+++ b/vendor/gioui.org/shader/piet/elements_abi.go
@@ -0,0 +1,43 @@
+// Code generated by gioui.org/cpu/cmd/compile DO NOT EDIT.
+
+//go:build linux && (arm64 || arm || amd64)
+// +build linux
+// +build arm64 arm amd64
+
+package piet
+
+import "gioui.org/cpu"
+import "unsafe"
+
+/*
+#cgo LDFLAGS: -lm
+
+#include <stdint.h>
+#include <stdbool.h>
+#include "abi.h"
+#include "runtime.h"
+#include "elements_abi.h"
+*/
+import "C"
+
+var ElementsProgramInfo = (*cpu.ProgramInfo)(unsafe.Pointer(&C.elements_program_info))
+
+type ElementsDescriptorSetLayout = C.struct_elements_descriptor_set_layout
+
+const ElementsHash = "0f18de15866045b36217068789c9c8715a63e0f9f120c53ea2d4d76f53e443c3"
+
+func (l *ElementsDescriptorSetLayout) Binding0() *cpu.BufferDescriptor {
+ return (*cpu.BufferDescriptor)(unsafe.Pointer(&l.binding0))
+}
+
+func (l *ElementsDescriptorSetLayout) Binding1() *cpu.BufferDescriptor {
+ return (*cpu.BufferDescriptor)(unsafe.Pointer(&l.binding1))
+}
+
+func (l *ElementsDescriptorSetLayout) Binding2() *cpu.BufferDescriptor {
+ return (*cpu.BufferDescriptor)(unsafe.Pointer(&l.binding2))
+}
+
+func (l *ElementsDescriptorSetLayout) Binding3() *cpu.BufferDescriptor {
+ return (*cpu.BufferDescriptor)(unsafe.Pointer(&l.binding3))
+}
diff --git a/vendor/gioui.org/shader/piet/elements_abi.h b/vendor/gioui.org/shader/piet/elements_abi.h
new file mode 100644
index 0000000..c455224
--- /dev/null
+++ b/vendor/gioui.org/shader/piet/elements_abi.h
@@ -0,0 +1,19 @@
+// Code generated by gioui.org/cpu/cmd/compile DO NOT EDIT.
+
+struct elements_descriptor_set_layout {
+ struct buffer_descriptor binding0;
+ struct buffer_descriptor binding1;
+ struct buffer_descriptor binding2;
+ struct buffer_descriptor binding3;
+};
+
+extern coroutine elements_coroutine_begin(struct program_data *data,
+ int32_t workgroupX, int32_t workgroupY, int32_t workgroupZ,
+ void *workgroupMemory,
+ int32_t firstSubgroup,
+ int32_t subgroupCount) ATTR_HIDDEN;
+
+extern bool elements_coroutine_await(coroutine r, yield_result *res) ATTR_HIDDEN;
+extern void elements_coroutine_destroy(coroutine r) ATTR_HIDDEN;
+
+extern const struct program_info elements_program_info ATTR_HIDDEN;
diff --git a/vendor/gioui.org/shader/piet/elements_abi_nosupport.go b/vendor/gioui.org/shader/piet/elements_abi_nosupport.go
new file mode 100644
index 0000000..8b30234
--- /dev/null
+++ b/vendor/gioui.org/shader/piet/elements_abi_nosupport.go
@@ -0,0 +1,30 @@
+// Code generated by gioui.org/cpu/cmd/compile DO NOT EDIT.
+
+//go:build !(linux && (arm64 || arm || amd64))
+// +build !linux !arm64,!arm,!amd64
+
+package piet
+
+import "gioui.org/cpu"
+
+var ElementsProgramInfo *cpu.ProgramInfo
+
+type ElementsDescriptorSetLayout struct{}
+
+const ElementsHash = ""
+
+func (l *ElementsDescriptorSetLayout) Binding0() *cpu.BufferDescriptor {
+ panic("unsupported")
+}
+
+func (l *ElementsDescriptorSetLayout) Binding1() *cpu.BufferDescriptor {
+ panic("unsupported")
+}
+
+func (l *ElementsDescriptorSetLayout) Binding2() *cpu.BufferDescriptor {
+ panic("unsupported")
+}
+
+func (l *ElementsDescriptorSetLayout) Binding3() *cpu.BufferDescriptor {
+ panic("unsupported")
+}
diff --git a/vendor/gioui.org/shader/piet/elements_linux_amd64.syso b/vendor/gioui.org/shader/piet/elements_linux_amd64.syso
new file mode 100644
index 0000000..bba953b
Binary files /dev/null and b/vendor/gioui.org/shader/piet/elements_linux_amd64.syso differ
diff --git a/vendor/gioui.org/shader/piet/elements_linux_arm.syso b/vendor/gioui.org/shader/piet/elements_linux_arm.syso
new file mode 100644
index 0000000..1325ac8
Binary files /dev/null and b/vendor/gioui.org/shader/piet/elements_linux_arm.syso differ
diff --git a/vendor/gioui.org/shader/piet/elements_linux_arm64.syso b/vendor/gioui.org/shader/piet/elements_linux_arm64.syso
new file mode 100644
index 0000000..6855eb6
Binary files /dev/null and b/vendor/gioui.org/shader/piet/elements_linux_arm64.syso differ
diff --git a/vendor/gioui.org/shader/piet/gen.go b/vendor/gioui.org/shader/piet/gen.go
new file mode 100644
index 0000000..4b80e92
--- /dev/null
+++ b/vendor/gioui.org/shader/piet/gen.go
@@ -0,0 +1,5 @@
+// SPDX-License-Identifier: Unlicense OR MIT
+
+package piet
+
+//go:generate go run ../cmd/convertshaders -package piet -dir .
diff --git a/vendor/gioui.org/shader/piet/gencpu.sh b/vendor/gioui.org/shader/piet/gencpu.sh
new file mode 100644
index 0000000..d6f6f7d
--- /dev/null
+++ b/vendor/gioui.org/shader/piet/gencpu.sh
@@ -0,0 +1,44 @@
+#!/bin/sh
+
+# SPDX-License-Identifier: Unlicense OR MIT
+
+set -e
+
+OBJCOPY_ARM64=$ANDROID_SDK_ROOT/ndk/21.3.6528147/toolchains/aarch64-linux-android-4.9/prebuilt/linux-x86_64/aarch64-linux-android/bin/objcopy
+OBJCOPY_ARM=$ANDROID_SDK_ROOT/ndk/21.3.6528147/toolchains/arm-linux-androideabi-4.9/prebuilt/linux-x86_64/arm-linux-androideabi/bin/objcopy
+SWIFTSHADER=$HOME/.cache/swiftshader
+
+export CGO_ENABLED=1
+export GOARCH=386
+export VK_ICD_FILENAMES=$SWIFTSHADER/build.32bit/Linux/vk_swiftshader_icd.json
+
+export SWIFTSHADER_TRIPLE=armv7a-none-eabi
+go run gioui.org/cpu/cmd/compile -arch arm -objcopy $OBJCOPY_ARM -layout "0:buffer,1:buffer,2:image,3:image" kernel4.comp
+go run gioui.org/cpu/cmd/compile -arch arm -objcopy $OBJCOPY_ARM -layout "0:buffer,1:buffer" coarse.comp
+go run gioui.org/cpu/cmd/compile -arch arm -objcopy $OBJCOPY_ARM -layout "0:buffer,1:buffer" binning.comp
+go run gioui.org/cpu/cmd/compile -arch arm -objcopy $OBJCOPY_ARM -layout "0:buffer,1:buffer" backdrop.comp
+go run gioui.org/cpu/cmd/compile -arch arm -objcopy $OBJCOPY_ARM -layout "0:buffer,1:buffer" path_coarse.comp
+go run gioui.org/cpu/cmd/compile -arch arm -objcopy $OBJCOPY_ARM -layout "0:buffer,1:buffer" tile_alloc.comp
+go run gioui.org/cpu/cmd/compile -arch arm -objcopy $OBJCOPY_ARM -layout "0:buffer,1:buffer,2:buffer,3:buffer" elements.comp
+
+export GOARCH=amd64
+export VK_ICD_FILENAMES=$SWIFTSHADER/build.64bit/Linux/vk_swiftshader_icd.json
+export SWIFTSHADER_TRIPLE=x86_64-unknown-none-gnu
+
+go run gioui.org/cpu/cmd/compile -arch amd64 -layout "0:buffer,1:buffer,2:image,3:image" kernel4.comp
+go run gioui.org/cpu/cmd/compile -arch amd64 -layout "0:buffer,1:buffer" coarse.comp
+go run gioui.org/cpu/cmd/compile -arch amd64 -layout "0:buffer,1:buffer" binning.comp
+go run gioui.org/cpu/cmd/compile -arch amd64 -layout "0:buffer,1:buffer" backdrop.comp
+go run gioui.org/cpu/cmd/compile -arch amd64 -layout "0:buffer,1:buffer" path_coarse.comp
+go run gioui.org/cpu/cmd/compile -arch amd64 -layout "0:buffer,1:buffer" tile_alloc.comp
+go run gioui.org/cpu/cmd/compile -arch amd64 -layout "0:buffer,1:buffer,2:buffer,3:buffer" elements.comp
+
+export SWIFTSHADER_TRIPLE=aarch64-unknown-linux-gnu
+
+go run gioui.org/cpu/cmd/compile -arch arm64 -objcopy $OBJCOPY_ARM64 -layout "0:buffer,1:buffer,2:image,3:image" kernel4.comp
+go run gioui.org/cpu/cmd/compile -arch arm64 -objcopy $OBJCOPY_ARM64 -layout "0:buffer,1:buffer" coarse.comp
+go run gioui.org/cpu/cmd/compile -arch arm64 -objcopy $OBJCOPY_ARM64 -layout "0:buffer,1:buffer" binning.comp
+go run gioui.org/cpu/cmd/compile -arch arm64 -objcopy $OBJCOPY_ARM64 -layout "0:buffer,1:buffer" backdrop.comp
+go run gioui.org/cpu/cmd/compile -arch arm64 -objcopy $OBJCOPY_ARM64 -layout "0:buffer,1:buffer" path_coarse.comp
+go run gioui.org/cpu/cmd/compile -arch arm64 -objcopy $OBJCOPY_ARM64 -layout "0:buffer,1:buffer" tile_alloc.comp
+go run gioui.org/cpu/cmd/compile -arch arm64 -objcopy $OBJCOPY_ARM64 -layout "0:buffer,1:buffer,2:buffer,3:buffer" elements.comp
diff --git a/vendor/gioui.org/shader/piet/kernel4.comp b/vendor/gioui.org/shader/piet/kernel4.comp
new file mode 100644
index 0000000..3f8ef65
--- /dev/null
+++ b/vendor/gioui.org/shader/piet/kernel4.comp
@@ -0,0 +1,248 @@
+// SPDX-License-Identifier: Apache-2.0 OR MIT OR Unlicense
+
+// This is "kernel 4" in a 4-kernel pipeline. It renders the commands
+// in the per-tile command list to an image.
+
+// Right now, this kernel stores the image in a buffer, but a better
+// plan is to use a texture. This is because of limited support.
+
+#version 450
+#extension GL_GOOGLE_include_directive : enable
+#ifdef ENABLE_IMAGE_INDICES
+#extension GL_EXT_nonuniform_qualifier : enable
+#endif
+
+#include "mem.h"
+#include "setup.h"
+
+#define CHUNK_X 2
+#define CHUNK_Y 4
+#define CHUNK CHUNK_X * CHUNK_Y
+#define CHUNK_DX (TILE_WIDTH_PX / CHUNK_X)
+#define CHUNK_DY (TILE_HEIGHT_PX / CHUNK_Y)
+layout(local_size_x = CHUNK_DX, local_size_y = CHUNK_DY) in;
+
+layout(set = 0, binding = 1) restrict readonly buffer ConfigBuf {
+ Config conf;
+};
+
+layout(rgba8, set = 0, binding = 2) uniform restrict writeonly image2D image;
+
+#ifdef ENABLE_IMAGE_INDICES
+layout(rgba8, set = 0, binding = 3) uniform restrict readonly image2D images[];
+#else
+layout(rgba8, set = 0, binding = 3) uniform restrict readonly image2D images;
+#endif
+
+#include "ptcl.h"
+#include "tile.h"
+
+mediump vec3 tosRGB(mediump vec3 rgb) {
+ bvec3 cutoff = greaterThanEqual(rgb, vec3(0.0031308));
+ mediump vec3 below = vec3(12.92)*rgb;
+ mediump vec3 above = vec3(1.055)*pow(rgb, vec3(0.41666)) - vec3(0.055);
+ return mix(below, above, cutoff);
+}
+
+mediump vec3 fromsRGB(mediump vec3 srgb) {
+ // Formula from EXT_sRGB.
+ bvec3 cutoff = greaterThanEqual(srgb, vec3(0.04045));
+ mediump vec3 below = srgb/vec3(12.92);
+ mediump vec3 above = pow((srgb + vec3(0.055))/vec3(1.055), vec3(2.4));
+ return mix(below, above, cutoff);
+}
+
+// unpacksRGB unpacks a color in the sRGB color space to a vec4 in the linear color
+// space.
+mediump vec4 unpacksRGB(uint srgba) {
+ mediump vec4 color = unpackUnorm4x8(srgba).wzyx;
+ return vec4(fromsRGB(color.rgb), color.a);
+}
+
+// packsRGB packs a color in the linear color space into its 8-bit sRGB equivalent.
+uint packsRGB(mediump vec4 rgba) {
+ rgba = vec4(tosRGB(rgba.rgb), rgba.a);
+ return packUnorm4x8(rgba.wzyx);
+}
+
+uvec2 chunk_offset(uint i) {
+ return uvec2(i % CHUNK_X * CHUNK_DX, i / CHUNK_X * CHUNK_DY);
+}
+
+mediump vec4[CHUNK] fillImage(uvec2 xy, CmdImage cmd_img) {
+ mediump vec4 rgba[CHUNK];
+ for (uint i = 0; i < CHUNK; i++) {
+ ivec2 uv = ivec2(xy + chunk_offset(i)) + cmd_img.offset;
+ mediump vec4 fg_rgba;
+#ifdef ENABLE_IMAGE_INDICES
+ fg_rgba = imageLoad(images[cmd_img.index], uv);
+#else
+ fg_rgba = imageLoad(images, uv);
+#endif
+ fg_rgba.rgb = fromsRGB(fg_rgba.rgb);
+ rgba[i] = fg_rgba;
+ }
+ return rgba;
+}
+
+void main() {
+ uint tile_ix = gl_WorkGroupID.y * conf.width_in_tiles + gl_WorkGroupID.x;
+ Alloc cmd_alloc = slice_mem(conf.ptcl_alloc, tile_ix * PTCL_INITIAL_ALLOC, PTCL_INITIAL_ALLOC);
+ CmdRef cmd_ref = CmdRef(cmd_alloc.offset);
+
+ // Read scrach space allocation, written first in the command list.
+ Alloc scratch_alloc = alloc_read(cmd_alloc, cmd_ref.offset);
+ cmd_ref.offset += Alloc_size;
+
+ uvec2 xy_uint = uvec2(gl_LocalInvocationID.x + TILE_WIDTH_PX * gl_WorkGroupID.x, gl_LocalInvocationID.y + TILE_HEIGHT_PX * gl_WorkGroupID.y);
+ vec2 xy = vec2(xy_uint);
+ mediump vec4 rgba[CHUNK];
+ for (uint i = 0; i < CHUNK; i++) {
+ rgba[i] = vec4(0.0);
+ // TODO: remove this debug image support when the actual image method is plumbed.
+#ifdef DEBUG_IMAGES
+#ifdef ENABLE_IMAGE_INDICES
+ if (xy_uint.x < 1024 && xy_uint.y < 1024) {
+ rgba[i] = imageLoad(images[gl_WorkGroupID.x / 64], ivec2(xy_uint + chunk_offset(i))/4);
+ }
+#else
+ if (xy_uint.x < 1024 && xy_uint.y < 1024) {
+ rgb[i] = imageLoad(images[0], ivec2(xy_uint + chunk_offset(i))/4).rgb;
+ }
+#endif
+#endif
+ }
+
+ mediump float area[CHUNK];
+ uint clip_depth = 0;
+ bool mem_ok = mem_error == NO_ERROR;
+ while (mem_ok) {
+ uint tag = Cmd_tag(cmd_alloc, cmd_ref).tag;
+ if (tag == Cmd_End) {
+ break;
+ }
+ switch (tag) {
+ case Cmd_Stroke:
+ // Calculate distance field from all the line segments in this tile.
+ CmdStroke stroke = Cmd_Stroke_read(cmd_alloc, cmd_ref);
+ mediump float df[CHUNK];
+ for (uint k = 0; k < CHUNK; k++) df[k] = 1e9;
+ TileSegRef tile_seg_ref = TileSegRef(stroke.tile_ref);
+ do {
+ TileSeg seg = TileSeg_read(new_alloc(tile_seg_ref.offset, TileSeg_size, mem_ok), tile_seg_ref);
+ vec2 line_vec = seg.vector;
+ for (uint k = 0; k < CHUNK; k++) {
+ vec2 dpos = xy + vec2(0.5, 0.5) - seg.origin;
+ dpos += vec2(chunk_offset(k));
+ float t = clamp(dot(line_vec, dpos) / dot(line_vec, line_vec), 0.0, 1.0);
+ df[k] = min(df[k], length(line_vec * t - dpos));
+ }
+ tile_seg_ref = seg.next;
+ } while (tile_seg_ref.offset != 0);
+ for (uint k = 0; k < CHUNK; k++) {
+ area[k] = clamp(stroke.half_width + 0.5 - df[k], 0.0, 1.0);
+ }
+ cmd_ref.offset += 4 + CmdStroke_size;
+ break;
+ case Cmd_Fill:
+ CmdFill fill = Cmd_Fill_read(cmd_alloc, cmd_ref);
+ for (uint k = 0; k < CHUNK; k++) area[k] = float(fill.backdrop);
+ tile_seg_ref = TileSegRef(fill.tile_ref);
+ // Calculate coverage based on backdrop + coverage of each line segment
+ do {
+ TileSeg seg = TileSeg_read(new_alloc(tile_seg_ref.offset, TileSeg_size, mem_ok), tile_seg_ref);
+ for (uint k = 0; k < CHUNK; k++) {
+ vec2 my_xy = xy + vec2(chunk_offset(k));
+ vec2 start = seg.origin - my_xy;
+ vec2 end = start + seg.vector;
+ vec2 window = clamp(vec2(start.y, end.y), 0.0, 1.0);
+ if (window.x != window.y) {
+ vec2 t = (window - start.y) / seg.vector.y;
+ vec2 xs = vec2(mix(start.x, end.x, t.x), mix(start.x, end.x, t.y));
+ float xmin = min(min(xs.x, xs.y), 1.0) - 1e-6;
+ float xmax = max(xs.x, xs.y);
+ float b = min(xmax, 1.0);
+ float c = max(b, 0.0);
+ float d = max(xmin, 0.0);
+ float a = (b + 0.5 * (d * d - c * c) - xmin) / (xmax - xmin);
+ area[k] += a * (window.x - window.y);
+ }
+ area[k] += sign(seg.vector.x) * clamp(my_xy.y - seg.y_edge + 1.0, 0.0, 1.0);
+ }
+ tile_seg_ref = seg.next;
+ } while (tile_seg_ref.offset != 0);
+ for (uint k = 0; k < CHUNK; k++) {
+ area[k] = min(abs(area[k]), 1.0);
+ }
+ cmd_ref.offset += 4 + CmdFill_size;
+ break;
+ case Cmd_Solid:
+ for (uint k = 0; k < CHUNK; k++) {
+ area[k] = 1.0;
+ }
+ cmd_ref.offset += 4;
+ break;
+ case Cmd_Alpha:
+ CmdAlpha alpha = Cmd_Alpha_read(cmd_alloc, cmd_ref);
+ for (uint k = 0; k < CHUNK; k++) {
+ area[k] = alpha.alpha;
+ }
+ cmd_ref.offset += 4 + CmdAlpha_size;
+ break;
+ case Cmd_Color:
+ CmdColor color = Cmd_Color_read(cmd_alloc, cmd_ref);
+ mediump vec4 fg = unpacksRGB(color.rgba_color);
+ for (uint k = 0; k < CHUNK; k++) {
+ mediump vec4 fg_k = fg * area[k];
+ rgba[k] = rgba[k] * (1.0 - fg_k.a) + fg_k;
+ }
+ cmd_ref.offset += 4 + CmdColor_size;
+ break;
+ case Cmd_Image:
+ CmdImage fill_img = Cmd_Image_read(cmd_alloc, cmd_ref);
+ mediump vec4 img[CHUNK] = fillImage(xy_uint, fill_img);
+ for (uint k = 0; k < CHUNK; k++) {
+ mediump vec4 fg_k = img[k] * area[k];
+ rgba[k] = rgba[k] * (1.0 - fg_k.a) + fg_k;
+ }
+ cmd_ref.offset += 4 + CmdImage_size;
+ break;
+ case Cmd_BeginClip:
+ uint base_ix = (scratch_alloc.offset >> 2) + CLIP_STATE_SIZE * (clip_depth * TILE_WIDTH_PX * TILE_HEIGHT_PX +
+ gl_LocalInvocationID.x + TILE_WIDTH_PX * gl_LocalInvocationID.y);
+ for (uint k = 0; k < CHUNK; k++) {
+ uvec2 offset = chunk_offset(k);
+ uint srgb = packsRGB(vec4(rgba[k]));
+ mediump float alpha = clamp(abs(area[k]), 0.0, 1.0);
+ write_mem(scratch_alloc, base_ix + 0 + CLIP_STATE_SIZE * (offset.x + offset.y * TILE_WIDTH_PX), srgb);
+ write_mem(scratch_alloc, base_ix + 1 + CLIP_STATE_SIZE * (offset.x + offset.y * TILE_WIDTH_PX), floatBitsToUint(alpha));
+ rgba[k] = vec4(0.0);
+ }
+ clip_depth++;
+ cmd_ref.offset += 4;
+ break;
+ case Cmd_EndClip:
+ clip_depth--;
+ base_ix = (scratch_alloc.offset >> 2) + CLIP_STATE_SIZE * (clip_depth * TILE_WIDTH_PX * TILE_HEIGHT_PX +
+ gl_LocalInvocationID.x + TILE_WIDTH_PX * gl_LocalInvocationID.y);
+ for (uint k = 0; k < CHUNK; k++) {
+ uvec2 offset = chunk_offset(k);
+ uint srgb = read_mem(scratch_alloc, base_ix + 0 + CLIP_STATE_SIZE * (offset.x + offset.y * TILE_WIDTH_PX));
+ uint alpha = read_mem(scratch_alloc, base_ix + 1 + CLIP_STATE_SIZE * (offset.x + offset.y * TILE_WIDTH_PX));
+ mediump vec4 bg = unpacksRGB(srgb);
+ mediump vec4 fg = rgba[k] * area[k] * uintBitsToFloat(alpha);
+ rgba[k] = bg * (1.0 - fg.a) + fg;
+ }
+ cmd_ref.offset += 4;
+ break;
+ case Cmd_Jump:
+ cmd_ref = CmdRef(Cmd_Jump_read(cmd_alloc, cmd_ref).new_ref);
+ cmd_alloc.offset = cmd_ref.offset;
+ break;
+ }
+ }
+
+ for (uint i = 0; i < CHUNK; i++) {
+ imageStore(image, ivec2(xy_uint + chunk_offset(i)), vec4(tosRGB(rgba[i].rgb), rgba[i].a));
+ }
+}
diff --git a/vendor/gioui.org/shader/piet/kernel4_abi.c b/vendor/gioui.org/shader/piet/kernel4_abi.c
new file mode 100644
index 0000000..81c4ac4
--- /dev/null
+++ b/vendor/gioui.org/shader/piet/kernel4_abi.c
@@ -0,0 +1,23 @@
+// Code generated by gioui.org/cpu/cmd/compile DO NOT EDIT.
+
+//go:build linux && (arm64 || arm || amd64)
+// +build linux
+// +build arm64 arm amd64
+
+#include <stdint.h>
+#include <stdbool.h>
+#include "abi.h"
+#include "runtime.h"
+#include "kernel4_abi.h"
+
+const struct program_info kernel4_program_info = {
+ .has_cbarriers = 0,
+ .min_memory_size = 100000,
+ .desc_set_size = sizeof(struct kernel4_descriptor_set_layout),
+ .workgroup_size_x = 16,
+ .workgroup_size_y = 8,
+ .workgroup_size_z = 1,
+ .begin = kernel4_coroutine_begin,
+ .await = kernel4_coroutine_await,
+ .destroy = kernel4_coroutine_destroy,
+};
diff --git a/vendor/gioui.org/shader/piet/kernel4_abi.go b/vendor/gioui.org/shader/piet/kernel4_abi.go
new file mode 100644
index 0000000..9804647
--- /dev/null
+++ b/vendor/gioui.org/shader/piet/kernel4_abi.go
@@ -0,0 +1,43 @@
+// Code generated by gioui.org/cpu/cmd/compile DO NOT EDIT.
+
+//go:build linux && (arm64 || arm || amd64)
+// +build linux
+// +build arm64 arm amd64
+
+package piet
+
+import "gioui.org/cpu"
+import "unsafe"
+
+/*
+#cgo LDFLAGS: -lm
+
+#include <stdint.h>
+#include <stdbool.h>
+#include "abi.h"
+#include "runtime.h"
+#include "kernel4_abi.h"
+*/
+import "C"
+
+var Kernel4ProgramInfo = (*cpu.ProgramInfo)(unsafe.Pointer(&C.kernel4_program_info))
+
+type Kernel4DescriptorSetLayout = C.struct_kernel4_descriptor_set_layout
+
+const Kernel4Hash = "88ae29cf53c1819fad9680e85faaee30fcc934d1a978a717695c966ef051bf1d"
+
+func (l *Kernel4DescriptorSetLayout) Binding0() *cpu.BufferDescriptor {
+ return (*cpu.BufferDescriptor)(unsafe.Pointer(&l.binding0))
+}
+
+func (l *Kernel4DescriptorSetLayout) Binding1() *cpu.BufferDescriptor {
+ return (*cpu.BufferDescriptor)(unsafe.Pointer(&l.binding1))
+}
+
+func (l *Kernel4DescriptorSetLayout) Binding2() *cpu.ImageDescriptor {
+ return (*cpu.ImageDescriptor)(unsafe.Pointer(&l.binding2))
+}
+
+func (l *Kernel4DescriptorSetLayout) Binding3() *cpu.ImageDescriptor {
+ return (*cpu.ImageDescriptor)(unsafe.Pointer(&l.binding3))
+}
diff --git a/vendor/gioui.org/shader/piet/kernel4_abi.h b/vendor/gioui.org/shader/piet/kernel4_abi.h
new file mode 100644
index 0000000..0d3b4c9
--- /dev/null
+++ b/vendor/gioui.org/shader/piet/kernel4_abi.h
@@ -0,0 +1,19 @@
+// Code generated by gioui.org/cpu/cmd/compile DO NOT EDIT.
+
+struct kernel4_descriptor_set_layout {
+ struct buffer_descriptor binding0;
+ struct buffer_descriptor binding1;
+ struct image_descriptor binding2;
+ struct image_descriptor binding3;
+};
+
+extern coroutine kernel4_coroutine_begin(struct program_data *data,
+ int32_t workgroupX, int32_t workgroupY, int32_t workgroupZ,
+ void *workgroupMemory,
+ int32_t firstSubgroup,
+ int32_t subgroupCount) ATTR_HIDDEN;
+
+extern bool kernel4_coroutine_await(coroutine r, yield_result *res) ATTR_HIDDEN;
+extern void kernel4_coroutine_destroy(coroutine r) ATTR_HIDDEN;
+
+extern const struct program_info kernel4_program_info ATTR_HIDDEN;
diff --git a/vendor/gioui.org/shader/piet/kernel4_abi_nosupport.go b/vendor/gioui.org/shader/piet/kernel4_abi_nosupport.go
new file mode 100644
index 0000000..6cff47f
--- /dev/null
+++ b/vendor/gioui.org/shader/piet/kernel4_abi_nosupport.go
@@ -0,0 +1,30 @@
+// Code generated by gioui.org/cpu/cmd/compile DO NOT EDIT.
+
+//go:build !(linux && (arm64 || arm || amd64))
+// +build !linux !arm64,!arm,!amd64
+
+package piet
+
+import "gioui.org/cpu"
+
+var Kernel4ProgramInfo *cpu.ProgramInfo
+
+type Kernel4DescriptorSetLayout struct{}
+
+const Kernel4Hash = ""
+
+func (l *Kernel4DescriptorSetLayout) Binding0() *cpu.BufferDescriptor {
+ panic("unsupported")
+}
+
+func (l *Kernel4DescriptorSetLayout) Binding1() *cpu.BufferDescriptor {
+ panic("unsupported")
+}
+
+func (l *Kernel4DescriptorSetLayout) Binding2() *cpu.ImageDescriptor {
+ panic("unsupported")
+}
+
+func (l *Kernel4DescriptorSetLayout) Binding3() *cpu.ImageDescriptor {
+ panic("unsupported")
+}
diff --git a/vendor/gioui.org/shader/piet/kernel4_linux_amd64.syso b/vendor/gioui.org/shader/piet/kernel4_linux_amd64.syso
new file mode 100644
index 0000000..bdf0e46
Binary files /dev/null and b/vendor/gioui.org/shader/piet/kernel4_linux_amd64.syso differ
diff --git a/vendor/gioui.org/shader/piet/kernel4_linux_arm.syso b/vendor/gioui.org/shader/piet/kernel4_linux_arm.syso
new file mode 100644
index 0000000..6fab185
Binary files /dev/null and b/vendor/gioui.org/shader/piet/kernel4_linux_arm.syso differ
diff --git a/vendor/gioui.org/shader/piet/kernel4_linux_arm64.syso b/vendor/gioui.org/shader/piet/kernel4_linux_arm64.syso
new file mode 100644
index 0000000..b7994d7
Binary files /dev/null and b/vendor/gioui.org/shader/piet/kernel4_linux_arm64.syso differ
diff --git a/vendor/gioui.org/shader/piet/mem.h b/vendor/gioui.org/shader/piet/mem.h
new file mode 100644
index 0000000..9e81f04
--- /dev/null
+++ b/vendor/gioui.org/shader/piet/mem.h
@@ -0,0 +1,147 @@
+// SPDX-License-Identifier: Apache-2.0 OR MIT OR Unlicense
+
+layout(set = 0, binding = 0) buffer Memory {
+ // offset into memory of the next allocation, initialized by the user.
+ uint mem_offset;
+ // mem_error tracks the status of memory accesses, initialized to NO_ERROR
+ // by the user. ERR_MALLOC_FAILED is reported for insufficient memory.
+ // If MEM_DEBUG is defined the following errors are reported:
+ // - ERR_OUT_OF_BOUNDS is reported for out of bounds writes.
+ // - ERR_UNALIGNED_ACCESS for memory access not aligned to 32-bit words.
+ uint mem_error;
+ uint[] memory;
+};
+
+// Uncomment this line to add the size field to Alloc and enable memory checks.
+// Note that the Config struct in setup.h grows size fields as well.
+//#define MEM_DEBUG
+
+#define NO_ERROR 0
+#define ERR_MALLOC_FAILED 1
+#define ERR_OUT_OF_BOUNDS 2
+#define ERR_UNALIGNED_ACCESS 3
+
+#ifdef MEM_DEBUG
+#define Alloc_size 16
+#else
+#define Alloc_size 8
+#endif
+
+// Alloc represents a memory allocation.
+struct Alloc {
+ // offset in bytes into memory.
+ uint offset;
+#ifdef MEM_DEBUG
+ // size in bytes of the allocation.
+ uint size;
+#endif
+};
+
+struct MallocResult {
+ Alloc alloc;
+ // failed is true if the allocation overflowed memory.
+ bool failed;
+};
+
+// new_alloc synthesizes an Alloc from an offset and size.
+Alloc new_alloc(uint offset, uint size, bool mem_ok) {
+ Alloc a;
+ a.offset = offset;
+#ifdef MEM_DEBUG
+ if (mem_ok) {
+ a.size = size;
+ } else {
+ a.size = 0;
+ }
+#endif
+ return a;
+}
+
+// malloc allocates size bytes of memory.
+MallocResult malloc(uint size) {
+ MallocResult r;
+ uint offset = atomicAdd(mem_offset, size);
+ r.failed = offset + size > memory.length() * 4;
+ r.alloc = new_alloc(offset, size, !r.failed);
+ if (r.failed) {
+ atomicMax(mem_error, ERR_MALLOC_FAILED);
+ return r;
+ }
+#ifdef MEM_DEBUG
+ if ((size & 3) != 0) {
+ r.failed = true;
+ atomicMax(mem_error, ERR_UNALIGNED_ACCESS);
+ return r;
+ }
+#endif
+ return r;
+}
+
+// touch_mem checks whether access to the memory word at offset is valid.
+// If MEM_DEBUG is defined, touch_mem returns false if offset is out of bounds.
+// Offset is in words.
+bool touch_mem(Alloc alloc, uint offset) {
+#ifdef MEM_DEBUG
+ if (offset < alloc.offset/4 || offset >= (alloc.offset + alloc.size)/4) {
+ atomicMax(mem_error, ERR_OUT_OF_BOUNDS);
+ return false;
+ }
+#endif
+ return true;
+}
+
+// write_mem writes val to memory at offset.
+// Offset is in words.
+void write_mem(Alloc alloc, uint offset, uint val) {
+ if (!touch_mem(alloc, offset)) {
+ return;
+ }
+ memory[offset] = val;
+}
+
+// read_mem reads the value from memory at offset.
+// Offset is in words.
+uint read_mem(Alloc alloc, uint offset) {
+ if (!touch_mem(alloc, offset)) {
+ return 0;
+ }
+ uint v = memory[offset];
+ return v;
+}
+
+// slice_mem returns a sub-allocation inside another. Offset and size are in
+// bytes, relative to a.offset.
+Alloc slice_mem(Alloc a, uint offset, uint size) {
+#ifdef MEM_DEBUG
+ if ((offset & 3) != 0 || (size & 3) != 0) {
+ atomicMax(mem_error, ERR_UNALIGNED_ACCESS);
+ return Alloc(0, 0);
+ }
+ if (offset + size > a.size) {
+ // slice_mem is sometimes used for slices outside bounds,
+ // but never written.
+ return Alloc(0, 0);
+ }
+ return Alloc(a.offset + offset, size);
+#else
+ return Alloc(a.offset + offset);
+#endif
+}
+
+// alloc_write writes alloc to memory at offset bytes.
+void alloc_write(Alloc a, uint offset, Alloc alloc) {
+ write_mem(a, offset >> 2, alloc.offset);
+#ifdef MEM_DEBUG
+ write_mem(a, (offset >> 2) + 1, alloc.size);
+#endif
+}
+
+// alloc_read reads an Alloc from memory at offset bytes.
+Alloc alloc_read(Alloc a, uint offset) {
+ Alloc alloc;
+ alloc.offset = read_mem(a, offset >> 2);
+#ifdef MEM_DEBUG
+ alloc.size = read_mem(a, (offset >> 2) + 1);
+#endif
+ return alloc;
+}
diff --git a/vendor/gioui.org/shader/piet/path_coarse.comp b/vendor/gioui.org/shader/piet/path_coarse.comp
new file mode 100644
index 0000000..ea525f5
--- /dev/null
+++ b/vendor/gioui.org/shader/piet/path_coarse.comp
@@ -0,0 +1,294 @@
+// SPDX-License-Identifier: Apache-2.0 OR MIT OR Unlicense
+
+// Coarse rasterization of path segments.
+
+// Allocation and initialization of tiles for paths.
+
+#version 450
+#extension GL_GOOGLE_include_directive : enable
+
+#include "mem.h"
+#include "setup.h"
+
+#define LG_COARSE_WG 5
+#define COARSE_WG (1 << LG_COARSE_WG)
+
+layout(local_size_x = COARSE_WG, local_size_y = 1) in;
+
+layout(set = 0, binding = 1) readonly buffer ConfigBuf {
+ Config conf;
+};
+
+#include "pathseg.h"
+#include "tile.h"
+
+// scale factors useful for converting coordinates to tiles
+#define SX (1.0 / float(TILE_WIDTH_PX))
+#define SY (1.0 / float(TILE_HEIGHT_PX))
+
+#define ACCURACY 0.25
+#define Q_ACCURACY (ACCURACY * 0.1)
+#define REM_ACCURACY (ACCURACY - Q_ACCURACY)
+#define MAX_HYPOT2 (432.0 * Q_ACCURACY * Q_ACCURACY)
+#define MAX_QUADS 16
+
+vec2 eval_quad(vec2 p0, vec2 p1, vec2 p2, float t) {
+ float mt = 1.0 - t;
+ return p0 * (mt * mt) + (p1 * (mt * 2.0) + p2 * t) * t;
+}
+
+vec2 eval_cubic(vec2 p0, vec2 p1, vec2 p2, vec2 p3, float t) {
+ float mt = 1.0 - t;
+ return p0 * (mt * mt * mt) + (p1 * (mt * mt * 3.0) + (p2 * (mt * 3.0) + p3 * t) * t) * t;
+}
+
+struct SubdivResult {
+ float val;
+ float a0;
+ float a2;
+};
+
+/// An approximation to $\int (1 + 4x^2) ^ -0.25 dx$
+///
+/// This is used for flattening curves.
+#define D 0.67
+float approx_parabola_integral(float x) {
+ return x * inversesqrt(sqrt(1.0 - D + (D * D * D * D + 0.25 * x * x)));
+}
+
+/// An approximation to the inverse parabola integral.
+#define B 0.39
+float approx_parabola_inv_integral(float x) {
+ return x * sqrt(1.0 - B + (B * B + 0.25 * x * x));
+}
+
+SubdivResult estimate_subdiv(vec2 p0, vec2 p1, vec2 p2, float sqrt_tol) {
+ vec2 d01 = p1 - p0;
+ vec2 d12 = p2 - p1;
+ vec2 dd = d01 - d12;
+ float cross = (p2.x - p0.x) * dd.y - (p2.y - p0.y) * dd.x;
+ float x0 = (d01.x * dd.x + d01.y * dd.y) / cross;
+ float x2 = (d12.x * dd.x + d12.y * dd.y) / cross;
+ float scale = abs(cross / (length(dd) * (x2 - x0)));
+
+ float a0 = approx_parabola_integral(x0);
+ float a2 = approx_parabola_integral(x2);
+ float val = 0.0;
+ if (scale < 1e9) {
+ float da = abs(a2 - a0);
+ float sqrt_scale = sqrt(scale);
+ if (sign(x0) == sign(x2)) {
+ val = da * sqrt_scale;
+ } else {
+ float xmin = sqrt_tol / sqrt_scale;
+ val = sqrt_tol * da / approx_parabola_integral(xmin);
+ }
+ }
+ return SubdivResult(val, a0, a2);
+}
+
+void main() {
+ uint element_ix = gl_GlobalInvocationID.x;
+ PathSegRef ref = PathSegRef(conf.pathseg_alloc.offset + element_ix * PathSeg_size);
+
+ PathSegTag tag = PathSegTag(PathSeg_Nop, 0);
+ if (element_ix < conf.n_pathseg) {
+ tag = PathSeg_tag(conf.pathseg_alloc, ref);
+ }
+ bool mem_ok = mem_error == NO_ERROR;
+ switch (tag.tag) {
+ case PathSeg_Cubic:
+ PathCubic cubic = PathSeg_Cubic_read(conf.pathseg_alloc, ref);
+
+ uint trans_ix = cubic.trans_ix;
+ if (trans_ix > 0) {
+ TransformSegRef trans_ref = TransformSegRef(conf.trans_alloc.offset + (trans_ix - 1) * TransformSeg_size);
+ TransformSeg trans = TransformSeg_read(conf.trans_alloc, trans_ref);
+ cubic.p0 = trans.mat.xy * cubic.p0.x + trans.mat.zw * cubic.p0.y + trans.translate;
+ cubic.p1 = trans.mat.xy * cubic.p1.x + trans.mat.zw * cubic.p1.y + trans.translate;
+ cubic.p2 = trans.mat.xy * cubic.p2.x + trans.mat.zw * cubic.p2.y + trans.translate;
+ cubic.p3 = trans.mat.xy * cubic.p3.x + trans.mat.zw * cubic.p3.y + trans.translate;
+ }
+
+ vec2 err_v = 3.0 * (cubic.p2 - cubic.p1) + cubic.p0 - cubic.p3;
+ float err = err_v.x * err_v.x + err_v.y * err_v.y;
+ // The number of quadratics.
+ uint n_quads = max(uint(ceil(pow(err * (1.0 / MAX_HYPOT2), 1.0 / 6.0))), 1);
+ n_quads = min(n_quads, MAX_QUADS);
+ SubdivResult keep_params[MAX_QUADS];
+ // Iterate over quadratics and tote up the estimated number of segments.
+ float val = 0.0;
+ vec2 qp0 = cubic.p0;
+ float step = 1.0 / float(n_quads);
+ for (uint i = 0; i < n_quads; i++) {
+ float t = float(i + 1) * step;
+ vec2 qp2 = eval_cubic(cubic.p0, cubic.p1, cubic.p2, cubic.p3, t);
+ vec2 qp1 = eval_cubic(cubic.p0, cubic.p1, cubic.p2, cubic.p3, t - 0.5 * step);
+ qp1 = 2.0 * qp1 - 0.5 * (qp0 + qp2);
+ SubdivResult params = estimate_subdiv(qp0, qp1, qp2, sqrt(REM_ACCURACY));
+ keep_params[i] = params;
+ val += params.val;
+
+ qp0 = qp2;
+ }
+ uint n = max(uint(ceil(val * 0.5 / sqrt(REM_ACCURACY))), 1);
+
+ bool is_stroke = fill_mode_from_flags(tag.flags) == MODE_STROKE;
+ uint path_ix = cubic.path_ix;
+ Path path = Path_read(conf.tile_alloc, PathRef(conf.tile_alloc.offset + path_ix * Path_size));
+ Alloc path_alloc = new_alloc(path.tiles.offset, (path.bbox.z - path.bbox.x) * (path.bbox.w - path.bbox.y) * Tile_size, mem_ok);
+ ivec4 bbox = ivec4(path.bbox);
+ vec2 p0 = cubic.p0;
+ qp0 = cubic.p0;
+ float v_step = val / float(n);
+ int n_out = 1;
+ float val_sum = 0.0;
+ for (uint i = 0; i < n_quads; i++) {
+ float t = float(i + 1) * step;
+ vec2 qp2 = eval_cubic(cubic.p0, cubic.p1, cubic.p2, cubic.p3, t);
+ vec2 qp1 = eval_cubic(cubic.p0, cubic.p1, cubic.p2, cubic.p3, t - 0.5 * step);
+ qp1 = 2.0 * qp1 - 0.5 * (qp0 + qp2);
+ SubdivResult params = keep_params[i];
+ float u0 = approx_parabola_inv_integral(params.a0);
+ float u2 = approx_parabola_inv_integral(params.a2);
+ float uscale = 1.0 / (u2 - u0);
+ float target = float(n_out) * v_step;
+ while (n_out == n || target < val_sum + params.val) {
+ vec2 p1;
+ if (n_out == n) {
+ p1 = cubic.p3;
+ } else {
+ float u = (target - val_sum) / params.val;
+ float a = mix(params.a0, params.a2, u);
+ float au = approx_parabola_inv_integral(a);
+ float t = (au - u0) * uscale;
+ p1 = eval_quad(qp0, qp1, qp2, t);
+ }
+
+ // Output line segment
+
+ // Bounding box of element in pixel coordinates.
+ float xmin = min(p0.x, p1.x) - cubic.stroke.x;
+ float xmax = max(p0.x, p1.x) + cubic.stroke.x;
+ float ymin = min(p0.y, p1.y) - cubic.stroke.y;
+ float ymax = max(p0.y, p1.y) + cubic.stroke.y;
+ float dx = p1.x - p0.x;
+ float dy = p1.y - p0.y;
+ // Set up for per-scanline coverage formula, below.
+ float invslope = abs(dy) < 1e-9 ? 1e9 : dx / dy;
+ float c = (cubic.stroke.x + abs(invslope) * (0.5 * float(TILE_HEIGHT_PX) + cubic.stroke.y)) * SX;
+ float b = invslope; // Note: assumes square tiles, otherwise scale.
+ float a = (p0.x - (p0.y - 0.5 * float(TILE_HEIGHT_PX)) * b) * SX;
+
+ int x0 = int(floor(xmin * SX));
+ int x1 = int(floor(xmax * SX) + 1);
+ int y0 = int(floor(ymin * SY));
+ int y1 = int(floor(ymax * SY) + 1);
+
+ x0 = clamp(x0, bbox.x, bbox.z);
+ y0 = clamp(y0, bbox.y, bbox.w);
+ x1 = clamp(x1, bbox.x, bbox.z);
+ y1 = clamp(y1, bbox.y, bbox.w);
+ float xc = a + b * float(y0);
+ int stride = bbox.z - bbox.x;
+ int base = (y0 - bbox.y) * stride - bbox.x;
+ // TODO: can be tighter, use c to bound width
+ uint n_tile_alloc = uint((x1 - x0) * (y1 - y0));
+ // Consider using subgroups to aggregate atomic add.
+ MallocResult tile_alloc = malloc(n_tile_alloc * TileSeg_size);
+ if (tile_alloc.failed || !mem_ok) {
+ return;
+ }
+ uint tile_offset = tile_alloc.alloc.offset;
+
+ TileSeg tile_seg;
+
+ int xray = int(floor(p0.x*SX));
+ int last_xray = int(floor(p1.x*SX));
+ if (p0.y > p1.y) {
+ int tmp = xray;
+ xray = last_xray;
+ last_xray = tmp;
+ }
+ for (int y = y0; y < y1; y++) {
+ float tile_y0 = float(y * TILE_HEIGHT_PX);
+ int xbackdrop = max(xray + 1, bbox.x);
+ if (!is_stroke && min(p0.y, p1.y) < tile_y0 && xbackdrop < bbox.z) {
+ int backdrop = p1.y < p0.y ? 1 : -1;
+ TileRef tile_ref = Tile_index(path.tiles, uint(base + xbackdrop));
+ uint tile_el = tile_ref.offset >> 2;
+ if (touch_mem(path_alloc, tile_el + 1)) {
+ atomicAdd(memory[tile_el + 1], backdrop);
+ }
+ }
+
+ // next_xray is the xray for the next scanline; the line segment intersects
+ // all tiles between xray and next_xray.
+ int next_xray = last_xray;
+ if (y < y1 - 1) {
+ float tile_y1 = float((y + 1) * TILE_HEIGHT_PX);
+ float x_edge = mix(p0.x, p1.x, (tile_y1 - p0.y) / dy);
+ next_xray = int(floor(x_edge*SX));
+ }
+
+ int min_xray = min(xray, next_xray);
+ int max_xray = max(xray, next_xray);
+ int xx0 = min(int(floor(xc - c)), min_xray);
+ int xx1 = max(int(ceil(xc + c)), max_xray + 1);
+ xx0 = clamp(xx0, x0, x1);
+ xx1 = clamp(xx1, x0, x1);
+
+ for (int x = xx0; x < xx1; x++) {
+ float tile_x0 = float(x * TILE_WIDTH_PX);
+ TileRef tile_ref = Tile_index(TileRef(path.tiles.offset), uint(base + x));
+ uint tile_el = tile_ref.offset >> 2;
+ uint old = 0;
+ if (touch_mem(path_alloc, tile_el)) {
+ old = atomicExchange(memory[tile_el], tile_offset);
+ }
+ tile_seg.origin = p0;
+ tile_seg.vector = p1 - p0;
+ float y_edge = 0.0;
+ if (!is_stroke) {
+ y_edge = mix(p0.y, p1.y, (tile_x0 - p0.x) / dx);
+ if (min(p0.x, p1.x) < tile_x0) {
+ vec2 p = vec2(tile_x0, y_edge);
+ if (p0.x > p1.x) {
+ tile_seg.vector = p - p0;
+ } else {
+ tile_seg.origin = p;
+ tile_seg.vector = p1 - p;
+ }
+ // kernel4 uses sign(vector.x) for the sign of the intersection backdrop.
+ // Nudge zeroes towards the intended sign.
+ if (tile_seg.vector.x == 0) {
+ tile_seg.vector.x = sign(p1.x - p0.x)*1e-9;
+ }
+ }
+ if (x <= min_xray || max_xray < x) {
+ // Reject inconsistent intersections.
+ y_edge = 1e9;
+ }
+ }
+ tile_seg.y_edge = y_edge;
+ tile_seg.next.offset = old;
+ TileSeg_write(tile_alloc.alloc, TileSegRef(tile_offset), tile_seg);
+ tile_offset += TileSeg_size;
+ }
+ xc += b;
+ base += stride;
+ xray = next_xray;
+ }
+
+ n_out += 1;
+ target += v_step;
+ p0 = p1;
+ }
+ val_sum += params.val;
+
+ qp0 = qp2;
+ }
+
+ break;
+ }
+}
diff --git a/vendor/gioui.org/shader/piet/path_coarse_abi.c b/vendor/gioui.org/shader/piet/path_coarse_abi.c
new file mode 100644
index 0000000..575d260
--- /dev/null
+++ b/vendor/gioui.org/shader/piet/path_coarse_abi.c
@@ -0,0 +1,23 @@
+// Code generated by gioui.org/cpu/cmd/compile DO NOT EDIT.
+
+//go:build linux && (arm64 || arm || amd64)
+// +build linux
+// +build arm64 arm amd64
+
+#include <stdint.h>
+#include <stdbool.h>
+#include "abi.h"
+#include "runtime.h"
+#include "path_coarse_abi.h"
+
+const struct program_info path_coarse_program_info = {
+ .has_cbarriers = 0,
+ .min_memory_size = 100000,
+ .desc_set_size = sizeof(struct path_coarse_descriptor_set_layout),
+ .workgroup_size_x = 32,
+ .workgroup_size_y = 1,
+ .workgroup_size_z = 1,
+ .begin = path_coarse_coroutine_begin,
+ .await = path_coarse_coroutine_await,
+ .destroy = path_coarse_coroutine_destroy,
+};
diff --git a/vendor/gioui.org/shader/piet/path_coarse_abi.go b/vendor/gioui.org/shader/piet/path_coarse_abi.go
new file mode 100644
index 0000000..34d8ac3
--- /dev/null
+++ b/vendor/gioui.org/shader/piet/path_coarse_abi.go
@@ -0,0 +1,35 @@
+// Code generated by gioui.org/cpu/cmd/compile DO NOT EDIT.
+
+//go:build linux && (arm64 || arm || amd64)
+// +build linux
+// +build arm64 arm amd64
+
+package piet
+
+import "gioui.org/cpu"
+import "unsafe"
+
+/*
+#cgo LDFLAGS: -lm
+
+#include <stdint.h>