From fea91e92d236320acc7a3ab68bcffbcfd9182f84 Mon Sep 17 00:00:00 2001 From: "Ryan M. Shetley" Date: Wed, 25 Sep 2024 09:13:43 -0500 Subject: [PATCH] Merge development (#112) * started implementing rhi * UR-244: Fix reimport * UR-245: Remove RiveViewport client, replaced by RiveWidget for fixed UI blending; Removed UI Blend Mode * fix includes for release build * ensure all Initialization paths listen for RiveFile delegates; Artboard file code cleanup * 4b4529abda1ffdf2e4801974fec7e93bdf1c53d8 continued: "ensure all Initialization paths listen for RiveFile delegates; Artboard file code cleanup" * expose OnRiveReady for RiveTextureObject * UR-247: Artboard / State machine dropdowns implemented across RiveWidget, RiveTextureObject, RiveActorComponent * rive audio changes * editor-only: reset rive textures on editor begin play * RiveWidget audio convenience * win,android: update rive to b192f37856b249fbd6247700bf13d07d9b064c47 * macOS, iOS: update rive to b192f37856b249fbd6247700bf13d07d9b064c47 * Runtime Asset swapping (#105) The RiveAsset class was broken into 3 classes: RiveAudioAsset; RiveImageAsset, which contains the runtime-compatible functions LoadImageBytes (accepting an array of bytes in png, webp, or jpg format) and LoadTexture (not yet fully implemented, as we wait on Rive to allow us to submit bitmap data); and RiveFontAsset, which contains the runtime-compatible functions LoadFontFace (loads an Unreal font face, if the font face's load policy is set to Inline) and LoadFontBytes (accepting an array of bytes in ttf/otf format). RiveFile now supports a function "GetRiveAssetById", returning a base RiveAsset which can later be cast to one of the specific asset types to operate on. *** * asset overrides * load image bytes call * cleanup * Ensure RiveWidget setup is called a short time after init * Fixed Artboards. (#104) * Fixed Artboards. - No more phantom artboards during PIE. - Artboards in the level reset on PIE begin. * Made PR suggested changes.
- Moved Artboard check/creation to URiveTextureObject::RiveReady - Removed EditorBeginPlay() and bHasBegunPlay * working imagemesh without background * missed some headers * UR-249: Improve UMG widget handling, with minimum sizes based on selected artboard as the initial size * imageRect now working * win,android: update rive to 14d13c3ded7b141d5130c0246901008dc070c9fe * macOS, iOS: update rive to 14d13c3ded7b141d5130c0246901008dc070c9fe * fix a few includes * fix filehelper include * add another include * replaced forward with include; fixing a peculiar compilation issue * add SetTextValueAtPath, and GetStringValueAtPath * updated to work with new rive layout * basic rendering and shader permutations working * made work with main * win,android refactor update rive to bc8ed46e56b3c312f0333aa82f0486838a898739 * missing files * macOS, iOS refactor update rive to bc8ed46e56b3c312f0333aa82f0486838a898739 * remove unused components and content * header fixes for packaging * update plugin icon with Rive graphic * android rendertarget slip * some cleanup and exports for gms * more cleanup * UR-207: Rive on Metal looks washed out * commit basic FilterPlugin * removed unneeded guards * updated to latest runtime * added rive rhi shaders * removed duplicate calls in render target * allow some UTextures to be used as targets for runtime asset overriding * fixed some compiler warnings and added flag to ignore shader warnings * add rive renderer settings * remove duplicate include * added default load action instead of silencing warning * updated to use the enable tech preview setting * addressed PR comments * move editor specific settings to RiveEditor module * Use RiveFile initialization delegate before completing RiveTextureObject init * added webp loading, fixed issues with packaging the plugin --------- Co-authored-by: blakdragan7 Co-authored-by: Tod-Rive --- Config/FilterPlugin.ini | 8 + .../Generated/advanced_blend.minified.ush | 270 ++++ .../Rive/Generated/atomic_draw.minified.ush | 738 ++++++++++ .../blit_texture_as_draw.minified.ush | 33 + .../Rive/Generated/color_ramp.minified.ush | 80 ++ .../Rive/Generated/common.minified.ush | 243 ++++ .../Rive/Generated/constants.minified.ush | 154 +++ .../Generated/draw_image_mesh.minified.ush | 193 +++ .../Rive/Generated/draw_path.minified.ush | 530 ++++++++ .../Generated/draw_path_common.minified.ush | 301 +++++ .../Private/Rive/Generated/glsl.minified.ush | 492 +++++++ .../Private/Rive/Generated/hlsl.minified.ush | 385 ++++++ .../Private/Rive/Generated/metal.minified.ush | 447 +++++++ .../Generated/pls_load_store_ext.minified.ush | 98 ++ .../Private/Rive/Generated/rhi.minified.ush | 354 +++++ .../Generated/specialization.minified.ush | 13 + .../Rive/Generated/stencil_draw.minified.ush | 30 + .../Rive/Generated/tessellate.minified.ush | 425 ++++++ Shaders/Private/Rive/atomic_base.ush | 13 + .../Private/Rive/atomic_draw_image_mesh.usf | 4 + .../Private/Rive/atomic_draw_image_rect.usf | 4 + .../Rive/atomic_draw_interior_triangles.usf | 3 + Shaders/Private/Rive/atomic_draw_path.usf | 3 + Shaders/Private/Rive/atomic_resolve_pls.usf | 4 + Shaders/Private/Rive/color_ramp.usf | 9 + Shaders/Private/Rive/draw_image_mesh.usf | 14 + .../Private/Rive/draw_interior_triangles.usf | 13 + Shaders/Private/Rive/draw_path.usf | 14 + Shaders/Private/Rive/parse_environment.ush | 29 + Shaders/Private/Rive/tessellate.usf | 9 + Shaders/Private/Rive/test.usf | 11 + .../Rive/Private/Game/RiveActorComponent.cpp | 129 +- .../Private/Rive/Assets/RiveImageAsset.cpp | 122 +-
Source/Rive/Private/Rive/RiveTexture.cpp | 3 +- .../Rive/Private/Rive/RiveTextureObject.cpp | 103 +- .../Rive/Private/Rive/RiveTextureResource.cpp | 7 +- Source/Rive/Private/RiveModule.cpp | 5 + Source/Rive/Public/Game/RiveActorComponent.h | 4 + Source/Rive/Public/Rive/RiveTextureObject.h | 4 +- Source/Rive/Rive.Build.cs | 4 +- .../RiveEditor/Private/RiveEditorModule.cpp | 21 + Source/RiveEditor/RiveEditor.Build.cs | 5 +- .../Private/Platform/RiveRenderTargetRHI.cpp | 71 + .../Private/Platform/RiveRenderTargetRHI.h | 28 + .../Private/Platform/RiveRendererD3D11.h | 6 + .../Private/Platform/RiveRendererRHI.cpp | 35 + .../Private/Platform/RiveRendererRHI.h | 13 + .../Platform/pls_render_context_rhi_impl.cpp | 1191 +++++++++++++++++ .../Platform/pls_render_context_rhi_impl.hpp | 239 ++++ .../RiveRenderer/Private/RiveRenderTarget.cpp | 29 +- .../RiveRenderer/Private/RiveRenderTarget.h | 7 + Source/RiveRenderer/Private/RiveRenderer.h | 5 + .../Private/RiveRendererModule.cpp | 107 +- .../RiveRenderer/Private/RiveRendererModule.h | 3 + .../Private/RiveRendererSettings.cpp | 9 + .../Private/Shaders/ShaderPipelineManager.cpp | 125 ++ .../Private/Shaders/ShaderPipelineManager.h | 358 +++++ .../RiveRenderer/Public/IRiveRenderTarget.h | 8 + .../RiveRenderer/Public/IRiveRendererModule.h | 2 +- .../Public/RiveRendererSettings.h | 26 + Source/RiveRenderer/RiveRenderer.Build.cs | 4 +- .../rive/animation/transition_comparator.hpp | 1 + .../transition_value_trigger_comparator.hpp | 13 + .../data_bind/bindable_property_trigger.hpp | 13 + .../rive/data_bind/context/context_value.hpp | 4 +- .../context/context_value_trigger.hpp | 16 + .../converters/data_converter_trigger.hpp | 15 + .../Includes/rive/data_bind/data_context.hpp | 4 - .../rive/data_bind/data_values/data_type.hpp | 5 +- .../data_values/data_value_boolean.hpp | 1 + .../data_values/data_value_color.hpp | 3 +- .../data_bind/data_values/data_value_enum.hpp | 1 + .../data_values/data_value_number.hpp | 1 + .../data_values/data_value_string.hpp | 1 + .../data_values/data_value_trigger.hpp | 24 + ...ansition_value_trigger_comparator_base.hpp | 72 + .../Includes/rive/generated/core_registry.hpp | 47 + .../bindable_property_trigger_base.hpp | 71 + .../data_converter_trigger_base.hpp | 36 + .../rive/generated/text/text_base.hpp | 18 + .../viewmodel_instance_trigger_base.hpp | 71 + .../viewmodel_property_trigger_base.hpp | 37 + .../Includes/rive/math/math_types.hpp | 4 + .../out/generated/shaders/.makecommand | 1 + .../shaders/advanced_blend.exports.h | 178 +++ .../generated/shaders/advanced_blend.glsl.hpp | 281 ++++ .../shaders/advanced_blend.minified.ush | 270 ++++ .../generated/shaders/atomic_draw.exports.h | 178 +++ .../generated/shaders/atomic_draw.glsl.hpp | 746 +++++++++++ .../shaders/atomic_draw.minified.ush | 735 ++++++++++ .../shaders/blit_texture_as_draw.exports.h | 178 +++ .../shaders/blit_texture_as_draw.glsl.hpp | 44 + .../shaders/blit_texture_as_draw.minified.ush | 33 + .../generated/shaders/color_ramp.exports.h | 178 +++ .../out/generated/shaders/color_ramp.glsl.hpp | 91 ++ .../generated/shaders/color_ramp.minified.ush | 80 ++ .../out/generated/shaders/common.exports.h | 178 +++ .../out/generated/shaders/common.glsl.hpp | 254 ++++ .../out/generated/shaders/common.minified.ush | 243 ++++ .../out/generated/shaders/constants.exports.h | 178 +++ .../out/generated/shaders/constants.glsl.hpp | 165 +++ .../generated/shaders/constants.minified.ush | 154 +++ .../shaders/draw_image_mesh.exports.h | 178 +++ .../shaders/draw_image_mesh.glsl.hpp | 204 +++ 
.../shaders/draw_image_mesh.minified.ush | 193 +++ .../out/generated/shaders/draw_path.exports.h | 178 +++ .../out/generated/shaders/draw_path.glsl.hpp | 540 ++++++++ .../generated/shaders/draw_path.minified.ush | 529 ++++++++ .../shaders/draw_path_common.exports.h | 178 +++ .../shaders/draw_path_common.glsl.hpp | 312 +++++ .../shaders/draw_path_common.minified.ush | 301 +++++ .../out/generated/shaders/glsl.exports.h | 178 +++ .../out/generated/shaders/glsl.glsl.hpp | 508 +++++++ .../out/generated/shaders/glsl.minified.ush | 497 +++++++ .../shaders/out/generated/shaders/glsl.stamp | 0 .../out/generated/shaders/hlsl.exports.h | 178 +++ .../out/generated/shaders/hlsl.glsl.hpp | 396 ++++++ .../out/generated/shaders/hlsl.minified.ush | 385 ++++++ .../out/generated/shaders/metal.exports.h | 178 +++ .../out/generated/shaders/metal.glsl.hpp | 458 +++++++ .../out/generated/shaders/metal.minified.ush | 447 +++++++ .../shaders/pls_load_store_ext.exports.h | 178 +++ .../shaders/pls_load_store_ext.glsl.hpp | 109 ++ .../shaders/pls_load_store_ext.minified.ush | 98 ++ .../out/generated/shaders/rhi.exports.h | 178 +++ .../out/generated/shaders/rhi.glsl.hpp | 365 +++++ .../out/generated/shaders/rhi.minified.ush | 354 +++++ .../shaders/specialization.exports.h | 178 +++ .../generated/shaders/specialization.glsl.hpp | 24 + .../shaders/specialization.minified.ush | 13 + .../generated/shaders/stencil_draw.exports.h | 178 +++ .../generated/shaders/stencil_draw.glsl.hpp | 41 + .../shaders/stencil_draw.minified.ush | 30 + .../generated/shaders/tessellate.exports.h | 178 +++ .../out/generated/shaders/tessellate.glsl.hpp | 436 ++++++ .../generated/shaders/tessellate.minified.ush | 425 ++++++ .../Includes/rive/text/raw_text.hpp | 1 + .../RiveLibrary/Includes/rive/text/text.hpp | 12 +- .../RiveLibrary/Includes/rive/text_engine.hpp | 15 + .../viewmodel/viewmodel_instance_trigger.hpp | 23 + .../viewmodel/viewmodel_property_trigger.hpp | 13 + .../RiveLibrary/Includes/webp/decode.h | 506 +++++++ .../RiveLibrary/Includes/webp/demux.h | 367 +++++ .../RiveLibrary/Includes/webp/encode.h | 557 ++++++++ .../Includes/webp/format_constants.h | 87 ++ .../RiveLibrary/Includes/webp/mux.h | 591 ++++++++ .../RiveLibrary/Includes/webp/mux_types.h | 99 ++ .../RiveLibrary/Includes/webp/types.h | 93 ++ .../RiveLibrary/RiveLibrary.Build.cs | 1 + 149 files changed, 23042 insertions(+), 166 deletions(-) create mode 100644 Config/FilterPlugin.ini create mode 100644 Shaders/Private/Rive/Generated/advanced_blend.minified.ush create mode 100644 Shaders/Private/Rive/Generated/atomic_draw.minified.ush create mode 100644 Shaders/Private/Rive/Generated/blit_texture_as_draw.minified.ush create mode 100644 Shaders/Private/Rive/Generated/color_ramp.minified.ush create mode 100644 Shaders/Private/Rive/Generated/common.minified.ush create mode 100644 Shaders/Private/Rive/Generated/constants.minified.ush create mode 100644 Shaders/Private/Rive/Generated/draw_image_mesh.minified.ush create mode 100644 Shaders/Private/Rive/Generated/draw_path.minified.ush create mode 100644 Shaders/Private/Rive/Generated/draw_path_common.minified.ush create mode 100644 Shaders/Private/Rive/Generated/glsl.minified.ush create mode 100644 Shaders/Private/Rive/Generated/hlsl.minified.ush create mode 100644 Shaders/Private/Rive/Generated/metal.minified.ush create mode 100644 Shaders/Private/Rive/Generated/pls_load_store_ext.minified.ush create mode 100644 Shaders/Private/Rive/Generated/rhi.minified.ush create mode 100644 
Shaders/Private/Rive/Generated/specialization.minified.ush create mode 100644 Shaders/Private/Rive/Generated/stencil_draw.minified.ush create mode 100644 Shaders/Private/Rive/Generated/tessellate.minified.ush create mode 100644 Shaders/Private/Rive/atomic_base.ush create mode 100644 Shaders/Private/Rive/atomic_draw_image_mesh.usf create mode 100644 Shaders/Private/Rive/atomic_draw_image_rect.usf create mode 100644 Shaders/Private/Rive/atomic_draw_interior_triangles.usf create mode 100644 Shaders/Private/Rive/atomic_draw_path.usf create mode 100644 Shaders/Private/Rive/atomic_resolve_pls.usf create mode 100644 Shaders/Private/Rive/color_ramp.usf create mode 100644 Shaders/Private/Rive/draw_image_mesh.usf create mode 100644 Shaders/Private/Rive/draw_interior_triangles.usf create mode 100644 Shaders/Private/Rive/draw_path.usf create mode 100644 Shaders/Private/Rive/parse_environment.ush create mode 100644 Shaders/Private/Rive/tessellate.usf create mode 100644 Shaders/Private/Rive/test.usf create mode 100644 Source/RiveRenderer/Private/Platform/RiveRenderTargetRHI.cpp create mode 100644 Source/RiveRenderer/Private/Platform/RiveRenderTargetRHI.h create mode 100644 Source/RiveRenderer/Private/Platform/RiveRendererRHI.cpp create mode 100644 Source/RiveRenderer/Private/Platform/RiveRendererRHI.h create mode 100644 Source/RiveRenderer/Private/Platform/pls_render_context_rhi_impl.cpp create mode 100644 Source/RiveRenderer/Private/Platform/pls_render_context_rhi_impl.hpp create mode 100644 Source/RiveRenderer/Private/RiveRendererSettings.cpp create mode 100644 Source/RiveRenderer/Private/Shaders/ShaderPipelineManager.cpp create mode 100644 Source/RiveRenderer/Private/Shaders/ShaderPipelineManager.h create mode 100644 Source/RiveRenderer/Public/RiveRendererSettings.h create mode 100644 Source/ThirdParty/RiveLibrary/Includes/rive/animation/transition_value_trigger_comparator.hpp create mode 100644 Source/ThirdParty/RiveLibrary/Includes/rive/data_bind/bindable_property_trigger.hpp create mode 100644 Source/ThirdParty/RiveLibrary/Includes/rive/data_bind/context/context_value_trigger.hpp create mode 100644 Source/ThirdParty/RiveLibrary/Includes/rive/data_bind/converters/data_converter_trigger.hpp create mode 100644 Source/ThirdParty/RiveLibrary/Includes/rive/data_bind/data_values/data_value_trigger.hpp create mode 100644 Source/ThirdParty/RiveLibrary/Includes/rive/generated/animation/transition_value_trigger_comparator_base.hpp create mode 100644 Source/ThirdParty/RiveLibrary/Includes/rive/generated/data_bind/bindable_property_trigger_base.hpp create mode 100644 Source/ThirdParty/RiveLibrary/Includes/rive/generated/data_bind/converters/data_converter_trigger_base.hpp create mode 100644 Source/ThirdParty/RiveLibrary/Includes/rive/generated/viewmodel/viewmodel_instance_trigger_base.hpp create mode 100644 Source/ThirdParty/RiveLibrary/Includes/rive/generated/viewmodel/viewmodel_property_trigger_base.hpp create mode 100644 Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/.makecommand create mode 100644 Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/advanced_blend.exports.h create mode 100644 Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/advanced_blend.glsl.hpp create mode 100644 Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/advanced_blend.minified.ush create mode 100644 Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/atomic_draw.exports.h create mode 100644 
Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/atomic_draw.glsl.hpp create mode 100644 Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/atomic_draw.minified.ush create mode 100644 Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/blit_texture_as_draw.exports.h create mode 100644 Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/blit_texture_as_draw.glsl.hpp create mode 100644 Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/blit_texture_as_draw.minified.ush create mode 100644 Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/color_ramp.exports.h create mode 100644 Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/color_ramp.glsl.hpp create mode 100644 Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/color_ramp.minified.ush create mode 100644 Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/common.exports.h create mode 100644 Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/common.glsl.hpp create mode 100644 Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/common.minified.ush create mode 100644 Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/constants.exports.h create mode 100644 Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/constants.glsl.hpp create mode 100644 Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/constants.minified.ush create mode 100644 Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/draw_image_mesh.exports.h create mode 100644 Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/draw_image_mesh.glsl.hpp create mode 100644 Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/draw_image_mesh.minified.ush create mode 100644 Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/draw_path.exports.h create mode 100644 Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/draw_path.glsl.hpp create mode 100644 Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/draw_path.minified.ush create mode 100644 Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/draw_path_common.exports.h create mode 100644 Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/draw_path_common.glsl.hpp create mode 100644 Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/draw_path_common.minified.ush create mode 100644 Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/glsl.exports.h create mode 100644 Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/glsl.glsl.hpp create mode 100644 Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/glsl.minified.ush create mode 100644 Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/glsl.stamp create mode 100644 Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/hlsl.exports.h create mode 100644 Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/hlsl.glsl.hpp create mode 100644 Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/hlsl.minified.ush create mode 100644 Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/metal.exports.h create mode 100644 
Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/metal.glsl.hpp create mode 100644 Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/metal.minified.ush create mode 100644 Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/pls_load_store_ext.exports.h create mode 100644 Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/pls_load_store_ext.glsl.hpp create mode 100644 Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/pls_load_store_ext.minified.ush create mode 100644 Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/rhi.exports.h create mode 100644 Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/rhi.glsl.hpp create mode 100644 Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/rhi.minified.ush create mode 100644 Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/specialization.exports.h create mode 100644 Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/specialization.glsl.hpp create mode 100644 Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/specialization.minified.ush create mode 100644 Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/stencil_draw.exports.h create mode 100644 Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/stencil_draw.glsl.hpp create mode 100644 Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/stencil_draw.minified.ush create mode 100644 Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/tessellate.exports.h create mode 100644 Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/tessellate.glsl.hpp create mode 100644 Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/tessellate.minified.ush create mode 100644 Source/ThirdParty/RiveLibrary/Includes/rive/viewmodel/viewmodel_instance_trigger.hpp create mode 100644 Source/ThirdParty/RiveLibrary/Includes/rive/viewmodel/viewmodel_property_trigger.hpp create mode 100644 Source/ThirdParty/RiveLibrary/Includes/webp/decode.h create mode 100644 Source/ThirdParty/RiveLibrary/Includes/webp/demux.h create mode 100644 Source/ThirdParty/RiveLibrary/Includes/webp/encode.h create mode 100644 Source/ThirdParty/RiveLibrary/Includes/webp/format_constants.h create mode 100644 Source/ThirdParty/RiveLibrary/Includes/webp/mux.h create mode 100644 Source/ThirdParty/RiveLibrary/Includes/webp/mux_types.h create mode 100644 Source/ThirdParty/RiveLibrary/Includes/webp/types.h diff --git a/Config/FilterPlugin.ini b/Config/FilterPlugin.ini new file mode 100644 index 00000000..ccebca2f --- /dev/null +++ b/Config/FilterPlugin.ini @@ -0,0 +1,8 @@ +[FilterPlugin] +; This section lists additional files which will be packaged along with your plugin. Paths should be listed relative to the root plugin directory, and +; may include "...", "*", and "?" wildcards to match directories, files, and individual characters respectively. +; +; Examples: +; /README.txt +; /Extras/... 
+; /Binaries/ThirdParty/*.dll diff --git a/Shaders/Private/Rive/Generated/advanced_blend.minified.ush b/Shaders/Private/Rive/Generated/advanced_blend.minified.ush new file mode 100644 index 00000000..77f69dff --- /dev/null +++ b/Shaders/Private/Rive/Generated/advanced_blend.minified.ush @@ -0,0 +1,270 @@ +/* + * Copyright 2022 Rive + */ + +// From the KHR_blend_equation_advanced spec: +// +// The advanced blend equations are those listed in tables X.1 and X.2. When +// using one of these equations, blending is performed according to the +// following equations: +// +// R = f(Rs',Rd')*p0(As,Ad) + Y*Rs'*p1(As,Ad) + Z*Rd'*p2(As,Ad) +// G = f(Gs',Gd')*p0(As,Ad) + Y*Gs'*p1(As,Ad) + Z*Gd'*p2(As,Ad) +// B = f(Bs',Bd')*p0(As,Ad) + Y*Bs'*p1(As,Ad) + Z*Bd'*p2(As,Ad) +// A = X*p0(As,Ad) + Y*p1(As,Ad) + Z*p2(As,Ad) +// +// where the function f and terms X, Y, and Z are specified in the table. +// The R, G, and B components of the source color used for blending are +// considered to have been premultiplied by the A component prior to +// blending. The base source color (Rs',Gs',Bs') is obtained by dividing +// through by the A component: +// +// (Rs', Gs', Bs') = +// (0, 0, 0), if As == 0 +// (Rs/As, Gs/As, Bs/As), otherwise +// +// The destination color components are always considered to have been +// premultiplied by the destination A component and the base destination +// color (Rd', Gd', Bd') is obtained by dividing through by the A component: +// +// (Rd', Gd', Bd') = +// (0, 0, 0), if Ad == 0 +// (Rd/Ad, Gd/Ad, Bd/Ad), otherwise +// +// When blending using advanced blend equations, we expect that the R, G, and +// B components of premultiplied source and destination color inputs be +// stored as the product of non-premultiplied R, G, and B components and the +// A component of the color. If any R, G, or B component of a premultiplied +// input color is non-zero and the A component is zero, the color is +// considered ill-formed, and the corresponding component of the blend result +// will be undefined. +// +// The weighting functions p0, p1, and p2 are defined as follows: +// +// p0(As,Ad) = As*Ad +// p1(As,Ad) = As*(1-Ad) +// p2(As,Ad) = Ad*(1-As) +// +// In these functions, the A components of the source and destination colors +// are taken to indicate the portion of the pixel covered by the fragment +// (source) and the fragments previously accumulated in the pixel +// (destination). The functions p0, p1, and p2 approximate the relative +// portion of the pixel covered by the intersection of the source and +// destination, covered only by the source, and covered only by the +// destination, respectively. The equations defined here assume that there +// is no correlation between the source and destination coverage. 
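+// +// A worked check of the equations above (our illustration; the numbers are not from the spec): +// blend a half-covered red source over an opaque green destination with BLEND_MODE_MULTIPLY. +// Premultiplied src = (.5, 0, 0, .5) and dst = (0, 1, 0, 1) unmultiply to src' = (1, 0, 0) and +// dst' = (0, 1, 0), so f(src', dst') = src'*dst' = (0, 0, 0). The weights are p0 = As*Ad = .5, +// p1 = As*(1-Ad) = 0, p2 = Ad*(1-As) = .5, giving RGB = f*p0 + src'*p1 + dst'*p2 = (0, .5, 0) +// and A = p0 + p1 + p2 = 1: the covered half of the pixel multiplies to black while the +// uncovered half keeps the destination green.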
+// + +#ifdef FRAGMENT + +#ifdef ENABLE_KHR_BLEND +layout( +#ifdef ENABLE_HSL_BLEND_MODES + blend_support_all_equations +#else + blend_support_multiply, + blend_support_screen, + blend_support_overlay, + blend_support_darken, + blend_support_lighten, + blend_support_colordodge, + blend_support_colorburn, + blend_support_hardlight, + blend_support_softlight, + blend_support_difference, + blend_support_exclusion +#endif + ) out; +#endif // ENABLE_KHR_BLEND + +#ifdef ENABLE_ADVANCED_BLEND +#ifdef ENABLE_HSL_BLEND_MODES +// When using one of the HSL blend equations in table X.2 as the blend equation, the RGB color +// components produced by the function f() are effectively obtained by converting both the +// non-premultiplied source and destination colors to the HSL (hue, saturation, luminosity) color +// space, generating a new HSL color by selecting H, S, and L components from the source or +// destination according to the blend equation, and then converting the result back to RGB. The HSL +// blend equations are only well defined when the values of the input color components are in the +// range [0..1]. +half minv3(half3 c) { return min(min(c.x, c.y), c.z); } +half maxv3(half3 c) { return max(max(c.x, c.y), c.z); } +half lumv3(half3 c) { return dot(c, make_half3(.30, .59, .11)); } +half satv3(half3 c) { return maxv3(c) - minv3(c); } + +// If any color components are outside [0,1], adjust the color to get the components in range. +half3 clip_color(half3 color) +{ + half lum = lumv3(color); + half mincol = minv3(color); + half maxcol = maxv3(color); + if (mincol < .0) + color = lum + ((color - lum) * lum) / (lum - mincol); + if (maxcol > 1.) + color = lum + ((color - lum) * (1. - lum)) / (maxcol - lum); + return color; +} + +// Take the base RGB color <cbase> and override its luminosity with that of the RGB color <clum>. +half3 set_lum(half3 cbase, half3 clum) +{ + half lbase = lumv3(cbase); + half llum = lumv3(clum); + half ldiff = llum - lbase; + half3 color = cbase + make_half3(ldiff); + return clip_color(color); +} + +// Take the base RGB color <cbase> and override its saturation with that of the RGB color <csat>. +// Then override the luminosity of the result with that of the RGB color <clum>. +half3 set_lum_sat(half3 cbase, half3 csat, half3 clum) +{ + half minbase = minv3(cbase); + half sbase = satv3(cbase); + half ssat = satv3(csat); + half3 color; + if (sbase > .0) + { + // Equivalent (modulo rounding errors) to setting the smallest (R,G,B) component to 0, the + // largest to <ssat>, and interpolating the "middle" component based on its original value + // relative to the smallest/largest. + color = (cbase - minbase) * ssat / sbase; + } + else + { + color = make_half3(.0); + } + return set_lum(color, clum); +} +#endif // ENABLE_HSL_BLEND_MODES + +half4 advanced_blend(half4 src, half4 dst, ushort mode) +{ + // The function f() operates on un-multiplied rgb values and dictates the look of the advanced + // blend equations. + half3 f = make_half3(.0); + switch (mode) + { + case BLEND_MODE_MULTIPLY: + f = src.xyz * dst.xyz; + break; + case BLEND_MODE_SCREEN: + f = src.xyz + dst.xyz - src.xyz * dst.xyz; + break; + case BLEND_MODE_OVERLAY: + { + for (int i = 0; i < 3; ++i) + { + if (dst[i] <= .5) + f[i] = 2. * src[i] * dst[i]; + else + f[i] = 1. - 2. * (1. - src[i]) * (1.
- dst[i]); + } + break; + } + case BLEND_MODE_DARKEN: + f = min(src.xyz, dst.xyz); + break; + case BLEND_MODE_LIGHTEN: + f = max(src.xyz, dst.xyz); + break; + case BLEND_MODE_COLORDODGE: + // ES3 spec, 4.5.1 Range and Precision: dividing a non-zero by 0 results in the + // appropriately signed IEEE Inf. + f = mix(min(dst.xyz / (1. - src.xyz), make_half3(1.)), + make_half3(.0), + lessThanEqual(dst.xyz, make_half3(.0))); + break; + case BLEND_MODE_COLORBURN: + // ES3 spec, 4.5.1 Range and Precision: dividing a non-zero by 0 results in the + // appropriately signed IEEE Inf. + f = mix(1. - min((1. - dst.xyz) / src.xyz, 1.), + make_half3(1., 1., 1.), + greaterThanEqual(dst.xyz, make_half3(1.))); + break; + case BLEND_MODE_HARDLIGHT: + { + for (int i = 0; i < 3; ++i) + { + if (src[i] <= .5) + f[i] = 2. * src[i] * dst[i]; + else + f[i] = 1. - 2. * (1. - src[i]) * (1. - dst[i]); + } + break; + } + case BLEND_MODE_SOFTLIGHT: + { + for (int i = 0; i < 3; ++i) + { + if (src[i] <= 0.5) + f[i] = dst[i] - (1. - 2. * src[i]) * dst[i] * (1. - dst[i]); + else if (dst[i] <= .25) + f[i] = + dst[i] + (2. * src[i] - 1.) * dst[i] * ((16. * dst[i] - 12.) * dst[i] + 3.); + else + f[i] = dst[i] + (2. * src[i] - 1.) * (sqrt(dst[i]) - dst[i]); + } + break; + } + case BLEND_MODE_DIFFERENCE: + f = abs(dst.xyz - src.xyz); + break; + case BLEND_MODE_EXCLUSION: + f = src.xyz + dst.xyz - 2. * src.xyz * dst.xyz; + break; +#ifdef ENABLE_HSL_BLEND_MODES + // The HSL blend equations are only well defined when the values of the input color + // components are in the range [0..1]. + case BLEND_MODE_HUE: + if (ENABLE_HSL_BLEND_MODES) + { + src.xyz = clamp(src.xyz, make_half3(.0), make_half3(1.)); + f = set_lum_sat(src.xyz, dst.xyz, dst.xyz); + } + break; + case BLEND_MODE_SATURATION: + if (ENABLE_HSL_BLEND_MODES) + { + src.xyz = clamp(src.xyz, make_half3(.0), make_half3(1.)); + f = set_lum_sat(dst.xyz, src.xyz, dst.xyz); + } + break; + case BLEND_MODE_COLOR: + if (ENABLE_HSL_BLEND_MODES) + { + src.xyz = clamp(src.xyz, make_half3(.0), make_half3(1.)); + f = set_lum(src.xyz, dst.xyz); + } + break; + case BLEND_MODE_LUMINOSITY: + if (ENABLE_HSL_BLEND_MODES) + { + src.xyz = clamp(src.xyz, make_half3(.0), make_half3(1.)); + f = set_lum(dst.xyz, src.xyz); + } + break; +#endif + } + + // The weighting functions p0, p1, and p2 are defined as follows: + // + // p0(As,Ad) = As*Ad + // p1(As,Ad) = As*(1-Ad) + // p2(As,Ad) = Ad*(1-As) + // + half3 p = make_half3(src.w * dst.w, src.w * (1. - dst.w), (1. - src.w) * dst.w); + + // When using one of these equations, blending is performed according to the following + // equations: + // + // R = f(Rs',Rd')*p0(As,Ad) + Y*Rs'*p1(As,Ad) + Z*Rd'*p2(As,Ad) + // G = f(Gs',Gd')*p0(As,Ad) + Y*Gs'*p1(As,Ad) + Z*Gd'*p2(As,Ad) + // B = f(Bs',Bd')*p0(As,Ad) + Y*Bs'*p1(As,Ad) + Z*Bd'*p2(As,Ad) + // A = X*p0(As,Ad) + Y*p1(As,Ad) + Z*p2(As,Ad) + // + // NOTE: (X,Y,Z) always == (1,1,1), so it is ignored in this implementation. 
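+ // Illustrative note (ours, not from the Rive source): make_half3x4() packs the columns + // (f, 1), (src.xyz, 1), and (dst.xyz, 1), so this one matrix-vector product with + // p = (p0, p1, p2) evaluates all four equations at once: + // rgb = f*p0 + src'*p1 + dst'*p2 and A = p0 + p1 + p2.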
+ return MUL(make_half3x4(f, 1., src.xyz, 1., dst.xyz, 1.), p); +} +#endif // ENABLE_ADVANCED_BLEND + +#endif // FRAGMENT diff --git a/Shaders/Private/Rive/Generated/atomic_draw.minified.ush b/Shaders/Private/Rive/Generated/atomic_draw.minified.ush new file mode 100644 index 00000000..000e3184 --- /dev/null +++ b/Shaders/Private/Rive/Generated/atomic_draw.minified.ush @@ -0,0 +1,738 @@ +/* + * Copyright 2023 Rive + */ + +#ifdef DRAW_PATH +#ifdef VERTEX +ATTR_BLOCK_BEGIN(Attrs) +ATTR(0, float4, _EXPORTED_a_patchVertexData); // [localVertexID, outset, fillCoverage, vertexType] +ATTR(1, float4, _EXPORTED_a_mirroredVertexData); +ATTR_BLOCK_END +#endif + +VARYING_BLOCK_BEGIN +NO_PERSPECTIVE VARYING(0, half2, v_edgeDistance); +FLAT VARYING(1, ushort, v_pathID); +VARYING_BLOCK_END + +#ifdef VERTEX +VERTEX_MAIN(_EXPORTED_drawVertexMain, Attrs, attrs, _vertexID, _instanceID) +{ + ATTR_UNPACK(_vertexID, attrs, _EXPORTED_a_patchVertexData, float4); + ATTR_UNPACK(_vertexID, attrs, _EXPORTED_a_mirroredVertexData, float4); + + VARYING_INIT(v_edgeDistance, half2); + VARYING_INIT(v_pathID, ushort); + + float4 pos; + float2 vertexPosition; + if (unpack_tessellated_path_vertex(_EXPORTED_a_patchVertexData, + _EXPORTED_a_mirroredVertexData, + _instanceID, + v_pathID, + vertexPosition, + v_edgeDistance VERTEX_CONTEXT_UNPACK)) + { + pos = RENDER_TARGET_COORD_TO_CLIP_COORD(vertexPosition); + } + else + { + pos = float4(uniforms.vertexDiscardValue, + uniforms.vertexDiscardValue, + uniforms.vertexDiscardValue, + uniforms.vertexDiscardValue); + } + + VARYING_PACK(v_edgeDistance); + VARYING_PACK(v_pathID); + EMIT_VERTEX(pos); +} +#endif // VERTEX +#endif // DRAW_PATH + +#ifdef DRAW_INTERIOR_TRIANGLES +#ifdef VERTEX +ATTR_BLOCK_BEGIN(Attrs) +ATTR(0, packed_float3, _EXPORTED_a_triangleVertex); +ATTR_BLOCK_END +#endif + +VARYING_BLOCK_BEGIN +OPTIONALLY_FLAT VARYING(0, half, v_windingWeight); +FLAT VARYING(1, ushort, v_pathID); +VARYING_BLOCK_END + +#ifdef VERTEX +VERTEX_MAIN(_EXPORTED_drawVertexMain, Attrs, attrs, _vertexID, _instanceID) +{ + ATTR_UNPACK(_vertexID, attrs, _EXPORTED_a_triangleVertex, float3); + + VARYING_INIT(v_windingWeight, half); + VARYING_INIT(v_pathID, ushort); + + float2 vertexPosition = unpack_interior_triangle_vertex(_EXPORTED_a_triangleVertex, + v_pathID, + v_windingWeight VERTEX_CONTEXT_UNPACK); + float4 pos = RENDER_TARGET_COORD_TO_CLIP_COORD(vertexPosition); + + VARYING_PACK(v_windingWeight); + VARYING_PACK(v_pathID); + EMIT_VERTEX(pos); +} +#endif // VERTEX +#endif // DRAW_INTERIOR_TRIANGLES + +#ifdef DRAW_IMAGE +#ifdef DRAW_IMAGE_RECT +#ifdef VERTEX +ATTR_BLOCK_BEGIN(Attrs) +ATTR(0, float4, _EXPORTED_a_imageRectVertex); +ATTR_BLOCK_END +#endif + +VARYING_BLOCK_BEGIN +NO_PERSPECTIVE VARYING(0, float2, v_texCoord); +NO_PERSPECTIVE VARYING(1, half, v_edgeCoverage); +#ifdef ENABLE_CLIP_RECT +NO_PERSPECTIVE VARYING(2, float4, v_clipRect); +#endif +VARYING_BLOCK_END + +#ifdef VERTEX +VERTEX_TEXTURE_BLOCK_BEGIN +VERTEX_TEXTURE_BLOCK_END + +VERTEX_STORAGE_BUFFER_BLOCK_BEGIN +VERTEX_STORAGE_BUFFER_BLOCK_END + +IMAGE_RECT_VERTEX_MAIN(_EXPORTED_drawVertexMain, Attrs, attrs, _vertexID, _instanceID) +{ + ATTR_UNPACK(_vertexID, attrs, _EXPORTED_a_imageRectVertex, float4); + + VARYING_INIT(v_texCoord, float2); + VARYING_INIT(v_edgeCoverage, half); +#ifdef ENABLE_CLIP_RECT + VARYING_INIT(v_clipRect, float4); +#endif + + bool isOuterVertex = _EXPORTED_a_imageRectVertex.z == .0 || _EXPORTED_a_imageRectVertex.w == .0; + v_edgeCoverage = isOuterVertex ? 
.0 : 1.; + + float2 vertexPosition = _EXPORTED_a_imageRectVertex.xy; + float2x2 M = make_float2x2(imageDrawUniforms.viewMatrix); + float2x2 MIT = transpose(inverse(M)); + if (!isOuterVertex) + { + // Inset the inner vertices to the point where coverage == 1. + // NOTE: if width/height ever change from 1, these equations need to be updated. + float aaRadiusX = AA_RADIUS * manhattan_width(MIT[1]) / dot(M[1], MIT[1]); + if (aaRadiusX >= .5) + { + vertexPosition.x = .5; + v_edgeCoverage *= cast_float_to_half(.5 / aaRadiusX); + } + else + { + vertexPosition.x += aaRadiusX * _EXPORTED_a_imageRectVertex.z; + } + float aaRadiusY = AA_RADIUS * manhattan_width(MIT[0]) / dot(M[0], MIT[0]); + if (aaRadiusY >= .5) + { + vertexPosition.y = .5; + v_edgeCoverage *= cast_float_to_half(.5 / aaRadiusY); + } + else + { + vertexPosition.y += aaRadiusY * _EXPORTED_a_imageRectVertex.w; + } + } + + v_texCoord = vertexPosition; + vertexPosition = MUL(M, vertexPosition) + imageDrawUniforms.translate; + + if (isOuterVertex) + { + // Outset the outer vertices to the point where coverage == 0. + float2 n = MUL(MIT, _EXPORTED_a_imageRectVertex.zw); + n *= manhattan_width(n) / dot(n, n); + vertexPosition += AA_RADIUS * n; + } + +#ifdef ENABLE_CLIP_RECT + if (ENABLE_CLIP_RECT) + { + v_clipRect = find_clip_rect_coverage_distances( + make_float2x2(imageDrawUniforms.clipRectInverseMatrix), + imageDrawUniforms.clipRectInverseTranslate, + vertexPosition); + } +#endif + + float4 pos = RENDER_TARGET_COORD_TO_CLIP_COORD(vertexPosition); + + VARYING_PACK(v_texCoord); + VARYING_PACK(v_edgeCoverage); +#ifdef ENABLE_CLIP_RECT + VARYING_PACK(v_clipRect); +#endif + EMIT_VERTEX(pos); +} +#endif // VERTEX + +#else // DRAW_IMAGE_RECT -> DRAW_IMAGE_MESH +#ifdef VERTEX +ATTR_BLOCK_BEGIN(PositionAttr) +ATTR(0, float2, _EXPORTED_a_position); +ATTR_BLOCK_END + +ATTR_BLOCK_BEGIN(UVAttr) +ATTR(1, float2, _EXPORTED_a_texCoord); +ATTR_BLOCK_END +#endif + +VARYING_BLOCK_BEGIN +NO_PERSPECTIVE VARYING(0, float2, v_texCoord); +#ifdef ENABLE_CLIP_RECT +NO_PERSPECTIVE VARYING(1, float4, v_clipRect); +#endif +VARYING_BLOCK_END + +#ifdef VERTEX +IMAGE_MESH_VERTEX_MAIN(_EXPORTED_drawVertexMain, PositionAttr, position, UVAttr, uv, _vertexID) +{ + ATTR_UNPACK(_vertexID, position, _EXPORTED_a_position, float2); + ATTR_UNPACK(_vertexID, uv, _EXPORTED_a_texCoord, float2); + + VARYING_INIT(v_texCoord, float2); +#ifdef ENABLE_CLIP_RECT + VARYING_INIT(v_clipRect, float4); +#endif + + float2x2 M = make_float2x2(imageDrawUniforms.viewMatrix); + float2 vertexPosition = MUL(M, _EXPORTED_a_position) + imageDrawUniforms.translate; + v_texCoord = _EXPORTED_a_texCoord; + +#ifdef ENABLE_CLIP_RECT + if (ENABLE_CLIP_RECT) + { + v_clipRect = find_clip_rect_coverage_distances( + make_float2x2(imageDrawUniforms.clipRectInverseMatrix), + imageDrawUniforms.clipRectInverseTranslate, + vertexPosition); + } +#endif + + float4 pos = RENDER_TARGET_COORD_TO_CLIP_COORD(vertexPosition); + + VARYING_PACK(v_texCoord); +#ifdef ENABLE_CLIP_RECT + VARYING_PACK(v_clipRect); +#endif + EMIT_VERTEX(pos); +} +#endif // VERTEX +#endif // DRAW_IMAGE_MESH +#endif // DRAW_IMAGE + +#ifdef DRAW_RENDER_TARGET_UPDATE_BOUNDS +#ifdef VERTEX +ATTR_BLOCK_BEGIN(Attrs) +ATTR_BLOCK_END +#endif // VERTEX + +VARYING_BLOCK_BEGIN +VARYING_BLOCK_END + +#ifdef VERTEX +VERTEX_TEXTURE_BLOCK_BEGIN +VERTEX_TEXTURE_BLOCK_END + +VERTEX_STORAGE_BUFFER_BLOCK_BEGIN +VERTEX_STORAGE_BUFFER_BLOCK_END + +VERTEX_MAIN(_EXPORTED_drawVertexMain, Attrs, attrs, _vertexID, _instanceID) +{ + int2 coord; + coord.x = (_vertexID & 1) == 0 ? 
uniforms.renderTargetUpdateBounds.x + : uniforms.renderTargetUpdateBounds.z; + coord.y = (_vertexID & 2) == 0 ? uniforms.renderTargetUpdateBounds.y + : uniforms.renderTargetUpdateBounds.w; + float4 pos = RENDER_TARGET_COORD_TO_CLIP_COORD(float2(coord)); + EMIT_VERTEX(pos); +} +#endif // VERTEX +#endif // DRAW_RENDER_TARGET_UPDATE_BOUNDS + +#ifdef ENABLE_BINDLESS_TEXTURES +#define NEEDS_IMAGE_TEXTURE +#endif +#ifdef DRAW_IMAGE +#define NEEDS_IMAGE_TEXTURE +#endif + +#ifdef FRAGMENT +FRAG_TEXTURE_BLOCK_BEGIN +TEXTURE_RGBA8(PER_FLUSH_BINDINGS_SET, GRAD_TEXTURE_IDX, _EXPORTED_gradTexture); +#ifdef NEEDS_IMAGE_TEXTURE +TEXTURE_RGBA8(PER_DRAW_BINDINGS_SET, IMAGE_TEXTURE_IDX, _EXPORTED_imageTexture); +#endif +FRAG_TEXTURE_BLOCK_END + +SAMPLER_LINEAR(GRAD_TEXTURE_IDX, gradSampler) +#ifdef NEEDS_IMAGE_TEXTURE +SAMPLER_MIPMAP(IMAGE_TEXTURE_IDX, imageSampler) +#endif + +PLS_BLOCK_BEGIN +// We only write the framebuffer as a storage texture when there are blend modes. Otherwise, we +// render to it as a normal color attachment. +#ifndef FIXED_FUNCTION_COLOR_BLEND +#ifdef COLOR_PLANE_IDX_OVERRIDE +// D3D11 doesn't let us bind the framebuffer UAV to slot 0 when there is a color output. +PLS_DECL4F(COLOR_PLANE_IDX_OVERRIDE, colorBuffer); +#else +PLS_DECL4F(COLOR_PLANE_IDX, colorBuffer); +#endif +#endif +#ifdef ENABLE_CLIPPING +PLS_DECLUI(CLIP_PLANE_IDX, clipBuffer); +#endif +PLS_DECLUI_ATOMIC(COVERAGE_PLANE_IDX, coverageCountBuffer); +PLS_BLOCK_END + +FRAG_STORAGE_BUFFER_BLOCK_BEGIN +STORAGE_BUFFER_U32x2(PAINT_BUFFER_IDX, PaintBuffer, _EXPORTED_paintBuffer); +STORAGE_BUFFER_F32x4(PAINT_AUX_BUFFER_IDX, PaintAuxBuffer, _EXPORTED_paintAuxBuffer); +FRAG_STORAGE_BUFFER_BLOCK_END + +uint to_fixed(float x) { return uint(x * FIXED_COVERAGE_FACTOR + FIXED_COVERAGE_ZERO); } + +half from_fixed(uint x) +{ + return cast_float_to_half(float(x) * FIXED_COVERAGE_INVERSE_FACTOR + + (-FIXED_COVERAGE_ZERO * FIXED_COVERAGE_INVERSE_FACTOR)); +} + +// Return the color of the path at index 'pathID' at location '_fragCoord'. +// Also update the PLS clip value if called for. +half4 resolve_path_color(half coverageCount, + uint2 paintData, + uint pathID FRAGMENT_CONTEXT_DECL PLS_CONTEXT_DECL, + OUT(uint) clipData, + bool needsClipData) +{ + clipData = 0u; + half coverage = abs(coverageCount); +#ifdef ENABLE_EVEN_ODD + if (ENABLE_EVEN_ODD && (paintData.x & PAINT_FLAG_EVEN_ODD) != 0u) + { + coverage = 1. - abs(fract(coverage * .5) * 2. + -1.); + } +#endif // ENABLE_EVEN_ODD + coverage = min(coverage, make_half(1.)); // This also caps stroke coverage, which can be >1. +#ifdef ENABLE_CLIPPING + if (ENABLE_CLIPPING) + { + uint clipID = paintData.x >> 16u; + if (clipID != 0u || needsClipData) + { + clipData = PLS_LOADUI(clipBuffer); + } + if (clipID != 0u) + { + half clipCoverage = clipID == (clipData >> 16u) ? 
unpackHalf2x16(clipData).x : .0; + coverage = min(coverage, clipCoverage); + } + } +#endif // ENABLE_CLIPPING + half4 color = make_half4(.0); + uint paintType = paintData.x & 0xfu; + switch (paintType) + { + case SOLID_COLOR_PAINT_TYPE: + color = unpackUnorm4x8(paintData.y); +#ifdef ENABLE_CLIPPING + if (ENABLE_CLIPPING) + { + PLS_PRESERVE_UI(clipBuffer); + } +#endif + break; + case LINEAR_GRADIENT_PAINT_TYPE: + case RADIAL_GRADIENT_PAINT_TYPE: +#ifdef ENABLE_BINDLESS_TEXTURES + case IMAGE_PAINT_TYPE: +#endif // ENABLE_BINDLESS_TEXTURES + { + float2x2 M = make_float2x2(STORAGE_BUFFER_LOAD4(_EXPORTED_paintAuxBuffer, pathID * 4u)); + float4 translate = STORAGE_BUFFER_LOAD4(_EXPORTED_paintAuxBuffer, pathID * 4u + 1u); + float2 paintCoord = MUL(M, _fragCoord) + translate.xy; +#ifdef ENABLE_BINDLESS_TEXTURES + if (paintType == IMAGE_PAINT_TYPE) + { + color = TEXTURE_SAMPLE_GRAD(sampler2D(floatBitsToUint(translate.zw)), + imageSampler, + paintCoord, + M[0], + M[1]); + float opacity = uintBitsToFloat(paintData.y); + color.w *= opacity; + } + else +#endif // ENABLE_BINDLESS_TEXTURES + { + float t = paintType == LINEAR_GRADIENT_PAINT_TYPE ? /*linear*/ paintCoord.x + : /*radial*/ length(paintCoord); + t = clamp(t, .0, 1.); + float x = t * translate.z + translate.w; + float y = uintBitsToFloat(paintData.y); + color = TEXTURE_SAMPLE_LOD(_EXPORTED_gradTexture, gradSampler, float2(x, y), .0); + } +#ifdef ENABLE_CLIPPING + if (ENABLE_CLIPPING) + { + PLS_PRESERVE_UI(clipBuffer); + } +#endif + break; + } +#ifdef ENABLE_CLIPPING + case CLIP_UPDATE_PAINT_TYPE: + if (ENABLE_CLIPPING) + { + clipData = paintData.y | packHalf2x16(make_half2(coverage, .0)); + PLS_STOREUI(clipBuffer, clipData); + } + break; +#endif // ENABLE_CLIPPING + } +#ifdef ENABLE_CLIP_RECT + if (ENABLE_CLIP_RECT && (paintData.x & PAINT_FLAG_HAS_CLIP_RECT) != 0u) + { + float2x2 M = make_float2x2(STORAGE_BUFFER_LOAD4(_EXPORTED_paintAuxBuffer, pathID * 4u + 2u)); + float4 translate = STORAGE_BUFFER_LOAD4(_EXPORTED_paintAuxBuffer, pathID * 4u + 3u); + float2 clipCoord = MUL(M, _fragCoord) + translate.xy; + // translate.zw contains -1 / fwidth(clipCoord), which we use to calculate antialiasing. + half2 distXY = cast_float2_to_half2(abs(clipCoord) * translate.zw - translate.zw); + half clipRectCoverage = clamp(min(distXY.x, distXY.y) + .5, .0, 1.); + coverage = min(coverage, clipRectCoverage); + } +#endif // ENABLE_CLIP_RECT + color.w *= coverage; + return color; +} + +half4 blend_src_over(half4 srcColorPremul, half4 dstColorPremul) +{ + return srcColorPremul + dstColorPremul * (1. 
- srcColorPremul.w); +} + +#ifndef FIXED_FUNCTION_COLOR_BLEND +half4 blend(half4 srcColorUnmul, half4 dstColorPremul, ushort blendMode) +{ +#ifdef ENABLE_ADVANCED_BLEND + if (ENABLE_ADVANCED_BLEND && blendMode != BLEND_SRC_OVER) + { + return advanced_blend(srcColorUnmul, unmultiply(dstColorPremul), blendMode); + } + else +#endif // ENABLE_ADVANCED_BLEND + { + return blend_src_over(premultiply(srcColorUnmul), dstColorPremul); + } +} + +half4 do_pls_blend(half4 color, uint2 paintData PLS_CONTEXT_DECL) +{ + half4 dstColorPremul = PLS_LOAD4F(colorBuffer); + ushort blendMode = cast_uint_to_ushort((paintData.x >> 4) & 0xfu); + return blend(color, dstColorPremul, blendMode); +} + +void write_pls_blend(half4 color, uint2 paintData PLS_CONTEXT_DECL) +{ + if (color.w != .0) + { + half4 blendedColor = do_pls_blend(color, paintData PLS_CONTEXT_UNPACK); + PLS_STORE4F(colorBuffer, blendedColor); + } + else + { + PLS_PRESERVE_4F(colorBuffer); + } +} +#endif // !FIXED_FUNCTION_COLOR_BLEND + +#ifdef FIXED_FUNCTION_COLOR_BLEND +#define ATOMIC_PLS_MAIN PLS_FRAG_COLOR_MAIN +#define ATOMIC_PLS_MAIN_WITH_IMAGE_UNIFORMS PLS_FRAG_COLOR_MAIN_WITH_IMAGE_UNIFORMS +#define EMIT_ATOMIC_PLS EMIT_PLS_AND_FRAG_COLOR +#else // !FIXED_FUNCTION_COLOR_BLEND +#define ATOMIC_PLS_MAIN PLS_MAIN +#define ATOMIC_PLS_MAIN_WITH_IMAGE_UNIFORMS PLS_MAIN_WITH_IMAGE_UNIFORMS +#define EMIT_ATOMIC_PLS EMIT_PLS +#endif + +#ifdef DRAW_PATH +ATOMIC_PLS_MAIN(_EXPORTED_drawFragmentMain) +{ + VARYING_UNPACK(v_edgeDistance, half2); + VARYING_UNPACK(v_pathID, ushort); + + half coverage = min(min(v_edgeDistance.x, abs(v_edgeDistance.y)), make_half(1.)); + + // Since v_pathID increases monotonically with every draw, and since it lives in the most + // significant bits of the coverage data, an atomic max() function will serve 3 purposes: + // + // * The invocation that changes the pathID is the single first fragment invocation to + // hit the new path, and the one that should resolve the previous path in the framebuffer. + // * Properly resets coverage to zero when we do cross over into processing a new path. + // * Accumulates coverage for strokes. + // + uint fixedCoverage = to_fixed(coverage); + uint minCoverageData = (make_uint(v_pathID) << 16) | fixedCoverage; + uint lastCoverageData = PLS_ATOMIC_MAX(coverageCountBuffer, minCoverageData); + ushort lastPathID = cast_uint_to_ushort(lastCoverageData >> 16); + if (lastPathID != v_pathID) + { + // We crossed into a new path! Resolve the previous path now that we know its exact + // coverage. + half coverageCount = from_fixed(lastCoverageData & 0xffffu); + uint2 paintData = STORAGE_BUFFER_LOAD2(_EXPORTED_paintBuffer, lastPathID); + uint clipData; + half4 color = resolve_path_color(coverageCount, + paintData, + lastPathID FRAGMENT_CONTEXT_UNPACK PLS_CONTEXT_UNPACK, + clipData, + /*needsClipData=*/false); +#ifdef FIXED_FUNCTION_COLOR_BLEND + _fragColor = premultiply(color); +#else + write_pls_blend(color, paintData PLS_CONTEXT_UNPACK); +#endif // FIXED_FUNCTION_COLOR_BLEND + } + else + { + if (v_edgeDistance.y < .0 /*fill?*/) + { + // We're a fill, and we did not cross into the new path this time. Count coverage. + if (lastCoverageData < minCoverageData) + { + // We already crossed into this path. Oops. Undo the effect of the min(). + fixedCoverage += lastCoverageData - minCoverageData; + } + fixedCoverage -= uint(FIXED_COVERAGE_ZERO); // Only apply the zero bias once. 
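+ // Illustrative note (ours, not from the Rive source): to_fixed() encodes coverage as + // x * FIXED_COVERAGE_FACTOR + FIXED_COVERAGE_ZERO, and the value already in + // coverageCountBuffer carries one copy of that bias, so the add below must contribute a + // pure delta of coverage * FIXED_COVERAGE_FACTOR. Stripping the bias here keeps + // biased + delta == to_fixed(oldCoverage + coverage).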
+ PLS_ATOMIC_ADD(coverageCountBuffer, fixedCoverage); + } + // Discard because some PLS implementations require that we assign values to the color & + // clip attachments, but since we aren't raster ordered, we don't have values to assign. + discard; + } + + EMIT_ATOMIC_PLS +} +#endif // DRAW_PATH + +#ifdef DRAW_INTERIOR_TRIANGLES +ATOMIC_PLS_MAIN(_EXPORTED_drawFragmentMain) +{ + VARYING_UNPACK(v_windingWeight, half); + VARYING_UNPACK(v_pathID, ushort); + + half coverage = v_windingWeight; + + uint lastCoverageData = PLS_LOADUI_ATOMIC(coverageCountBuffer); + ushort lastPathID = cast_uint_to_ushort(lastCoverageData >> 16); + half lastCoverageCount = from_fixed(lastCoverageData & 0xffffu); + if (lastPathID != v_pathID) + { + // We crossed into a new path! Resolve the previous path now that we know its exact + // coverage. + uint2 paintData = STORAGE_BUFFER_LOAD2(_EXPORTED_paintBuffer, lastPathID); + uint clipData; + half4 color = resolve_path_color(lastCoverageCount, + paintData, + lastPathID FRAGMENT_CONTEXT_UNPACK PLS_CONTEXT_UNPACK, + clipData, + /*needsClipData=*/false); +#ifdef FIXED_FUNCTION_COLOR_BLEND + _fragColor = premultiply(color); +#else + write_pls_blend(color, paintData PLS_CONTEXT_UNPACK); +#endif // FIXED_FUNCTION_COLOR_BLEND + } + else + { + coverage += lastCoverageCount; + } + + PLS_STOREUI_ATOMIC(coverageCountBuffer, (make_uint(v_pathID) << 16) | to_fixed(coverage)); + + if (lastPathID == v_pathID) + { + // Discard because some PLS implementations require that we assign values to the color & + // clip attachments, but since we aren't raster ordered, we don't have values to assign. + discard; + } + + EMIT_ATOMIC_PLS +} +#endif // DRAW_INTERIOR_TRIANGLES + +#ifdef DRAW_IMAGE +ATOMIC_PLS_MAIN_WITH_IMAGE_UNIFORMS(_EXPORTED_drawFragmentMain) +{ + VARYING_UNPACK(v_texCoord, float2); +#ifdef DRAW_IMAGE_RECT + VARYING_UNPACK(v_edgeCoverage, half); +#endif +#ifdef ENABLE_CLIP_RECT + VARYING_UNPACK(v_clipRect, float4); +#endif + + // Start by finding the image color. We have to do this immediately instead of allowing it to + // get resolved later like other draws because the @imageTexture binding is liable to change, + // and furthermore in the case of imageMeshes, we can't calculate UV coordinates based on + // fragment position. + half4 imageColor = TEXTURE_SAMPLE(_EXPORTED_imageTexture, imageSampler, v_texCoord); + half meshCoverage = 1.; +#ifdef DRAW_IMAGE_RECT + meshCoverage = min(v_edgeCoverage, meshCoverage); +#endif +#ifdef ENABLE_CLIP_RECT + if (ENABLE_CLIP_RECT) + { + half clipRectCoverage = min_value(cast_float4_to_half4(v_clipRect)); + meshCoverage = clamp(clipRectCoverage, make_half(.0), meshCoverage); + } +#endif + +#ifdef DRAW_IMAGE_MESH + // TODO: If we care: Use the interlock if we can, since individual meshes may shimmer if they + // have overlapping triangles. + PLS_INTERLOCK_BEGIN; +#endif + + // Find the previous path color. (This might also update the clip buffer.) + // TODO: skip this step if no clipping AND srcOver AND imageColor is solid. 
+ uint lastCoverageData = PLS_LOADUI_ATOMIC(coverageCountBuffer); + half coverageCount = from_fixed(lastCoverageData & 0xffffu); + ushort lastPathID = cast_uint_to_ushort(lastCoverageData >> 16); + uint2 lastPaintData = STORAGE_BUFFER_LOAD2(_EXPORTED_paintBuffer, lastPathID); + uint clipData; + half4 lastColor = resolve_path_color(coverageCount, + lastPaintData, + lastPathID FRAGMENT_CONTEXT_UNPACK PLS_CONTEXT_UNPACK, + clipData, + /*needsClipData=*/true); + + // Clip the image after resolving the previous path, since that can affect the clip buffer. +#ifdef ENABLE_CLIPPING // TODO! ENABLE_IMAGE_CLIPPING in addition to ENABLE_CLIPPING? + if (ENABLE_CLIPPING && imageDrawUniforms.clipID != 0u) + { + uint clipID = clipData >> 16; + half clipCoverage = clipID == imageDrawUniforms.clipID ? unpackHalf2x16(clipData).x : .0; + meshCoverage = min(meshCoverage, clipCoverage); + } +#endif // ENABLE_CLIPPING + imageColor.w *= meshCoverage * cast_float_to_half(imageDrawUniforms.opacity); + +#ifdef FIXED_FUNCTION_COLOR_BLEND + // Leverage the property that premultiplied src-over blending is associative and blend the + // imageColor and lastColor before passing them on to the blending pipeline. + _fragColor = blend_src_over(premultiply(imageColor), premultiply(lastColor)); +#else + if (lastColor.w != .0 || imageColor.w != .0) + { + // Blend the previous path and image both in a single operation. + // TODO: Are advanced blend modes associative? srcOver is, so at least there we can blend + // lastColor and imageColor first, and potentially avoid a framebuffer load if it ends up + // opaque. + half4 dstColorPremul = PLS_LOAD4F(colorBuffer); + ushort lastBlendMode = cast_uint_to_ushort((lastPaintData.x >> 4) & 0xfu); + ushort imageBlendMode = cast_uint_to_ushort(imageDrawUniforms.blendMode); + dstColorPremul = blend(lastColor, dstColorPremul, lastBlendMode); + imageColor = blend(imageColor, dstColorPremul, imageBlendMode); + PLS_STORE4F(colorBuffer, imageColor); + } + else + { + PLS_PRESERVE_4F(colorBuffer); + } +#endif // FIXED_FUNCTION_COLOR_BLEND + + // Write out a coverage value of "zero at pathID=0" so a future resolve attempt doesn't affect + // this pixel. + PLS_STOREUI_ATOMIC(coverageCountBuffer, uint(FIXED_COVERAGE_ZERO)); + +#ifdef DRAW_IMAGE_MESH + // TODO: If we care: Use the interlock if we can, since individual meshes may shimmer if they + // have overlapping triangles. 
+ PLS_INTERLOCK_END; +#endif + + EMIT_ATOMIC_PLS +} +#endif // DRAW_IMAGE + +#ifdef INITIALIZE_PLS + +ATOMIC_PLS_MAIN(_EXPORTED_drawFragmentMain) +{ +#ifdef STORE_COLOR_CLEAR + PLS_STORE4F(colorBuffer, unpackUnorm4x8(uniforms.colorClearValue)); +#endif +#ifdef SWIZZLE_COLOR_BGRA_TO_RGBA + half4 color = PLS_LOAD4F(colorBuffer); + PLS_STORE4F(colorBuffer, color.zyxw); +#endif + PLS_STOREUI_ATOMIC(coverageCountBuffer, uniforms.coverageClearValue); +#ifdef ENABLE_CLIPPING + if (ENABLE_CLIPPING) + { + PLS_STOREUI(clipBuffer, 0u); + } +#endif +#ifdef FIXED_FUNCTION_COLOR_BLEND + discard; +#endif + EMIT_ATOMIC_PLS +} + +#endif // INITIALIZE_PLS + +#ifdef RESOLVE_PLS + +#ifdef COALESCED_PLS_RESOLVE_AND_TRANSFER +PLS_FRAG_COLOR_MAIN(_EXPORTED_drawFragmentMain) +#else +ATOMIC_PLS_MAIN(_EXPORTED_drawFragmentMain) +#endif +{ + uint lastCoverageData = PLS_LOADUI_ATOMIC(coverageCountBuffer); + half coverageCount = from_fixed(lastCoverageData & 0xffffu); + ushort lastPathID = cast_uint_to_ushort(lastCoverageData >> 16); + uint2 paintData = STORAGE_BUFFER_LOAD2(_EXPORTED_paintBuffer, lastPathID); + uint clipData; + half4 color = resolve_path_color(coverageCount, + paintData, + lastPathID FRAGMENT_CONTEXT_UNPACK PLS_CONTEXT_UNPACK, + clipData, + false); +#ifdef COALESCED_PLS_RESOLVE_AND_TRANSFER + //color = make_half4(1.0,0.0,0.0,1.0); + _fragColor = do_pls_blend(color, paintData PLS_CONTEXT_UNPACK); + EMIT_PLS_AND_FRAG_COLOR +#else +#ifdef FIXED_FUNCTION_COLOR_BLEND + //color = make_half4(0.0,1.0,0.0,1.0); + _fragColor = premultiply(color); +#else + //color = make_half4(0.0,0.0,1.0,1.0); + write_pls_blend(color, paintData PLS_CONTEXT_UNPACK); +#endif // FIXED_FUNCTION_COLOR_BLEND + EMIT_ATOMIC_PLS +#endif // COALESCED_PLS_RESOLVE_AND_TRANSFER +} +#endif // RESOLVE_PLS +#endif // FRAGMENT diff --git a/Shaders/Private/Rive/Generated/blit_texture_as_draw.minified.ush b/Shaders/Private/Rive/Generated/blit_texture_as_draw.minified.ush new file mode 100644 index 00000000..475a15f9 --- /dev/null +++ b/Shaders/Private/Rive/Generated/blit_texture_as_draw.minified.ush @@ -0,0 +1,33 @@ +/* + * Copyright 2024 Rive + */ + +#ifdef VERTEX +VERTEX_TEXTURE_BLOCK_BEGIN +VERTEX_TEXTURE_BLOCK_END + +VERTEX_STORAGE_BUFFER_BLOCK_BEGIN +VERTEX_STORAGE_BUFFER_BLOCK_END + +VERTEX_MAIN(_EXPORTED_blitVertexMain, Attrs, attrs, _vertexID, _instanceID) +{ + // Fill the entire screen. The caller will use a scissor test to control the bounds being drawn. + float2 coord; + coord.x = (_vertexID & 1) == 0 ? -1. : 1.; + coord.y = (_vertexID & 2) == 0 ? -1. : 1.; + float4 pos = float4(coord, 0, 1); + EMIT_VERTEX(pos); +} +#endif + +#ifdef FRAGMENT +FRAG_TEXTURE_BLOCK_BEGIN +TEXTURE_RGBA8(PER_FLUSH_BINDINGS_SET, 0, _EXPORTED_blitTextureSource); +FRAG_TEXTURE_BLOCK_END + +FRAG_DATA_MAIN(half4, _EXPORTED_blitFragmentMain) +{ + half4 srcColor = TEXEL_FETCH(_EXPORTED_blitTextureSource, int2(floor(_fragCoord.xy))); + EMIT_FRAG_DATA(srcColor); +} +#endif // FRAGMENT diff --git a/Shaders/Private/Rive/Generated/color_ramp.minified.ush b/Shaders/Private/Rive/Generated/color_ramp.minified.ush new file mode 100644 index 00000000..76a9e6f2 --- /dev/null +++ b/Shaders/Private/Rive/Generated/color_ramp.minified.ush @@ -0,0 +1,80 @@ +/* + * Copyright 2022 Rive + */ + +// This shader draws horizontal color ramps into a gradient texture, which will later be sampled by +// the renderer for drawing gradients. 
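+// +// Illustrative note (ours, not part of the generated source): each instance expands one gradient +// "span" into a quad one texel row tall. For example, a span whose packed x range is +// [0x0000, 0x8000] (normalized [0, .5]) on row y covers the left half of that row and +// interpolates v_rampColor from color0 to color1 across it; a complete ramp is a sequence of such +// spans laid out along a single row of the gradient texture.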
+ +#ifdef VERTEX +ATTR_BLOCK_BEGIN(Attrs) +#ifdef SPLIT_UINT4_ATTRIBUTES +ATTR(0, uint, _EXPORTED_a_span_a); +ATTR(1, uint, _EXPORTED_a_span_b); +ATTR(2, uint, _EXPORTED_a_span_c); +ATTR(3, uint, _EXPORTED_a_span_d); +#else +ATTR(0, uint4, _EXPORTED_a_span); // [spanX, y, color0, color1] +#endif +ATTR_BLOCK_END +#endif + +VARYING_BLOCK_BEGIN +NO_PERSPECTIVE VARYING(0, half4, v_rampColor); +VARYING_BLOCK_END + +#ifdef VERTEX +VERTEX_TEXTURE_BLOCK_BEGIN +VERTEX_TEXTURE_BLOCK_END + +VERTEX_STORAGE_BUFFER_BLOCK_BEGIN +VERTEX_STORAGE_BUFFER_BLOCK_END + +half4 unpackColorInt(uint color) +{ + return cast_uint4_to_half4((uint4(color, color, color, color) >> uint4(16, 8, 0, 24)) & 0xffu) / + 255.; +} + +VERTEX_MAIN(_EXPORTED_colorRampVertexMain, Attrs, attrs, _vertexID, _instanceID) +{ + +#ifdef SPLIT_UINT4_ATTRIBUTES + ATTR_UNPACK(_instanceID, attrs, _EXPORTED_a_span_a, uint); + ATTR_UNPACK(_instanceID, attrs, _EXPORTED_a_span_b, uint); + ATTR_UNPACK(_instanceID, attrs, _EXPORTED_a_span_c, uint); + ATTR_UNPACK(_instanceID, attrs, _EXPORTED_a_span_d, uint); + uint4 _EXPORTED_a_span = uint4( _EXPORTED_a_span_a, _EXPORTED_a_span_b, _EXPORTED_a_span_c, _EXPORTED_a_span_d); + +#else + ATTR_UNPACK(_instanceID, attrs, _EXPORTED_a_span, uint4); + +#endif + VARYING_INIT(v_rampColor, half4); + + float x = float((_vertexID & 1) == 0 ? _EXPORTED_a_span.x & 0xffffu : _EXPORTED_a_span.x >> 16) / 65536.; + float offsetY = (_vertexID & 2) == 0 ? 1. : .0; + if (uniforms.gradInverseViewportY < .0) + { + // Make sure we always emit clockwise triangles. Swap the top and bottom vertices. + offsetY = 1. - offsetY; + } + v_rampColor = unpackColorInt((_vertexID & 1) == 0 ? _EXPORTED_a_span.z : _EXPORTED_a_span.w); + + float4 pos; + pos.x = x * 2. - 1.; + pos.y = (float(_EXPORTED_a_span.y) + offsetY) * uniforms.gradInverseViewportY - + sign(uniforms.gradInverseViewportY); + pos.zw = float2(0, 1); + + VARYING_PACK(v_rampColor); + EMIT_VERTEX(pos); +} +#endif + +#ifdef FRAGMENT +FRAG_DATA_MAIN(half4, _EXPORTED_colorRampFragmentMain) +{ + VARYING_UNPACK(v_rampColor, half4); + EMIT_FRAG_DATA(v_rampColor); +} +#endif diff --git a/Shaders/Private/Rive/Generated/common.minified.ush b/Shaders/Private/Rive/Generated/common.minified.ush new file mode 100644 index 00000000..ebe51006 --- /dev/null +++ b/Shaders/Private/Rive/Generated/common.minified.ush @@ -0,0 +1,243 @@ +/* + * Copyright 2022 Rive + */ + +// Common functions shared by multiple shaders. + +#ifdef USE_GENERATED_UNIFORMS +#include "/Engine/Generated/GeneratedUniformBuffers.ush" +#endif + +#define PI float(3.141592653589793238) + +#ifndef USING_DEPTH_STENCIL +#define AA_RADIUS float(.5) +#else +#define AA_RADIUS float(.0) +#endif + +#ifdef GLSL +// GLSL has different semantics around precision. Normalize type conversions across +// languages with "cast_*_to_*()" methods. 
+INLINE half cast_float_to_half(float x) { return x; } +INLINE half cast_uint_to_half(uint x) { return float(x); } +INLINE half cast_ushort_to_half(ushort x) { return float(x); } +INLINE half cast_int_to_half(int x) { return float(x); } +INLINE half4 cast_float4_to_half4(float4 xyzw) { return xyzw; } +INLINE half2 cast_float2_to_half2(float2 xy) { return xy; } +INLINE half4 cast_uint4_to_half4(uint4 xyzw) { return vec4(xyzw); } +INLINE ushort cast_half_to_ushort(half x) { return uint(x); } +INLINE ushort cast_uint_to_ushort(uint x) { return x; } +#else +INLINE half cast_float_to_half(float x) { return (half)x; } +INLINE half cast_uint_to_half(uint x) { return (half)x; } +INLINE half cast_ushort_to_half(ushort x) { return (half)x; } +INLINE half cast_int_to_half(int x) { return (half)x; } +INLINE half4 cast_float4_to_half4(float4 xyzw) { return (half4)xyzw; } +INLINE half2 cast_float2_to_half2(float2 xy) { return (half2)xy; } +INLINE half4 cast_uint4_to_half4(uint4 xyzw) { return (half4)xyzw; } +INLINE ushort cast_half_to_ushort(half x) { return (ushort)x; } +INLINE ushort cast_uint_to_ushort(uint x) { return (ushort)x; } +#endif + +INLINE half make_half(half x) { return x; } + +INLINE half2 make_half2(half2 xy) { return xy; } + +INLINE half2 make_half2(half x, half y) +{ + half2 ret; + ret.x = x, ret.y = y; + return ret; +} + +INLINE half3 make_half3(half x, half y, half z) +{ + half3 ret; + ret.x = x, ret.y = y, ret.z = z; + return ret; +} + +INLINE half3 make_half3(half x) +{ + half3 ret; + ret.x = x, ret.y = x, ret.z = x; + return ret; +} + +INLINE half4 make_half4(half x, half y, half z, half w) +{ + half4 ret; + ret.x = x, ret.y = y, ret.z = z, ret.w = w; + return ret; +} + +INLINE half4 make_half4(half3 xyz, half w) +{ + half4 ret; + ret.xyz = xyz; + ret.w = w; + return ret; +} + +INLINE half4 make_half4(half x) +{ + half4 ret; + ret.x = x, ret.y = x, ret.z = x, ret.w = x; + return ret; +} + +INLINE half3x4 make_half3x4(half3 a, half b, half3 c, half d, half3 e, half f) +{ + half3x4 ret; + ret[0] = make_half4(a, b); + ret[1] = make_half4(c, d); + ret[2] = make_half4(e, f); + return ret; +} + +INLINE float2x2 make_float2x2(float4 x) { return float2x2(x.xy, x.zw); } + +INLINE uint make_uint(ushort x) { return x; } + +INLINE uint contour_data_idx(uint contourIDWithFlags) +{ + return (contourIDWithFlags & CONTOUR_ID_MASK) - 1u; +} + +INLINE float2 unchecked_mix(float2 a, float2 b, float t) { return (b - a) * t + a; } + +INLINE half id_bits_to_f16(uint idBits, uint pathIDGranularity) +{ + return idBits == 0u ? 
.0 : unpackHalf2x16((idBits + MAX_DENORM_F16) * pathIDGranularity).x; +} + +INLINE float atan2(float2 v) +{ + float bias = .0; + if (abs(v.x) > abs(v.y)) + { + v = float2(v.y, -v.x); + bias = PI / 2.; + } + return atan(v.y, v.x) + bias; +} + +INLINE half4 premultiply(half4 color) { return make_half4(color.xyz * color.w, color.w); } + +INLINE half4 unmultiply(half4 color) +{ + if (color.w != .0) + color.xyz *= 1.0 / color.w; + return color; +} + +INLINE half min_value(half4 min4) +{ + half2 min2 = min(min4.xy, min4.zw); + half min1 = min(min2.x, min2.y); + return min1; +} + +INLINE float manhattan_width(float2 x) { return abs(x.x) + abs(x.y); } + +#ifdef VERTEX + +#ifndef USE_GENERATED_UNIFORMS +UNIFORM_BLOCK_BEGIN(FLUSH_UNIFORM_BUFFER_IDX, _EXPORTED_FlushUniforms) +float gradInverseViewportY; +float tessInverseViewportY; +float renderTargetInverseViewportX; +float renderTargetInverseViewportY; +uint renderTargetWidth; +uint renderTargetHeight; +uint colorClearValue; // Only used if clears are implemented as draws. +uint coverageClearValue; // Only used if clears are implemented as draws. +int4 renderTargetUpdateBounds; // drawBounds, or renderTargetBounds if there is a clear. (LTRB.) +uint pathIDGranularity; // Spacing between adjacent path IDs (1 if IEEE compliant). +float vertexDiscardValue; +UNIFORM_BLOCK_END(uniforms) +#endif + +#define RENDER_TARGET_COORD_TO_CLIP_COORD(COORD) \ + float4((COORD).x* uniforms.renderTargetInverseViewportX - 1., \ + (COORD).y * -uniforms.renderTargetInverseViewportY + \ + sign(uniforms.renderTargetInverseViewportY), \ + .0, \ + 1.) + +#ifndef USING_DEPTH_STENCIL +// Calculates the Manhattan distance in pixels from the given pixelPosition, to the point at each +// edge of the clipRect where coverage = 0. +// +// clipRectInverseMatrix transforms from pixel coordinates to a space where the clipRect is the +// normalized rectangle: [-1, -1, 1, 1]. +INLINE float4 find_clip_rect_coverage_distances(float2x2 clipRectInverseMatrix, + float2 clipRectInverseTranslate, + float2 pixelPosition) +{ + float2 clipRectAAWidth = abs(clipRectInverseMatrix[0]) + abs(clipRectInverseMatrix[1]); + if (clipRectAAWidth.x != .0 && clipRectAAWidth.y != .0) + { + float2 r = 1. / clipRectAAWidth; + float2 clipRectCoord = MUL(clipRectInverseMatrix, pixelPosition) + clipRectInverseTranslate; + // When the center of a pixel falls exactly on an edge, coverage should be .5. + const float coverageWhenDistanceIsZero = .5; + return float4(clipRectCoord, -clipRectCoord) * r.xyxy + r.xyxy + coverageWhenDistanceIsZero; + } + else + { + // The caller gave us a singular clipRectInverseMatrix. This is a special case where we are + // expected to use tx and ty as uniform coverage. + return clipRectInverseTranslate.xyxy; + } +} + +#else // USING_DEPTH_STENCIL + +INLINE float normalize_z_index(uint zIndex) { return 1. - float(zIndex) * (2. / 32768.); } + +#ifdef ENABLE_CLIP_RECT +INLINE void set_clip_rect_plane_distances(float2x2 clipRectInverseMatrix, + float2 clipRectInverseTranslate, + float2 pixelPosition) +{ + if (clipRectInverseMatrix != float2x2(0)) + { + float2 clipRectCoord = + MUL(clipRectInverseMatrix, pixelPosition) + clipRectInverseTranslate.xy; + gl_ClipDistance[0] = clipRectCoord.x + 1.; + gl_ClipDistance[1] = clipRectCoord.y + 1.; + gl_ClipDistance[2] = 1. - clipRectCoord.x; + gl_ClipDistance[3] = 1. - clipRectCoord.y; + } + else + { + // "clipRectInverseMatrix == 0" is a special case: + // "clipRectInverseTranslate.x == 1" => all in. + // "clipRectInverseTranslate.x == 0" => all out. 
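+        // NOTE (editorial): subtracting .5 below maps those two encodings to a
+        // uniform clip distance of +.5 (inside all four planes) or -.5 (clipped by
+        // all four planes), so a single uniform value keeps or discards the whole
+        // draw.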
+        gl_ClipDistance[0] = gl_ClipDistance[1] = gl_ClipDistance[2] = gl_ClipDistance[3] =
+            clipRectInverseTranslate.x - .5;
+    }
+}
+#endif // ENABLE_CLIP_RECT
+#endif // USING_DEPTH_STENCIL
+#endif // VERTEX
+
+#ifdef DRAW_IMAGE
+#ifndef USE_GENERATED_UNIFORMS
+UNIFORM_BLOCK_BEGIN(IMAGE_DRAW_UNIFORM_BUFFER_IDX, _EXPORTED_ImageDrawUniforms)
+float4 viewMatrix;
+float2 translate;
+float opacity;
+float padding;
+// clipRectInverseMatrix transforms from pixel coordinates to a space where the clipRect is the
+// normalized rectangle: [-1, -1, 1, 1].
+float4 clipRectInverseMatrix;
+float2 clipRectInverseTranslate;
+uint clipID;
+uint blendMode;
+uint zIndex;
+UNIFORM_BLOCK_END(imageDrawUniforms)
+#endif
+#endif
diff --git a/Shaders/Private/Rive/Generated/constants.minified.ush b/Shaders/Private/Rive/Generated/constants.minified.ush
new file mode 100644
index 00000000..5a7fc5ef
--- /dev/null
+++ b/Shaders/Private/Rive/Generated/constants.minified.ush
@@ -0,0 +1,149 @@
+/*
+ * Copyright 2022 Rive
+ */
+
+#define TESS_TEXTURE_WIDTH float(2048)
+#define TESS_TEXTURE_WIDTH_LOG2 11
+
+#define GRAD_TEXTURE_WIDTH float(512)
+#define GRAD_TEXTURE_INVERSE_WIDTH float(0.001953125)
+
+// Width to use for a texture that emulates a storage buffer.
+//
+// Minimize width since the texture needs to be updated in entire rows from the resource buffer.
+// Since these only serve paths and contours, both of those are limited to 16-bit indices, 2048
+// is the min specified texture size in ES3, and no path buffer uses more than 4 texels, we can
+// safely use a width of 128.
+#define STORAGE_TEXTURE_WIDTH 128
+#define STORAGE_TEXTURE_SHIFT_Y 7
+#define STORAGE_TEXTURE_MASK_X 0x7fu
+
+// Tells shaders that a cubic should actually be drawn as the single, non-AA triangle: [p0, p1, p3].
+// This is used to squeeze in more rare triangles, like "grout" triangles from self intersections on
+// interior triangulation, where it wouldn't be worth it to put them in their own dedicated draw
+// call.
+#define RETROFITTED_TRIANGLE_CONTOUR_FLAG (1u << 31u)
+
+// Tells the tessellation shader to re-run Wang's formula on the given curve, figure out how many
+// segments it actually needs, and make any excess segments degenerate by co-locating their vertices
+// at T=0. (Used on the "outerCurve" patches that are drawn with interior triangulations.)
+#define CULL_EXCESS_TESSELLATION_SEGMENTS_CONTOUR_FLAG (1u << 30u)
+
+// Flags for specifying the join type.
+#define JOIN_TYPE_MASK (3u << 28u)
+#define MITER_CLIP_JOIN_CONTOUR_FLAG (3u << 28u)
+#define MITER_REVERT_JOIN_CONTOUR_FLAG (2u << 28u)
+#define BEVEL_JOIN_CONTOUR_FLAG (1u << 28u)
+
+// When a join is being used to emulate a stroke cap, the shader emits additional vertices at T=0
+// and T=1 for round joins, and changes the miter limit to 1 for miter-clip joins.
+#define EMULATED_STROKE_CAP_CONTOUR_FLAG (1u << 27u)
+
+// Internal contour flags.
+#define MIRRORED_CONTOUR_CONTOUR_FLAG (1u << 26u)
+#define JOIN_TANGENT_0_CONTOUR_FLAG (1u << 25u)
+#define JOIN_TANGENT_INNER_CONTOUR_FLAG (1u << 24u)
+#define LEFT_JOIN_CONTOUR_FLAG (1u << 23u)
+#define RIGHT_JOIN_CONTOUR_FLAG (1u << 22u)
+#define CONTOUR_ID_MASK 0xffffu
+
+// Says which part of the patch a vertex belongs to.
+#define STROKE_VERTEX 0
+#define FAN_VERTEX 1
+#define FAN_MIDPOINT_VERTEX 2
+
+// Mirrors pls::PaintType.
+#define SOLID_COLOR_PAINT_TYPE 0u +#define LINEAR_GRADIENT_PAINT_TYPE 1u +#define RADIAL_GRADIENT_PAINT_TYPE 2u +#define IMAGE_PAINT_TYPE 3u +#define CLIP_UPDATE_PAINT_TYPE 4u + +// Paint flags, found in the x-component value of @paintBuffer. +#define PAINT_FLAG_EVEN_ODD 0x100u +#define PAINT_FLAG_HAS_CLIP_RECT 0x200u + +// PLS draw resources are either updated per flush or per draw. They go into set 0 +// or set 1, depending on how often they are updated. +#define PER_FLUSH_BINDINGS_SET 0 +#define PER_DRAW_BINDINGS_SET 1 + +// Index at which we access each resource. +#define TESS_VERTEX_TEXTURE_IDX 0 +#define GRAD_TEXTURE_IDX 1 +#define IMAGE_TEXTURE_IDX 2 +#define PATH_BUFFER_IDX 3 +#define PAINT_BUFFER_IDX 4 +#define PAINT_AUX_BUFFER_IDX 5 +#define CONTOUR_BUFFER_IDX 6 +#define FLUSH_UNIFORM_BUFFER_IDX 7 +#define PATH_BASE_INSTANCE_UNIFORM_BUFFER_IDX 8 +#define IMAGE_DRAW_UNIFORM_BUFFER_IDX 9 +#define DST_COLOR_TEXTURE_IDX 10 +#define DEFAULT_BINDINGS_SET_SIZE 11 + +// Samplers are accessed at the same index as their corresponding texture, so we put them in a +// separate binding set. +#define SAMPLER_BINDINGS_SET 2 + +// PLS textures are accessed at the same index as their PLS planes, so we put them in a separate +// binding set. +#define PLS_TEXTURE_BINDINGS_SET 3 + +#define BINDINGS_SET_COUNT 4 + +// Index of each pixel local storage plane. +#define COLOR_PLANE_IDX 0 +#define CLIP_PLANE_IDX 1 +#define SCRATCH_COLOR_PLANE_IDX 2 +#define COVERAGE_PLANE_IDX 3 + +// acos(1/4), because the miter limit is always 4. +#define MITER_ANGLE_LIMIT float(1.318116071652817965746) + +// Raw bit representation of the largest denormalized fp16 value. We offset all (1-based) path IDs +// by this value in order to avoid denorms, which have been empirically unreliable on Android as ID +// values. +#define MAX_DENORM_F16 1023u + +// Blend modes. Mirrors rive::BlendMode, but 0-based and contiguous for tighter packing. +#define BLEND_SRC_OVER 0u +#define BLEND_MODE_SCREEN 1u +#define BLEND_MODE_OVERLAY 2u +#define BLEND_MODE_DARKEN 3u +#define BLEND_MODE_LIGHTEN 4u +#define BLEND_MODE_COLORDODGE 5u +#define BLEND_MODE_COLORBURN 6u +#define BLEND_MODE_HARDLIGHT 7u +#define BLEND_MODE_SOFTLIGHT 8u +#define BLEND_MODE_DIFFERENCE 9u +#define BLEND_MODE_EXCLUSION 10u +#define BLEND_MODE_MULTIPLY 11u +#define BLEND_MODE_HUE 12u +#define BLEND_MODE_SATURATION 13u +#define BLEND_MODE_COLOR 14u +#define BLEND_MODE_LUMINOSITY 15u + +// Fixed-point coverage values for the experimental atomic mode. +// Atomic mode uses 7:9 fixed point, so the winding number breaks if a shape has more than 64 +// levels of self overlap in either winding direction at any point. +#define FIXED_COVERAGE_FACTOR float(512) +#define FIXED_COVERAGE_INVERSE_FACTOR float(0.001953125) +#define FIXED_COVERAGE_ZERO float(1 << 15) +#define FIXED_COVERAGE_ONE (FIXED_COVERAGE_FACTOR + FIXED_COVERAGE_ZERO) + +// Binding points for storage buffers. +#define PAINT_STORAGE_BUFFER_IDX 8 +#define PAINT_MATRIX_STORAGE_BUFFER_IDX 9 +#define PAINT_TRANSLATE_STORAGE_BUFFER_IDX 10 +#define CLIPRECT_MATRIX_STORAGE_BUFFER_IDX 11 +#define CLIPRECT_TRANSLATE_STORAGE_BUFFER_IDX 12 + +// Indices for SPIRV specialization constants (used in lieu of #defines in Vulkan.) 
+#define CLIPPING_SPECIALIZATION_IDX 0 +#define CLIP_RECT_SPECIALIZATION_IDX 1 +#define ADVANCED_BLEND_SPECIALIZATION_IDX 2 +#define EVEN_ODD_SPECIALIZATION_IDX 3 +#define NESTED_CLIPPING_SPECIALIZATION_IDX 4 +#define HSL_BLEND_MODES_SPECIALIZATION_IDX 5 +#define SPECIALIZATION_COUNT 6 diff --git a/Shaders/Private/Rive/Generated/draw_image_mesh.minified.ush b/Shaders/Private/Rive/Generated/draw_image_mesh.minified.ush new file mode 100644 index 00000000..0cc8d8ab --- /dev/null +++ b/Shaders/Private/Rive/Generated/draw_image_mesh.minified.ush @@ -0,0 +1,193 @@ +/* + * Copyright 2023 Rive + */ + +#ifdef VERTEX +ATTR_BLOCK_BEGIN(PositionAttr) +ATTR(0, float2, _EXPORTED_a_position); +ATTR_BLOCK_END + +ATTR_BLOCK_BEGIN(UVAttr) +ATTR(1, float2, _EXPORTED_a_texCoord); +ATTR_BLOCK_END +#endif + +VARYING_BLOCK_BEGIN +NO_PERSPECTIVE VARYING(0, float2, v_texCoord); +#ifdef ENABLE_CLIPPING +OPTIONALLY_FLAT VARYING(1, half, v_clipID); +#endif +#ifdef ENABLE_CLIP_RECT +NO_PERSPECTIVE VARYING(2, float4, v_clipRect); +#endif +VARYING_BLOCK_END + +#ifdef VERTEX +VERTEX_TEXTURE_BLOCK_BEGIN +VERTEX_TEXTURE_BLOCK_END + +IMAGE_MESH_VERTEX_MAIN(_EXPORTED_drawVertexMain, PositionAttr, position, UVAttr, uv, _vertexID) +{ + ATTR_UNPACK(_vertexID, position, _EXPORTED_a_position, float2); + ATTR_UNPACK(_vertexID, uv, _EXPORTED_a_texCoord, float2); + + VARYING_INIT(v_texCoord, float2); +#ifdef ENABLE_CLIPPING + VARYING_INIT(v_clipID, half); +#endif +#ifdef ENABLE_CLIP_RECT + VARYING_INIT(v_clipRect, float4); +#endif + + float2 vertexPosition = + MUL(make_float2x2(imageDrawUniforms.viewMatrix), _EXPORTED_a_position) + imageDrawUniforms.translate; + v_texCoord = _EXPORTED_a_texCoord; +#ifdef ENABLE_CLIPPING + if (ENABLE_CLIPPING) + { + v_clipID = id_bits_to_f16(imageDrawUniforms.clipID, uniforms.pathIDGranularity); + } +#endif +#ifdef ENABLE_CLIP_RECT + if (ENABLE_CLIP_RECT) + { +#ifndef USING_DEPTH_STENCIL + v_clipRect = find_clip_rect_coverage_distances( + make_float2x2(imageDrawUniforms.clipRectInverseMatrix), + imageDrawUniforms.clipRectInverseTranslate, + vertexPosition); +#else // USING_DEPTH_STENCIL + set_clip_rect_plane_distances(make_float2x2(imageDrawUniforms.clipRectInverseMatrix), + imageDrawUniforms.clipRectInverseTranslate, + vertexPosition); +#endif // USING_DEPTH_STENCIL + } +#endif // ENABLE_CLIP_RECT + float4 pos = RENDER_TARGET_COORD_TO_CLIP_COORD(vertexPosition); +#ifdef USING_DEPTH_STENCIL + pos.z = normalize_z_index(imageDrawUniforms.zIndex); +#endif + + VARYING_PACK(v_texCoord); +#ifdef ENABLE_CLIPPING + VARYING_PACK(v_clipID); +#endif +#ifdef ENABLE_CLIP_RECT + VARYING_PACK(v_clipRect); +#endif + EMIT_VERTEX(pos); +} +#endif + +#ifdef FRAGMENT +FRAG_TEXTURE_BLOCK_BEGIN +TEXTURE_RGBA8(PER_DRAW_BINDINGS_SET, IMAGE_TEXTURE_IDX, _EXPORTED_imageTexture); +#ifdef USING_DEPTH_STENCIL +#ifdef ENABLE_ADVANCED_BLEND +TEXTURE_RGBA8(PER_FLUSH_BINDINGS_SET, DST_COLOR_TEXTURE_IDX, _EXPORTED_dstColorTexture); +#endif +#endif +FRAG_TEXTURE_BLOCK_END + +SAMPLER_MIPMAP(IMAGE_TEXTURE_IDX, imageSampler) + +FRAG_STORAGE_BUFFER_BLOCK_BEGIN +FRAG_STORAGE_BUFFER_BLOCK_END + +#ifndef USING_DEPTH_STENCIL + +PLS_BLOCK_BEGIN +PLS_DECL4F(COLOR_PLANE_IDX, colorBuffer); +#if defined(ENABLE_CLIPPING) || defined(PLS_IMPL_ANGLE) +PLS_DECLUI(CLIP_PLANE_IDX, clipBuffer); +#endif +PLS_DECL4F(SCRATCH_COLOR_PLANE_IDX, scratchColorBuffer); +PLS_DECLUI(COVERAGE_PLANE_IDX, coverageCountBuffer); +PLS_BLOCK_END + +PLS_MAIN_WITH_IMAGE_UNIFORMS(_EXPORTED_drawFragmentMain) +{ + VARYING_UNPACK(v_texCoord, float2); +#ifdef ENABLE_CLIPPING + 
VARYING_UNPACK(v_clipID, half); +#endif +#ifdef ENABLE_CLIP_RECT + VARYING_UNPACK(v_clipRect, float4); +#endif + + half4 color = TEXTURE_SAMPLE(_EXPORTED_imageTexture, imageSampler, v_texCoord); + half coverage = 1.; + +#ifdef ENABLE_CLIP_RECT + if (ENABLE_CLIP_RECT) + { + half clipRectCoverage = min_value(cast_float4_to_half4(v_clipRect)); + coverage = clamp(clipRectCoverage, make_half(.0), coverage); + } +#endif + + PLS_INTERLOCK_BEGIN; + +#ifdef ENABLE_CLIPPING + if (ENABLE_CLIPPING && v_clipID != .0) + { + half2 clipData = unpackHalf2x16(PLS_LOADUI(clipBuffer)); + half clipContentID = clipData.y; + half clipCoverage = clipContentID == v_clipID ? clipData.x : make_half(.0); + coverage = min(coverage, clipCoverage); + } +#endif + + // Blend with the framebuffer color. + color.w *= imageDrawUniforms.opacity * coverage; + half4 dstColor = PLS_LOAD4F(colorBuffer); +#ifdef ENABLE_ADVANCED_BLEND + if (ENABLE_ADVANCED_BLEND && imageDrawUniforms.blendMode != BLEND_SRC_OVER) + { + color = advanced_blend(color, + unmultiply(dstColor), + cast_uint_to_ushort(imageDrawUniforms.blendMode)); + } + else +#endif + { + color.xyz *= color.w; + color = color + dstColor * (1. - color.w); + } + + PLS_STORE4F(colorBuffer, color); +#ifdef ENABLE_CLIPPING + PLS_PRESERVE_UI(clipBuffer); +#endif + + PLS_INTERLOCK_END; + + EMIT_PLS; +} + +#else // USING_DEPTH_STENCIL + +FRAG_DATA_MAIN(half4, _EXPORTED_drawFragmentMain) +{ + VARYING_UNPACK(v_texCoord, float2); + + half4 color = TEXTURE_SAMPLE(_EXPORTED_imageTexture, imageSampler, v_texCoord); + color.w *= imageDrawUniforms.opacity; + +#ifdef ENABLE_ADVANCED_BLEND + if (ENABLE_ADVANCED_BLEND) + { + half4 dstColor = TEXEL_FETCH(_EXPORTED_dstColorTexture, int2(floor(_fragCoord.xy))); + color = advanced_blend(color, unmultiply(dstColor), imageDrawUniforms.blendMode); + } + else +#endif // !ENABLE_ADVANCED_BLEND + { + color = premultiply(color); + } + + EMIT_FRAG_DATA(color); +} + +#endif // USING_DEPTH_STENCIL +#endif // FRAGMENT diff --git a/Shaders/Private/Rive/Generated/draw_path.minified.ush b/Shaders/Private/Rive/Generated/draw_path.minified.ush new file mode 100644 index 00000000..60e29339 --- /dev/null +++ b/Shaders/Private/Rive/Generated/draw_path.minified.ush @@ -0,0 +1,530 @@ +/* + * Copyright 2022 Rive + */ + +#ifdef VERTEX +ATTR_BLOCK_BEGIN(Attrs) +#ifdef DRAW_INTERIOR_TRIANGLES +ATTR(0, packed_float3, _EXPORTED_a_triangleVertex); +#else +ATTR(0, float4, _EXPORTED_a_patchVertexData); // [localVertexID, outset, fillCoverage, vertexType] +ATTR(1, float4, _EXPORTED_a_mirroredVertexData); +#endif +ATTR_BLOCK_END +#endif + +VARYING_BLOCK_BEGIN +NO_PERSPECTIVE VARYING(0, float4, v_paint); +#ifndef USING_DEPTH_STENCIL +#ifdef DRAW_INTERIOR_TRIANGLES +OPTIONALLY_FLAT VARYING(1, half, v_windingWeight); +#else +NO_PERSPECTIVE VARYING(2, half2, v_edgeDistance); +#endif +OPTIONALLY_FLAT VARYING(3, half, v_pathID); +#ifdef ENABLE_CLIPPING +OPTIONALLY_FLAT VARYING(4, half, v_clipID); +#endif +#ifdef ENABLE_CLIP_RECT +NO_PERSPECTIVE VARYING(5, float4, v_clipRect); +#endif +#endif // !USING_DEPTH_STENCIL +#ifdef ENABLE_ADVANCED_BLEND +OPTIONALLY_FLAT VARYING(6, half, v_blendMode); +#endif +VARYING_BLOCK_END + +#ifdef VERTEX +VERTEX_MAIN(_EXPORTED_drawVertexMain, Attrs, attrs, _vertexID, _instanceID) +{ +#ifdef DRAW_INTERIOR_TRIANGLES + ATTR_UNPACK(_vertexID, attrs, _EXPORTED_a_triangleVertex, float3); +#else + ATTR_UNPACK(_vertexID, attrs, _EXPORTED_a_patchVertexData, float4); + ATTR_UNPACK(_vertexID, attrs, _EXPORTED_a_mirroredVertexData, float4); +#endif + + 
VARYING_INIT(v_paint, float4); +#ifndef USING_DEPTH_STENCIL +#ifdef DRAW_INTERIOR_TRIANGLES + VARYING_INIT(v_windingWeight, half); +#else + VARYING_INIT(v_edgeDistance, half2); +#endif + VARYING_INIT(v_pathID, half); +#ifdef ENABLE_CLIPPING + VARYING_INIT(v_clipID, half); +#endif +#ifdef ENABLE_CLIP_RECT + VARYING_INIT(v_clipRect, float4); +#endif +#endif // !USING_DEPTH_STENCIL +#ifdef ENABLE_ADVANCED_BLEND + VARYING_INIT(v_blendMode, half); +#endif + + bool shouldDiscardVertex = false; + ushort pathID; + float2 vertexPosition; +#ifdef USING_DEPTH_STENCIL + ushort pathZIndex; +#endif + +#ifdef DRAW_INTERIOR_TRIANGLES + vertexPosition = unpack_interior_triangle_vertex(_EXPORTED_a_triangleVertex, + pathID, + v_windingWeight VERTEX_CONTEXT_UNPACK); +#else + shouldDiscardVertex = !unpack_tessellated_path_vertex(_EXPORTED_a_patchVertexData, + _EXPORTED_a_mirroredVertexData, + _instanceID, + pathID, + vertexPosition +#ifndef USING_DEPTH_STENCIL + , + v_edgeDistance +#else + , + pathZIndex +#endif + VERTEX_CONTEXT_UNPACK); +#endif // !DRAW_INTERIOR_TRIANGLES + + uint2 paintData = STORAGE_BUFFER_LOAD2(_EXPORTED_paintBuffer, pathID); + +#ifndef USING_DEPTH_STENCIL + // Encode the integral pathID as a "half" that we know the hardware will see as a unique value + // in the fragment shader. + v_pathID = id_bits_to_f16(pathID, uniforms.pathIDGranularity); + + // Indicate even-odd fill rule by making pathID negative. + if ((paintData.x & PAINT_FLAG_EVEN_ODD) != 0u) + v_pathID = -v_pathID; +#endif // !USING_DEPTH_STENCIL + + uint paintType = paintData.x & 0xfu; +#ifdef ENABLE_CLIPPING + if (ENABLE_CLIPPING) + { + uint clipIDBits = (paintType == CLIP_UPDATE_PAINT_TYPE ? paintData.y : paintData.x) >> 16; + v_clipID = id_bits_to_f16(clipIDBits, uniforms.pathIDGranularity); + // Negative clipID means to update the clip buffer instead of the color buffer. + if (paintType == CLIP_UPDATE_PAINT_TYPE) + v_clipID = -v_clipID; + } +#endif +#ifdef ENABLE_ADVANCED_BLEND + if (ENABLE_ADVANCED_BLEND) + { + v_blendMode = float((paintData.x >> 4) & 0xfu); + } +#endif + + // Paint matrices operate on the fragment shader's "_fragCoord", which is bottom-up in GL. + float2 fragCoord = vertexPosition; +#ifdef FRAG_COORD_BOTTOM_UP + fragCoord.y = float(uniforms.renderTargetHeight) - fragCoord.y; +#endif + +#ifdef ENABLE_CLIP_RECT + if (ENABLE_CLIP_RECT) + { + // clipRectInverseMatrix transforms from pixel coordinates to a space where the clipRect is + // the normalized rectangle: [-1, -1, 1, 1]. + float2x2 clipRectInverseMatrix = + make_float2x2(STORAGE_BUFFER_LOAD4(_EXPORTED_paintAuxBuffer, pathID * 4u + 2u)); + float4 clipRectInverseTranslate = STORAGE_BUFFER_LOAD4(_EXPORTED_paintAuxBuffer, pathID * 4u + 3u); +#ifndef USING_DEPTH_STENCIL + v_clipRect = find_clip_rect_coverage_distances(clipRectInverseMatrix, + clipRectInverseTranslate.xy, + fragCoord); +#else // USING_DEPTH_STENCIL + set_clip_rect_plane_distances(clipRectInverseMatrix, + clipRectInverseTranslate.xy, + fragCoord); +#endif // USING_DEPTH_STENCIL + } +#endif // ENABLE_CLIP_RECT + + // Unpack the paint once we have a position. 
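+    // NOTE (editorial): v_paint is a tagged encoding keyed off its w component,
+    // decoded by find_paint_color() in the fragment stage:
+    //     w >= 0     => solid color; v_paint is the RGBA color itself.
+    //     -1 < w < 0 => gradient; w = -row in the gradient texture, |z| encodes the
+    //                   span (z < 0 marks radial), xy = gradient coordinate.
+    //     w <= -1    => image; xy = texture coordinate, z = image opacity.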
+ if (paintType == SOLID_COLOR_PAINT_TYPE) + { + half4 color = unpackUnorm4x8(paintData.y); + v_paint = float4(color); + } +#ifdef ENABLE_CLIPPING + else if (ENABLE_CLIPPING && paintType == CLIP_UPDATE_PAINT_TYPE) + { + half outerClipID = id_bits_to_f16(paintData.x >> 16, uniforms.pathIDGranularity); + v_paint = float4(outerClipID, 0, 0, 0); + } +#endif + else + { + float2x2 paintMatrix = make_float2x2(STORAGE_BUFFER_LOAD4(_EXPORTED_paintAuxBuffer, pathID * 4u)); + float4 paintTranslate = STORAGE_BUFFER_LOAD4(_EXPORTED_paintAuxBuffer, pathID * 4u + 1u); + float2 paintCoord = MUL(paintMatrix, fragCoord) + paintTranslate.xy; + if (paintType == LINEAR_GRADIENT_PAINT_TYPE || paintType == RADIAL_GRADIENT_PAINT_TYPE) + { + // v_paint.a contains "-row" of the gradient ramp at texel center, in normalized space. + v_paint.w = -uintBitsToFloat(paintData.y); + // abs(v_paint.b) contains either: + // - 2 if the gradient ramp spans an entire row. + // - x0 of the gradient ramp in normalized space, if it's a simple 2-texel ramp. + if (paintTranslate.z > .9) // paintTranslate.z is either ~1 or ~1/GRAD_TEXTURE_WIDTH. + { + // Complex ramps span an entire row. Set it to 2 to convey this. + v_paint.z = 2.; + } + else + { + // This is a simple ramp. + v_paint.z = paintTranslate.w; + } + if (paintType == LINEAR_GRADIENT_PAINT_TYPE) + { + // The paint is a linear gradient. + v_paint.y = .0; + v_paint.x = paintCoord.x; + } + else + { + // The paint is a radial gradient. Mark v_paint.b negative to indicate this to the + // fragment shader. (v_paint.b can't be zero because the gradient ramp is aligned on + // pixel centers, so negating it will always produce a negative number.) + v_paint.z = -v_paint.z; + v_paint.xy = paintCoord.xy; + } + } + else // IMAGE_PAINT_TYPE + { + // v_paint.a <= -1. signals that the paint is an image. + // v_paint.b is the image opacity. + // v_paint.rg is the normalized image texture coordinate (built into the paintMatrix). 
+            float opacity = uintBitsToFloat(paintData.y);
+            v_paint = float4(paintCoord.x, paintCoord.y, opacity, -2.);
+        }
+    }
+
+    float4 pos;
+    if (!shouldDiscardVertex)
+    {
+        pos = RENDER_TARGET_COORD_TO_CLIP_COORD(vertexPosition);
+#ifdef USING_DEPTH_STENCIL
+        pos.z = normalize_z_index(pathZIndex);
+#endif
+    }
+    else
+    {
+        pos = float4(uniforms.vertexDiscardValue,
+                     uniforms.vertexDiscardValue,
+                     uniforms.vertexDiscardValue,
+                     uniforms.vertexDiscardValue);
+    }
+
+    VARYING_PACK(v_paint);
+#ifndef USING_DEPTH_STENCIL
+#ifdef DRAW_INTERIOR_TRIANGLES
+    VARYING_PACK(v_windingWeight);
+#else
+    VARYING_PACK(v_edgeDistance);
+#endif
+    VARYING_PACK(v_pathID);
+#ifdef ENABLE_CLIPPING
+    VARYING_PACK(v_clipID);
+#endif
+#ifdef ENABLE_CLIP_RECT
+    VARYING_PACK(v_clipRect);
+#endif
+#endif // !USING_DEPTH_STENCIL
+#ifdef ENABLE_ADVANCED_BLEND
+    VARYING_PACK(v_blendMode);
+#endif
+    EMIT_VERTEX(pos);
+}
+#endif
+
+#ifdef FRAGMENT
+FRAG_TEXTURE_BLOCK_BEGIN
+TEXTURE_RGBA8(PER_FLUSH_BINDINGS_SET, GRAD_TEXTURE_IDX, _EXPORTED_gradTexture);
+TEXTURE_RGBA8(PER_DRAW_BINDINGS_SET, IMAGE_TEXTURE_IDX, _EXPORTED_imageTexture);
+#ifdef USING_DEPTH_STENCIL
+#ifdef ENABLE_ADVANCED_BLEND
+TEXTURE_RGBA8(PER_FLUSH_BINDINGS_SET, DST_COLOR_TEXTURE_IDX, _EXPORTED_dstColorTexture);
+#endif
+#endif
+FRAG_TEXTURE_BLOCK_END
+
+SAMPLER_LINEAR(GRAD_TEXTURE_IDX, gradSampler)
+SAMPLER_MIPMAP(IMAGE_TEXTURE_IDX, imageSampler)
+
+FRAG_STORAGE_BUFFER_BLOCK_BEGIN
+FRAG_STORAGE_BUFFER_BLOCK_END
+
+INLINE half4 find_paint_color(float4 paint
+#ifdef TARGET_VULKAN
+                              ,
+                              float2 imagePaintDDX,
+                              float2 imagePaintDDY
+#endif
+                                  FRAGMENT_CONTEXT_DECL)
+{
+    if (paint.w >= .0) // Is the paint a solid color?
+    {
+        return cast_float4_to_half4(paint);
+    }
+    else if (paint.w > -1.) // Is the paint a gradient (linear or radial)?
+    {
+        float t = paint.z > .0 ? /*linear*/ paint.x : /*radial*/ length(paint.xy);
+        t = clamp(t, .0, 1.);
+        float span = abs(paint.z);
+        float x = span > 1. ? /*entire row*/ (1. - 1. / GRAD_TEXTURE_WIDTH) * t +
+                                  (.5 / GRAD_TEXTURE_WIDTH)
+                            : /*two texels*/ (1. / GRAD_TEXTURE_WIDTH) * t + span;
+        float row = -paint.w;
+        // Our gradient texture is not mipmapped. Issue a texture-sample that explicitly does not
+        // compute derivatives for LOD selection (the LOD is specified directly instead).
+        return TEXTURE_SAMPLE_LOD(_EXPORTED_gradTexture, gradSampler, float2(x, row), .0);
+    }
+    else // The paint is an image.
+    {
+        half4 color;
+#ifdef TARGET_VULKAN
+        // Vulkan validators require explicit derivatives when sampling a texture in
+        // "non-uniform" control flow. (See the note in the fragment entry point below.)
+        color = TEXTURE_SAMPLE_GRAD(_EXPORTED_imageTexture,
+                                    imageSampler,
+                                    paint.xy,
+                                    imagePaintDDX,
+                                    imagePaintDDY);
+#else
+        color = TEXTURE_SAMPLE(_EXPORTED_imageTexture, imageSampler, paint.xy);
+#endif
+        color.w *= paint.z; // paint.b holds the opacity of the image.
+ return color; + } +} + +#ifndef USING_DEPTH_STENCIL + +PLS_BLOCK_BEGIN +PLS_DECL4F(COLOR_PLANE_IDX, colorBuffer); +#if defined(ENABLE_CLIPPING) || defined(PLS_IMPL_ANGLE) +PLS_DECLUI(CLIP_PLANE_IDX, clipBuffer); +#endif +PLS_DECL4F(SCRATCH_COLOR_PLANE_IDX, scratchColorBuffer); +PLS_DECLUI(COVERAGE_PLANE_IDX, coverageCountBuffer); +PLS_BLOCK_END + +PLS_MAIN(_EXPORTED_drawFragmentMain) +{ + VARYING_UNPACK(v_paint, float4); +#ifdef DRAW_INTERIOR_TRIANGLES + VARYING_UNPACK(v_windingWeight, half); +#else + VARYING_UNPACK(v_edgeDistance, half2); +#endif + VARYING_UNPACK(v_pathID, half); +#ifdef ENABLE_CLIPPING + VARYING_UNPACK(v_clipID, half); +#endif +#ifdef ENABLE_CLIP_RECT + VARYING_UNPACK(v_clipRect, float4); +#endif +#ifdef ENABLE_ADVANCED_BLEND + VARYING_UNPACK(v_blendMode, half); +#endif + +#ifdef TARGET_VULKAN + // Strict validators require derivatives (i.e., for a mipmapped texture sample) to be computed + // within uniform control flow. + // Our control flow for texture sampling is uniform for an entire triangle, so we're fine, but + // the validators don't know this. + // If this might be a problem (e.g., for WebGPU), just find the potential image paint + // derivatives here. + float2 imagePaintDDX = dFdx(v_paint.xy); + float2 imagePaintDDY = dFdy(v_paint.xy); +#endif + +#ifndef DRAW_INTERIOR_TRIANGLES + // Interior triangles don't overlap, so don't need raster ordering. + PLS_INTERLOCK_BEGIN; +#endif + + half2 coverageData = unpackHalf2x16(PLS_LOADUI(coverageCountBuffer)); + half coverageBufferID = coverageData.y; + half coverageCount = coverageBufferID == v_pathID ? coverageData.x : make_half(.0); + +#ifdef DRAW_INTERIOR_TRIANGLES + coverageCount += v_windingWeight; +#else + if (v_edgeDistance.y >= .0) // Stroke. + coverageCount = max(min(v_edgeDistance.x, v_edgeDistance.y), coverageCount); + else // Fill. (Back-face culling ensures v_edgeDistance.x is appropriately signed.) + coverageCount += v_edgeDistance.x; + + // Save the updated coverage. + PLS_STOREUI(coverageCountBuffer, packHalf2x16(make_half2(coverageCount, v_pathID))); +#endif + + // Convert coverageCount to coverage. + half coverage = abs(coverageCount); +#ifdef ENABLE_EVEN_ODD + if (ENABLE_EVEN_ODD && v_pathID < .0 /*even-odd*/) + { + coverage = 1. - make_half(abs(fract(coverage * .5) * 2. + -1.)); + } +#endif + coverage = min(coverage, make_half(1.)); // This also caps stroke coverage, which can be >1. + +#ifdef ENABLE_CLIPPING + if (ENABLE_CLIPPING && v_clipID < .0) // Update the clip buffer. + { + half clipID = -v_clipID; +#ifdef ENABLE_NESTED_CLIPPING + if (ENABLE_NESTED_CLIPPING) + { + half outerClipID = v_paint.x; + if (outerClipID != .0) + { + // This is a nested clip. Intersect coverage with the enclosing clip (outerClipID). + half2 clipData = unpackHalf2x16(PLS_LOADUI(clipBuffer)); + half clipContentID = clipData.y; + half outerClipCoverage; + if (clipContentID != clipID) + { + // First hit: either clipBuffer contains outerClipCoverage, or this pixel is not + // inside the outer clip and outerClipCoverage is zero. + outerClipCoverage = clipContentID == outerClipID ? clipData.x : .0; +#ifndef DRAW_INTERIOR_TRIANGLES + // Stash outerClipCoverage before overwriting clipBuffer, in case we hit this + // pixel again and need it. (Not necessary when drawing interior triangles + // because they always go last and don't overlap.) + PLS_STORE4F(scratchColorBuffer, make_half4(outerClipCoverage, .0, .0, .0)); +#endif + } + else + { + // Subsequent hit: outerClipCoverage is stashed in scratchColorBuffer. 
+ outerClipCoverage = PLS_LOAD4F(scratchColorBuffer).x; +#ifndef DRAW_INTERIOR_TRIANGLES + // Since interior triangles are always last, there's no need to preserve this + // value. + PLS_PRESERVE_4F(scratchColorBuffer); +#endif + } + coverage = min(coverage, outerClipCoverage); + } + } +#endif // @ENABLE_NESTED_CLIPPING + PLS_STOREUI(clipBuffer, packHalf2x16(make_half2(coverage, clipID))); + PLS_PRESERVE_4F(colorBuffer); + } + else // Render to the main framebuffer. +#endif // @ENABLE_CLIPPING + { +#ifdef ENABLE_CLIPPING + if (ENABLE_CLIPPING) + { + // Apply the clip. + if (v_clipID != .0) + { + // Clip IDs are not necessarily drawn in monotonically increasing order, so always + // check exact equality of the clipID. + half2 clipData = unpackHalf2x16(PLS_LOADUI(clipBuffer)); + half clipContentID = clipData.y; + half clipCoverage = clipContentID == v_clipID ? clipData.x : make_half(.0); + coverage = min(coverage, clipCoverage); + } + PLS_PRESERVE_UI(clipBuffer); + } +#endif +#ifdef ENABLE_CLIP_RECT + if (ENABLE_CLIP_RECT) + { + half clipRectCoverage = min_value(cast_float4_to_half4(v_clipRect)); + coverage = clamp(clipRectCoverage, make_half(.0), coverage); + } +#endif // ENABLE_CLIP_RECT + + half4 color = find_paint_color(v_paint +#ifdef TARGET_VULKAN + , + imagePaintDDX, + imagePaintDDY +#endif + FRAGMENT_CONTEXT_UNPACK); + color.w *= coverage; + + half4 dstColor; + if (coverageBufferID != v_pathID) + { + // This is the first fragment from pathID to touch this pixel. + dstColor = PLS_LOAD4F(colorBuffer); +#ifndef DRAW_INTERIOR_TRIANGLES + // We don't need to store coverage when drawing interior triangles because they always + // go last and don't overlap, so every fragment is the final one in the path. + PLS_STORE4F(scratchColorBuffer, dstColor); +#endif + } + else + { + dstColor = PLS_LOAD4F(scratchColorBuffer); +#ifndef DRAW_INTERIOR_TRIANGLES + // Since interior triangles are always last, there's no need to preserve this value. + PLS_PRESERVE_4F(scratchColorBuffer); +#endif + } + + // Blend with the framebuffer color. +#ifdef ENABLE_ADVANCED_BLEND + if (ENABLE_ADVANCED_BLEND && v_blendMode != cast_uint_to_half(BLEND_SRC_OVER)) + { + color = advanced_blend(color, unmultiply(dstColor), cast_half_to_ushort(v_blendMode)); + } + else +#endif + { + color.xyz *= color.w; + color = color + dstColor * (1. - color.w); + } + + PLS_STORE4F(colorBuffer, color); + } + +#ifndef DRAW_INTERIOR_TRIANGLES + // Interior triangles don't overlap, so don't need raster ordering. 
+ PLS_INTERLOCK_END; +#endif + + EMIT_PLS; +} + +#else // USING_DEPTH_STENCIL + +FRAG_DATA_MAIN(half4, _EXPORTED_drawFragmentMain) +{ + VARYING_UNPACK(v_paint, float4); +#ifdef ENABLE_ADVANCED_BLEND + VARYING_UNPACK(v_blendMode, half); +#endif + + half4 color = find_paint_color(v_paint); + +#ifdef ENABLE_ADVANCED_BLEND + if (ENABLE_ADVANCED_BLEND) + { + half4 dstColor = TEXEL_FETCH(_EXPORTED_dstColorTexture, int2(floor(_fragCoord.xy))); + color = advanced_blend(color, unmultiply(dstColor), cast_half_to_ushort(v_blendMode)); + } + else +#endif // !ENABLE_ADVANCED_BLEND + { + color = premultiply(color); + } + EMIT_FRAG_DATA(color); +} + +#endif // !USING_DEPTH_STENCIL + +#endif // FRAGMENT diff --git a/Shaders/Private/Rive/Generated/draw_path_common.minified.ush b/Shaders/Private/Rive/Generated/draw_path_common.minified.ush new file mode 100644 index 00000000..52991645 --- /dev/null +++ b/Shaders/Private/Rive/Generated/draw_path_common.minified.ush @@ -0,0 +1,301 @@ +/* + * Copyright 2023 Rive + */ + +// Common functions shared by draw shaders. + +#ifdef VERTEX + +VERTEX_TEXTURE_BLOCK_BEGIN +TEXTURE_RGBA32UI(PER_FLUSH_BINDINGS_SET, TESS_VERTEX_TEXTURE_IDX, _EXPORTED_tessVertexTexture); +VERTEX_TEXTURE_BLOCK_END + +VERTEX_STORAGE_BUFFER_BLOCK_BEGIN +STORAGE_BUFFER_U32x4(PATH_BUFFER_IDX, PathBuffer, _EXPORTED_pathBuffer); +STORAGE_BUFFER_U32x2(PAINT_BUFFER_IDX, PaintBuffer, _EXPORTED_paintBuffer); +STORAGE_BUFFER_F32x4(PAINT_AUX_BUFFER_IDX, PaintAuxBuffer, _EXPORTED_paintAuxBuffer); +STORAGE_BUFFER_U32x4(CONTOUR_BUFFER_IDX, ContourBuffer, _EXPORTED_contourBuffer); +VERTEX_STORAGE_BUFFER_BLOCK_END + +#ifdef DRAW_PATH +INLINE int2 tess_texel_coord(int texelIndex) +{ + return int2(texelIndex & ((1 << TESS_TEXTURE_WIDTH_LOG2) - 1), + texelIndex >> TESS_TEXTURE_WIDTH_LOG2); +} + +INLINE float manhattan_pixel_width(float2x2 M, float2 normalized) +{ + + float2 v = MUL(M, normalized); + return (abs(v.x) + abs(v.y)) * (1. / dot(v, v)); +} + +INLINE bool unpack_tessellated_path_vertex(float4 patchVertexData, + float4 mirroredVertexData, + int _instanceID, + OUT(ushort) o_pathID, + OUT(float2) o_vertexPosition +#ifndef USING_DEPTH_STENCIL + , + OUT(half2) o_edgeDistance +#else + , + OUT(ushort) o_pathZIndex +#endif + VERTEX_CONTEXT_DECL) +{ + // Unpack patchVertexData. + int localVertexID = int(patchVertexData.x); + float outset = patchVertexData.y; + float fillCoverage = patchVertexData.z; + int patchSegmentSpan = floatBitsToInt(patchVertexData.w) >> 2; + int vertexType = floatBitsToInt(patchVertexData.w) & 3; + + // Fetch a vertex that definitely belongs to the contour we're drawing. + int vertexIDOnContour = min(localVertexID, patchSegmentSpan - 1); + int tessVertexIdx = _instanceID * patchSegmentSpan + vertexIDOnContour; + uint4 tessVertexData = TEXEL_FETCH(_EXPORTED_tessVertexTexture, tess_texel_coord(tessVertexIdx)); + uint contourIDWithFlags = tessVertexData.w; + + // Fetch and unpack the contour referenced by the tessellation vertex. + uint4 contourData = STORAGE_BUFFER_LOAD4(_EXPORTED_contourBuffer, contour_data_idx(contourIDWithFlags)); + float2 midpoint = uintBitsToFloat(contourData.xy); + o_pathID = cast_uint_to_ushort(contourData.z & 0xffffu); + uint vertexIndex0 = contourData.w; + + // Fetch and unpack the path. 
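+    // NOTE (editorial): each path occupies two adjacent uint4 records in pathBuffer:
+    // the first holds the path's 2x2 transform matrix, and the second packs
+    // [translate.x, translate.y, strokeRadius, zIndex], with strokeRadius == 0
+    // denoting a fill and pathData.w carrying the z-index in depth-stencil mode.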
+    float2x2 M = make_float2x2(uintBitsToFloat(STORAGE_BUFFER_LOAD4(_EXPORTED_pathBuffer, o_pathID * 2u)));
+    uint4 pathData = STORAGE_BUFFER_LOAD4(_EXPORTED_pathBuffer, o_pathID * 2u + 1u);
+    float2 translate = uintBitsToFloat(pathData.xy);
+
+    float strokeRadius = uintBitsToFloat(pathData.z);
+#ifdef USING_DEPTH_STENCIL
+    o_pathZIndex = cast_uint_to_ushort(pathData.w);
+#endif
+
+    // Fix the tessellation vertex if we fetched the wrong one in order to guarantee we got the
+    // correct contour ID and flags, or if we belong to a mirrored contour and this vertex has an
+    // alternate position when mirrored.
+    uint mirroredContourFlag = contourIDWithFlags & MIRRORED_CONTOUR_CONTOUR_FLAG;
+    if (mirroredContourFlag != 0u)
+    {
+        localVertexID = int(mirroredVertexData.x);
+        outset = mirroredVertexData.y;
+        fillCoverage = mirroredVertexData.z;
+    }
+    if (localVertexID != vertexIDOnContour)
+    {
+        // This can peek one vertex before or after the contour, but the tessellator guarantees
+        // there is always at least one padding vertex at the beginning and end of the data.
+        tessVertexIdx += localVertexID - vertexIDOnContour;
+        uint4 replacementTessVertexData =
+            TEXEL_FETCH(_EXPORTED_tessVertexTexture, tess_texel_coord(tessVertexIdx));
+        if ((replacementTessVertexData.w & 0xffffu) != (contourIDWithFlags & 0xffffu))
+        {
+            // We crossed over into a new contour. Either wrap to the first vertex in the contour or
+            // leave it clamped at the final vertex of the contour.
+            bool isClosed = strokeRadius == .0 || // filled
+                            midpoint.x != .0;     // explicitly closed stroke
+            if (isClosed)
+            {
+                tessVertexData =
+                    TEXEL_FETCH(_EXPORTED_tessVertexTexture, tess_texel_coord(int(vertexIndex0)));
+            }
+        }
+        else
+        {
+            tessVertexData = replacementTessVertexData;
+        }
+        // MIRRORED_CONTOUR_CONTOUR_FLAG is not preserved at vertexIndex0. Preserve it here. By not
+        // preserving this flag, the normal and mirrored contour can both share the same contour
+        // record.
+        contourIDWithFlags = tessVertexData.w | mirroredContourFlag;
+    }
+
+    // Finish unpacking tessVertexData.
+    float theta = uintBitsToFloat(tessVertexData.z);
+    float2 norm = float2(sin(theta), -cos(theta));
+    float2 origin = uintBitsToFloat(tessVertexData.xy);
+    float2 postTransformVertexOffset;
+
+    if (strokeRadius != .0) // Is this a stroke?
+    {
+        // Ensure strokes always emit clockwise triangles.
+        outset *= sign(determinant(M));
+
+        // Joins only emanate from the outer side of the stroke.
+        if ((contourIDWithFlags & LEFT_JOIN_CONTOUR_FLAG) != 0u)
+            outset = min(outset, .0);
+        if ((contourIDWithFlags & RIGHT_JOIN_CONTOUR_FLAG) != 0u)
+            outset = max(outset, .0);
+
+        float aaRadius = manhattan_pixel_width(M, norm) * AA_RADIUS;
+        half globalCoverage = 1.;
+        if (aaRadius > strokeRadius)
+        {
+            // The stroke is narrower than the AA ramp. Instead of emitting subpixel geometry, make
+            // the stroke as wide as the AA ramp and apply a global coverage multiplier.
+            globalCoverage = cast_float_to_half(strokeRadius) / cast_float_to_half(aaRadius);
+            strokeRadius = aaRadius;
+        }
+
+        // Extend the vertex by half the width of the AA ramp.
+        float2 vertexOffset = MUL(norm, strokeRadius + aaRadius); // Bloat stroke width for AA.
+
+#ifndef USING_DEPTH_STENCIL
+        // Calculate the AA distance to both the outset and inset edges of the stroke. The fragment
+        // shader will use whichever is lesser.
+        float x = outset * (strokeRadius + aaRadius);
+        o_edgeDistance =
+            cast_float2_to_half2((1. / (aaRadius * 2.)) * (float2(x, -x) + strokeRadius) + .5);
+#endif
+
+        uint joinType = contourIDWithFlags & JOIN_TYPE_MASK;
+        if (joinType != 0u)
+        {
+            // This vertex belongs to a miter or bevel join. Begin by finding the bisector, which is
+            // the same as the miter line. The first two vertices in the join peek forward to figure
+            // out the bisector, and the final two peek backward.
+            int peekDir = 2;
+            if ((contourIDWithFlags & JOIN_TANGENT_0_CONTOUR_FLAG) == 0u)
+                peekDir = -peekDir;
+            if ((contourIDWithFlags & MIRRORED_CONTOUR_CONTOUR_FLAG) != 0u)
+                peekDir = -peekDir;
+            int2 otherJoinTexelCoord = tess_texel_coord(tessVertexIdx + peekDir);
+            uint4 otherJoinData = TEXEL_FETCH(_EXPORTED_tessVertexTexture, otherJoinTexelCoord);
+            float otherJoinTheta = uintBitsToFloat(otherJoinData.z);
+            float joinAngle = abs(otherJoinTheta - theta);
+            if (joinAngle > PI)
+                joinAngle = 2. * PI - joinAngle;
+            bool isTan0 = (contourIDWithFlags & JOIN_TANGENT_0_CONTOUR_FLAG) != 0u;
+            bool isLeftJoin = (contourIDWithFlags & LEFT_JOIN_CONTOUR_FLAG) != 0u;
+            float bisectTheta = joinAngle * (isTan0 == isLeftJoin ? -.5 : .5) + theta;
+            float2 bisector = float2(sin(bisectTheta), -cos(bisectTheta));
+            float bisectPixelWidth = manhattan_pixel_width(M, bisector);
+
+            // Generalize everything to a "miter-clip", which is proposed in the SVG-2 draft. Bevel
+            // joins are converted to miter-clip joins with a miter limit of 1/2 pixel. They
+            // technically bleed out 1/2 pixel when drawn this way, but they seem to look fine and
+            // there is not an obvious solution to antialias them without an ink bleed.
+            float miterRatio = cos(joinAngle * .5);
+            float clipRadius;
+            if ((joinType == MITER_CLIP_JOIN_CONTOUR_FLAG) ||
+                (joinType == MITER_REVERT_JOIN_CONTOUR_FLAG && miterRatio >= .25))
+            {
+                // Miter! (Or square cap.)
+                // We currently use hard coded miter limits:
+                //   * 1 for square caps being emulated as miter-clip joins.
+                //   * 4, which is the SVG default, for all other miter joins.
+                float miterInverseLimit =
+                    (contourIDWithFlags & EMULATED_STROKE_CAP_CONTOUR_FLAG) != 0u ? 1. : .25;
+                clipRadius = strokeRadius * (1. / max(miterRatio, miterInverseLimit));
+            }
+            else
+            {
+                // Bevel! (Or butt cap.)
+                clipRadius = strokeRadius * miterRatio + /* 1/2px bleed! */ bisectPixelWidth * .5;
+            }
+            float clipAARadius = clipRadius + bisectPixelWidth * AA_RADIUS;
+            if ((contourIDWithFlags & JOIN_TANGENT_INNER_CONTOUR_FLAG) != 0u)
+            {
+                // Reposition the inner join vertices at the miter-clip positions. Leave the outer
+                // join vertices as duplicates on the surrounding curve endpoints. We emit duplicate
+                // vertex positions because we need a hard stop on the clip distance (see below).
+                //
+                // Use aaRadius here because we're tracking AA on the mitered edge, NOT the outer
+                // clip edge.
+                float strokeAARadius = strokeRadius + aaRadius;
+                // clipAARadius must be 1/16 of an AA ramp (~1/16 pixel) longer than the miter
+                // length before we start clipping, to ensure we are solving for a numerically
+                // stable intersection.
+                float slop = aaRadius * .125;
+                if (strokeAARadius <= clipAARadius * miterRatio + slop)
+                {
+                    // The miter point is before the clip line. Extend out to the miter point.
+                    float miterAARadius = strokeAARadius * (1. / miterRatio);
+                    vertexOffset = bisector * miterAARadius;
+                }
+                else
+                {
+                    // The clip line is before the miter point. Find where the clip line and the
+                    // mitered edge intersect.
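+                    // NOTE (editorial): the solve below finds the point p satisfying
+                    //     dot(p, vertexOffset)   == dot(vertexOffset, vertexOffset)
+                    //     dot(p, bisectAAOffset) == dot(bisectAAOffset, bisectAAOffset)
+                    // i.e., the intersection of the mitered edge with the clip edge,
+                    // each line expressed as the set of points whose projection onto
+                    // its normal equals that normal's length.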
+ float2 bisectAAOffset = bisector * clipAARadius; + float2 k = float2(dot(vertexOffset, vertexOffset), + dot(bisectAAOffset, bisectAAOffset)); + vertexOffset = MUL(k, inverse(float2x2(vertexOffset, bisectAAOffset))); + } + } + // The clip distance tells us how to antialias the outer clipped edge. Since joins only + // emanate from the outset side of the stroke, we can repurpose the inset distance as + // the clip distance. + float2 pt = abs(outset) * vertexOffset; + float clipDistance = + (clipAARadius - dot(pt, bisector)) / (bisectPixelWidth * (AA_RADIUS * 2.)); +#ifndef USING_DEPTH_STENCIL + if ((contourIDWithFlags & LEFT_JOIN_CONTOUR_FLAG) != 0u) + o_edgeDistance.y = cast_float_to_half(clipDistance); + else + o_edgeDistance.x = cast_float_to_half(clipDistance); +#endif + } + +#ifndef USING_DEPTH_STENCIL + o_edgeDistance *= globalCoverage; + + // Bias o_edgeDistance.y slightly upwards in order to guarantee o_edgeDistance.y is >= 0 at + // every pixel. "o_edgeDistance.y < 0" is used to differentiate between strokes and fills. + o_edgeDistance.y = max(o_edgeDistance.y, make_half(1e-4)); +#endif + + postTransformVertexOffset = MUL(M, outset * vertexOffset); + + // Throw away the fan triangles since we're a stroke. + if (vertexType != STROKE_VERTEX) + return false; + } + else // This is a fill. + { + // Place the fan point. + if (vertexType == FAN_MIDPOINT_VERTEX) + origin = midpoint; + + // Offset the vertex for Manhattan AA. + postTransformVertexOffset = sign(MUL(outset * norm, inverse(M))) * AA_RADIUS; + + if ((contourIDWithFlags & MIRRORED_CONTOUR_CONTOUR_FLAG) != 0u) + fillCoverage = -fillCoverage; + +#ifndef USING_DEPTH_STENCIL + // "o_edgeDistance.y < 0" indicates to the fragment shader that this is a fill. + o_edgeDistance = make_half2(fillCoverage, -1.); +#endif + + // If we're actually just drawing a triangle, throw away the entire patch except a single + // fan triangle. + if ((contourIDWithFlags & RETROFITTED_TRIANGLE_CONTOUR_FLAG) != 0u && + vertexType != FAN_VERTEX) + return false; + } + + o_vertexPosition = MUL(M, origin) + postTransformVertexOffset + translate; + return true; +} +#endif // @DRAW_PATH + +#ifdef DRAW_INTERIOR_TRIANGLES +INLINE float2 unpack_interior_triangle_vertex(float3 triangleVertex, + OUT(ushort) o_pathID, + OUT(half) o_windingWeight VERTEX_CONTEXT_DECL) +{ + o_pathID = cast_uint_to_ushort(floatBitsToUint(triangleVertex.z) & 0xffffu); + float2x2 M = make_float2x2(uintBitsToFloat(STORAGE_BUFFER_LOAD4(_EXPORTED_pathBuffer, o_pathID * 2u))); + uint4 pathData = STORAGE_BUFFER_LOAD4(_EXPORTED_pathBuffer, o_pathID * 2u + 1u); + float2 translate = uintBitsToFloat(pathData.xy); + o_windingWeight = + cast_int_to_half(floatBitsToInt(triangleVertex.z) >> 16) * sign(determinant(M)); + return MUL(M, triangleVertex.xy) + translate; +} +#endif // @DRAW_INTERIOR_TRIANGLES + +#endif // @VERTEX diff --git a/Shaders/Private/Rive/Generated/glsl.minified.ush b/Shaders/Private/Rive/Generated/glsl.minified.ush new file mode 100644 index 00000000..a4d59f7a --- /dev/null +++ b/Shaders/Private/Rive/Generated/glsl.minified.ush @@ -0,0 +1,492 @@ +/* + * Copyright 2023 Rive + */ + +// This header provides GLSL-specific #defines and declarations that enable our shaders to be +// compiled on MSL and GLSL both. + +#define GLSL + +#ifndef GLSL_VERSION +// In "#version 320 es", Qualcomm incorrectly substitutes __VERSION__ to 300. @GLSL_VERSION is a +// workaround for this. 
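+// NOTE (editorial): hosts affected by this bug can predefine GLSL_VERSION themselves;
+// the fallback below only applies when it has not already been defined.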
+#define GLSL_VERSION __VERSION__ +#endif + +#define float2 vec2 +#define float3 vec3 +#define packed_float3 vec3 +#define float4 vec4 + +#define half mediump float +#define half2 mediump vec2 +#define half3 mediump vec3 +#define half4 mediump vec4 +#define half3x4 mediump mat3x4 + +#define int2 ivec2 +#define int3 ivec3 +#define int4 ivec4 + +#define short mediump int +#define short2 mediump ivec2 +#define short3 mediump ivec3 +#define short4 mediump ivec4 + +#define uint2 uvec2 +#define uint3 uvec3 +#define uint4 uvec4 + +#define ushort mediump uint +#define ushort2 mediump uvec2 +#define ushort3 mediump uvec3 +#define ushort4 mediump uvec4 + +#define float2x2 mat2 + +#define INLINE +#define OUT(ARG_TYPE) out ARG_TYPE + +#ifdef GL_ANGLE_base_vertex_base_instance_shader_builtin +#extension GL_ANGLE_base_vertex_base_instance_shader_builtin : require +#endif + +#ifdef ENABLE_BINDLESS_TEXTURES +#extension GL_ARB_bindless_texture : require +#endif + +#ifdef ENABLE_KHR_BLEND +#extension GL_KHR_blend_equation_advanced : require +#endif + +#if defined(USING_DEPTH_STENCIL) && defined(ENABLE_CLIP_RECT) +#ifdef GL_EXT_clip_cull_distance +#extension GL_EXT_clip_cull_distance : require +#elif defined(GL_ANGLE_clip_cull_distance) +#extension GL_ANGLE_clip_cull_distance : require +#endif +#endif // USING_DEPTH_STENCIL && ENABLE_CLIP_RECT + +#if GLSL_VERSION >= 310 +#define UNIFORM_BLOCK_BEGIN(IDX, NAME) \ + layout(binding = IDX, std140) uniform NAME \ + { +#else +#define UNIFORM_BLOCK_BEGIN(IDX, NAME) \ + layout(std140) uniform NAME \ + { +#endif +// clang-format barrier... Otherwise it tries to merge this #define into the above macro... +#define UNIFORM_BLOCK_END(NAME) \ + } \ + NAME; + +#define ATTR_BLOCK_BEGIN(NAME) +#define ATTR(IDX, TYPE, NAME) layout(location = IDX) in TYPE NAME +#define ATTR_BLOCK_END +#define ATTR_LOAD(A, B, C, D) +#define ATTR_UNPACK(ID, attrs, NAME, TYPE) + +#ifdef VERTEX +#if GLSL_VERSION >= 310 +#define VARYING(IDX, TYPE, NAME) layout(location = IDX) out TYPE NAME +#else +#define VARYING(IDX, TYPE, NAME) out TYPE NAME +#endif +#else +#if GLSL_VERSION >= 310 +#define VARYING(IDX, TYPE, NAME) layout(location = IDX) in TYPE NAME +#else +#define VARYING(IDX, TYPE, NAME) in TYPE NAME +#endif +#endif +#define FLAT flat +#define VARYING_BLOCK_BEGIN +#define VARYING_BLOCK_END + +// clang-format off +#ifdef TARGET_VULKAN + // Since Vulkan is compiled offline and not all platforms support noperspective, don't use it. 
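+    // NOTE (editorial): this is safe because these draws emit clip-space positions
+    // with w == 1 (see RENDER_TARGET_COORD_TO_CLIP_COORD), so perspective-correct and
+    // linear interpolation produce identical results.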
+#define NO_PERSPECTIVE +#else +#ifdef GL_NV_shader_noperspective_interpolation +#extension GL_NV_shader_noperspective_interpolation : require +#define NO_PERSPECTIVE noperspective +#else +#define NO_PERSPECTIVE +#endif +#endif +// clang-format on + +#ifdef VERTEX +#define VERTEX_TEXTURE_BLOCK_BEGIN +#define VERTEX_TEXTURE_BLOCK_END +#endif + +#ifdef FRAGMENT +#define FRAG_TEXTURE_BLOCK_BEGIN +#define FRAG_TEXTURE_BLOCK_END +#endif + +#ifdef TARGET_VULKAN +#define TEXTURE_RGBA32UI(SET, IDX, NAME) \ + layout(set = SET, binding = IDX) uniform highp utexture2D NAME +#define TEXTURE_RGBA32F(SET, IDX, NAME) \ + layout(set = SET, binding = IDX) uniform highp texture2D NAME +#define TEXTURE_RGBA8(SET, IDX, NAME) \ + layout(set = SET, binding = IDX) uniform mediump texture2D NAME +#elif GLSL_VERSION >= 310 +#define TEXTURE_RGBA32UI(SET, IDX, NAME) layout(binding = IDX) uniform highp usampler2D NAME +#define TEXTURE_RGBA32F(SET, IDX, NAME) layout(binding = IDX) uniform highp sampler2D NAME +#define TEXTURE_RGBA8(SET, IDX, NAME) layout(binding = IDX) uniform mediump sampler2D NAME +#else +#define TEXTURE_RGBA32UI(SET, IDX, NAME) uniform highp usampler2D NAME +#define TEXTURE_RGBA32F(SET, IDX, NAME) uniform highp sampler2D NAME +#define TEXTURE_RGBA8(SET, IDX, NAME) uniform mediump sampler2D NAME +#endif +#define TEXTURE_RG32UI(SET, IDX, NAME) TEXTURE_RGBA32UI(SET, IDX, NAME) + +#ifdef TARGET_VULKAN +#define SAMPLER_LINEAR(TEXTURE_IDX, NAME) \ + layout(set = SAMPLER_BINDINGS_SET, binding = TEXTURE_IDX) uniform mediump sampler NAME; +#define SAMPLER_MIPMAP(TEXTURE_IDX, NAME) \ + layout(set = SAMPLER_BINDINGS_SET, binding = TEXTURE_IDX) uniform mediump sampler NAME; +#define TEXTURE_SAMPLE(NAME, SAMPLER_NAME, COORD) texture(sampler2D(NAME, SAMPLER_NAME), COORD) +#define TEXTURE_SAMPLE_LOD(NAME, SAMPLER_NAME, COORD, LOD) \ + textureLod(sampler2D(NAME, SAMPLER_NAME), COORD, LOD) +#define TEXTURE_SAMPLE_GRAD(NAME, SAMPLER_NAME, COORD, DDX, DDY) \ + textureGrad(sampler2D(NAME, SAMPLER_NAME), COORD, DDX, DDY) +#else +// SAMPLER_LINEAR and SAMPLER_MIPMAP are no-ops because in GL, sampling parameters are API-level +// state tied to the texture. 
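+// NOTE (editorial): the SAMPLER_NAME argument is still threaded through the
+// TEXTURE_SAMPLE* macros so the Vulkan branch above can pair each texture with a real
+// sampler object; this GL branch simply ignores it.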
+#define SAMPLER_LINEAR(TEXTURE_IDX, NAME) +#define SAMPLER_MIPMAP(TEXTURE_IDX, NAME) +#define TEXTURE_SAMPLE(NAME, SAMPLER_NAME, COORD) texture(NAME, COORD) +#define TEXTURE_SAMPLE_LOD(NAME, SAMPLER_NAME, COORD, LOD) textureLod(NAME, COORD, LOD) +#define TEXTURE_SAMPLE_GRAD(NAME, SAMPLER_NAME, COORD, DDX, DDY) textureGrad(NAME, COORD, DDX, DDY) +#endif + +#define TEXEL_FETCH(NAME, COORD) texelFetch(NAME, COORD, 0) + +#define VERTEX_STORAGE_BUFFER_BLOCK_BEGIN +#define VERTEX_STORAGE_BUFFER_BLOCK_END + +#define FRAG_STORAGE_BUFFER_BLOCK_BEGIN +#define FRAG_STORAGE_BUFFER_BLOCK_END + +#ifdef DISABLE_SHADER_STORAGE_BUFFERS + +#define STORAGE_BUFFER_U32x2(IDX, GLSL_STRUCT_NAME, NAME) \ + TEXTURE_RGBA32UI(PER_FLUSH_BINDINGS_SET, IDX, NAME) +#define STORAGE_BUFFER_U32x4(IDX, GLSL_STRUCT_NAME, NAME) \ + TEXTURE_RG32UI(PER_FLUSH_BINDINGS_SET, IDX, NAME) +#define STORAGE_BUFFER_F32x4(IDX, GLSL_STRUCT_NAME, NAME) \ + TEXTURE_RGBA32F(PER_FLUSH_BINDINGS_SET, IDX, NAME) +#define STORAGE_BUFFER_LOAD4(NAME, I) \ + TEXEL_FETCH(NAME, int2((I)&STORAGE_TEXTURE_MASK_X, (I) >> STORAGE_TEXTURE_SHIFT_Y)) +#define STORAGE_BUFFER_LOAD2(NAME, I) \ + TEXEL_FETCH(NAME, int2((I)&STORAGE_TEXTURE_MASK_X, (I) >> STORAGE_TEXTURE_SHIFT_Y)).xy + +#else + +#ifdef GL_ARB_shader_storage_buffer_object +#extension GL_ARB_shader_storage_buffer_object : require +#endif +#define STORAGE_BUFFER_U32x2(IDX, GLSL_STRUCT_NAME, NAME) \ + layout(std430, binding = IDX) readonly buffer GLSL_STRUCT_NAME { uint2 _values[]; } \ + NAME +#define STORAGE_BUFFER_U32x4(IDX, GLSL_STRUCT_NAME, NAME) \ + layout(std430, binding = IDX) readonly buffer GLSL_STRUCT_NAME { uint4 _values[]; } \ + NAME +#define STORAGE_BUFFER_F32x4(IDX, GLSL_STRUCT_NAME, NAME) \ + layout(std430, binding = IDX) readonly buffer GLSL_STRUCT_NAME { float4 _values[]; } \ + NAME +#define STORAGE_BUFFER_LOAD4(NAME, I) NAME._values[I] +#define STORAGE_BUFFER_LOAD2(NAME, I) NAME._values[I] + +#endif // DISABLE_SHADER_STORAGE_BUFFERS + +// Define macros for implementing pixel local storage based on available extensions. +#ifdef PLS_IMPL_ANGLE + +#extension GL_ANGLE_shader_pixel_local_storage : require + +#define PLS_BLOCK_BEGIN +#define PLS_DECL4F(IDX, NAME) layout(binding = IDX, rgba8) uniform lowp pixelLocalANGLE NAME +#define PLS_DECLUI(IDX, NAME) layout(binding = IDX, r32ui) uniform highp upixelLocalANGLE NAME +#define PLS_BLOCK_END + +#define PLS_LOAD4F(PLANE) pixelLocalLoadANGLE(PLANE) +#define PLS_LOADUI(PLANE) pixelLocalLoadANGLE(PLANE).x +#define PLS_STORE4F(PLANE, VALUE) pixelLocalStoreANGLE(PLANE, VALUE) +#define PLS_STOREUI(PLANE, VALUE) pixelLocalStoreANGLE(PLANE, uvec4(VALUE)) + +#define PLS_PRESERVE_4F(PLANE) +#define PLS_PRESERVE_UI(PLANE) + +#define PLS_INTERLOCK_BEGIN +#define PLS_INTERLOCK_END + +#endif // PLS_IMPL_ANGLE + +#ifdef PLS_IMPL_EXT_NATIVE + +#extension GL_EXT_shader_pixel_local_storage : enable + +// We need one of the framebuffer fetch extensions for the shader that loads the framebuffer. 
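+// NOTE (editorial): both extensions are requested with "enable" rather than
+// "require" so that whichever one the driver actually exposes gets used.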
+#extension GL_ARM_shader_framebuffer_fetch : enable
+#extension GL_EXT_shader_framebuffer_fetch : enable
+
+#define PLS_BLOCK_BEGIN \
+    __pixel_localEXT PLS \
+    {
+#define PLS_DECL4F(IDX, NAME) layout(rgba8) lowp vec4 NAME
+#define PLS_DECLUI(IDX, NAME) layout(r32ui) highp uint NAME
+#define PLS_BLOCK_END \
+    } \
+    ;
+
+#define PLS_LOAD4F(PLANE) PLANE
+#define PLS_LOADUI(PLANE) PLANE
+#define PLS_STORE4F(PLANE, VALUE) PLANE = (VALUE)
+#define PLS_STOREUI(PLANE, VALUE) PLANE = (VALUE)
+
+#define PLS_PRESERVE_4F(PLANE)
+#define PLS_PRESERVE_UI(PLANE)
+
+#define PLS_INTERLOCK_BEGIN
+#define PLS_INTERLOCK_END
+
+#endif
+
+#ifdef PLS_IMPL_FRAMEBUFFER_FETCH
+
+#extension GL_EXT_shader_framebuffer_fetch : require
+
+#define PLS_BLOCK_BEGIN
+#define PLS_DECL4F(IDX, NAME) layout(location = IDX) inout lowp vec4 NAME
+#define PLS_DECLUI(IDX, NAME) layout(location = IDX) inout highp uvec4 NAME
+#define PLS_BLOCK_END
+
+#define PLS_LOAD4F(PLANE) PLANE
+#define PLS_LOADUI(PLANE) PLANE.x
+#define PLS_STORE4F(PLANE, VALUE) PLANE = (VALUE)
+#define PLS_STOREUI(PLANE, VALUE) PLANE.x = (VALUE)
+
+// When using multiple color attachments, we have to write a value to every color attachment, every
+// shader invocation, or else the contents become undefined.
+#define PLS_PRESERVE_4F(PLANE) PLS_STORE4F(PLANE, PLS_LOAD4F(PLANE))
+#define PLS_PRESERVE_UI(PLANE) PLS_STOREUI(PLANE, PLS_LOADUI(PLANE))
+
+#define PLS_INTERLOCK_BEGIN
+#define PLS_INTERLOCK_END
+
+#endif // PLS_IMPL_FRAMEBUFFER_FETCH
+
+#ifdef PLS_IMPL_STORAGE_TEXTURE
+
+#ifdef GL_ARB_shader_image_load_store
+#extension GL_ARB_shader_image_load_store : require
+#endif
+#if defined(GL_ARB_fragment_shader_interlock)
+#extension GL_ARB_fragment_shader_interlock : require
+#define PLS_INTERLOCK_BEGIN beginInvocationInterlockARB()
+#define PLS_INTERLOCK_END endInvocationInterlockARB()
+#elif defined(GL_INTEL_fragment_shader_ordering)
+#extension GL_INTEL_fragment_shader_ordering : require
+#define PLS_INTERLOCK_BEGIN beginFragmentShaderOrderingINTEL()
+#define PLS_INTERLOCK_END
+#else
+#define PLS_INTERLOCK_BEGIN
+#define PLS_INTERLOCK_END
+#endif
+
+#define PLS_BLOCK_BEGIN
+#ifdef TARGET_VULKAN
+#define PLS_DECL4F(IDX, NAME) \
+    layout(set = PLS_TEXTURE_BINDINGS_SET, binding = IDX, rgba8) uniform lowp coherent image2D NAME
+#define PLS_DECLUI(IDX, NAME) \
+    layout(set = PLS_TEXTURE_BINDINGS_SET, binding = IDX, r32ui) \
+        uniform highp coherent uimage2D NAME
+#else
+#define PLS_DECL4F(IDX, NAME) layout(binding = IDX, rgba8) uniform lowp coherent image2D NAME
+#define PLS_DECLUI(IDX, NAME) layout(binding = IDX, r32ui) uniform highp coherent uimage2D NAME
+#endif
+#define PLS_BLOCK_END
+
+#define PLS_LOAD4F(PLANE) imageLoad(PLANE, _plsCoord)
+#define PLS_LOADUI(PLANE) imageLoad(PLANE, _plsCoord).x
+#define PLS_STORE4F(PLANE, VALUE) imageStore(PLANE, _plsCoord, VALUE)
+#define PLS_STOREUI(PLANE, VALUE) imageStore(PLANE, _plsCoord, uvec4(VALUE))
+
+#define PLS_PRESERVE_4F(PLANE)
+#define PLS_PRESERVE_UI(PLANE)
+
+#ifndef USING_PLS_STORAGE_TEXTURES
+#define USING_PLS_STORAGE_TEXTURES
+
+#endif // USING_PLS_STORAGE_TEXTURES
+
+#endif // PLS_IMPL_STORAGE_TEXTURE
+
+#ifdef PLS_IMPL_SUBPASS_LOAD
+
+#define PLS_BLOCK_BEGIN
+#define PLS_DECL4F(IDX, NAME) \
+    layout(input_attachment_index = IDX, binding = IDX, set = PLS_TEXTURE_BINDINGS_SET) \
+        uniform lowp subpassInput _in_##NAME; \
+    layout(location = IDX) out lowp vec4 NAME
+#define PLS_DECLUI(IDX, NAME) \
+    layout(input_attachment_index = IDX, binding = IDX, set = PLS_TEXTURE_BINDINGS_SET) \
+        uniform highp usubpassInput _in_##NAME; \
usubpassInput _in_##NAME; \ + layout(location = IDX) out highp uvec4 NAME +#define PLS_BLOCK_END + +#define PLS_LOAD4F(PLANE) subpassLoad(_in_##PLANE) +#define PLS_LOADUI(PLANE) subpassLoad(_in_##PLANE).x +#define PLS_STORE4F(PLANE, VALUE) PLANE = (VALUE) +#define PLS_STOREUI(PLANE, VALUE) PLANE.x = (VALUE) + +#define PLS_PRESERVE_4F(PLANE) PLS_STORE4F(PLANE, subpassLoad(_in_##PLANE)) +#define PLS_PRESERVE_UI(PLANE) PLS_STOREUI(PLANE, subpassLoad(_in_##PLANE).x) + +#define PLS_INTERLOCK_BEGIN +#define PLS_INTERLOCK_END + +#endif + +#ifdef PLS_IMPL_NONE + +#define PLS_BLOCK_BEGIN +#define PLS_DECL4F(IDX, NAME) layout(location = IDX) out lowp vec4 NAME +#define PLS_DECLUI(IDX, NAME) layout(location = IDX) out highp uvec4 NAME +#define PLS_BLOCK_END + +#define PLS_LOAD4F(PLANE) vec4(0) +#define PLS_LOADUI(PLANE) 0u +#define PLS_STORE4F(PLANE, VALUE) PLANE = (VALUE) +#define PLS_STOREUI(PLANE, VALUE) PLANE.x = (VALUE) + +#define PLS_PRESERVE_4F(PLANE) PLANE = vec4(1, 0, 1, 1) +#define PLS_PRESERVE_UI(PLANE) PLANE.x = 0u + +#define PLS_INTERLOCK_BEGIN +#define PLS_INTERLOCK_END + +#endif + +#ifdef TARGET_VULKAN +#define gl_VertexID gl_VertexIndex +#endif + +// clang-format off +#ifdef ENABLE_INSTANCE_INDEX +#ifdef TARGET_VULKAN +#define INSTANCE_INDEX gl_InstanceIndex +#else +#ifdef ENABLE_SPIRV_CROSS_BASE_INSTANCE + // This uniform is specifically named "SPIRV_Cross_BaseInstance" for compatibility with + // SPIRV-Cross sytems that search for it by name. + uniform int SPIRV_Cross_BaseInstance; +#define INSTANCE_INDEX (gl_InstanceID + SPIRV_Cross_BaseInstance) +#else +#define INSTANCE_INDEX (gl_InstanceID + gl_BaseInstance) +#endif +#endif +#else +#define INSTANCE_INDEX 0 +#endif +// clang-format on + +#define VERTEX_CONTEXT_DECL +#define VERTEX_CONTEXT_UNPACK + +#define VERTEX_MAIN(NAME, Attrs, attrs, _vertexID, _instanceID) \ + void main() \ + { \ + int _vertexID = gl_VertexID; \ + int _instanceID = INSTANCE_INDEX; + +#define IMAGE_RECT_VERTEX_MAIN VERTEX_MAIN + +#define IMAGE_MESH_VERTEX_MAIN(NAME, PositionAttr, position, UVAttr, uv, _vertexID) \ + VERTEX_MAIN(NAME, PositionAttr, position, _vertexID, _instanceID) + +#define VARYING_INIT(NAME, TYPE) +#define VARYING_PACK(NAME) +#define VARYING_UNPACK(NAME, TYPE) + +#define EMIT_VERTEX(_pos) \ + gl_Position = _pos; \ + } + +#define FRAG_DATA_MAIN(DATA_TYPE, NAME) \ + layout(location = 0) out DATA_TYPE _fd; \ + void main() + +#define EMIT_FRAG_DATA(VALUE) _fd = VALUE + +#define _fragCoord gl_FragCoord.xy + +#define FRAGMENT_CONTEXT_DECL +#define FRAGMENT_CONTEXT_UNPACK + +#ifdef USING_PLS_STORAGE_TEXTURES + +#define PLS_DECLUI_ATOMIC(IDX, NAME) \ + layout(set = PLS_TEXTURE_BINDINGS_SET, binding = IDX, r32ui) \ + uniform highp coherent uimage2D NAME +#define PLS_LOADUI_ATOMIC(PLANE) imageLoad(PLANE, _plsCoord).x +#define PLS_STOREUI_ATOMIC(PLANE, VALUE) imageStore(PLANE, _plsCoord, uvec4(VALUE)) +#define PLS_ATOMIC_MAX(PLANE, X) imageAtomicMax(PLANE, _plsCoord, X) +#define PLS_ATOMIC_ADD(PLANE, X) imageAtomicAdd(PLANE, _plsCoord, X) + +#define PLS_CONTEXT_DECL , int2 _plsCoord +#define PLS_CONTEXT_UNPACK , _plsCoord + +#define PLS_MAIN(NAME) \ + void main() \ + { \ + int2 _plsCoord = ivec2(floor(_fragCoord)); + +#define EMIT_PLS } + +#else // !USING_PLS_STORAGE_TEXTURES + +#define PLS_CONTEXT_DECL +#define PLS_CONTEXT_UNPACK + +#define PLS_MAIN(NAME) void main() +#define EMIT_PLS + +#endif // !USING_PLS_STORAGE_TEXTURES + +#define PLS_MAIN_WITH_IMAGE_UNIFORMS(NAME) PLS_MAIN(NAME) + +#define PLS_FRAG_COLOR_MAIN(NAME) \ + layout(location = 0) out 
half4 _fragColor; \ + PLS_MAIN(NAME) + +#define PLS_FRAG_COLOR_MAIN_WITH_IMAGE_UNIFORMS(NAME) \ + layout(location = 0) out half4 _fragColor; \ + PLS_MAIN(NAME) + +#define EMIT_PLS_AND_FRAG_COLOR EMIT_PLS + +#define MUL(A, B) ((A) * (B)) + +#ifndef TARGET_VULKAN +#define FRAG_COORD_BOTTOM_UP +#endif + +precision highp float; +precision highp int; + +#if GLSL_VERSION < 310 +// Polyfill ES 3.1+ methods. +INLINE half4 unpackUnorm4x8(uint u) +{ + uint4 vals = uint4(u & 0xffu, (u >> 8) & 0xffu, (u >> 16) & 0xffu, u >> 24); + return float4(vals) * (1. / 255.); +} +#endif diff --git a/Shaders/Private/Rive/Generated/hlsl.minified.ush b/Shaders/Private/Rive/Generated/hlsl.minified.ush new file mode 100644 index 00000000..5d1a97cb --- /dev/null +++ b/Shaders/Private/Rive/Generated/hlsl.minified.ush @@ -0,0 +1,385 @@ +/* + * Copyright 2023 Rive + */ + +// This header provides GLSL-specific #defines and declarations that enable our shaders to be +// compiled on MSL and GLSL both. + +// HLSL warns that it will unroll the loops through r,g,b values in advanced_blend.glsl, but +// unrolling these loops is exactly what we want. +#pragma warning(disable : 3550) + +// Don't warn about uninitialized variables. If we leave one uninitialized it's because we know what +// we're doing and don't want to pay the cost of initializing it. +#pragma warning(disable : 4000) + +// #define native hlsl types if their names are being rewritten. +#define _ARE_TOKEN_NAMES_PRESERVED +#ifndef _ARE_TOKEN_NAMES_PRESERVED +#define half half +#define half2 half2 +#define half3 half3 +#define half4 half4 +#define short short +#define short2 short2 +#define short3 short3 +#define short4 short4 +#define ushort ushort +#define ushort2 ushort2 +#define ushort3 ushort3 +#define ushort4 ushort4 +#define float2 float2 +#define float3 float3 +#define float4 float4 +#define bool2 bool2 +#define bool3 bool3 +#define bool4 bool4 +#define uint2 uint2 +#define uint3 uint3 +#define uint4 uint4 +#define int2 int2 +#define int3 int3 +#define int4 int4 +#define float4x2 float4x2 +#define ushort ushort +#define float2x2 float2x2 +#define half3x4 half3x4 +#endif + +typedef float3 packed_float3; + +#ifdef ENABLE_MIN_16_PRECISION + +typedef min16int short; + +typedef min16uint ushort; + +#else + +typedef int short; + +typedef uint ushort; + +#endif + +#define INLINE inline +#define OUT(ARG_TYPE) out ARG_TYPE + +#define ATTR_BLOCK_BEGIN(NAME) \ + struct NAME \ + { +#define ATTR(IDX, TYPE, NAME) TYPE NAME : NAME +#define ATTR_BLOCK_END \ + } \ + ; +#define ATTR_LOAD(T, A, N, I) +#define ATTR_UNPACK(ID, attrs, NAME, TYPE) TYPE NAME = attrs.NAME + +#define UNIFORM_BUFFER_REGISTER(IDX) register(b##IDX) + +#define UNIFORM_BLOCK_BEGIN(IDX, NAME) \ + cbuffer NAME : UNIFORM_BUFFER_REGISTER(IDX) \ + { \ + struct \ + { + +#define UNIFORM_BLOCK_END(NAME) \ + } \ + NAME; \ + } + +#define VARYING_BLOCK_BEGIN \ + struct Varyings \ + { + +#define NO_PERSPECTIVE noperspective +#define OPTIONALLY_FLAT nointerpolation +#define FLAT nointerpolation +#define VARYING(IDX, TYPE, NAME) TYPE NAME : TEXCOORD##IDX + +#define VARYING_BLOCK_END \ + float4 _pos : SV_Position; \ + } \ + ; + +#define VARYING_INIT(NAME, TYPE) TYPE NAME +#define VARYING_PACK(NAME) _varyings.NAME = NAME +#define VARYING_UNPACK(NAME, TYPE) TYPE NAME = _varyings.NAME + +#ifdef VERTEX +#define VERTEX_TEXTURE_BLOCK_BEGIN +#define VERTEX_TEXTURE_BLOCK_END +#endif + +#ifdef FRAGMENT +#define FRAG_TEXTURE_BLOCK_BEGIN +#define FRAG_TEXTURE_BLOCK_END +#endif + +#define TEXTURE_RGBA32UI(SET, IDX, NAME) uniform 
Texture2D<uint4> NAME : register(t##IDX)
+#define TEXTURE_RGBA32F(SET, IDX, NAME) uniform Texture2D<float4> NAME : register(t##IDX)
+#define TEXTURE_RGBA8(SET, IDX, NAME) uniform Texture2D<unorm half4> NAME : register(t##IDX)
+
+// SAMPLER_LINEAR and SAMPLER_MIPMAP are the same because in d3d11, sampler parameters are defined
+// at the API level.
+#define SAMPLER(TEXTURE_IDX, NAME) SamplerState NAME : register(s##TEXTURE_IDX);
+#define SAMPLER_LINEAR SAMPLER
+#define SAMPLER_MIPMAP SAMPLER
+
+#define TEXEL_FETCH(NAME, COORD) NAME[COORD]
+#define TEXTURE_SAMPLE(NAME, SAMPLER_NAME, COORD) NAME.Sample(SAMPLER_NAME, COORD)
+#define TEXTURE_SAMPLE_LOD(NAME, SAMPLER_NAME, COORD, LOD) \
+    NAME.SampleLevel(SAMPLER_NAME, COORD, LOD)
+#define TEXTURE_SAMPLE_GRAD(NAME, SAMPLER_NAME, COORD, DDX, DDY) \
+    NAME.SampleGrad(SAMPLER_NAME, COORD, DDX, DDY)
+
+#define PLS_INTERLOCK_BEGIN
+#define PLS_INTERLOCK_END
+
+#ifdef ENABLE_RASTERIZER_ORDERED_VIEWS
+#define PLS_TEX2D RasterizerOrderedTexture2D
+#else
+#define PLS_TEX2D RWTexture2D
+#endif
+
+#define PLS_BLOCK_BEGIN
+#ifdef ENABLE_TYPED_UAV_LOAD_STORE
+#define PLS_DECL4F(IDX, NAME) uniform PLS_TEX2D<unorm half4> NAME : register(u##IDX)
+#else
+#define PLS_DECL4F(IDX, NAME) uniform PLS_TEX2D<uint> NAME : register(u##IDX)
+#endif
+#define PLS_DECLUI(IDX, NAME) uniform PLS_TEX2D<uint> NAME : register(u##IDX)
+#define PLS_DECLUI_ATOMIC PLS_DECLUI
+#define PLS_LOADUI_ATOMIC PLS_LOADUI
+#define PLS_STOREUI_ATOMIC PLS_STOREUI
+#define PLS_BLOCK_END
+
+#ifdef ENABLE_TYPED_UAV_LOAD_STORE
+#define PLS_LOAD4F(PLANE) PLANE[_plsCoord]
+#else
+#define PLS_LOAD4F(PLANE) unpackUnorm4x8(PLANE[_plsCoord])
+#endif
+#define PLS_LOADUI(PLANE) PLANE[_plsCoord]
+#ifdef ENABLE_TYPED_UAV_LOAD_STORE
+#define PLS_STORE4F(PLANE, VALUE) PLANE[_plsCoord] = (VALUE)
+#else
+#define PLS_STORE4F(PLANE, VALUE) PLANE[_plsCoord] = packUnorm4x8(VALUE)
+#endif
+#define PLS_STOREUI(PLANE, VALUE) PLANE[_plsCoord] = (VALUE)
+
+INLINE uint pls_atomic_max(PLS_TEX2D<uint> plane, int2 _plsCoord, uint x)
+{
+    uint originalValue;
+    InterlockedMax(plane[_plsCoord], x, originalValue);
+    return originalValue;
+}
+
+#define PLS_ATOMIC_MAX(PLANE, X) pls_atomic_max(PLANE, _plsCoord, X)
+
+INLINE uint pls_atomic_add(PLS_TEX2D<uint> plane, int2 _plsCoord, uint x)
+{
+    uint originalValue;
+    InterlockedAdd(plane[_plsCoord], x, originalValue);
+    return originalValue;
+}
+
+#define PLS_ATOMIC_ADD(PLANE, X) pls_atomic_add(PLANE, _plsCoord, X)
+
+#define PLS_PRESERVE_4F(PLANE)
+#define PLS_PRESERVE_UI(PLANE)
+
+#define VERTEX_CONTEXT_DECL
+#define VERTEX_CONTEXT_UNPACK
+
+#define VERTEX_MAIN(NAME, Attrs, attrs, _vertexID, _instanceID) \
+    cbuffer DrawUniforms : UNIFORM_BUFFER_REGISTER(PATH_BASE_INSTANCE_UNIFORM_BUFFER_IDX) \
+    { \
+        uint baseInstance; \
+        uint NAME##_pad0; \
+        uint NAME##_pad1; \
+        uint NAME##_pad2; \
+    } \
+    Varyings NAME(Attrs attrs, uint _vertexID \
+                  : SV_VertexID, uint _instanceIDWithoutBase \
+                  : SV_InstanceID) \
+    { \
+        uint _instanceID = _instanceIDWithoutBase + baseInstance; \
+        Varyings _varyings;
+
+#define IMAGE_RECT_VERTEX_MAIN(NAME, Attrs, attrs, _vertexID, _instanceID) \
+    Varyings NAME(Attrs attrs, uint _vertexID : SV_VertexID) \
+    { \
+        Varyings _varyings; \
+        float4 _pos;
+
+#define IMAGE_MESH_VERTEX_MAIN(NAME, PositionAttr, position, UVAttr, uv, _vertexID) \
+    Varyings NAME(PositionAttr position, UVAttr uv, uint _vertexID : SV_VertexID) \
+    { \
+        Varyings _varyings; \
+        float4 _pos;
+
+#define EMIT_VERTEX(POSITION) \
+    _varyings._pos = POSITION; \
+    } \
+    return _varyings;
+
+#define FRAG_DATA_MAIN(DATA_TYPE, NAME) \
+    DATA_TYPE NAME(Varyings _varyings) : SV_Target \
+    {
+
+#define EMIT_FRAG_DATA(VALUE) \
+    return VALUE; \
+    }
+
+#define FRAGMENT_CONTEXT_DECL , float2 _fragCoord
+#define FRAGMENT_CONTEXT_UNPACK , _fragCoord
+
+#define PLS_CONTEXT_DECL , int2 _plsCoord
+#define PLS_CONTEXT_UNPACK , _plsCoord
+
+#define PLS_MAIN(NAME) [earlydepthstencil] void NAME(Varyings _varyings) { \
+    float2 _fragCoord = _varyings._pos.xy;\
+    int2 _plsCoord = int2(floor(_fragCoord));
+
+#define PLS_MAIN_WITH_IMAGE_UNIFORMS(NAME) PLS_MAIN(NAME)
+
+#define EMIT_PLS }
+
+#define PLS_FRAG_COLOR_MAIN(NAME) \
+    [earlydepthstencil] half4 NAME(Varyings _varyings) : SV_Target \
+    { \
+        float2 _fragCoord = _varyings._pos.xy; \
+        int2 _plsCoord = int2(floor(_fragCoord)); \
+        half4 _fragColor;
+
+#define PLS_FRAG_COLOR_MAIN_WITH_IMAGE_UNIFORMS(NAME) PLS_FRAG_COLOR_MAIN(NAME)
+
+#define EMIT_PLS_AND_FRAG_COLOR \
+    } \
+    return _fragColor;
+
+#define uintBitsToFloat asfloat
+#define intBitsToFloat asfloat
+#define floatBitsToInt asint
+#define floatBitsToUint asuint
+#define inversesqrt rsqrt
+#define notEqual(A, B) ((A) != (B))
+#define lessThanEqual(A, B) ((A) <= (B))
+#define greaterThanEqual(A, B) ((A) >= (B))
+
+// HLSL matrices are stored in row-major order, and therefore transposed from their counterparts
+// in GLSL and Metal. We can work around this entirely by reversing the arguments to mul().
+#define MUL(A, B) mul(B, A)
+
+#define VERTEX_STORAGE_BUFFER_BLOCK_BEGIN
+#define VERTEX_STORAGE_BUFFER_BLOCK_END
+
+#define FRAG_STORAGE_BUFFER_BLOCK_BEGIN
+#define FRAG_STORAGE_BUFFER_BLOCK_END
+
+#define STORAGE_BUFFER_U32x2(IDX, GLSL_STRUCT_NAME, NAME) \
+    StructuredBuffer<uint2> NAME : register(t##IDX)
+#define STORAGE_BUFFER_U32x4(IDX, GLSL_STRUCT_NAME, NAME) \
+    StructuredBuffer<uint4> NAME : register(t##IDX)
+#define STORAGE_BUFFER_F32x4(IDX, GLSL_STRUCT_NAME, NAME) \
+    StructuredBuffer<float4> NAME : register(t##IDX)
+
+#define STORAGE_BUFFER_LOAD4(NAME, I) NAME[I]
+#define STORAGE_BUFFER_LOAD2(NAME, I) NAME[I]
+
+INLINE half2 unpackHalf2x16(uint u)
+{
+    uint y = (u >> 16);
+    uint x = u & 0xffffu;
+    return half2(f16tof32(x), f16tof32(y));
+}
+
+INLINE uint packHalf2x16(float2 v)
+{
+    uint x = f32tof16(v.x);
+    uint y = f32tof16(v.y);
+    return (y << 16) | x;
+}
+
+INLINE half4 unpackUnorm4x8(uint u)
+{
+    uint4 vals = uint4(u & 0xffu, (u >> 8) & 0xffu, (u >> 16) & 0xffu, u >> 24);
+    return half4(vals) * (1. / 255.);
+}
+
+INLINE uint packUnorm4x8(half4 color)
+{
+    uint4 vals = (uint4(color * 255.) & 0xff) << uint4(0, 8, 16, 24);
+    vals.xy |= vals.zw;
+    vals.x |= vals.y;
+    return vals.x;
+}
+
+INLINE float atan(float y, float x) { return atan2(y, x); }
+
+INLINE float2x2 inverse(float2x2 m)
+{
+    float2x2 adjoint = float2x2(m[1][1], -m[0][1], -m[1][0], m[0][0]);
+    return adjoint * (1.
/ determinant(m)); +} + +// Redirects for intrinsics that have different names in HLSL + +INLINE float mix(float x, float y, float s) { return lerp(x, y, s); } +INLINE float2 mix(float2 x, float2 y, float2 s) { return lerp(x, y, s); } +INLINE float3 mix(float3 x, float3 y, float3 s) { return lerp(x, y, s); } +INLINE float4 mix(float4 x, float4 y, float4 s) { return lerp(x, y, s); } + +INLINE half mix(half x, half y, half s) { return x + s * (y - x); } +INLINE half2 mix(half2 x, half2 y, half2 s) { return x + s * (y - x); } +INLINE half3 mix(half3 x, half3 y, half3 s) { return x + s * (y - x); } +INLINE half4 mix(half4 x, half4 y, half4 s) { return x + s * (y - x); } + +INLINE float fract(float x) { return frac(x); } +INLINE float2 fract(float2 x) { return frac(x); } +INLINE float3 fract(float3 x) { return frac(x); } +INLINE float4 fract(float4 x) { return frac(x); } + +INLINE half fract(half x) { return frac(x); } +INLINE half2 fract(half2 x) { return half2(frac(x)); } +INLINE half3 fract(half3 x) { return half3(frac(x)); } +INLINE half4 fract(half4 x) { return half4(frac(x)); } + +// Reimplement intrinsics for half types. +// This shadows the intrinsic function for floats, so we also have to declare that overload. + +INLINE half rive_sign(half x) { return sign(x); } +INLINE half2 rive_sign(half2 x) { return half2(sign(x)); } +INLINE half3 rive_sign(half3 x) { return half3(sign(x)); } +INLINE half4 rive_sign(half4 x) { return half4(sign(x)); } + +INLINE float rive_sign(float x) { return sign(x); } +INLINE float2 rive_sign(float2 x) { return sign(x); } +INLINE float3 rive_sign(float3 x) { return sign(x); } +INLINE float4 rive_sign(float4 x) { return sign(x); } + +#define sign rive_sign + +INLINE half rive_abs(half x) { return abs(x); } +INLINE half2 rive_abs(half2 x) { return half2(abs(x)); } +INLINE half3 rive_abs(half3 x) { return half3(abs(x)); } +INLINE half4 rive_abs(half4 x) { return half4(abs(x)); } + +INLINE float rive_abs(float x) { return abs(x); } +INLINE float2 rive_abs(float2 x) { return abs(x); } +INLINE float3 rive_abs(float3 x) { return abs(x); } +INLINE float4 rive_abs(float4 x) { return abs(x); } + +#define abs rive_abs + +INLINE half rive_sqrt(half x) { return sqrt(x); } +INLINE half2 rive_sqrt(half2 x) { return half2(sqrt(x)); } +INLINE half3 rive_sqrt(half3 x) { return half3(sqrt(x)); } +INLINE half4 rive_sqrt(half4 x) { return half4(sqrt(x)); } + +INLINE float rive_sqrt(float x) { return sqrt(x); } +INLINE float2 rive_sqrt(float2 x) { return sqrt(x); } +INLINE float3 rive_sqrt(float3 x) { return sqrt(x); } +INLINE float4 rive_sqrt(float4 x) { return sqrt(x); } + +#define sqrt rive_sqrt diff --git a/Shaders/Private/Rive/Generated/metal.minified.ush b/Shaders/Private/Rive/Generated/metal.minified.ush new file mode 100644 index 00000000..d820477b --- /dev/null +++ b/Shaders/Private/Rive/Generated/metal.minified.ush @@ -0,0 +1,447 @@ +/* + * Copyright 2023 Rive + */ + +// This header provides Metal-specific #defines and declarations that enable our shaders to be +// compiled on MSL and GLSL both. + +#define METAL + +// #define native metal types if their names are being rewritten. 
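+// (_ARE_TOKEN_NAMES_PRESERVED is defined unconditionally just below, so the rename block is
+// normally skipped; a build that does rewrite these type names would strip that first #define.)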
+#define _ARE_TOKEN_NAMES_PRESERVED
+#ifndef _ARE_TOKEN_NAMES_PRESERVED
+#define half half
+#define half2 half2
+#define half3 half3
+#define half4 half4
+#define short short
+#define short2 short2
+#define short3 short3
+#define short4 short4
+#define ushort ushort
+#define ushort2 ushort2
+#define ushort3 ushort3
+#define ushort4 ushort4
+#define float2 float2
+#define float3 float3
+#define packed_float3 packed_float3
+#define float4 float4
+#define bool2 bool2
+#define bool3 bool3
+#define bool4 bool4
+#define uint2 uint2
+#define uint3 uint3
+#define uint4 uint4
+#define int2 int2
+#define int3 int3
+#define int4 int4
+#define float4x2 float4x2
+#define ushort ushort
+#define float2x2 float2x2
+#define half3x4 half3x4
+#endif
+
+#define INLINE inline
+#define OUT(ARG_TYPE) thread ARG_TYPE&
+
+#define notEqual(A, B) ((A) != (B))
+#define lessThanEqual(A, B) ((A) <= (B))
+#define greaterThanEqual(A, B) ((A) >= (B))
+#define MUL(A, B) ((A) * (B))
+#define atan atan2
+#define inversesqrt rsqrt
+
+#define UNIFORM_BLOCK_BEGIN(IDX, NAME) \
+    struct NAME \
+    {
+#define UNIFORM_BLOCK_END(NAME) \
+    } \
+    ;
+
+#define ATTR_BLOCK_BEGIN(NAME) \
+    struct NAME \
+    {
+#define ATTR(IDX, TYPE, NAME) TYPE NAME
+#define ATTR_BLOCK_END \
+    } \
+    ;
+#define ATTR_UNPACK(ID, attrs, NAME, TYPE) TYPE NAME = attrs[ID].NAME
+
+#define VARYING_BLOCK_BEGIN \
+    struct Varyings \
+    {
+#define VARYING(IDX, TYPE, NAME) TYPE NAME
+#define FLAT [[flat]]
+#define NO_PERSPECTIVE [[center_no_perspective]]
+#ifndef OPTIONALLY_FLAT
+// Don't use no-perspective interpolation for varyings that need to be flat. No-perspective
+// interpolation appears to break the guarantee that a varying == "x" when all barycentric values
+// also == "x". Default (perspective-correct) interpolation does preserve this guarantee, and seems
+// to be faster than flat on Apple Silicon.
+#define OPTIONALLY_FLAT
+#endif
+#define VARYING_BLOCK_END \
+    float4 _pos [[position]] [[invariant]]; \
+    } \
+    ;
+
+#define VARYING_INIT(NAME, TYPE) thread TYPE& NAME = _varyings.NAME
+#define VARYING_PACK(NAME)
+#define VARYING_UNPACK(NAME, TYPE) TYPE NAME = _varyings.NAME
+
+#define VERTEX_STORAGE_BUFFER_BLOCK_BEGIN \
+    struct VertexStorageBuffers \
+    {
+#define VERTEX_STORAGE_BUFFER_BLOCK_END \
+    } \
+    ;
+
+#define FRAG_STORAGE_BUFFER_BLOCK_BEGIN \
+    struct FragmentStorageBuffers \
+    {
+#define FRAG_STORAGE_BUFFER_BLOCK_END \
+    } \
+    ;
+
+#define STORAGE_BUFFER_U32x2(IDX, GLSL_STRUCT_NAME, NAME) constant uint2* NAME [[buffer(IDX)]]
+#define STORAGE_BUFFER_U32x4(IDX, GLSL_STRUCT_NAME, NAME) constant uint4* NAME [[buffer(IDX)]]
+#define STORAGE_BUFFER_F32x4(IDX, GLSL_STRUCT_NAME, NAME) constant float4* NAME [[buffer(IDX)]]
+#define STORAGE_BUFFER_LOAD4(NAME, I) _buffers.NAME[I]
+#define STORAGE_BUFFER_LOAD2(NAME, I) _buffers.NAME[I]
+
+#define VERTEX_TEXTURE_BLOCK_BEGIN \
+    struct VertexTextures \
+    {
+#define VERTEX_TEXTURE_BLOCK_END \
+    } \
+    ;
+
+#define FRAG_TEXTURE_BLOCK_BEGIN \
+    struct FragmentTextures \
+    {
+#define FRAG_TEXTURE_BLOCK_END \
+    } \
+    ;
+
+#define TEXTURE_RGBA32UI(SET, IDX, NAME) [[texture(IDX)]] texture2d<uint> NAME
+#define TEXTURE_RGBA32F(SET, IDX, NAME) [[texture(IDX)]] texture2d<float> NAME
+#define TEXTURE_RGBA8(SET, IDX, NAME) [[texture(IDX)]] texture2d<half> NAME
+
+#define SAMPLER_LINEAR(TEXTURE_IDX, NAME) \
+    constexpr sampler NAME(filter::linear, mip_filter::none);
+#define SAMPLER_MIPMAP(TEXTURE_IDX, NAME) \
+    constexpr sampler NAME(filter::linear, mip_filter::linear);
+
+#define TEXEL_FETCH(TEXTURE, COORD) _textures.TEXTURE.read(uint2(COORD))
+#define TEXTURE_SAMPLE(TEXTURE, SAMPLER_NAME, COORD) _textures.TEXTURE.sample(SAMPLER_NAME, COORD)
+#define TEXTURE_SAMPLE_LOD(TEXTURE, SAMPLER_NAME, COORD, LOD) \
+    _textures.TEXTURE.sample(SAMPLER_NAME, COORD, level(LOD))
+#define TEXTURE_SAMPLE_GRAD(TEXTURE, SAMPLER_NAME, COORD, DDX, DDY) \
+    _textures.TEXTURE.sample(SAMPLER_NAME, COORD, gradient2d(DDX, DDY))
+
+#define VERTEX_CONTEXT_DECL , VertexTextures _textures, VertexStorageBuffers _buffers
+#define VERTEX_CONTEXT_UNPACK , _textures, _buffers
+
+#ifdef ENABLE_INSTANCE_INDEX
+#define VERTEX_MAIN(NAME, Attrs, attrs, _vertexID, _instanceID) \
+    __attribute__((visibility("default"))) Varyings vertex NAME( \
+        uint _vertexID [[vertex_id]], \
+        uint _instanceID [[instance_id]], \
+        constant uint& _baseInstance [[buffer(PATH_BASE_INSTANCE_UNIFORM_BUFFER_IDX)]], \
+        constant _EXPORTED_FlushUniforms& uniforms [[buffer(FLUSH_UNIFORM_BUFFER_IDX)]], \
+        constant Attrs* attrs [[buffer(0)]] VERTEX_CONTEXT_DECL) \
+    { \
+        _instanceID += _baseInstance; \
+        Varyings _varyings;
+#else
+#define VERTEX_MAIN(NAME, Attrs, attrs, _vertexID, _instanceID) \
+    __attribute__((visibility("default"))) Varyings vertex NAME( \
+        uint _vertexID [[vertex_id]], \
+        uint _instanceID [[instance_id]], \
+        constant _EXPORTED_FlushUniforms& uniforms [[buffer(FLUSH_UNIFORM_BUFFER_IDX)]], \
+        constant Attrs* attrs [[buffer(0)]] VERTEX_CONTEXT_DECL) \
+    { \
+        Varyings _varyings;
+#endif
+
+#define IMAGE_RECT_VERTEX_MAIN(NAME, Attrs, attrs, _vertexID, _instanceID) \
+    __attribute__((visibility("default"))) Varyings vertex NAME( \
+        uint _vertexID [[vertex_id]], \
+        constant _EXPORTED_FlushUniforms& uniforms [[buffer(FLUSH_UNIFORM_BUFFER_IDX)]], \
+        constant _EXPORTED_ImageDrawUniforms& imageDrawUniforms \
+        [[buffer(IMAGE_DRAW_UNIFORM_BUFFER_IDX)]], \
+        constant Attrs* attrs [[buffer(0)]] VERTEX_CONTEXT_DECL) \
{ \ + Varyings _varyings; + +#define IMAGE_MESH_VERTEX_MAIN(NAME, PositionAttr, position, UVAttr, uv, _vertexID) \ + __attribute__((visibility("default"))) Varyings vertex NAME( \ + uint _vertexID [[vertex_id]], \ + constant _EXPORTED_FlushUniforms& uniforms [[buffer(FLUSH_UNIFORM_BUFFER_IDX)]], \ + constant _EXPORTED_ImageDrawUniforms& imageDrawUniforms \ + [[buffer(IMAGE_DRAW_UNIFORM_BUFFER_IDX)]], \ + constant PositionAttr* position [[buffer(0)]], \ + constant UVAttr* uv [[buffer(1)]]) \ + { \ + Varyings _varyings; + +#define EMIT_VERTEX(POSITION) \ + _varyings._pos = POSITION; \ + } \ + return _varyings; + +#define FRAG_DATA_MAIN(DATA_TYPE, NAME) \ + DATA_TYPE __attribute__((visibility("default"))) fragment NAME(Varyings _varyings \ + [[stage_in]]) \ + { + +#define EMIT_FRAG_DATA(VALUE) \ + return VALUE; \ + } + +#define FRAGMENT_CONTEXT_DECL \ + , float2 _fragCoord, FragmentTextures _textures, FragmentStorageBuffers _buffers +#define FRAGMENT_CONTEXT_UNPACK , _fragCoord, _textures, _buffers + +#ifdef PLS_IMPL_DEVICE_BUFFER + +#define PLS_BLOCK_BEGIN \ + struct PLS \ + { +#ifdef PLS_IMPL_DEVICE_BUFFER_RASTER_ORDERED +// Apple Silicon doesn't support fragment-fragment memory barriers, so on this hardware we use +// raster order groups instead. +// Since the PLS plane indices collide with other buffer bindings, offset the binding indices of +// these buffers by DEFAULT_BINDINGS_SET_SIZE. +#define PLS_DECL4F(IDX, NAME) \ + device uint* NAME [[buffer(IDX + DEFAULT_BINDINGS_SET_SIZE), raster_order_group(0)]] +#define PLS_DECLUI(IDX, NAME) \ + device uint* NAME [[buffer(IDX + DEFAULT_BINDINGS_SET_SIZE), raster_order_group(0)]] +#define PLS_DECLUI_ATOMIC(IDX, NAME) \ + device atomic_uint* NAME [[buffer(IDX + DEFAULT_BINDINGS_SET_SIZE), raster_order_group(0)]] +#else +// Since the PLS plane indices collide with other buffer bindings, offset the binding indices of +// these buffers by DEFAULT_BINDINGS_SET_SIZE. 
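+// (Illustration: if DEFAULT_BINDINGS_SET_SIZE were 8, PLS plane 0 would land at [[buffer(8)]],
+// keeping [[buffer(0)]]..[[buffer(7)]] free for the regular uniform and storage bindings.)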
+#define PLS_DECL4F(IDX, NAME) device uint* NAME [[buffer(IDX + DEFAULT_BINDINGS_SET_SIZE)]] +#define PLS_DECLUI(IDX, NAME) device uint* NAME [[buffer(IDX + DEFAULT_BINDINGS_SET_SIZE)]] +#define PLS_DECLUI_ATOMIC(IDX, NAME) \ + device atomic_uint* NAME [[buffer(IDX + DEFAULT_BINDINGS_SET_SIZE)]] +#endif // @PLS_IMPL_DEVICE_BUFFER_RASTER_ORDERED +#define PLS_BLOCK_END \ + } \ + ; +#define PLS_CONTEXT_DECL , PLS _pls, uint _plsIdx +#define PLS_CONTEXT_UNPACK , _pls, _plsIdx + +#define PLS_LOAD4F(PLANE) unpackUnorm4x8(_pls.PLANE[_plsIdx]) +#define PLS_LOADUI(PLANE) _pls.PLANE[_plsIdx] +#define PLS_LOADUI_ATOMIC(PLANE) \ + atomic_load_explicit(&_pls.PLANE[_plsIdx], memory_order::memory_order_relaxed) +#define PLS_STORE4F(PLANE, VALUE) _pls.PLANE[_plsIdx] = packUnorm4x8(VALUE) +#define PLS_STOREUI(PLANE, VALUE) _pls.PLANE[_plsIdx] = (VALUE) +#define PLS_STOREUI_ATOMIC(PLANE, VALUE) \ + atomic_store_explicit(&_pls.PLANE[_plsIdx], VALUE, memory_order::memory_order_relaxed) +#define PLS_PRESERVE_4F(PLANE) +#define PLS_PRESERVE_UI(PLANE) + +#define PLS_ATOMIC_MAX(PLANE, X) \ + atomic_fetch_max_explicit(&_pls.PLANE[_plsIdx], X, memory_order::memory_order_relaxed) + +#define PLS_ATOMIC_ADD(PLANE, X) \ + atomic_fetch_add_explicit(&_pls.PLANE[_plsIdx], X, memory_order::memory_order_relaxed) + +#define PLS_INTERLOCK_BEGIN +#define PLS_INTERLOCK_END + +#define PLS_METAL_MAIN(NAME) \ + __attribute__((visibility("default"))) fragment NAME(PLS _pls, \ + constant _EXPORTED_FlushUniforms& uniforms \ + [[buffer(FLUSH_UNIFORM_BUFFER_IDX)]], \ + Varyings _varyings [[stage_in]], \ + FragmentTextures _textures, \ + FragmentStorageBuffers _buffers) \ + { \ + float2 _fragCoord = _varyings._pos.xy; \ + uint2 _plsCoord = uint2(metal::floor(_fragCoord)); \ + uint _plsIdx = _plsCoord.y * uniforms.renderTargetWidth + _plsCoord.x; + +#define PLS_METAL_MAIN_WITH_IMAGE_UNIFORMS(NAME) \ + __attribute__((visibility("default"))) fragment NAME( \ + PLS _pls, \ + constant _EXPORTED_FlushUniforms& uniforms [[buffer(FLUSH_UNIFORM_BUFFER_IDX)]], \ + constant _EXPORTED_ImageDrawUniforms& imageDrawUniforms \ + [[buffer(IMAGE_DRAW_UNIFORM_BUFFER_IDX)]], \ + Varyings _varyings [[stage_in]], \ + FragmentTextures _textures, \ + FragmentStorageBuffers _buffers) \ + { \ + float2 _fragCoord = _varyings._pos.xy; \ + uint2 _plsCoord = uint2(metal::floor(_fragCoord)); \ + uint _plsIdx = _plsCoord.y * uniforms.renderTargetWidth + _plsCoord.x; + +#define PLS_MAIN(NAME) void PLS_METAL_MAIN(NAME) +#define PLS_MAIN_WITH_IMAGE_UNIFORMS(NAME) void PLS_METAL_MAIN_WITH_IMAGE_UNIFORMS(NAME) +#define EMIT_PLS } + +#define PLS_FRAG_COLOR_MAIN(NAME) \ + half4 PLS_METAL_MAIN(NAME) \ + { \ + half4 _fragColor; + +#define PLS_FRAG_COLOR_MAIN_WITH_IMAGE_UNIFORMS(NAME) \ + half4 PLS_METAL_MAIN_WITH_IMAGE_UNIFORMS(NAME) \ + { \ + half4 _fragColor; + +#define EMIT_PLS_AND_FRAG_COLOR \ + } \ + return _fragColor; \ + EMIT_PLS + +#else // Default implementation -- framebuffer reads. 
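+// On this default path each PLS plane is an ordinary [[color(IDX)]] attachment; Metal orders
+// same-pixel framebuffer accesses, so the pls_atomic_* helpers below can be plain read-modify-write.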
+ +#define PLS_BLOCK_BEGIN \ + struct PLS \ + { +#define PLS_DECL4F(IDX, NAME) [[color(IDX)]] half4 NAME +#define PLS_DECLUI(IDX, NAME) [[color(IDX)]] uint NAME +#define PLS_DECLUI_ATOMIC PLS_DECLUI +#define PLS_BLOCK_END \ + } \ + ; +#define PLS_CONTEXT_DECL , thread PLS &_inpls, thread PLS &_pls +#define PLS_CONTEXT_UNPACK , _inpls, _pls + +#define PLS_LOAD4F(PLANE) _inpls.PLANE +#define PLS_LOADUI(PLANE) _inpls.PLANE +#define PLS_LOADUI_ATOMIC(PLANE) PLS_LOADUI +#define PLS_STORE4F(PLANE, VALUE) _pls.PLANE = (VALUE) +#define PLS_STOREUI(PLANE, VALUE) _pls.PLANE = (VALUE) +#define PLS_STOREUI_ATOMIC(PLANE) PLS_STOREUI +#define PLS_PRESERVE_4F(PLANE) _pls.PLANE = _inpls.PLANE +#define PLS_PRESERVE_UI(PLANE) _pls.PLANE = _inpls.PLANE + +INLINE uint pls_atomic_max(thread uint& dst, uint x) +{ + uint originalValue = dst; + dst = metal::max(originalValue, x); + return originalValue; +} + +#define PLS_ATOMIC_MAX(PLANE, X) pls_atomic_max(_pls.PLANE, X) + +INLINE uint pls_atomic_add(thread uint& dst, uint x) +{ + uint originalValue = dst; + dst = originalValue + x; + return originalValue; +} + +#define PLS_ATOMIC_ADD(PLANE, X) pls_atomic_add(_pls.PLANE, X) + +#define PLS_INTERLOCK_BEGIN +#define PLS_INTERLOCK_END + +#define PLS_METAL_MAIN(NAME, ...) \ + PLS __attribute__((visibility("default"))) fragment NAME(__VA_ARGS__) \ + { \ + float2 _fragCoord [[maybe_unused]] = _varyings._pos.xy; \ + PLS _pls; + +#define PLS_MAIN(NAME, ...) \ + PLS_METAL_MAIN(NAME, \ + PLS _inpls, \ + Varyings _varyings [[stage_in]], \ + FragmentTextures _textures, \ + FragmentStorageBuffers _buffers) + +#define PLS_MAIN_WITH_IMAGE_UNIFORMS(NAME) \ + PLS_METAL_MAIN(NAME, \ + PLS _inpls, \ + Varyings _varyings [[stage_in]], \ + FragmentTextures _textures, \ + FragmentStorageBuffers _buffers, \ + constant _EXPORTED_ImageDrawUniforms& imageDrawUniforms \ + [[buffer(IMAGE_DRAW_UNIFORM_BUFFER_IDX)]]) + +#define EMIT_PLS \ + } \ + return _pls; + +#define PLS_FRAG_COLOR_METAL_MAIN(NAME, ...) 
\
+    struct FragmentOut \
+    { \
+        half4 _color [[color(0)]]; \
+        PLS _pls; \
+    }; \
+    FragmentOut __attribute__((visibility("default"))) fragment NAME(__VA_ARGS__) \
+    { \
+        float2 _fragCoord [[maybe_unused]] = _varyings._pos.xy; \
+        half4 _fragColor; \
+        PLS _pls;
+
+#define PLS_FRAG_COLOR_MAIN(NAME) \
+    PLS_FRAG_COLOR_METAL_MAIN(NAME, \
+                              PLS _inpls, \
+                              Varyings _varyings [[stage_in]], \
+                              FragmentTextures _textures, \
+                              FragmentStorageBuffers _buffers)
+
+#define PLS_FRAG_COLOR_MAIN_WITH_IMAGE_UNIFORMS(NAME) \
+    PLS_FRAG_COLOR_METAL_MAIN(NAME, \
+                              PLS _inpls, \
+                              Varyings _varyings [[stage_in]], \
+                              FragmentTextures _textures, \
+                              FragmentStorageBuffers _buffers, \
+                              __VA_ARGS__ constant _EXPORTED_ImageDrawUniforms& imageDrawUniforms \
+                              [[buffer(IMAGE_DRAW_UNIFORM_BUFFER_IDX)]])
+
+#define EMIT_PLS_AND_FRAG_COLOR \
+    } \
+    return {._color = _fragColor, ._pls = _pls};
+
+#endif // PLS_IMPL_DEVICE_BUFFER
+
+#define discard discard_fragment()
+
+using namespace metal;
+
+template <int N> INLINE vec<uint, N> floatBitsToUint(vec<float, N> x)
+{
+    return as_type<vec<uint, N>>(x);
+}
+
+template <int N> INLINE vec<int, N> floatBitsToInt(vec<float, N> x)
+{
+    return as_type<vec<int, N>>(x);
+}
+
+INLINE uint floatBitsToUint(float x) { return as_type<uint>(x); }
+
+INLINE int floatBitsToInt(float x) { return as_type<int>(x); }
+
+template <int N> INLINE vec<float, N> uintBitsToFloat(vec<uint, N> x)
+{
+    return as_type<vec<float, N>>(x);
+}
+
+INLINE float uintBitsToFloat(uint x) { return as_type<float>(x); }
+INLINE half2 unpackHalf2x16(uint x) { return as_type<half2>(x); }
+INLINE uint packHalf2x16(half2 x) { return as_type<uint>(x); }
+INLINE half4 unpackUnorm4x8(uint x) { return unpack_unorm4x8_to_half(x); }
+INLINE uint packUnorm4x8(half4 x) { return pack_half_to_unorm4x8(x); }
+
+INLINE float2x2 inverse(float2x2 m)
+{
+    float2x2 m_ = float2x2(m[1][1], -m[0][1], -m[1][0], m[0][0]);
+    float det = (m_[0][0] * m[0][0]) + (m_[0][1] * m[1][0]);
+    return m_ * (1 / det);
+}
+
+INLINE half3 mix(half3 a, half3 b, bool3 c)
+{
+    half3 result;
+    for (int i = 0; i < 3; ++i)
+        result[i] = c[i] ? b[i] : a[i];
+    return result;
+}
diff --git a/Shaders/Private/Rive/Generated/pls_load_store_ext.minified.ush b/Shaders/Private/Rive/Generated/pls_load_store_ext.minified.ush
new file mode 100644
index 00000000..356f89e2
--- /dev/null
+++ b/Shaders/Private/Rive/Generated/pls_load_store_ext.minified.ush
@@ -0,0 +1,98 @@
+/*
+ * Copyright 2022 Rive
+ */
+
+// The EXT_shader_pixel_local_storage extension does not provide a mechanism to load, store, or
+// clear pixel local storage contents. This shader performs custom load, store, and clear
+// operations via fullscreen draws.
+
+#ifdef VERTEX
+void main()
+{
+    // [-1, -1] ..
[+1, +1] + gl_Position = + vec4(mix(vec2(-1, 1), vec2(1, -1), equal(gl_VertexID & ivec2(1, 2), ivec2(0))), 0, 1); +} +#endif + +#ifdef FRAGMENT + +#extension GL_EXT_shader_pixel_local_storage : enable +#extension GL_ARM_shader_framebuffer_fetch : enable +#extension GL_EXT_shader_framebuffer_fetch : enable + +#ifdef CLEAR_COLOR +#if __VERSION__ >= 310 +layout(binding = 0, std140) uniform ClearColor { uniform highp vec4 value; } +clearColor; +#else +uniform mediump vec4 _EXPORTED_clearColor; +#endif +#endif + +#ifdef GL_EXT_shader_pixel_local_storage + +#ifdef STORE_COLOR +__pixel_local_inEXT PLS +#else +__pixel_local_outEXT PLS +#endif +{ + layout(rgba8) mediump vec4 colorBuffer; +#ifdef ENABLE_CLIPPING + layout(r32ui) highp uint clipBuffer; +#endif + layout(rgba8) mediump vec4 scratchColorBuffer; + layout(r32ui) highp uint coverageCountBuffer; +}; + +#ifndef GL_ARM_shader_framebuffer_fetch +#ifdef LOAD_COLOR +layout(location = 0) inout mediump vec4 fragColor; +#endif +#endif + +#ifdef STORE_COLOR +layout(location = 0) out mediump vec4 fragColor; +#endif + +void main() +{ +#ifdef CLEAR_COLOR +#if __VERSION__ >= 310 + colorBuffer = clearColor.value; +#else + colorBuffer = _EXPORTED_clearColor; +#endif +#endif + +#ifdef LOAD_COLOR +#ifdef GL_ARM_shader_framebuffer_fetch + colorBuffer = gl_LastFragColorARM; +#else + colorBuffer = fragColor; +#endif +#endif + +#ifdef CLEAR_COVERAGE + coverageCountBuffer = 0u; +#endif + +#ifdef CLEAR_CLIP + clipBuffer = 0u; +#endif + +#ifdef STORE_COLOR + fragColor = colorBuffer; +#endif +} + +#else + +// This shader is being parsed by WebGPU for introspection purposes. +layout(location = 0) out mediump vec4 unused; +void main() { unused = vec4(0, 1, 0, 1); } + +#endif // GL_EXT_shader_pixel_local_storage + +#endif // FRAGMENT diff --git a/Shaders/Private/Rive/Generated/rhi.minified.ush b/Shaders/Private/Rive/Generated/rhi.minified.ush new file mode 100644 index 00000000..41f7cae4 --- /dev/null +++ b/Shaders/Private/Rive/Generated/rhi.minified.ush @@ -0,0 +1,354 @@ +/* + * Copyright 2023 Rive + */ + +// This header provides GLSL-specific #defines and declarations that enable our shaders to be +// compiled on MSL and GLSL both. + +// HLSL warns that it will unroll the loops through r,g,b values in advanced_blend.glsl, but +// unrolling these loops is exactly what we want. +#pragma warning(disable : 3550) + +// Don't warn about uninitialized variables. If we leave one uninitialized it's because we know what +// we're doing and don't want to pay the cost of initializing it. +#pragma warning(disable : 4000) + +// #define native hlsl types if their names are being rewritten. 
+#define _ARE_TOKEN_NAMES_PRESERVED
+#ifndef _ARE_TOKEN_NAMES_PRESERVED
+#define half half
+#define half2 half2
+#define half3 half3
+#define half4 half4
+#define short short
+#define short2 short2
+#define short3 short3
+#define short4 short4
+#define ushort ushort
+#define ushort2 ushort2
+#define ushort3 ushort3
+#define ushort4 ushort4
+#define float2 float2
+#define float3 float3
+#define float4 float4
+#define bool2 bool2
+#define bool3 bool3
+#define bool4 bool4
+#define uint2 uint2
+#define uint3 uint3
+#define uint4 uint4
+#define int2 int2
+#define int3 int3
+#define int4 int4
+#define float4x2 float4x2
+#define ushort ushort
+#define float2x2 float2x2
+#define half3x4 half3x4
+#endif
+
+typedef float3 packed_float3;
+
+#ifdef ENABLE_MIN_16_PRECISION
+
+typedef min16uint ushort;
+
+#else
+
+typedef uint ushort;
+
+#endif
+
+#define SPLAT(A, B) A##B
+
+#define INLINE inline
+#define OUT(ARG_TYPE) out ARG_TYPE
+
+#define ATTR_BLOCK_BEGIN(NAME) \
+    struct NAME \
+    {
+#define ATTR(IDX, TYPE, NAME) TYPE NAME : SPLAT(ATTRIBUTE, IDX)
+#define ATTR_BLOCK_END \
+    } \
+    ;
+#define ATTR_LOAD(T, A, N, I)
+#define ATTR_UNPACK(ID, attrs, NAME, TYPE) TYPE NAME = attrs.NAME
+
+#define UNIFORM_BUFFER_REGISTER(IDX) register(SPLAT(b,IDX))
+
+#define UNIFORM_BLOCK_BEGIN(IDX, NAME) \
+    cbuffer NAME : UNIFORM_BUFFER_REGISTER(IDX) \
+    { \
+        struct \
+        {
+
+#define UNIFORM_BLOCK_END(NAME) \
+    } \
+    NAME; \
+    }
+
+#define VARYING_BLOCK_BEGIN \
+    struct Varyings \
+    {
+
+#define NO_PERSPECTIVE noperspective
+#define OPTIONALLY_FLAT nointerpolation
+#define FLAT nointerpolation
+#define VARYING(IDX, TYPE, NAME) TYPE NAME : SPLAT(TEXCOORD,IDX)
+
+#define VARYING_BLOCK_END \
+    float4 _pos : SV_Position; \
+    } \
+    ;
+
+#define VARYING_INIT(NAME, TYPE) TYPE NAME
+#define VARYING_PACK(NAME) _varyings.NAME = NAME
+#define VARYING_UNPACK(NAME, TYPE) TYPE NAME = _varyings.NAME
+
+#ifdef VERTEX
+#define VERTEX_TEXTURE_BLOCK_BEGIN
+#define VERTEX_TEXTURE_BLOCK_END
+#endif
+
+#ifdef FRAGMENT
+#define FRAG_TEXTURE_BLOCK_BEGIN
+#define FRAG_TEXTURE_BLOCK_END
+#endif
+
+#define TEXTURE_RGBA32UI(SET, IDX, NAME) uniform Texture2D<uint4> NAME : register(SPLAT(t,IDX))
+#define TEXTURE_RGBA32F(SET, IDX, NAME) uniform Texture2D<float4> NAME : register(SPLAT(t,IDX))
+#define TEXTURE_RGBA8(SET, IDX, NAME) uniform Texture2D<unorm half4> NAME : register(SPLAT(t,IDX))
+
+// SAMPLER_LINEAR and SAMPLER_MIPMAP are the same because in d3d11, sampler parameters are defined
+// at the API level.
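+// (Both variants expand to a plain SamplerState declaration; the actual filter and mip settings
+// come from the sampler object the renderer binds to the matching s-register at draw time.)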
+#define SAMPLER(TEXTURE_IDX, NAME) SamplerState NAME : register(SPLAT(s,TEXTURE_IDX));
+#define SAMPLER_LINEAR SAMPLER
+#define SAMPLER_MIPMAP SAMPLER
+
+#define TEXEL_FETCH(NAME, COORD) NAME[COORD]
+#define TEXTURE_SAMPLE(NAME, SAMPLER_NAME, COORD) NAME.Sample(SAMPLER_NAME, COORD)
+#define TEXTURE_SAMPLE_LOD(NAME, SAMPLER_NAME, COORD, LOD) \
+    NAME.SampleLevel(SAMPLER_NAME, COORD, LOD)
+#define TEXTURE_SAMPLE_GRAD(NAME, SAMPLER_NAME, COORD, DDX, DDY) \
+    NAME.SampleGrad(SAMPLER_NAME, COORD, DDX, DDY)
+
+#define PLS_INTERLOCK_BEGIN
+#define PLS_INTERLOCK_END
+
+#ifdef ENABLE_RASTERIZER_ORDERED_VIEWS
+#define PLS_TEX2D RasterizerOrderedTexture2D
+#else
+#define PLS_TEX2D RWTexture2D
+#endif
+
+#define PLS_BLOCK_BEGIN
+#ifdef ENABLE_TYPED_UAV_LOAD_STORE
+#define PLS_DECL4F(IDX, NAME) uniform PLS_TEX2D<unorm half4> NAME : register(SPLAT(u,IDX))
+#else
+#define PLS_DECL4F(IDX, NAME) uniform PLS_TEX2D<uint> NAME : register(SPLAT(u,IDX))
+#endif
+#define PLS_DECLUI(IDX, NAME) uniform PLS_TEX2D<uint> NAME : register(SPLAT(u,IDX))
+#define PLS_DECLUI_ATOMIC PLS_DECLUI
+#define PLS_LOADUI_ATOMIC PLS_LOADUI
+#define PLS_STOREUI_ATOMIC PLS_STOREUI
+#define PLS_BLOCK_END
+
+#ifdef ENABLE_TYPED_UAV_LOAD_STORE
+#define PLS_LOAD4F(PLANE) PLANE[_plsCoord]
+#else
+#define PLS_LOAD4F(PLANE) unpackUnorm4x8(PLANE[_plsCoord])
+#endif
+#define PLS_LOADUI(PLANE) PLANE[_plsCoord]
+#ifdef ENABLE_TYPED_UAV_LOAD_STORE
+#define PLS_STORE4F(PLANE, VALUE) PLANE[_plsCoord] = (VALUE)
+#else
+#define PLS_STORE4F(PLANE, VALUE) PLANE[_plsCoord] = packUnorm4x8(VALUE)
+#endif
+#define PLS_STOREUI(PLANE, VALUE) PLANE[_plsCoord] = (VALUE)
+
+INLINE uint pls_atomic_max(PLS_TEX2D<uint> plane, int2 _plsCoord, uint x)
+{
+    uint originalValue;
+    InterlockedMax(plane[_plsCoord], x, originalValue);
+    return originalValue;
+}
+
+#define PLS_ATOMIC_MAX(PLANE, X) pls_atomic_max(PLANE, _plsCoord, X)
+
+INLINE uint pls_atomic_add(PLS_TEX2D<uint> plane, int2 _plsCoord, uint x)
+{
+    uint originalValue;
+    InterlockedAdd(plane[_plsCoord], x, originalValue);
+    return originalValue;
+}
+
+#define PLS_ATOMIC_ADD(PLANE, X) pls_atomic_add(PLANE, _plsCoord, X)
+
+#define PLS_PRESERVE_4F(PLANE)
+#define PLS_PRESERVE_UI(PLANE)
+
+#define VERTEX_CONTEXT_DECL
+#define VERTEX_CONTEXT_UNPACK
+
+#define VERTEX_MAIN(NAME, Attrs, attrs, _vertexID, _instanceID) \
+    \
+    uint baseInstance; \
+    \
+    Varyings NAME(Attrs attrs, uint _vertexID \
+                  : SV_VertexID, uint _instanceIDWithoutBase \
+                  : SV_InstanceID) \
+    { \
+        uint _instanceID = _instanceIDWithoutBase + baseInstance; \
+        Varyings _varyings;
+
+#define IMAGE_RECT_VERTEX_MAIN(NAME, Attrs, attrs, _vertexID, _instanceID) \
+    Varyings NAME(Attrs attrs, uint _vertexID : SV_VertexID) \
+    { \
+        Varyings _varyings; \
+        float4 _pos;
+
+#define IMAGE_MESH_VERTEX_MAIN(NAME, PositionAttr, position, UVAttr, uv, _vertexID) \
+    Varyings NAME(PositionAttr position, UVAttr uv, uint _vertexID : SV_VertexID) \
+    { \
+        Varyings _varyings; \
+        float4 _pos;
+
+#define EMIT_VERTEX(POSITION) \
+    _varyings._pos = POSITION; \
+    } \
+    return _varyings;
+
+#define FRAG_DATA_MAIN(DATA_TYPE, NAME) \
+    DATA_TYPE NAME(Varyings _varyings) : SV_Target \
+    {
+
+#define EMIT_FRAG_DATA(VALUE) \
+    return VALUE; \
+    }
+
+#define FRAGMENT_CONTEXT_DECL , float2 _fragCoord
+#define FRAGMENT_CONTEXT_UNPACK , _fragCoord
+
+#define PLS_CONTEXT_DECL , int2 _plsCoord
+#define PLS_CONTEXT_UNPACK , _plsCoord
+
+#define PLS_MAIN(NAME) [earlydepthstencil] void NAME(Varyings _varyings) { \
+    float2 _fragCoord = _varyings._pos.xy;\
+    int2 _plsCoord = int2(floor(_fragCoord));
+
+#define PLS_MAIN_WITH_IMAGE_UNIFORMS(NAME) PLS_MAIN(NAME)
+
+#define EMIT_PLS }
+
+#define PLS_FRAG_COLOR_MAIN(NAME) \
+    [earlydepthstencil] half4 NAME(Varyings _varyings) : SV_Target \
+    { \
+        float2 _fragCoord = _varyings._pos.xy; \
+        int2 _plsCoord = int2(floor(_fragCoord)); \
+        half4 _fragColor;
+
+#define PLS_FRAG_COLOR_MAIN_WITH_IMAGE_UNIFORMS(NAME) PLS_FRAG_COLOR_MAIN(NAME)
+
+#define EMIT_PLS_AND_FRAG_COLOR \
+    } \
+    return _fragColor;
+
+#define uintBitsToFloat asfloat
+#define intBitsToFloat asfloat
+#define floatBitsToInt asint
+#define floatBitsToUint asuint
+#define inversesqrt rsqrt
+#define notEqual(A, B) ((A) != (B))
+#define lessThanEqual(A, B) ((A) <= (B))
+#define greaterThanEqual(A, B) ((A) >= (B))
+
+// HLSL matrices are stored in row-major order, and therefore transposed from their counterparts
+// in GLSL and Metal. We can work around this entirely by reversing the arguments to mul().
+#define MUL(A, B) mul(B, A)
+
+#define VERTEX_STORAGE_BUFFER_BLOCK_BEGIN
+#define VERTEX_STORAGE_BUFFER_BLOCK_END
+
+#define FRAG_STORAGE_BUFFER_BLOCK_BEGIN
+#define FRAG_STORAGE_BUFFER_BLOCK_END
+
+#define STORAGE_BUFFER_U32x2(IDX, GLSL_STRUCT_NAME, NAME) \
+    StructuredBuffer<uint2> NAME : register(SPLAT(t,IDX))
+#define STORAGE_BUFFER_U32x4(IDX, GLSL_STRUCT_NAME, NAME) \
+    StructuredBuffer<uint4> NAME : register(SPLAT(t,IDX))
+#define STORAGE_BUFFER_F32x4(IDX, GLSL_STRUCT_NAME, NAME) \
+    StructuredBuffer<float4> NAME : register(SPLAT(t,IDX))
+
+#define STORAGE_BUFFER_LOAD4(NAME, I) NAME[I]
+#define STORAGE_BUFFER_LOAD2(NAME, I) NAME[I]
+
+INLINE half2 unpackHalf2x16(uint u)
+{
+    uint y = (u >> 16);
+    uint x = u & 0xffffu;
+    return half2(f16tof32(x), f16tof32(y));
+}
+
+INLINE uint packHalf2x16(float2 v)
+{
+    uint x = f32tof16(v.x);
+    uint y = f32tof16(v.y);
+    return (y << 16) | x;
+}
+
+INLINE half4 unpackUnorm4x8(uint u)
+{
+    uint4 vals = uint4(u & 0xffu, (u >> 8) & 0xffu, (u >> 16) & 0xffu, u >> 24);
+    return half4(vals) * (1. / 255.);
+}
+
+INLINE uint packUnorm4x8(half4 color)
+{
+    uint4 vals = (uint4(color * 255.) & 0xff) << uint4(0, 8, 16, 24);
+    vals.xy |= vals.zw;
+    vals.x |= vals.y;
+    return vals.x;
+}
+
+INLINE float atan(float y, float x) { return atan2(y, x); }
+
+INLINE float2x2 inverse(float2x2 m)
+{
+    float2x2 adjoint = float2x2(m[1][1], -m[0][1], -m[1][0], m[0][0]);
+    return adjoint * (1. / determinant(m));
+}
+
+// Redirects for intrinsics that have different names in HLSL
+
+INLINE float mix(float x, float y, float s) { return lerp(x, y, s); }
+INLINE float2 mix(float2 x, float2 y, float2 s) { return lerp(x, y, s); }
+INLINE float3 mix(float3 x, float3 y, float3 s) { return lerp(x, y, s); }
+INLINE float4 mix(float4 x, float4 y, float4 s) { return lerp(x, y, s); }
+
+INLINE float fract(float x) { return frac(x); }
+INLINE float2 fract(float2 x) { return frac(x); }
+INLINE float3 fract(float3 x) { return frac(x); }
+INLINE float4 fract(float4 x) { return frac(x); }
+
+// Reimplement intrinsics for half types.
+// This shadows the intrinsic function for floats, so we also have to declare that overload.
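+// (e.g. after "#define sign rive_sign" takes effect, a call sign(x) becomes rive_sign(x); the
+// float overloads simply forward to the builtin, so float callers keep their original behavior.)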
+ +INLINE float rive_sign(float x) { return sign(x); } +INLINE float2 rive_sign(float2 x) { return sign(x); } +INLINE float3 rive_sign(float3 x) { return sign(x); } +INLINE float4 rive_sign(float4 x) { return sign(x); } + +#define sign rive_sign + +INLINE float rive_abs(float x) { return abs(x); } +INLINE float2 rive_abs(float2 x) { return abs(x); } +INLINE float3 rive_abs(float3 x) { return abs(x); } +INLINE float4 rive_abs(float4 x) { return abs(x); } + +#define abs rive_abs + +INLINE float rive_sqrt(float x) { return sqrt(x); } +INLINE float2 rive_sqrt(float2 x) { return sqrt(x); } +INLINE float3 rive_sqrt(float3 x) { return sqrt(x); } +INLINE float4 rive_sqrt(float4 x) { return sqrt(x); } + +#define sqrt rive_sqrt diff --git a/Shaders/Private/Rive/Generated/specialization.minified.ush b/Shaders/Private/Rive/Generated/specialization.minified.ush new file mode 100644 index 00000000..c39726bf --- /dev/null +++ b/Shaders/Private/Rive/Generated/specialization.minified.ush @@ -0,0 +1,13 @@ +layout(constant_id = CLIPPING_SPECIALIZATION_IDX) const bool kEnableClipping = false; +layout(constant_id = CLIP_RECT_SPECIALIZATION_IDX) const bool kEnableClipRect = false; +layout(constant_id = ADVANCED_BLEND_SPECIALIZATION_IDX) const bool kEnableAdvancedBlend = false; +layout(constant_id = EVEN_ODD_SPECIALIZATION_IDX) const bool kEnableEvenOdd = false; +layout(constant_id = NESTED_CLIPPING_SPECIALIZATION_IDX) const bool kEnableNestedClipping = false; +layout(constant_id = HSL_BLEND_MODES_SPECIALIZATION_IDX) const bool kEnableHSLBlendModes = false; + +#define ENABLE_CLIPPING kEnableClipping +#define ENABLE_CLIP_RECT kEnableClipRect +#define ENABLE_ADVANCED_BLEND kEnableAdvancedBlend +#define ENABLE_EVEN_ODD kEnableEvenOdd +#define ENABLE_NESTED_CLIPPING kEnableNestedClipping +#define ENABLE_HSL_BLEND_MODES kEnableHSLBlendModes diff --git a/Shaders/Private/Rive/Generated/stencil_draw.minified.ush b/Shaders/Private/Rive/Generated/stencil_draw.minified.ush new file mode 100644 index 00000000..800c793d --- /dev/null +++ b/Shaders/Private/Rive/Generated/stencil_draw.minified.ush @@ -0,0 +1,30 @@ +/* + * Copyright 2024 Rive + */ + +#ifdef VERTEX +ATTR_BLOCK_BEGIN(Attrs) +ATTR(0, packed_float3, _EXPORTED_a_triangleVertex); +ATTR_BLOCK_END + +VERTEX_TEXTURE_BLOCK_BEGIN +VERTEX_TEXTURE_BLOCK_END + +VERTEX_STORAGE_BUFFER_BLOCK_BEGIN +VERTEX_STORAGE_BUFFER_BLOCK_END + +VERTEX_MAIN(_EXPORTED_stencilVertexMain, Attrs, attrs, _vertexID, _instanceID) +{ + float4 pos = RENDER_TARGET_COORD_TO_CLIP_COORD(_EXPORTED_a_triangleVertex.xy); + uint zIndex = floatBitsToUint(_EXPORTED_a_triangleVertex.z) & 0xffffu; + pos.z = normalize_z_index(zIndex); + EMIT_VERTEX(pos); +} +#endif + +#ifdef FRAGMENT +FRAG_TEXTURE_BLOCK_BEGIN +FRAG_TEXTURE_BLOCK_END + +FRAG_DATA_MAIN(half4, _EXPORTED_blitFragmentMain) { EMIT_FRAG_DATA(make_half4(.0)); } +#endif // FRAGMENT diff --git a/Shaders/Private/Rive/Generated/tessellate.minified.ush b/Shaders/Private/Rive/Generated/tessellate.minified.ush new file mode 100644 index 00000000..46e35447 --- /dev/null +++ b/Shaders/Private/Rive/Generated/tessellate.minified.ush @@ -0,0 +1,425 @@ +/* + * Copyright 2020 Google LLC. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + * + * Initial import from skia:src/gpu/ganesh/tessellate/GrStrokeTessellationShader.cpp + * + * Copyright 2022 Rive + */ + +#define MAX_PARAMETRIC_SEGMENTS_LOG2 10 // Max 1024 segments. 
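+// The vertex shader below decodes three segment counts from the single uint a_args.z:
+// the parametric count in bits 0-9 (hence the 10 above), the polar count in bits 10-19, and the
+// join count in the remaining top bits. For example, 3 parametric, 5 polar, and 8 join segments
+// would arrive packed as (8u << 20) | (5u << 10) | 3u.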
+
+#ifdef VERTEX
+ATTR_BLOCK_BEGIN(Attrs)
+ATTR(0, float4, _EXPORTED_a_p0p1_); // End in '_' because D3D interprets the '1' as a semantic index.
+ATTR(1, float4, _EXPORTED_a_p2p3_);
+ATTR(2, float4, _EXPORTED_a_joinTan_and_ys); // [joinTangent, y, reflectionY]
+#ifdef SPLIT_UINT4_ATTRIBUTES
+ATTR(3, uint, _EXPORTED_a_args_a);
+ATTR(4, uint, _EXPORTED_a_args_b);
+ATTR(5, uint, _EXPORTED_a_args_c);
+ATTR(6, uint, _EXPORTED_a_args_d);
+#else
+ATTR(3, uint4, _EXPORTED_a_args); // [x0x1, reflectionX0X1, segmentCounts, contourIDWithFlags]
+#endif
+ATTR_BLOCK_END
+#endif
+
+VARYING_BLOCK_BEGIN
+NO_PERSPECTIVE VARYING(0, float4, v_p0p1);
+NO_PERSPECTIVE VARYING(1, float4, v_p2p3);
+NO_PERSPECTIVE VARYING(2, float4, v_args); // [vertexIdx, totalVertexCount, joinSegmentCount,
+                                           //  parametricSegmentCount, radsPerPolarSegment]
+NO_PERSPECTIVE VARYING(3, float3, v_joinArgs); // [joinTangent, radsPerJoinSegment]
+FLAT VARYING(4, uint, v_contourIDWithFlags);
+VARYING_BLOCK_END
+
+// Tangent of the curve at T=0 and T=1.
+INLINE float2x2 find_tangents(float2 p0, float2 p1, float2 p2, float2 p3)
+{
+    float2x2 t;
+    t[0] = (any(notEqual(p0, p1)) ? p1 : any(notEqual(p1, p2)) ? p2 : p3) - p0;
+    t[1] = p3 - (any(notEqual(p3, p2)) ? p2 : any(notEqual(p2, p1)) ? p1 : p0);
+    return t;
+}
+
+#ifdef VERTEX
+VERTEX_TEXTURE_BLOCK_BEGIN
+VERTEX_TEXTURE_BLOCK_END
+
+VERTEX_STORAGE_BUFFER_BLOCK_BEGIN
+STORAGE_BUFFER_U32x4(PATH_BUFFER_IDX, PathBuffer, _EXPORTED_pathBuffer);
+STORAGE_BUFFER_U32x4(CONTOUR_BUFFER_IDX, ContourBuffer, _EXPORTED_contourBuffer);
+VERTEX_STORAGE_BUFFER_BLOCK_END
+
+float cosine_between_vectors(float2 a, float2 b)
+{
+    // FIXME(crbug.com/800804,skbug.com/11268): This can overflow if we don't normalize exponents.
+    float ab_cosTheta = dot(a, b);
+    float ab_pow2 = dot(a, a) * dot(b, b);
+    return (ab_pow2 == .0) ? 1. : clamp(ab_cosTheta * inversesqrt(ab_pow2), -1., 1.);
+}
+
+VERTEX_MAIN(_EXPORTED_tessellateVertexMain, Attrs, attrs, _vertexID, _instanceID)
+{
+    // Each instance repeats twice. Once for normal patch(es) and once for reflection(s).
+    ATTR_UNPACK(_instanceID, attrs, _EXPORTED_a_p0p1_, float4);
+    ATTR_UNPACK(_instanceID, attrs, _EXPORTED_a_p2p3_, float4);
+    ATTR_UNPACK(_instanceID, attrs, _EXPORTED_a_joinTan_and_ys, float4);
+
+#ifdef SPLIT_UINT4_ATTRIBUTES
+    ATTR_UNPACK(_instanceID, attrs, _EXPORTED_a_args_a, uint);
+    ATTR_UNPACK(_instanceID, attrs, _EXPORTED_a_args_b, uint);
+    ATTR_UNPACK(_instanceID, attrs, _EXPORTED_a_args_c, uint);
+    ATTR_UNPACK(_instanceID, attrs, _EXPORTED_a_args_d, uint);
+
+    uint4 _EXPORTED_a_args = uint4(_EXPORTED_a_args_a, _EXPORTED_a_args_b, _EXPORTED_a_args_c, _EXPORTED_a_args_d);
+
+#else
+    ATTR_UNPACK(_instanceID, attrs, _EXPORTED_a_args, uint4);
+
+#endif
+
+    VARYING_INIT(v_p0p1, float4);
+    VARYING_INIT(v_p2p3, float4);
+    VARYING_INIT(v_args, float4);
+    VARYING_INIT(v_joinArgs, float3);
+    VARYING_INIT(v_contourIDWithFlags, uint);
+
+    float2 p0 = _EXPORTED_a_p0p1_.xy;
+    float2 p1 = _EXPORTED_a_p0p1_.zw;
+    float2 p2 = _EXPORTED_a_p2p3_.xy;
+    float2 p3 = _EXPORTED_a_p2p3_.zw;
+    // Each instance has two spans, potentially for both a forward copy and a reflection.
+    // (If the second span isn't needed, the client will have placed it offscreen.)
+    bool isFirstSpan = _vertexID < 4;
+    float y = isFirstSpan ? _EXPORTED_a_joinTan_and_ys.z : _EXPORTED_a_joinTan_and_ys.w;
+    int x0x1 = int(isFirstSpan ?
_EXPORTED_a_args.x : _EXPORTED_a_args.y); +#ifdef GLSL + int x1up = x0x1 << 16; + if (_EXPORTED_a_args.z == 0xffffffffu) + { + // Pixel 8 with ARM Mali-G715 throws away "x0x1 << 16 >> 16". We need this in order to + // sign-extend the bottom 16 bits of x0x1. + // Create a branch that we know won't be taken, in order to convince the compiler not to + // throw this operation away. + // NOTE: we could use bitfieldExtract(), but it isn't available on ES 3.0. + --x1up; + } + float x0 = float(x1up >> 16); +#else + float x0 = float(x0x1 << 16 >> 16); +#endif + float x1 = float(x0x1 >> 16); + float2 coord = float2((_vertexID & 1) == 0 ? x0 : x1, (_vertexID & 2) == 0 ? y + 1. : y); + + uint parametricSegmentCount = _EXPORTED_a_args.z & 0x3ffu; + uint polarSegmentCount = (_EXPORTED_a_args.z >> 10) & 0x3ffu; + uint joinSegmentCount = _EXPORTED_a_args.z >> 20; + uint contourIDWithFlags = _EXPORTED_a_args.w; + if (x1 < x0) // Reflections are drawn right to left. + { + contourIDWithFlags |= MIRRORED_CONTOUR_CONTOUR_FLAG; + } + if ((x1 - x0) * uniforms.tessInverseViewportY < .0) + { + // Make sure we always emit clockwise triangles. Swap the top and bottom vertices. + coord.y = 2. * y + 1. - coord.y; + } + if ((contourIDWithFlags & CULL_EXCESS_TESSELLATION_SEGMENTS_CONTOUR_FLAG) != 0u) + { + // This span may have more tessellation vertices allocated to it than necessary (e.g., + // outerCurve patches all have a fixed patch size, regardless of how many segments the curve + // actually needs). Re-run Wang's formula to figure out how many segments we actually need, + // and make any excess segments degenerate by co-locating their vertices at T=0. + uint pathIDBits = + STORAGE_BUFFER_LOAD4(_EXPORTED_contourBuffer, contour_data_idx(contourIDWithFlags)).z; + float2x2 mat = + make_float2x2(uintBitsToFloat(STORAGE_BUFFER_LOAD4(_EXPORTED_pathBuffer, pathIDBits * 2u))); + float2 d0 = MUL(mat, -2. * p1 + p2 + p0); + + float2 d1 = MUL(mat, -2. * p2 + p3 + p1); + float m = max(dot(d0, d0), dot(d1, d1)); + float n = max(ceil(sqrt(.75 * 4. * sqrt(m))), 1.); + parametricSegmentCount = min(uint(n), parametricSegmentCount); + } + // Polar and parametric segments share the same beginning and ending vertices, so the merged + // *vertex* count is equal to the sum of polar and parametric *segment* counts. + uint totalVertexCount = parametricSegmentCount + polarSegmentCount + joinSegmentCount - 1u; + + float2x2 tangents = find_tangents(p0, p1, p2, p3); + float theta = acos(cosine_between_vectors(tangents[0], tangents[1])); + float radsPerPolarSegment = theta / float(polarSegmentCount); + // Adjust sign of radsPerPolarSegment to match the direction the curve turns. + // NOTE: Since the curve is not allowed to inflect, we can just check F'(.5) x F''(.5). + // NOTE: F'(.5) x F''(.5) has the same sign as (p2 - p0) x (p3 - p1). + float turn = determinant(float2x2(p2 - p0, p3 - p1)); + if (turn == .0) // This is the case for joins and cusps where points are co-located. 
+ turn = determinant(tangents); + if (turn < .0) + radsPerPolarSegment = -radsPerPolarSegment; + + v_p0p1 = float4(p0, p1); + v_p2p3 = float4(p2, p3); + v_args = float4(float(totalVertexCount) - abs(x1 - coord.x), // vertexIdx + float(totalVertexCount), // totalVertexCount + (joinSegmentCount << 10) | parametricSegmentCount, + radsPerPolarSegment); + if (joinSegmentCount > 1u) + { + float2x2 joinTangents = float2x2(tangents[1], _EXPORTED_a_joinTan_and_ys.xy); + float joinTheta = acos(cosine_between_vectors(joinTangents[0], joinTangents[1])); + float joinSpan = float(joinSegmentCount); + if ((contourIDWithFlags & (JOIN_TYPE_MASK | EMULATED_STROKE_CAP_CONTOUR_FLAG)) == + EMULATED_STROKE_CAP_CONTOUR_FLAG) + { + // Round caps emulated as joins need to emit vertices at T=0 and T=1, unlike normal + // round joins. The fragment shader will handle most of this, but here we need to adjust + // radsPerJoinSegment to account for the fact that this join will be rotating around two + // more segments. + joinSpan -= 2.; + } + float radsPerJoinSegment = joinTheta / joinSpan; + if (determinant(joinTangents) < .0) + radsPerJoinSegment = -radsPerJoinSegment; + v_joinArgs.xy = _EXPORTED_a_joinTan_and_ys.xy; + v_joinArgs.z = radsPerJoinSegment; + } + v_contourIDWithFlags = contourIDWithFlags; + + float4 pos; + pos.x = coord.x * (2. / TESS_TEXTURE_WIDTH) - 1.; + pos.y = coord.y * uniforms.tessInverseViewportY - sign(uniforms.tessInverseViewportY); + pos.zw = float2(0, 1); + + VARYING_PACK(v_p0p1); + VARYING_PACK(v_p2p3); + VARYING_PACK(v_args); + VARYING_PACK(v_joinArgs); + VARYING_PACK(v_contourIDWithFlags); + EMIT_VERTEX(pos); +} +#endif + +#ifdef FRAGMENT +FRAG_DATA_MAIN(uint4, _EXPORTED_tessellateFragmentMain) +{ + VARYING_UNPACK(v_p0p1, float4); + VARYING_UNPACK(v_p2p3, float4); + VARYING_UNPACK(v_args, float4); + VARYING_UNPACK(v_joinArgs, float3); + VARYING_UNPACK(v_contourIDWithFlags, uint); + + float2 p0 = v_p0p1.xy; + float2 p1 = v_p0p1.zw; + float2 p2 = v_p2p3.xy; + float2 p3 = v_p2p3.zw; + float2x2 tangents = find_tangents(p0, p1, p2, p3); + // Colocate any padding vertices at T=0. + float vertexIdx = max(floor(v_args.x), .0); + float totalVertexCount = v_args.y; + uint joinSegmentCount_and_parametricSegmentCount = uint(v_args.z); + float parametricSegmentCount = float(joinSegmentCount_and_parametricSegmentCount & 0x3ffu); + float joinSegmentCount = float(joinSegmentCount_and_parametricSegmentCount >> 10); + float radsPerPolarSegment = v_args.w; + uint contourIDWithFlags = v_contourIDWithFlags; + + // mergedVertexID/mergedSegmentCount are relative to the sub-section of the instance this vertex + // belongs to (either the curve section that consists of merged polar and parametric segments, + // or the join section composed of just polar segments). + // + // Begin with the assumption that we belong to the curve section. + float mergedSegmentCount = totalVertexCount - joinSegmentCount; + float mergedVertexID = vertexIdx; + if (mergedVertexID <= mergedSegmentCount) + { + // We do belong to the curve section. Clear out any stroke join flags. + contourIDWithFlags &= ~JOIN_TYPE_MASK; + } + else + { + // We actually belong to the join section following the curve. Construct a point-cubic with + // rotation. 
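+        // (Collapsing all four control points to a single point below makes every parametric
+        // segment degenerate, so only the polar rotation segments of this join emit geometry.)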
+ p0 = p1 = p2 = p3; + tangents = float2x2(tangents[1], v_joinArgs.xy /*joinTangent*/); + parametricSegmentCount = 1.; + mergedVertexID -= mergedSegmentCount; + mergedSegmentCount = joinSegmentCount; + if ((contourIDWithFlags & JOIN_TYPE_MASK) != 0u) + { + // Miter or bevel join vertices snap to either tangents[0] or tangents[1], and get + // adjusted in the shader that follows. + if (mergedVertexID < 2.5) // With 5 join segments, this branch will see IDs: 1, 2, 3, 4. + contourIDWithFlags |= JOIN_TANGENT_0_CONTOUR_FLAG; + if (mergedVertexID > 1.5 && mergedVertexID < 3.5) + contourIDWithFlags |= JOIN_TANGENT_INNER_CONTOUR_FLAG; + } + else if ((contourIDWithFlags & EMULATED_STROKE_CAP_CONTOUR_FLAG) != 0u) + { + // Round caps emulated as joins need to emit vertices at T=0 and T=1, unlike normal + // round joins. Preserve the same number of vertices (the CPU should have given us two + // extra, knowing that we are an emulated cap, and the vertex shader should have already + // accounted for this in radsPerJoinSegment), but adjust our stepping parameters so we + // begin at T=0 and end at T=1. + mergedSegmentCount -= 2.; + mergedVertexID--; + } + radsPerPolarSegment = v_joinArgs.z; // radsPerJoinSegment. + contourIDWithFlags |= + radsPerPolarSegment < .0 ? LEFT_JOIN_CONTOUR_FLAG : RIGHT_JOIN_CONTOUR_FLAG; + } + + float2 tessCoord; + float theta = .0; + if (mergedVertexID == .0 || mergedVertexID == mergedSegmentCount || + (contourIDWithFlags & JOIN_TYPE_MASK) != 0u) + { + // Tessellated vertices at the beginning and end of the strip use exact endpoints and + // tangents. This ensures crack-free seaming between instances. + bool isTan0 = mergedVertexID < mergedSegmentCount * .5; + tessCoord = isTan0 ? p0 : p3; + theta = atan2(isTan0 ? tangents[0] : tangents[1]); + } + else if ((contourIDWithFlags & RETROFITTED_TRIANGLE_CONTOUR_FLAG) != 0u) + { + // This cubic should actually be drawn as the single, non-AA triangle: [p0, p1, p3]. + // This is used to squeeze in more rare triangles, like "grout" triangles from self + // intersections on interior triangulation, where it wouldn't be worth it to put them in + // their own dedicated draw call. + tessCoord = p1; + } + else + { + float T, polarT; + if (parametricSegmentCount == mergedSegmentCount) + { + // There are no polar vertices. This is (probably) a fill. Vertices are spaced evenly in + // parametric space. + T = mergedVertexID / parametricSegmentCount; + polarT = .0; // Set polarT != T to ensure we calculate the parametric tangent later. + } + else + { + // Compute the location and tangent direction of the tessellated stroke vertex with the + // integral id "mergedVertexID", where mergedVertexID is the sorted-order index of + // parametric and polar vertices. Start by finding the tangent function's power basis + // coefficients. These define a tangent direction (scaled by some uniform value) as: + // + // |T^2| + // Tangent_Direction(T) = dx,dy = |A 2B C| * |T | + // |. . .| |1 | + float2 A, B, C = p1 - p0; + float2 D = p3 - p0; + float2 E = p2 - p1; + B = E - C; + A = -3. * E + D; + // FIXME(crbug.com/800804,skbug.com/11268): Consider normalizing the exponents in A,B,C + // at this point in order to prevent fp32 overflow. + + // Now find the coefficients that give a tangent direction from a parametric vertex ID: + // + // |parametricVertexID^2| + // Tangent_Direction(parametricVertexID) = dx,dy = |A B_ C_| * |parametricVertexID | + // |. . 
.| |1 | + // + float2 B_ = B * (parametricSegmentCount * 2.); + float2 C_ = C * (parametricSegmentCount * parametricSegmentCount); + + // Run a binary search to determine the highest parametric vertex that is located on or + // before the mergedVertexID. A merged ID is determined by the sum of complete + // parametric and polar segments behind it. i.e., find the highest parametric vertex + // where: + // + // parametricVertexID + floor(numPolarSegmentsAtParametricT) <= mergedVertexID + // + float lastParametricVertexID = .0; + float maxParametricVertexID = min(parametricSegmentCount - 1., mergedVertexID); + // FIXME(crbug.com/800804,skbug.com/11268): This normalize() can overflow. + float2 tan0norm = normalize(tangents[0]); + float negAbsRadsPerSegment = -abs(radsPerPolarSegment); + float maxRotation0 = (1. + mergedVertexID) * abs(radsPerPolarSegment); + for (int p = MAX_PARAMETRIC_SEGMENTS_LOG2 - 1; p >= 0; --p) + { + // Test the parametric vertex at lastParametricVertexID + 2^p. + float testParametricID = lastParametricVertexID + exp2(float(p)); + if (testParametricID <= maxParametricVertexID) + { + float2 testTan = testParametricID * A + B_; + testTan = testParametricID * testTan + C_; + float cosRotation = dot(normalize(testTan), tan0norm); + float maxRotation = testParametricID * negAbsRadsPerSegment + maxRotation0; + maxRotation = min(maxRotation, PI); + // Is rotation <= maxRotation? (i.e., is the number of complete polar segments + // behind testT, + testParametricID <= mergedVertexID?) + if (cosRotation >= cos(maxRotation)) + lastParametricVertexID = testParametricID; + } + } + + // Find the T value of the parametric vertex at lastParametricVertexID. + float parametricT = lastParametricVertexID / parametricSegmentCount; + + // Now that we've identified the highest parametric vertex on or before the + // mergedVertexID, the highest polar vertex is easy: + float lastPolarVertexID = mergedVertexID - lastParametricVertexID; + + // Find the angle of tan0, or the angle between tan0norm and the positive x axis. + float theta0 = acos(clamp(tan0norm.x, -1., 1.)); + theta0 = tan0norm.y >= .0 ? theta0 : -theta0; + + // Find the tangent vector on the vertex at lastPolarVertexID. + theta = lastPolarVertexID * radsPerPolarSegment + theta0; + float2 norm = float2(sin(theta), -cos(theta)); + + // Find the T value where the tangent is orthogonal to norm. This is a quadratic: + // + // dot(norm, Tangent_Direction(T)) == 0 + // + // |T^2| + // norm * |A 2B C| * |T | == 0 + // |. . .| |1 | + // + float a = dot(norm, A), b_over_2 = dot(norm, B), c = dot(norm, C); + float discr_over_4 = max(b_over_2 * b_over_2 - a * c, .0); + float q = sqrt(discr_over_4); + if (b_over_2 > .0) + q = -q; + q -= b_over_2; + + // Roots are q/a and c/q. Since each curve section does not inflect or rotate more than + // 180 degrees, there can only be one tangent orthogonal to "norm" inside 0..1. Pick the + // root nearest .5. + float _5qa = -.5 * q * a; + float2 root = (abs(q * q + _5qa) < abs(a * c + _5qa)) ? float2(q, a) : float2(c, q); + polarT = (root.y != .0) ? root.x / root.y : .0; + polarT = clamp(polarT, .0, 1.); + + // The root finder above can become unstable when lastPolarVertexID == 0 (e.g., if there + // are roots at exactly 0 and 1 both). polarT should always == 0 in this case. + if (lastPolarVertexID == .0) + polarT = .0; + + // Now that we've identified the T values of the last parametric and polar vertices, our + // final T value for mergedVertexID is whichever is larger.
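+ // (Both sequences advance monotonically in T, so the merged vertex's T is simply the + // later of the two candidates.)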
+ T = max(parametricT, polarT); + } + + // Evaluate the cubic at T. Use De Casteljau's for its accuracy and stability. + float2 ab = unchecked_mix(p0, p1, T); + float2 bc = unchecked_mix(p1, p2, T); + float2 cd = unchecked_mix(p2, p3, T); + float2 abc = unchecked_mix(ab, bc, T); + float2 bcd = unchecked_mix(bc, cd, T); + tessCoord = unchecked_mix(abc, bcd, T); + + // If we went with T=parametricT, then update theta. Otherwise leave it at the polar theta + // found previously. (In the event that parametricT == polarT, we keep the polar theta.) + if (T != polarT) + theta = atan2(bcd - abc); + } + + EMIT_FRAG_DATA(uint4(floatBitsToUint(float3(tessCoord, theta)), contourIDWithFlags)); +} +#endif diff --git a/Shaders/Private/Rive/atomic_base.ush b/Shaders/Private/Rive/atomic_base.ush new file mode 100644 index 00000000..99554482 --- /dev/null +++ b/Shaders/Private/Rive/atomic_base.ush @@ -0,0 +1,13 @@ +#pragma once +#define USING_PLS_STORAGE_TEXTURES 1 +#define ENABLE_TYPED_UAV_LOAD_STORE 1 +#define USE_GENERATED_UNIFORMS 1 +#define OPTIONALLY_FLAT flat + +#include "parse_environment.ush" +#include "Generated/rhi.minified.ush" +#include "Generated/constants.minified.ush" +#include "Generated/common.minified.ush" +#include "Generated/advanced_blend.minified.ush" +#include "Generated/draw_path_common.minified.ush" +#include "Generated/atomic_draw.minified.ush" diff --git a/Shaders/Private/Rive/atomic_draw_image_mesh.usf b/Shaders/Private/Rive/atomic_draw_image_mesh.usf new file mode 100644 index 00000000..8eb85eda --- /dev/null +++ b/Shaders/Private/Rive/atomic_draw_image_mesh.usf @@ -0,0 +1,4 @@ +#define DRAW_IMAGE +#define DRAW_IMAGE_MESH +#include "/Engine/Public/Platform.ush" +#include "atomic_base.ush" diff --git a/Shaders/Private/Rive/atomic_draw_image_rect.usf b/Shaders/Private/Rive/atomic_draw_image_rect.usf new file mode 100644 index 00000000..36fb1673 --- /dev/null +++ b/Shaders/Private/Rive/atomic_draw_image_rect.usf @@ -0,0 +1,4 @@ +#define DRAW_IMAGE +#define DRAW_IMAGE_RECT +#include "/Engine/Public/Platform.ush" +#include "atomic_base.ush" diff --git a/Shaders/Private/Rive/atomic_draw_interior_triangles.usf b/Shaders/Private/Rive/atomic_draw_interior_triangles.usf new file mode 100644 index 00000000..c883d718 --- /dev/null +++ b/Shaders/Private/Rive/atomic_draw_interior_triangles.usf @@ -0,0 +1,3 @@ +#define DRAW_INTERIOR_TRIANGLES +#include "/Engine/Public/Platform.ush" +#include "atomic_base.ush" diff --git a/Shaders/Private/Rive/atomic_draw_path.usf b/Shaders/Private/Rive/atomic_draw_path.usf new file mode 100644 index 00000000..b0c74527 --- /dev/null +++ b/Shaders/Private/Rive/atomic_draw_path.usf @@ -0,0 +1,3 @@ +#define DRAW_PATH +#include "/Engine/Public/Platform.ush" +#include "atomic_base.ush" diff --git a/Shaders/Private/Rive/atomic_resolve_pls.usf b/Shaders/Private/Rive/atomic_resolve_pls.usf new file mode 100644 index 00000000..62da1f34 --- /dev/null +++ b/Shaders/Private/Rive/atomic_resolve_pls.usf @@ -0,0 +1,4 @@ +#define DRAW_RENDER_TARGET_UPDATE_BOUNDS 1 +#define RESOLVE_PLS 1 +#include "/Engine/Public/Platform.ush" +#include "atomic_base.ush" \ No newline at end of file diff --git a/Shaders/Private/Rive/color_ramp.usf b/Shaders/Private/Rive/color_ramp.usf new file mode 100644 index 00000000..e5a23a50 --- /dev/null +++ b/Shaders/Private/Rive/color_ramp.usf @@ -0,0 +1,9 @@ +#include "/Engine/Public/Platform.ush" +#define USE_GENERATED_UNIFORMS 1 +#define SPLIT_UINT4_ATTRIBUTES 1 + +#include "parse_environment.ush" +#include "Generated/rhi.minified.ush" +#include 
"Generated/constants.minified.ush" +#include "Generated/common.minified.ush" +#include "Generated/color_ramp.minified.ush" diff --git a/Shaders/Private/Rive/draw_image_mesh.usf b/Shaders/Private/Rive/draw_image_mesh.usf new file mode 100644 index 00000000..9664522a --- /dev/null +++ b/Shaders/Private/Rive/draw_image_mesh.usf @@ -0,0 +1,14 @@ +#define PLS_IMPL_SUBPASS_LOAD +#define OPTIONALLY_FLAT flat +#define DRAW_IMAGE +#define DRAW_IMAGE_MESH + +#include "/Engine/Public/Platform.ush" + +#include "parse_environment.ush" +#include "Generated/rhi.minified.ush" +#include "Generated/constants.minified.ush" +#include "Generated/specialization.minified.ush" +#include "Generated/common.minified.ush" +#include "Generated/advanced_blend.minified.ush" +#include "Generated/draw_image_mesh.minified.ush" diff --git a/Shaders/Private/Rive/draw_interior_triangles.usf b/Shaders/Private/Rive/draw_interior_triangles.usf new file mode 100644 index 00000000..32b25f1d --- /dev/null +++ b/Shaders/Private/Rive/draw_interior_triangles.usf @@ -0,0 +1,13 @@ +#define PLS_IMPL_SUBPASS_LOAD +#define OPTIONALLY_FLAT flat +#define DRAW_INTERIOR_TRIANGLES + +#include "/Engine/Public/Platform.ush" + +#include "parse_environment.ush" +#include "Generated/rhi.minified.ush" +#include "Generated/constants.minified.ush" +#include "Generated/common.minified.ush" +#include "Generated/draw_path_common.minified.ush" +#include "Generated/advanced_blend.minified.ush" +#include "Generated/draw_path.minified.ush" diff --git a/Shaders/Private/Rive/draw_path.usf b/Shaders/Private/Rive/draw_path.usf new file mode 100644 index 00000000..45e7ffb2 --- /dev/null +++ b/Shaders/Private/Rive/draw_path.usf @@ -0,0 +1,14 @@ +#define ENABLE_INSTANCE_INDEX +#define PLS_IMPL_SUBPASS_LOAD +#define OPTIONALLY_FLAT flat +#define DRAW_PATH + +#include "/Engine/Public/Platform.ush" + +#include "parse_environment.ush" +#include "Generated/rhi.minified.ush" +#include "Generated/constants.minified.ush" +#include "Generated/common.minified.ush" +#include "Generated/draw_path_common.minified.ush" +#include "Generated/advanced_blend.minified.ush" +#include "Generated/draw_path.minified.ush" diff --git a/Shaders/Private/Rive/parse_environment.ush b/Shaders/Private/Rive/parse_environment.ush new file mode 100644 index 00000000..349a5729 --- /dev/null +++ b/Shaders/Private/Rive/parse_environment.ush @@ -0,0 +1,29 @@ +// unreal rhi always defines the permutation values, however, we expect them to either exist or not. 
So +// here we check whether each one is set to false and #undef it if it is +#if !ENABLE_CLIPPING +#undef ENABLE_CLIPPING +#endif + +#if !ENABLE_CLIP_RECT +#undef ENABLE_CLIP_RECT +#endif + +#if !ENABLE_ADVANCED_BLEND +#undef ENABLE_ADVANCED_BLEND +#endif + +#if !FIXED_FUNCTION_COLOR_BLEND +#undef FIXED_FUNCTION_COLOR_BLEND +#endif + +#if !ENABLE_HSL_BLEND_MODES +#undef ENABLE_HSL_BLEND_MODES +#endif + +#if !ENABLE_NESTED_CLIPPING +#undef ENABLE_NESTED_CLIPPING +#endif + +#if !ENABLE_EVEN_ODD +#undef ENABLE_EVEN_ODD +#endif diff --git a/Shaders/Private/Rive/tessellate.usf b/Shaders/Private/Rive/tessellate.usf new file mode 100644 index 00000000..e88f8f07 --- /dev/null +++ b/Shaders/Private/Rive/tessellate.usf @@ -0,0 +1,9 @@ +#include "/Engine/Public/Platform.ush" +#define USE_GENERATED_UNIFORMS 1 +#define SPLIT_UINT4_ATTRIBUTES 1 + +#include "parse_environment.ush" +#include "Generated/rhi.minified.ush" +#include "Generated/constants.minified.ush" +#include "Generated/common.minified.ush" +#include "Generated/tessellate.minified.ush" diff --git a/Shaders/Private/Rive/test.usf b/Shaders/Private/Rive/test.usf new file mode 100644 index 00000000..867b9b0c --- /dev/null +++ b/Shaders/Private/Rive/test.usf @@ -0,0 +1,11 @@ +#include "/Engine/Public/Platform.ush" + +void TestVertex(uint _vertexID : SV_VertexID, out float4 out_position : SV_Position) +{ + out_position = float4(((_vertexID & 1) == 0) ? -1 : 1, ((_vertexID & 2) == 0) ? -1 : 1, 0.0, 1.0); +} + +void TestFragment(out float4 out_color : SV_Target0) +{ + out_color = float4(1.0, 1.0, 1.0, 1.0); +} \ No newline at end of file diff --git a/Source/Rive/Private/Game/RiveActorComponent.cpp b/Source/Rive/Private/Game/RiveActorComponent.cpp index 22a473ba..123e852b 100644 --- a/Source/Rive/Private/Game/RiveActorComponent.cpp +++ b/Source/Rive/Private/Game/RiveActorComponent.cpp @@ -14,6 +14,99 @@ class FRiveStateMachine; +constexpr rive::ColorInt Cyan = 0xFF00FFFF; +constexpr rive::ColorInt Magenta = 0xFFFF00FF; +constexpr rive::ColorInt Yellow = 0xFFFFFF00; +constexpr rive::ColorInt Black = 0xFF000000; +constexpr rive::ColorInt Green = 0xFF00FF00; +constexpr rive::ColorInt Blue = 0xFF0000FF; + +void DrawDefaultTest(rive::Factory* factory, rive::Renderer* renderer, FIntPoint Size) +{ + constexpr rive::ColorInt colors[] = {Cyan, Magenta, Yellow, Cyan}; + constexpr float stops[] = {0.0, 0.33, 0.66, 1}; + auto gradientShader = factory->makeRadialGradient(120, 120, Size.X/2, colors, stops, 4); + + //Gradient Test + rive::RawPath FullRect, notFullRect; + FullRect.addRect({0,0,static_cast<float>(Size.X),static_cast<float>(Size.Y)}); + notFullRect.addRect({0,0,100,static_cast<float>(Size.Y)}); + auto FullRectRenderPath = factory->makeRenderPath(FullRect, rive::FillRule::nonZero); + auto notFullRectRenderPath = factory->makeRenderPath(notFullRect, rive::FillRule::nonZero); + + auto FullRectPaint = factory->makeRenderPaint(); + FullRectPaint->style(rive::RenderPaintStyle::fill); + //FullRectPaint->color(Magenta); + FullRectPaint->shader(gradientShader); + + auto notFullRectPaint = factory->makeRenderPaint(); + notFullRectPaint->style(rive::RenderPaintStyle::fill); + notFullRectPaint->color(Yellow); + renderer->drawPath(FullRectRenderPath.get(), FullRectPaint.get()); + renderer->drawPath(notFullRectRenderPath.get(), notFullRectPaint.get()); + + //Shapes Test + rive::RawPath rectPath, rectPath2; + rectPath.addRect({10, 10, 70, 30}); + rectPath2.addRect({80, 10, 140, 30}); + auto renderRectPath = factory->makeRenderPath(rectPath, rive::FillRule::nonZero); + auto renderRectPath2 = 
factory->makeRenderPath(rectPath2, rive::FillRule::nonZero); + + auto fillPaint = factory->makeRenderPaint(); + fillPaint->style(rive::RenderPaintStyle::fill); + fillPaint->color(Black); + + auto strokePaint = factory->makeRenderPaint(); + strokePaint->style(rive::RenderPaintStyle::stroke); + strokePaint->thickness(5.0f); + strokePaint->color(Black); + + renderer->drawPath(renderRectPath.get(), fillPaint.get()); + renderer->drawPath(renderRectPath2.get(), strokePaint.get()); + + strokePaint->thickness(8.0f); + rive::RawPath ovalPath; + ovalPath.addOval({150, 10, 210, 30}); + auto ovalRenderPath = factory->makeRenderPath(ovalPath, rive::FillRule::nonZero); + renderer->drawPath(ovalRenderPath.get(), strokePaint.get()); + + //Line Test + rive::RawPath linePath1; + auto linePaint1 = factory->makeRenderPaint(); + linePath1.moveTo(10, 410); + linePath1.quadTo(556, 364, 428, 420); + linePath1.quadTo(310, 492, 550, 550); + + linePaint1->style(rive::RenderPaintStyle::stroke); + linePaint1->cap(rive::StrokeCap::round); + linePaint1->color(Green); + linePaint1->thickness(10); + + auto lineRenderPath1 = factory->makeEmptyRenderPath(); + linePath1.addTo(lineRenderPath1.get()); + renderer->drawPath(lineRenderPath1.get(), linePaint1.get()); + + //Line Test2 + rive::RawPath linePath2; + auto linePaint2 = factory->makeRenderPaint(); + + linePaint2->style(rive::RenderPaintStyle::stroke); + linePaint2->cap(rive::StrokeCap::round); + linePaint2->color(Blue); + linePaint2->thickness(20); + linePaint2->join(rive::StrokeJoin::bevel); + + linePath2.moveTo(100, 600); + linePath2.lineTo(1000, 600); + linePath2.moveTo(1000, 600); + linePath2.lineTo(1000, 1000); + linePath2.moveTo(1000, 1000); + linePath2.lineTo(100, 1000); + + auto lineRenderPath2 = factory->makeRenderPath(linePath2, rive::FillRule::nonZero); + renderer->drawPath(lineRenderPath2.get(), linePaint2.get()); +} + URiveActorComponent::URiveActorComponent(): Size(500, 500) { // Set this component to be initialized when the game starts, and to be ticked every frame. 
You can turn these features @@ -65,6 +158,40 @@ void URiveActorComponent::Initialize() RiveRenderer->CallOrRegister_OnInitialized(IRiveRenderer::FOnRendererInitialized::FDelegate::CreateUObject(this, &URiveActorComponent::RiveReady)); } +void URiveActorComponent::RenderRiveTest() +{ + if (!RiveTexture) + { + UE_LOG(LogRive, Error, TEXT("RiveRenderTest, RiveTexture not init")); + return; + } + + if (!IRiveRendererModule::IsAvailable()) + { + UE_LOG(LogRive, Error, TEXT("RiveRenderTest, Rive Renderer Module is either missing or not loaded properly.")); + return; + } + + IRiveRenderer* RiveRenderer = IRiveRendererModule::Get().GetRenderer(); + + if (!RiveRenderer) + { + UE_LOG(LogRive, Error, TEXT("Failed to RiveRenderTest, as we do not have a valid renderer.")); + return; + } + + if (!RiveRenderer->IsInitialized()) + { + UE_LOG(LogRive, Error, TEXT("Could not RiveRenderTest, as the required Rive Renderer is not initialized.")); + return; + } + + RiveRenderTarget->RegisterRenderCommand([Size = this->Size](rive::Factory* factory, rive::Renderer* renderer) + { + DrawDefaultTest(factory, renderer, Size); + }); +} + void URiveActorComponent::ResizeRenderTarget(int32 InSizeX, int32 InSizeY) { if (!RiveTexture) @@ -277,7 +404,7 @@ void URiveActorComponent::RiveReady(IRiveRenderer* InRiveRenderer) RiveTexture = NewObject<URiveTexture>(); // Initialize Rive Render Target only after we resize the texture RiveRenderTarget = InRiveRenderer->CreateTextureTarget_GameThread(GetFName(), RiveTexture); - RiveRenderTarget->SetClearColor(FLinearColor::Transparent); + RiveRenderTarget->SetClearColor(FLinearColor::White); RiveTexture->ResizeRenderTargets(FIntPoint(Size.X, Size.Y)); RiveRenderTarget->Initialize(); diff --git a/Source/Rive/Private/Rive/Assets/RiveImageAsset.cpp b/Source/Rive/Private/Rive/Assets/RiveImageAsset.cpp index 9e9d574b..291056a1 100644 --- a/Source/Rive/Private/Rive/Assets/RiveImageAsset.cpp +++ b/Source/Rive/Private/Rive/Assets/RiveImageAsset.cpp @@ -2,17 +2,62 @@ #include "Rive/Assets/RiveImageAsset.h" +#include "IImageWrapper.h" +#include "IImageWrapperModule.h" #include "IRiveRenderer.h" #include "IRiveRendererModule.h" #include "Logs/RiveLog.h" + #include "Engine/Texture2D.h" +#include "Engine/Texture.h" #include "TextureResource.h" +#include "rive/renderer/render_context_helper_impl.hpp" +#include "rive/renderer/rive_render_image.hpp" THIRD_PARTY_INCLUDES_START #include "rive/factory.hpp" #include "rive/renderer/render_context.hpp" THIRD_PARTY_INCLUDES_END +namespace UE::Private::RiveImageAsset +{ + void GetTextureData(UTexture2D* Texture, TArray<uint8>& OutData) + { + if (!Texture) + { + UE_LOG(LogTemp, Warning, TEXT("Invalid Texture.")); + return; + } + + // Access the platform data + FTexturePlatformData* PlatformData = Texture->GetPlatformData(); + if (!PlatformData || PlatformData->Mips.Num() == 0) + { + UE_LOG(LogTemp, Warning, TEXT("No platform data available.")); + return; + } + + if (PlatformData->Mips.Num() > 1) + { + UE_LOG(LogTemp, Warning, TEXT("Can't load a texture with Mip count > 1")); + return; + } + + // Get the mip data from the first mip level + FTexture2DMipMap& Mip = PlatformData->Mips[0]; + if (const uint8* MipData = static_cast<const uint8*>(Mip.BulkData.Lock(LOCK_READ_ONLY))) + { + // Copy data to output array + const uint32 MipSize = Mip.SizeX * Mip.SizeY * sizeof(FColor); + OutData.SetNumUninitialized(MipSize); + FMemory::Memcpy(OutData.GetData(), MipData, MipSize); + } else { + UE_LOG(LogTemp, Warning, TEXT("No texture data available.")); + } + + Mip.BulkData.Unlock(); + } +}
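+ +// Note: GetTextureData assumes uncompressed 8-bit BGRA platform data, i.e. exactly +// sizeof(FColor) == 4 bytes per texel; the MipSize arithmetic above is not valid for +// block-compressed formats. LoadTexture() below guards against this by requiring the +// UserInterface2D (TC_EditorIcon) compression setting before reading the mip.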
URiveImageAsset::URiveImageAsset() { @@ -23,9 +68,23 @@ void URiveImageAsset::LoadTexture(UTexture2D* InTexture) { if (!InTexture) return; - UE_LOG(LogRive, Warning, TEXT("LoadTexture NYI")); - return; + { // Ensure we have a single mip + int32 MipCount = InTexture->GetNumMips(); + if (MipCount != 1) + { + UE_LOG(LogRive, Error, TEXT("LoadTexture: Texture being loaded needs to have 1 mip level to load. You can do this by either setting 'Mip Gen Settings' to 'NoMipMaps', or 'Mip Gen Settings' to 'FromTextureGroup' AND 'Texture Group' set to 'UI'")); + return; + } + } + { // Ensure our compression is simple RGBA + if (InTexture->CompressionSettings != TC_EditorIcon) // TC_EditorIcon is RGBA + { + UE_LOG(LogRive, Error, TEXT("LoadTexture: Texture needs to be set to have a 'CompressionSetting' of 'UserInterface2D'")); + return; + } + } + IRiveRenderer* RiveRenderer = IRiveRendererModule::Get().GetRenderer(); RiveRenderer->CallOrRegister_OnInitialized(IRiveRenderer::FOnRendererInitialized::FDelegate::CreateLambda( @@ -39,50 +98,21 @@ void URiveImageAsset::LoadTexture(UTexture2D* InTexture) if (ensure(RenderContext)) { - InTexture->SetForceMipLevelsToBeResident(30.f); - InTexture->WaitForStreaming(); - - EPixelFormat PixelFormat = InTexture->GetPixelFormat(); - if (PixelFormat != PF_R8G8B8A8) - { - UE_LOG(LogRive, Error, TEXT("Error loading Texture '%s': Rive only supports RGBA pixel formats. This texture is of format"), *InTexture->GetName()) - return; - } - - FTexture2DMipMap& Mip = InTexture->GetPlatformData()->Mips[0]; - uint8* MipData = reinterpret_cast<uint8*>(Mip.BulkData.Lock(LOCK_READ_ONLY)); - int32 BitmapDataSize = Mip.SizeX * Mip.SizeY * sizeof(FColor); - - TArray<uint8> BitmapData; - BitmapData.AddUninitialized(BitmapDataSize); - FMemory::Memcpy(BitmapData.GetData(), MipData, BitmapDataSize); - Mip.BulkData.Unlock(); - - if (MipData == nullptr) - { - UE_LOG(LogRive, Error, TEXT("Unable to load Mip data for %s"), *InTexture->GetName()); - return; - } - - - // decodeImage() here requires encoded bytes and returns a rive::RenderImage - // it will call: - // PLSRenderContextHelperImpl::decodeImageTexture() -> - // Bitmap::decode() - // // here, Bitmap only decodes webp, jpg, png and discards otherwise - rive::rcp<rive::RenderImage> DecodedImage = RenderContext->decodeImage(rive::make_span(BitmapData.GetData(), BitmapDataSize)); - - // This is what we need, to make a RenderImage and supply raw bitmap bytes that aren't already encoded: - // makeImage, createImage, or any other descriptive name could be used - // rive::rcp<rive::RenderImage> RenderImage = PLSRenderContext->makeImage(rive::make_span(BitmapData.GetData(), BitmapDataSize)); - - if (DecodedImage == nullptr) + TArray<uint8> ImageData; + UE::Private::RiveImageAsset::GetTextureData(InTexture, ImageData); + + if (ImageData.IsEmpty()) { - UE_LOG(LogRive, Error, TEXT("Could not decode image asset: %s"), *InTexture->GetName()); + UE_LOG(LogRive, Error, TEXT("LoadTexture: Could not get raw bitmap data from Texture.")); return; } - - NativeAsset->as<rive::ImageAsset>()->renderImage(DecodedImage); + + TArray64<uint8> CompressedImage; + FImageView ImageView = FImageView(ImageData.GetData(), InTexture->GetSizeX(), InTexture->GetSizeY(), ERawImageFormat::BGRA8); + IImageWrapperModule& ImageWrapperModule = FModuleManager::LoadModuleChecked<IImageWrapperModule>(FName("ImageWrapper")); + ImageWrapperModule.CompressImage(CompressedImage, EImageFormat::PNG, ImageView, 100); + rive::rcp<rive::RenderImage> RenderImage = RenderContext->decodeImage(rive::make_span(CompressedImage.GetData(), CompressedImage.Num())); + NativeAsset->as<rive::ImageAsset>()->renderImage(RenderImage); } } ));
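+ + // Note: decodeImage() only accepts encoded bytes (the RHI backend decodes png, jpeg + // and webp), so the raw BGRA pixels are round-tripped through a lossless PNG encode + // above. If the runtime ever exposes a way to build a RenderImage directly from raw + // bitmap bytes, the CompressImage/decodeImage pair can collapse into a single upload.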
@@ -101,17 +131,17 @@ void URiveImageAsset::LoadImageBytes(const TArray<uint8>& InBytes) FScopeLock Lock(&RiveRenderer->GetThreadDataCS()); RenderContext = RiveRenderer->GetRenderContext(); } - + if (ensure(RenderContext)) { auto DecodedImage = RenderContext->decodeImage(rive::make_span(InBytes.GetData(), InBytes.Num())); - + if (DecodedImage == nullptr) { UE_LOG(LogRive, Error, TEXT("LoadImageBytes: Could not decode image bytes")); return; } - + rive::ImageAsset* ImageAsset = NativeAsset->as<rive::ImageAsset>(); ImageAsset->renderImage(DecodedImage); } diff --git a/Source/Rive/Private/Rive/RiveTexture.cpp b/Source/Rive/Private/Rive/RiveTexture.cpp index 741b8304..e374e1eb 100644 --- a/Source/Rive/Private/Rive/RiveTexture.cpp +++ b/Source/Rive/Private/Rive/RiveTexture.cpp @@ -133,8 +133,7 @@ void URiveTexture::InitializeResources() const FRHITextureCreateDesc RenderTargetTextureDesc = FRHITextureCreateDesc::Create2D(*DebugName, Size.X, Size.Y, Format) - .SetClearValue(FClearValueBinding(FLinearColor(0.0f, 0.0f, 0.0f))) - .SetFlags(ETextureCreateFlags::Dynamic | ETextureCreateFlags::ShaderResource | ETextureCreateFlags::RenderTargetable); + .SetFlags(ETextureCreateFlags::UAV | ETextureCreateFlags::Dynamic | ETextureCreateFlags::ShaderResource | ETextureCreateFlags::RenderTargetable | ETextureCreateFlags::SRGB); #if !(PLATFORM_IOS || PLATFORM_MAC) // SRGB could have been manually overridden if (SRGB) diff --git a/Source/Rive/Private/Rive/RiveTextureObject.cpp b/Source/Rive/Private/Rive/RiveTextureObject.cpp index 003a8343..d728e2a3 100644 --- a/Source/Rive/Private/Rive/RiveTextureObject.cpp +++ b/Source/Rive/Private/Rive/RiveTextureObject.cpp @@ -157,54 +157,18 @@ void URiveTextureObject::Initialize(const FRiveDescriptor& InRiveDescriptor) return; } - RiveRenderer->CallOrRegister_OnInitialized(IRiveRenderer::FOnRendererInitialized::FDelegate::CreateUObject(this, &URiveTextureObject::RiveReady)); + RiveRenderer->CallOrRegister_OnInitialized(IRiveRenderer::FOnRendererInitialized::FDelegate::CreateUObject(this, &URiveTextureObject::OnRiveRendererInitialized)); } -void URiveTextureObject::RiveReady(IRiveRenderer* InRiveRenderer) +void URiveTextureObject::OnRiveRendererInitialized(IRiveRenderer* InRiveRenderer) { - if (Artboard == nullptr) - Artboard = NewObject<URiveArtboard>(this); - else - Artboard->Reinitialize(true); - - RiveRenderTarget.Reset(); - RiveRenderTarget = InRiveRenderer->CreateTextureTarget_GameThread(GetFName(), this); - - if (!OnResourceInitializedOnRenderThread.IsBoundToObject(this)) - { - OnResourceInitializedOnRenderThread.AddUObject(this, &URiveTextureObject::OnResourceInitialized_RenderThread); - } - - RiveRenderTarget->SetClearColor(ClearColor); - - if (RiveDescriptor.ArtboardName.IsEmpty()) - { - Artboard->Initialize(RiveDescriptor.RiveFile, RiveRenderTarget, RiveDescriptor.ArtboardIndex, RiveDescriptor.StateMachineName); - } - else - { - Artboard->Initialize(RiveDescriptor.RiveFile, RiveRenderTarget, RiveDescriptor.ArtboardName, RiveDescriptor.StateMachineName); - } - - RiveDescriptor.ArtboardName = Artboard->GetArtboardName(); - RiveDescriptor.StateMachineName = Artboard->StateMachineName; - - if (Size == FIntPoint::ZeroValue) + if (!RiveDescriptor.RiveFile->IsInitialized()) { - ResizeRenderTargets(Artboard->GetSize()); + RiveDescriptor.RiveFile->OnInitializedDelegate.AddUObject(this, &URiveTextureObject::OnRiveFileInitialized); } else { - ResizeRenderTargets(Size); + OnRiveFileInitialized(true); } - - InitializeAudioEngine(); - - Artboard->OnArtboardTick_Render.BindDynamic(this, 
&URiveTextureObject::OnArtboardTickRender); - Artboard->OnGetLocalCoordinate.BindDynamic(this, &URiveTextureObject::GetLocalCoordinate); - - RiveRenderTarget->Initialize(); - bIsRendering = true; - OnRiveReady.Broadcast(); } void URiveTextureObject::OnResourceInitialized_RenderThread(FRHICommandListImmediate& RHICmdList, FTextureRHIRef& NewResource) const @@ -216,6 +180,63 @@ void URiveTextureObject::OnResourceInitialized_RenderThread(FRHICommandListImmed } } +void URiveTextureObject::OnRiveFileInitialized(bool bSuccess) +{ + if (!bSuccess) + { + UE_LOG(LogRive, Error, TEXT("RiveTextureObject: RiveFile was not successfully initialized.")); + return; + } + + IRiveRenderer* RiveRenderer = IRiveRendererModule::Get().GetRenderer(); + if (ensure(RiveRenderer)) + { + if (Artboard == nullptr) + Artboard = NewObject<URiveArtboard>(this); + else + Artboard->Reinitialize(true); + + RiveRenderTarget.Reset(); + RiveRenderTarget = RiveRenderer->CreateTextureTarget_GameThread(GetFName(), this); + + if (!OnResourceInitializedOnRenderThread.IsBoundToObject(this)) + { + OnResourceInitializedOnRenderThread.AddUObject(this, &URiveTextureObject::OnResourceInitialized_RenderThread); + } + + RiveRenderTarget->SetClearColor(ClearColor); + + if (RiveDescriptor.ArtboardName.IsEmpty()) + { + Artboard->Initialize(RiveDescriptor.RiveFile, RiveRenderTarget, RiveDescriptor.ArtboardIndex, RiveDescriptor.StateMachineName); + } + else + { + Artboard->Initialize(RiveDescriptor.RiveFile, RiveRenderTarget, RiveDescriptor.ArtboardName, RiveDescriptor.StateMachineName); + } + + RiveDescriptor.ArtboardName = Artboard->GetArtboardName(); + RiveDescriptor.StateMachineName = Artboard->StateMachineName; + + if (Size == FIntPoint::ZeroValue) + { + ResizeRenderTargets(Artboard->GetSize()); + } else + { + ResizeRenderTargets(Size); + } + + InitializeAudioEngine(); + + Artboard->OnArtboardTick_Render.BindDynamic(this, &URiveTextureObject::OnArtboardTickRender); + Artboard->OnGetLocalCoordinate.BindDynamic(this, &URiveTextureObject::GetLocalCoordinate); + + RiveRenderTarget->Initialize(); + bIsRendering = true; + OnRiveReady.Broadcast(); + } +} + #if WITH_EDITOR void URiveTextureObject::PostEditChangeChainProperty(FPropertyChangedChainEvent& PropertyChangedEvent) { diff --git a/Source/Rive/Private/Rive/RiveTextureResource.cpp b/Source/Rive/Private/Rive/RiveTextureResource.cpp index e21657b9..cc4e3c3e 100644 --- a/Source/Rive/Private/Rive/RiveTextureResource.cpp +++ b/Source/Rive/Private/Rive/RiveTextureResource.cpp @@ -11,10 +11,9 @@ #include "RenderUtils.h" #include "Rive/RiveTexture.h" -FRiveTextureResource::FRiveTextureResource(URiveTexture* Owner) -{ - RiveTexture = Owner; -} +FRiveTextureResource::FRiveTextureResource(URiveTexture* Owner): +RiveTexture(Owner) +{ } void FRiveTextureResource::InitRHI(FRHICommandListBase& RHICmdList) { diff --git a/Source/Rive/Private/RiveModule.cpp b/Source/Rive/Private/RiveModule.cpp index 31945819..6a05dad7 100644 --- a/Source/Rive/Private/RiveModule.cpp +++ b/Source/Rive/Private/RiveModule.cpp @@ -18,11 +18,16 @@ THIRD_PARTY_INCLUDES_END void FRiveModule::StartupModule() { + // For some reason the shader file path is not loading correctly.
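+ // Mapping the virtual path /Plugin/Rive onto the plugin's Shaders directory lets the + // .usf/.ush files under Shaders/Private resolve through Unreal's shader source directory mappings.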
+ FString PluginShaderDir = FPaths::Combine(IPluginManager::Get().FindPlugin(TEXT("Rive"))->GetBaseDir(), TEXT("Shaders")); + AddShaderSourceDirectoryMapping(TEXT("/Plugin/Rive"), PluginShaderDir); + TestRiveIntegration(); } void FRiveModule::ShutdownModule() { + ResetAllShaderSourceDirectoryMappings(); } void FRiveModule::TestRiveIntegration() diff --git a/Source/Rive/Public/Game/RiveActorComponent.h b/Source/Rive/Public/Game/RiveActorComponent.h index 4eb9a0a5..6e7e0b80 100644 --- a/Source/Rive/Public/Game/RiveActorComponent.h +++ b/Source/Rive/Public/Game/RiveActorComponent.h @@ -49,6 +49,10 @@ class RIVE_API URiveActorComponent : public UActorComponent UPROPERTY(BlueprintAssignable, Category = Rive) FRiveReadyDelegate OnRiveReady; + + // Render a test example for rive renderer + UFUNCTION(BlueprintCallable, Category= Rive) + void RenderRiveTest(); UFUNCTION(BlueprintCallable, Category = Rive) void ResizeRenderTarget(int32 InSizeX, int32 InSizeY); diff --git a/Source/Rive/Public/Rive/RiveTextureObject.h b/Source/Rive/Public/Rive/RiveTextureObject.h index ad362759..2c84ca89 100644 --- a/Source/Rive/Public/Rive/RiveTextureObject.h +++ b/Source/Rive/Public/Rive/RiveTextureObject.h @@ -109,9 +109,9 @@ class RIVE_API URiveTextureObject : public URiveTexture, public FTickableGameObj protected: - void RiveReady(IRiveRenderer* InRiveRenderer); + void OnRiveRendererInitialized(IRiveRenderer* InRiveRenderer); void OnResourceInitialized_RenderThread(FRHICommandListImmediate& RHICmdList, FTextureRHIRef& NewResource) const; - + void OnRiveFileInitialized(bool bSuccess); public: UPROPERTY(EditAnywhere, Transient, Category = Rive) bool bIsRendering = false; diff --git a/Source/Rive/Rive.Build.cs b/Source/Rive/Rive.Build.cs index 96906a68..0827c972 100644 --- a/Source/Rive/Rive.Build.cs +++ b/Source/Rive/Rive.Build.cs @@ -35,7 +35,7 @@ public Rive(ReadOnlyTargetRules Target) : base(Target) "RenderCore", "RiveLibrary", "RiveRenderer", - "Engine", + "Engine" // ... add other public dependencies that you statically link with here ... 
} ); @@ -55,8 +55,6 @@ public Rive(ReadOnlyTargetRules Target) : base(Target) "RiveLibrary", "RiveRenderer", "Slate", - "Slate", - "SlateCore", "SlateCore", "UMG" } diff --git a/Source/RiveEditor/Private/RiveEditorModule.cpp b/Source/RiveEditor/Private/RiveEditorModule.cpp index ae0aa005..17ab45da 100644 --- a/Source/RiveEditor/Private/RiveEditorModule.cpp +++ b/Source/RiveEditor/Private/RiveEditorModule.cpp @@ -5,9 +5,11 @@ #include "AssetToolsModule.h" #include "IRiveRendererModule.h" #include "ISettingsEditorModule.h" +#include "ISettingsModule.h" #include "RiveFileDetailCustomization.h" #include "RiveFileAssetTypeActions.h" #include "RiveFileThumbnailRenderer.h" +#include "RiveRendererSettings.h" #include "RiveTextureObjectAssetTypeActions.h" #include "RiveTextureObjectThumbnailRenderer.h" #include "Framework/Notifications/NotificationManager.h" @@ -42,6 +44,20 @@ void FRiveEditorModule::StartupModule() CheckCurrentRHIAndNotify(); FCoreDelegates::OnBeginFrame.Remove(OnBeginFrameHandle); }); + + // Register settings + if (ISettingsModule* SettingsModule = FModuleManager::GetModulePtr<ISettingsModule>("Settings")) + { + SettingsModule->RegisterSettings("Project", "Plugins", "Rive", + LOCTEXT("RiveRendererSettingsName", "Rive"), + LOCTEXT("RiveRendererDescription", "Configure Rive settings"), + GetMutableDefault<URiveRendererSettings>()); + } + + GetMutableDefault<URiveRendererSettings>()->OnSettingChanged().AddLambda([this](UObject*, struct FPropertyChangedEvent&) + { + FUnrealEdMisc::Get().RestartEditor(); + }); } void FRiveEditorModule::ShutdownModule() @@ -75,6 +91,11 @@ void FRiveEditorModule::ShutdownModule() } } CreatedAssetTypeActions.Empty(); + + if (ISettingsModule* SettingsModule = FModuleManager::GetModulePtr<ISettingsModule>("Settings")) + { + SettingsModule->UnregisterSettings("Project", "Plugins", "Rive"); + } } bool FRiveEditorModule::CheckCurrentRHIAndNotify() diff --git a/Source/RiveEditor/RiveEditor.Build.cs b/Source/RiveEditor/RiveEditor.Build.cs index 04190723..a0e75aed 100644 --- a/Source/RiveEditor/RiveEditor.Build.cs +++ b/Source/RiveEditor/RiveEditor.Build.cs @@ -33,7 +33,10 @@ public RiveEditor(ReadOnlyTargetRules Target) : base(Target) "UMG", "UMGEditor", "SettingsEditor", - "EditorStyle" + "EditorStyle", + "UnrealEd", + "DeveloperSettings", + "RiveRenderer" } ); diff --git a/Source/RiveRenderer/Private/Platform/RiveRenderTargetRHI.cpp b/Source/RiveRenderer/Private/Platform/RiveRenderTargetRHI.cpp new file mode 100644 index 00000000..26fc07c8 --- /dev/null +++ b/Source/RiveRenderer/Private/Platform/RiveRenderTargetRHI.cpp @@ -0,0 +1,71 @@ +#include "RiveRenderTargetRHI.h" +#include "RiveRendererRHI.h" +#include "Engine/Texture2DDynamic.h" +THIRD_PARTY_INCLUDES_START +#include "rive/renderer/render_target.hpp" +THIRD_PARTY_INCLUDES_END + +FRiveRenderTargetRHI::FRiveRenderTargetRHI(const TSharedRef<FRiveRendererRHI>& InRiveRenderer, const FName& InRiveName, UTexture2DDynamic* InRenderTarget) : +FRiveRenderTarget(InRiveRenderer, InRiveName, InRenderTarget), RiveRenderer(InRiveRenderer) +{ +} + +FRiveRenderTargetRHI::~FRiveRenderTargetRHI() +{ + +} + +DECLARE_GPU_STAT_NAMED(CacheTextureTargetRHI, TEXT("FRiveRenderTargetRHI::CacheTextureTarget_RenderThread")); +void FRiveRenderTargetRHI::CacheTextureTarget_RenderThread( + FRHICommandListImmediate& RHICmdList, + const FTexture2DRHIRef& InTexture) +{ + check(IsInRenderingThread()); + FScopeLock Lock(&RiveRenderer->GetThreadDataCS()); + +#if WITH_RIVE + rive::gpu::RenderContext* PLSRenderContext = RiveRenderer->GetRenderContext(); + if (PLSRenderContext == nullptr) + { + return; + } +#endif // WITH_RIVE + + 
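+ // Only valid 8-bit RGBA textures get a cached Rive render target; other formats + // simply return below without caching.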
if(!InTexture.IsValid()) + { + return; + } + + EPixelFormat PixelFormat = InTexture->GetFormat(); + + if (PixelFormat != PF_R8G8B8A8) + { + return; + } + + SCOPED_GPU_STAT(RHICmdList, CacheTextureTargetRHI); +#if WITH_RIVE + + if(CachedRenderTarget) + { + CachedRenderTarget.reset(); + } + + RenderContextRHIImpl* const PLSRenderContextImpl = PLSRenderContext->static_impl_cast<RenderContextRHIImpl>(); + CachedRenderTarget = PLSRenderContextImpl->makeRenderTarget(RHICmdList, InTexture); + +#endif +} + +void FRiveRenderTargetRHI::Render_RenderThread( + FRHICommandListImmediate& RHICmdList, + const TArray<FRiveRenderCommand>& RiveRenderCommands) +{ + FRiveRenderTarget::Render_Internal(RiveRenderCommands); +} + +rive::rcp<rive::gpu::RenderTarget> FRiveRenderTargetRHI:: +GetRenderTarget() const +{ + return CachedRenderTarget; +} \ No newline at end of file diff --git a/Source/RiveRenderer/Private/Platform/RiveRenderTargetRHI.h b/Source/RiveRenderer/Private/Platform/RiveRenderTargetRHI.h new file mode 100644 index 00000000..f2aeae7a --- /dev/null +++ b/Source/RiveRenderer/Private/Platform/RiveRenderTargetRHI.h @@ -0,0 +1,28 @@ +#pragma once +#include "pls_render_context_rhi_impl.hpp" +#include "RiveRenderTarget.h" + +class FRiveRendererRHI; + +class FRiveRenderTargetRHI final: public FRiveRenderTarget +{ +public: + FRiveRenderTargetRHI(const TSharedRef<FRiveRendererRHI>& InRiveRenderer, const FName& InRiveName, UTexture2DDynamic* InRenderTarget); + virtual ~FRiveRenderTargetRHI() override; + + //~ BEGIN : IRiveRenderTarget Interface + virtual void CacheTextureTarget_RenderThread(FRHICommandListImmediate& RHICmdList, const FTexture2DRHIRef& InRHIResource) override; + //~ END : IRiveRenderTarget Interface + +#if WITH_RIVE + //~ BEGIN : FRiveRenderTarget Interface +protected: + // This might need to run on the rendering thread; the render queue is required + virtual void Render_RenderThread(FRHICommandListImmediate& RHICmdList, const TArray<FRiveRenderCommand>& RiveRenderCommands) override; + virtual rive::rcp<rive::gpu::RenderTarget> GetRenderTarget() const override; + //~ END : FRiveRenderTarget Interface +#endif // WITH_RIVE +private: + TSharedRef<FRiveRendererRHI> RiveRenderer; + rive::rcp<RenderTargetRHI> CachedRenderTarget; +}; diff --git a/Source/RiveRenderer/Private/Platform/RiveRendererD3D11.h b/Source/RiveRenderer/Private/Platform/RiveRendererD3D11.h index 379738bc..38d5e7c0 100644 --- a/Source/RiveRenderer/Private/Platform/RiveRendererD3D11.h +++ b/Source/RiveRenderer/Private/Platform/RiveRendererD3D11.h @@ -16,6 +16,12 @@ THIRD_PARTY_INCLUDES_END struct ID3D11DynamicRHI; +namespace rive::gpu +{ + class RenderTargetD3D; + class RenderContextD3DImpl; +} + #endif // WITH_RIVE class FRiveRendererD3D11GPUAdapter diff --git a/Source/RiveRenderer/Private/Platform/RiveRendererRHI.cpp b/Source/RiveRenderer/Private/Platform/RiveRendererRHI.cpp new file mode 100644 index 00000000..5970cb52 --- /dev/null +++ b/Source/RiveRenderer/Private/Platform/RiveRendererRHI.cpp @@ -0,0 +1,35 @@ +#include "RiveRendererRHI.h" +#include "pls_render_context_rhi_impl.hpp" +#include "RiveRenderTargetRHI.h" + +TSharedPtr<IRiveRenderTarget> FRiveRendererRHI::CreateTextureTarget_GameThread(const FName& InRiveName, UTexture2DDynamic* InRenderTarget) +{ + check(IsInGameThread()); + + FScopeLock Lock(&ThreadDataCS); + + const TSharedPtr<FRiveRenderTargetRHI> RiveRenderTarget = MakeShared<FRiveRenderTargetRHI>(SharedThis(this), InRiveName, InRenderTarget); + + RenderTargets.Add(InRiveName, RiveRenderTarget); + + return RiveRenderTarget; +} + +DECLARE_GPU_STAT_NAMED(CreatePLSContextRHI, TEXT("CreatePLSContext_RenderThread")); +void FRiveRendererRHI::CreateRenderContext_RenderThread(FRHICommandListImmediate& RHICmdList) +{ + check(IsInRenderingThread()); + 
check(GDynamicRHI); + + FScopeLock Lock(&ThreadDataCS); + + SCOPED_GPU_STAT(RHICmdList, CreatePLSContextRHI); + + if(GDynamicRHI->GetInterfaceType() == ERHIInterfaceType::Null) { + return; + } + +#if WITH_RIVE + RenderContext = RenderContextRHIImpl::MakeContext(RHICmdList); +#endif // WITH_RIVE +} \ No newline at end of file diff --git a/Source/RiveRenderer/Private/Platform/RiveRendererRHI.h b/Source/RiveRenderer/Private/Platform/RiveRendererRHI.h new file mode 100644 index 00000000..e8f156f4 --- /dev/null +++ b/Source/RiveRenderer/Private/Platform/RiveRendererRHI.h @@ -0,0 +1,13 @@ +#pragma once +#include "RiveRenderer.h" + + +class RIVERENDERER_API FRiveRendererRHI : public FRiveRenderer +{ +public: + //~ BEGIN : IRiveRenderer Interface + virtual TSharedPtr<IRiveRenderTarget> CreateTextureTarget_GameThread(const FName& InRiveName, UTexture2DDynamic* InRenderTarget) override; + virtual void CreateRenderContext_RenderThread(FRHICommandListImmediate& RHICmdList) override; + virtual void Flush(rive::gpu::RenderContext& context) {} + //~ END : IRiveRenderer Interface +}; diff --git a/Source/RiveRenderer/Private/Platform/pls_render_context_rhi_impl.cpp b/Source/RiveRenderer/Private/Platform/pls_render_context_rhi_impl.cpp new file mode 100644 index 00000000..d1937f15 --- /dev/null +++ b/Source/RiveRenderer/Private/Platform/pls_render_context_rhi_impl.cpp @@ -0,0 +1,1191 @@ +#include "pls_render_context_rhi_impl.hpp" + +#include "CommonRenderResources.h" +#include "IImageWrapperModule.h" +#include "IImageWrapper.h" +#include "RenderGraphBuilder.h" +#include "RHIResourceUpdates.h" +#include "Containers/ResourceArray.h" +#include "RHIStaticStates.h" +#include "Modules/ModuleManager.h" + +#include "RHICommandList.h" + +#include "Shaders/ShaderPipelineManager.h" + +THIRD_PARTY_INCLUDES_START +#include "rive/renderer/rive_render_image.hpp" +#include "rive/shaders/out/generated/shaders/constants.glsl.hpp" + +#include "webp/decode.h" +#include "webp/demux.h" + +THIRD_PARTY_INCLUDES_END +#include "RenderGraphUtils.h" +#include "Logs/RiveRendererLog.h" + +template <typename VShaderType, typename PShaderType> +void BindShaders(FRHICommandList& CommandList, FGraphicsPipelineStateInitializer& GraphicsPSOInit, + TShaderMapRef<VShaderType> VSShader, TShaderMapRef<PShaderType> PSShader, FRHIVertexDeclaration* VertexDeclaration) +{ + GraphicsPSOInit.BoundShaderState.VertexDeclarationRHI = VertexDeclaration; + GraphicsPSOInit.BoundShaderState.VertexShaderRHI = VSShader.GetVertexShader(); + GraphicsPSOInit.BoundShaderState.PixelShaderRHI = PSShader.GetPixelShader(); + SetGraphicsPipelineState(CommandList, GraphicsPSOInit, 0, EApplyRendertargetOption::CheckApply, true, EPSOPrecacheResult::NotSupported); +} + +template <typename ShaderType> +void SetParameters(FRHICommandList& CommandList, FRHIBatchedShaderParameters& BatchedParameters, + TShaderMapRef<ShaderType> Shader, typename ShaderType::FParameters& VParameters) +{ + ClearUnusedGraphResources(Shader, &VParameters); + SetShaderParameters(BatchedParameters, Shader, VParameters); + CommandList.SetBatchedShaderParameters(Shader.GetVertexShader(), BatchedParameters); +} + +template <typename DataType, size_t size> +struct TStaticResourceData : public FResourceArrayInterface +{ + DataType Data[size]; +public: + TStaticResourceData() {} + + DataType* operator *() + {return Data;} + /** + * @return A pointer to the resource data. 
+ */ + virtual const void* GetResourceData() const + {return Data;} + + /** + * @return size of resource data allocation (in bytes) + */ + virtual uint32 GetResourceDataSize() const + {return size*sizeof(DataType);}; + + /** Do nothing on discard because this is static const CPU data */ + virtual void Discard() {}; + + virtual bool IsStatic() const {return true;} + + /** + * @return true if the resource keeps a copy of its resource data after the RHI resource has been created + */ + virtual bool GetAllowCPUAccess() const + {return true;} + + /** + * Sets whether the resource array will be accessed by CPU. + */ + virtual void SetAllowCPUAccess( bool bInNeedsCPUAccess ){} +}; + +template <typename DataType, size_t size> +struct TStaticExternalResourceData : public FResourceArrayInterface +{ + const DataType (&Data)[size]; +public: + TStaticExternalResourceData(const DataType (&Data)[size]) : Data(Data) + {} + /** + * @return A pointer to the resource data. + */ + virtual const void* GetResourceData() const + {return Data;}; + + /** + * @return size of resource data allocation (in bytes) + */ + virtual uint32 GetResourceDataSize() const + {return size*sizeof(DataType);}; + + /** Do nothing on discard because this is static const CPU data */ + virtual void Discard() {}; + + virtual bool IsStatic() const {return true;} + + /** + * @return true if the resource keeps a copy of its resource data after the RHI resource has been created + */ + virtual bool GetAllowCPUAccess() const + {return true;} + + /** + * Sets whether the resource array will be accessed by CPU. + */ + virtual void SetAllowCPUAccess( bool bInNeedsCPUAccess ){} +}; + +using namespace rive; +using namespace rive::gpu; + + TStaticExternalResourceData GImageRectIndices(kImageRectIndices); + TStaticExternalResourceData GImageRectVertices(kImageRectVertices); + TStaticExternalResourceData GTessSpanIndices(kTessSpanIndices); + + TStaticResourceData<PatchVertex, kPatchVertexBufferCount> GPatchVertices; + TStaticResourceData<uint16_t, kPatchIndexBufferCount> GPatchIndices; + + void GetPermutationForFeatures(const rive::gpu::ShaderFeatures features, AtomicPixelPermutationDomain& PixelPermutationDomain, AtomicVertexPermutationDomain& VertexPermutationDomain) + { + VertexPermutationDomain.Set(features & ShaderFeatures::ENABLE_CLIPPING); + VertexPermutationDomain.Set(features & ShaderFeatures::ENABLE_CLIP_RECT); + VertexPermutationDomain.Set(features & ShaderFeatures::ENABLE_ADVANCED_BLEND); + + PixelPermutationDomain.Set(features & ShaderFeatures::ENABLE_CLIPPING); + PixelPermutationDomain.Set(features & ShaderFeatures::ENABLE_CLIP_RECT); + PixelPermutationDomain.Set(features & ShaderFeatures::ENABLE_NESTED_CLIPPING); + PixelPermutationDomain.Set(features & ShaderFeatures::ENABLE_ADVANCED_BLEND); + PixelPermutationDomain.Set(!(features & ShaderFeatures::ENABLE_ADVANCED_BLEND)); + PixelPermutationDomain.Set(features & ShaderFeatures::ENABLE_EVEN_ODD); + PixelPermutationDomain.Set(features & ShaderFeatures::ENABLE_HSL_BLEND_MODES); + } + + template <typename DataType> + FBufferRHIRef makeSimpleImmutableBuffer(FRHICommandList& RHICmdList, const TCHAR* DebugName, EBufferUsageFlags bindFlags, FResourceArrayInterface &ResourceArray) + { + const size_t size = ResourceArray.GetResourceDataSize(); + FRHIResourceCreateInfo Info(DebugName, &ResourceArray); + auto buffer = RHICmdList.CreateBuffer(size, + EBufferUsageFlags::Static | bindFlags, sizeof(DataType), + ERHIAccess::VertexOrIndexBuffer, Info); + return buffer; + } + +#define SYNC_BUFFER(buffer, command_list) if(buffer)buffer->Sync(command_list); +#define SYNC_BUFFER_WITH_OFFSET(buffer, command_list, 
offset)if(buffer)buffer->Sync(command_list, offset); + +BufferRingRHIImpl::BufferRingRHIImpl(EBufferUsageFlags flags, +size_t in_sizeInBytes, size_t stride) : BufferRing(in_sizeInBytes), m_flags(flags) +{ + FRHIAsyncCommandList tmpCommandList; + FRHIResourceCreateInfo Info(TEXT("BufferRingRHIImpl_")); + m_buffer = tmpCommandList->CreateBuffer(in_sizeInBytes, + /*EBufferUsageFlags::Volatile |*/ flags, stride, ERHIAccess::WriteOnlyMask, Info); +} + +void BufferRingRHIImpl::Sync(FRHICommandList& commandList) const +{ + auto buffer = commandList.LockBuffer(m_buffer, 0, capacityInBytes(), RLM_WriteOnly_NoOverwrite); + memcpy(buffer, shadowBuffer(), capacityInBytes()); + commandList.UnlockBuffer(m_buffer); +} + +FBufferRHIRef BufferRingRHIImpl::contents()const +{ + return m_buffer; +} + +void* BufferRingRHIImpl::onMapBuffer(int bufferIdx, size_t mapSizeInBytes) +{ + return shadowBuffer(); +} + +void BufferRingRHIImpl::onUnmapAndSubmitBuffer(int bufferIdx, size_t mapSizeInBytes) +{ +} + +StructuredBufferRingRHIImpl::StructuredBufferRingRHIImpl(EBufferUsageFlags flags, + size_t in_sizeInBytes, + size_t elementSize) : BufferRing(in_sizeInBytes), m_flags(flags), + m_elementSize(elementSize), m_lastMapSizeInBytes(in_sizeInBytes) +{ + FRHIAsyncCommandList commandList; + FRHIResourceCreateInfo Info(TEXT("BufferRingRHIImpl_")); + m_buffer = commandList->CreateStructuredBuffer(m_elementSize, capacityInBytes(), + m_flags, ERHIAccess::WriteOnlyMask, Info); + m_srv = commandList->CreateShaderResourceView(m_buffer); +} + +FBufferRHIRef StructuredBufferRingRHIImpl::contents()const +{ + return m_buffer; +} + +void* StructuredBufferRingRHIImpl::onMapBuffer(int bufferIdx, size_t mapSizeInBytes) +{ + m_lastMapSizeInBytes = mapSizeInBytes; + return shadowBuffer(); +} + +void StructuredBufferRingRHIImpl::onUnmapAndSubmitBuffer(int bufferIdx, size_t mapSizeInBytes) +{ +} + +FShaderResourceViewRHIRef StructuredBufferRingRHIImpl::srv() const +{ + return m_srv; +} + + +RenderBufferRHIImpl::RenderBufferRHIImpl(RenderBufferType in_type, + RenderBufferFlags in_flags, size_t in_sizeInBytes, size_t stride) : + lite_rtti_override(in_type, in_flags, in_sizeInBytes), + m_buffer(in_type == RenderBufferType::vertex ? 
EBufferUsageFlags::VertexBuffer : EBufferUsageFlags::IndexBuffer, in_sizeInBytes, stride), + m_mappedBuffer(nullptr) +{ + if(in_flags & RenderBufferFlags::mappedOnceAtInitialization) + { + m_mappedBuffer = m_buffer.mapBuffer(in_sizeInBytes); + } +} + +void RenderBufferRHIImpl::Sync(FRHICommandList& commandList) const +{ + m_buffer.Sync(commandList); +} + +FBufferRHIRef RenderBufferRHIImpl::contents()const +{ + return m_buffer.contents(); +} + +void* RenderBufferRHIImpl::onMap() +{ + if(flags() & RenderBufferFlags::mappedOnceAtInitialization) + { + check(m_mappedBuffer); + return m_mappedBuffer; + } + return m_buffer.mapBuffer(sizeInBytes()); +} + +void RenderBufferRHIImpl::onUnmap() +{ + if(flags() & RenderBufferFlags::mappedOnceAtInitialization) + return; + + m_buffer.unmapAndSubmitBuffer(); +} + +class PLSTextureRHIImpl : public Texture +{ +public: + PLSTextureRHIImpl(uint32_t width, uint32_t height, uint32_t mipLevelCount, const TArray<uint8>& imageDataRGBA, EPixelFormat PixelFormat = PF_B8G8R8A8) : + Texture(width, height) + { + FRHIAsyncCommandList commandList; + auto Desc = FRHITextureCreateDesc::Create2D(TEXT("PLSTextureRHIImpl_"), m_width, m_height, PixelFormat); + Desc.SetNumMips(mipLevelCount); + m_texture = commandList->CreateTexture(Desc); + commandList->UpdateTexture2D(m_texture, 0, + FUpdateTextureRegion2D(0, 0, 0, 0, m_width, m_height), m_width * 4, imageDataRGBA.GetData()); + //commandList->Transition(FRHITransitionInfo(m_texture, ERHIAccess::Unknown, ERHIAccess::SRVGraphics)); + + } + virtual ~PLSTextureRHIImpl() override + { + } + + FTextureRHIRef contents()const + { + return m_texture; + } + +private: + FTextureRHIRef m_texture; +}; + +RenderTargetRHI::RenderTargetRHI(FRHICommandList& RHICmdList, const FTexture2DRHIRef& InTextureTarget) : +RenderTarget(InTextureTarget->GetSizeX(), InTextureTarget->GetSizeY()), m_textureTarget(InTextureTarget) +{ + FRHITextureCreateDesc coverageDesc = FRHITextureCreateDesc::Create2D(TEXT("RiveAtomicCoverage"), width(), height(), PF_R32_UINT); + coverageDesc.SetNumMips(1); + coverageDesc.AddFlags(ETextureCreateFlags::UAV | ETextureCreateFlags::Memoryless); + m_atomicCoverageTexture = RHICmdList.CreateTexture(coverageDesc); + + FRHITextureCreateDesc scratchColorDesc = FRHITextureCreateDesc::Create2D(TEXT("RiveScratchColor"), width(), height(), PF_R8G8B8A8); + scratchColorDesc.SetNumMips(1); + scratchColorDesc.AddFlags(ETextureCreateFlags::UAV); + m_scratchColorTexture = RHICmdList.CreateTexture(scratchColorDesc); + + FRHITextureCreateDesc clipDesc = FRHITextureCreateDesc::Create2D(TEXT("RiveClip"), width(), height(), PF_R32_UINT); + clipDesc.SetNumMips(1); + clipDesc.AddFlags(ETextureCreateFlags::UAV); + m_clipTexture = RHICmdList.CreateTexture(clipDesc); + + RHICmdList.Transition(FRHITransitionInfo(m_atomicCoverageTexture, ERHIAccess::Unknown, ERHIAccess::UAVGraphics)); + RHICmdList.Transition(FRHITransitionInfo(m_scratchColorTexture, ERHIAccess::Unknown, ERHIAccess::UAVGraphics)); + RHICmdList.Transition(FRHITransitionInfo(m_clipTexture, ERHIAccess::Unknown, ERHIAccess::UAVGraphics)); + RHICmdList.Transition(FRHITransitionInfo(m_textureTarget, ERHIAccess::Unknown, ERHIAccess::UAVGraphics)); + + m_coverageUAV = RHICmdList.CreateUnorderedAccessView(m_atomicCoverageTexture); + m_clipUAV = RHICmdList.CreateUnorderedAccessView(m_clipTexture); + m_scratchColorUAV = RHICmdList.CreateUnorderedAccessView(m_scratchColorTexture); + m_targetUAV = RHICmdList.CreateUnorderedAccessView(m_textureTarget); +} + +std::unique_ptr<RenderContext> 
RenderContextRHIImpl::MakeContext(FRHICommandListImmediate& CommandListImmediate) +{ + auto plsContextImpl = std::make_unique(CommandListImmediate); + return std::make_unique(std::move(plsContextImpl)); +} + +RenderContextRHIImpl::RenderContextRHIImpl(FRHICommandListImmediate& CommandListImmediate) +{ + m_platformFeatures.supportsFragmentShaderAtomics = true; + m_platformFeatures.supportsClipPlanes = true; + m_platformFeatures.supportsRasterOrdering = false; + m_platformFeatures.invertOffscreenY = true; + + auto ShaderMap = GetGlobalShaderMap(GMaxRHIFeatureLevel); + + VertexDeclarations[static_cast(EVertexDeclarations::Resolve)] = GEmptyVertexDeclaration.VertexDeclarationRHI; + + FVertexDeclarationElementList pathElementList; + pathElementList.Add(FVertexElement(FVertexElement(0, 0, VET_Float4, 0, sizeof(PathData), false))); + pathElementList.Add(FVertexElement(FVertexElement(0, sizeof(float4), VET_Float4, 1, sizeof(PathData), false))); + auto PathVertexDeclaration = PipelineStateCache::GetOrCreateVertexDeclaration(pathElementList); + VertexDeclarations[static_cast(EVertexDeclarations::Paths)] = PathVertexDeclaration; + + FVertexDeclarationElementList trianglesElementList; + trianglesElementList.Add(FVertexElement(0, 0, VET_Float3, 0, sizeof(TriangleVertex), false)); + auto TrianglesVertexDeclaration = PipelineStateCache::GetOrCreateVertexDeclaration(trianglesElementList); + VertexDeclarations[static_cast(EVertexDeclarations::InteriorTriangles)] = TrianglesVertexDeclaration; + + FVertexDeclarationElementList ImageMeshElementList; + ImageMeshElementList.Add(FVertexElement(0, 0, VET_Float2, 0, sizeof(Vec2D), false)); + ImageMeshElementList.Add(FVertexElement(1, 0, VET_Float2, 1, sizeof(Vec2D), false)); + auto ImageMeshVertexDeclaration = PipelineStateCache::GetOrCreateVertexDeclaration(ImageMeshElementList); + VertexDeclarations[static_cast(EVertexDeclarations::ImageMesh)] = ImageMeshVertexDeclaration; + + FVertexDeclarationElementList SpanElementList; + SpanElementList.Add(FVertexElement(0, 0, VET_UInt, 0, sizeof(GradientSpan), true)); + SpanElementList.Add(FVertexElement(0, 4, VET_UInt, 1, sizeof(GradientSpan), true)); + SpanElementList.Add(FVertexElement(0, 8, VET_UInt, 2, sizeof(GradientSpan), true)); + SpanElementList.Add(FVertexElement(0, 12, VET_UInt, 3, sizeof(GradientSpan), true)); + auto SpanVertexDeclaration = PipelineStateCache::GetOrCreateVertexDeclaration(SpanElementList); + VertexDeclarations[static_cast(EVertexDeclarations::Gradient)] = SpanVertexDeclaration; + + FVertexDeclarationElementList TessElementList; + size_t tessOffset = 0; + size_t tessStride = sizeof(TessVertexSpan); + TessElementList.Add(FVertexElement(0, tessOffset, VET_Float4, 0, tessStride, true)); + tessOffset += 4*sizeof(float); + TessElementList.Add(FVertexElement(0, tessOffset, VET_Float4, 1, tessStride, true)); + tessOffset += 4*sizeof(float); + TessElementList.Add(FVertexElement(0, tessOffset, VET_Float4, 2, tessStride, true)); + tessOffset += 4*sizeof(float); + TessElementList.Add(FVertexElement(0, tessOffset, VET_UInt,3, tessStride, true)); + tessOffset += 4; + TessElementList.Add(FVertexElement(0, tessOffset, VET_UInt,4, tessStride, true)); + tessOffset += 4; + TessElementList.Add(FVertexElement(0, tessOffset, VET_UInt,5, tessStride, true)); + tessOffset += 4; + TessElementList.Add(FVertexElement(0, tessOffset, VET_UInt,6, tessStride, true)); + check(tessOffset+4 == sizeof(TessVertexSpan)); + + auto TessVertexDeclaration = PipelineStateCache::GetOrCreateVertexDeclaration(TessElementList); + 
VertexDeclarations[static_cast(EVertexDeclarations::Tessellation)] = TessVertexDeclaration; + + FVertexDeclarationElementList ImageRectVertexElementList; + ImageRectVertexElementList.Add( + FVertexElement(0, 0, VET_Float4, 0, sizeof(ImageRectVertex), false)); + auto ImageRectDeclaration = PipelineStateCache::GetOrCreateVertexDeclaration(ImageRectVertexElementList); + VertexDeclarations[static_cast(EVertexDeclarations::ImageRect)] = ImageRectDeclaration; + + GeneratePatchBufferData(*GPatchVertices, *GPatchIndices); + + m_patchVertexBuffer = makeSimpleImmutableBuffer<PatchVertex>(CommandListImmediate, + TEXT("RivePatchVertexBuffer"), + EBufferUsageFlags::VertexBuffer, GPatchVertices); + m_patchIndexBuffer = makeSimpleImmutableBuffer<uint16_t>(CommandListImmediate, + TEXT("RivePatchIndexBuffer"), + EBufferUsageFlags::IndexBuffer, GPatchIndices); + + m_tessSpanIndexBuffer = makeSimpleImmutableBuffer<uint16_t>(CommandListImmediate, + TEXT("RiveTessIndexBuffer"), + EBufferUsageFlags::IndexBuffer, + GTessSpanIndices); + + m_imageRectVertexBuffer = makeSimpleImmutableBuffer<ImageRectVertex>(CommandListImmediate, + TEXT("ImageRectVertexBuffer"), + EBufferUsageFlags::VertexBuffer, + GImageRectVertices); + + m_imageRectIndexBuffer = makeSimpleImmutableBuffer<uint16_t>(CommandListImmediate, + TEXT("ImageRectIndexBuffer"), + EBufferUsageFlags::IndexBuffer, + GImageRectIndices); + + m_mipmapSampler = TStaticSamplerState::GetRHI(); + m_linearSampler = TStaticSamplerState::GetRHI(); +} + +rcp<RenderTargetRHI> RenderContextRHIImpl::makeRenderTarget(FRHICommandListImmediate& RHICmdList, const FTexture2DRHIRef& InTargetTexture) +{ + return make_rcp<RenderTargetRHI>(RHICmdList, InTargetTexture); +} + +rcp<Texture> RenderContextRHIImpl::decodeImageTexture(Span<const uint8_t> encodedBytes) +{ + + constexpr uint8_t PNG[4] = {0x89, 0x50, 0x4E, 0x47}; + constexpr uint8_t JPEG[3] = {0xFF, 0xD8, 0xFF}; + constexpr uint8_t WEBP[3] = {0x52, 0x49, 0x46}; + + EImageFormat format = EImageFormat::Invalid; + + if(memcmp(PNG, encodedBytes.data(), sizeof(PNG)) == 0) + { + format = EImageFormat::PNG; + } + else if (memcmp(JPEG, encodedBytes.data(), sizeof(JPEG)) == 0) + { + format = EImageFormat::JPEG; + } + else if(memcmp(WEBP, encodedBytes.data(), sizeof(WEBP)) == 0) + { + format = EImageFormat::Invalid; + } + else + { + RIVE_DEBUG_VERBOSE("Invalid Decode Image header"); + return nullptr; + } + + if(format != EImageFormat::Invalid) + { + // Use Unreal for PNG and JPEG + IImageWrapperModule& ImageWrapperModule = FModuleManager::LoadModuleChecked<IImageWrapperModule>(FName("ImageWrapper")); + TSharedPtr<IImageWrapper> ImageWrapper = ImageWrapperModule.CreateImageWrapper(format); + if(!ImageWrapper.IsValid() || !ImageWrapper->SetCompressed(encodedBytes.data(), encodedBytes.size())) + { + return nullptr; + } + + TArray<uint8> UncompressedBGRA; + if (!ImageWrapper->GetRaw(ERGBFormat::BGRA, 8, UncompressedBGRA)) + { + return nullptr; + } + + return make_rcp<PLSTextureRHIImpl>(ImageWrapper->GetWidth(), ImageWrapper->GetHeight(), 1, UncompressedBGRA); + } + else + { + // WEBP Decoding + WebPDecoderConfig config; + if (!WebPInitDecoderConfig(&config)) + { + fprintf(stderr, "DecodeWebP - Library version mismatch!\n"); + return nullptr; + } + config.options.dithering_strength = 50; + config.options.alpha_dithering_strength = 100; + + if (!WebPGetInfo(encodedBytes.data(), encodedBytes.size(), nullptr, nullptr)) + { + fprintf(stderr, "DecodeWebP - Input file doesn't appear to be WebP format.\n"); + } + + WebPData data = {encodedBytes.data(), encodedBytes.size()}; + WebPDemuxer* demuxer = WebPDemux(&data); + if (demuxer == nullptr) + { + RIVE_DEBUG_VERBOSE("DecodeWebP - Could not create demuxer."); + return 
+void RenderContextRHIImpl::resizeFlushUniformBuffer(size_t sizeInBytes)
+{
+    m_flushUniformBuffer.reset();
+    if (sizeInBytes != 0)
+    {
+        m_flushUniformBuffer = std::make_unique<UniformBufferRHIImpl<FFlushUniforms>>(sizeInBytes);
+    }
+}
+
+void RenderContextRHIImpl::resizeImageDrawUniformBuffer(size_t sizeInBytes)
+{
+    m_imageDrawUniformBuffer.reset();
+    if (sizeInBytes != 0)
+    {
+        m_imageDrawUniformBuffer = std::make_unique<UniformBufferRHIImpl<FImageDrawUniforms>>(sizeInBytes);
+    }
+}
+
+void RenderContextRHIImpl::resizePathBuffer(size_t sizeInBytes, StorageBufferStructure structure)
+{
+    m_pathBuffer.reset();
+    if (sizeInBytes != 0)
+    {
+        m_pathBuffer = std::make_unique<StructuredBufferRingRHIImpl>(
+            EBufferUsageFlags::StructuredBuffer | EBufferUsageFlags::ShaderResource,
+            sizeInBytes, StorageBufferElementSizeInBytes(structure));
+    }
+}
+
+void RenderContextRHIImpl::resizePaintBuffer(size_t sizeInBytes, StorageBufferStructure structure)
+{
+    m_paintBuffer.reset();
+    if (sizeInBytes != 0)
+    {
+        m_paintBuffer = std::make_unique<StructuredBufferRingRHIImpl>(
+            EBufferUsageFlags::StructuredBuffer | EBufferUsageFlags::ShaderResource,
+            sizeInBytes, StorageBufferElementSizeInBytes(structure));
+    }
+}
+
+void RenderContextRHIImpl::resizePaintAuxBuffer(size_t sizeInBytes, StorageBufferStructure structure)
+{
+    m_paintAuxBuffer.reset();
+    if (sizeInBytes != 0)
+    {
+        m_paintAuxBuffer = std::make_unique<StructuredBufferRingRHIImpl>(
+            EBufferUsageFlags::StructuredBuffer | EBufferUsageFlags::ShaderResource,
+            sizeInBytes, StorageBufferElementSizeInBytes(structure));
+    }
+}
+
+void RenderContextRHIImpl::resizeContourBuffer(size_t sizeInBytes, StorageBufferStructure structure)
+{
+    m_contourBuffer.reset();
+    if (sizeInBytes != 0)
+    {
+        m_contourBuffer = std::make_unique<StructuredBufferRingRHIImpl>(
+            EBufferUsageFlags::StructuredBuffer | EBufferUsageFlags::ShaderResource,
+            sizeInBytes, StorageBufferElementSizeInBytes(structure));
+    }
+}
+
+void RenderContextRHIImpl::resizeSimpleColorRampsBuffer(size_t sizeInBytes)
+{
+    m_simpleColorRampsBuffer.reset();
+    if (sizeInBytes != 0)
+    {
+        m_simpleColorRampsBuffer = std::make_unique<HeapBufferRing>(sizeInBytes);
+    }
+}
+
+void RenderContextRHIImpl::resizeGradSpanBuffer(size_t sizeInBytes)
+{
+    m_gradSpanBuffer.reset();
+    if (sizeInBytes != 0)
+    {
+        m_gradSpanBuffer = std::make_unique<BufferRingRHIImpl>(EBufferUsageFlags::VertexBuffer, sizeInBytes, sizeof(GradientSpan));
+    }
+}
+
+void RenderContextRHIImpl::resizeTessVertexSpanBuffer(size_t sizeInBytes)
+{
+    m_tessSpanBuffer.reset();
+    if (sizeInBytes != 0)
+    {
+        m_tessSpanBuffer = std::make_unique<BufferRingRHIImpl>(EBufferUsageFlags::VertexBuffer, sizeInBytes, sizeof(TessVertexSpan));
+    }
+}
+
+void RenderContextRHIImpl::resizeTriangleVertexBuffer(size_t sizeInBytes)
+{
+    m_triangleBuffer.reset();
+    if (sizeInBytes != 0)
+    {
+        m_triangleBuffer = std::make_unique<BufferRingRHIImpl>(EBufferUsageFlags::VertexBuffer, sizeInBytes, sizeof(TriangleVertex));
+    }
+}
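The resize/map/unmap triples below implement rive's BufferRing contract. A sketch of one frame, assuming (as the contract implies) that rive's RenderContext is the only caller and sizes come from its resource counts; `ctx` is a hypothetical RenderContextRHIImpl pointer:

    // Hypothetical frame, mirroring how rive's RenderContext drives the ring:
    ctx->resizePathBuffer(CapacityInBytes, Structure); // grows once, on demand
    void* Shadow = ctx->mapPathBuffer(BytesThisFrame); // returns the CPU shadow copy
    // ... rive serializes PathData records into Shadow ...
    ctx->unmapPathBuffer(); // the used range is copied to the GPU ring in flush()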
+void* RenderContextRHIImpl::mapFlushUniformBuffer(size_t mapSizeInBytes)
+{
+    return m_flushUniformBuffer->mapBuffer(mapSizeInBytes);
+}
+
+void* RenderContextRHIImpl::mapImageDrawUniformBuffer(size_t mapSizeInBytes)
+{
+    return m_imageDrawUniformBuffer->mapBuffer(mapSizeInBytes);
+}
+
+void* RenderContextRHIImpl::mapPathBuffer(size_t mapSizeInBytes)
+{
+    return m_pathBuffer->mapBuffer(mapSizeInBytes);
+}
+
+void* RenderContextRHIImpl::mapPaintBuffer(size_t mapSizeInBytes)
+{
+    return m_paintBuffer->mapBuffer(mapSizeInBytes);
+}
+
+void* RenderContextRHIImpl::mapPaintAuxBuffer(size_t mapSizeInBytes)
+{
+    return m_paintAuxBuffer->mapBuffer(mapSizeInBytes);
+}
+
+void* RenderContextRHIImpl::mapContourBuffer(size_t mapSizeInBytes)
+{
+    return m_contourBuffer->mapBuffer(mapSizeInBytes);
+}
+
+void* RenderContextRHIImpl::mapSimpleColorRampsBuffer(size_t mapSizeInBytes)
+{
+    return m_simpleColorRampsBuffer->mapBuffer(mapSizeInBytes);
+}
+
+void* RenderContextRHIImpl::mapGradSpanBuffer(size_t mapSizeInBytes)
+{
+    return m_gradSpanBuffer->mapBuffer(mapSizeInBytes);
+}
+
+void* RenderContextRHIImpl::mapTessVertexSpanBuffer(size_t mapSizeInBytes)
+{
+    return m_tessSpanBuffer->mapBuffer(mapSizeInBytes);
+}
+
+void* RenderContextRHIImpl::mapTriangleVertexBuffer(size_t mapSizeInBytes)
+{
+    return m_triangleBuffer->mapBuffer(mapSizeInBytes);
+}
+
+void RenderContextRHIImpl::unmapFlushUniformBuffer()
+{
+    m_flushUniformBuffer->unmapAndSubmitBuffer();
+}
+
+void RenderContextRHIImpl::unmapImageDrawUniformBuffer()
+{
+    m_imageDrawUniformBuffer->unmapAndSubmitBuffer();
+}
+
+void RenderContextRHIImpl::unmapPathBuffer()
+{
+    m_pathBuffer->unmapAndSubmitBuffer();
+}
+
+void RenderContextRHIImpl::unmapPaintBuffer()
+{
+    m_paintBuffer->unmapAndSubmitBuffer();
+}
+
+void RenderContextRHIImpl::unmapPaintAuxBuffer()
+{
+    m_paintAuxBuffer->unmapAndSubmitBuffer();
+}
+
+void RenderContextRHIImpl::unmapContourBuffer()
+{
+    m_contourBuffer->unmapAndSubmitBuffer();
+}
+
+void RenderContextRHIImpl::unmapSimpleColorRampsBuffer()
+{
+    m_simpleColorRampsBuffer->unmapAndSubmitBuffer();
+}
+
+void RenderContextRHIImpl::unmapGradSpanBuffer()
+{
+    m_gradSpanBuffer->unmapAndSubmitBuffer();
+}
+
+void RenderContextRHIImpl::unmapTessVertexSpanBuffer()
+{
+    m_tessSpanBuffer->unmapAndSubmitBuffer();
+}
+
+void RenderContextRHIImpl::unmapTriangleVertexBuffer()
+{
+    m_triangleBuffer->unmapAndSubmitBuffer();
+}
+
+rcp<RenderBuffer> RenderContextRHIImpl::makeRenderBuffer(RenderBufferType type,
+    RenderBufferFlags flags, size_t sizeInBytes)
+{
+    if (sizeInBytes == 0)
+        return nullptr;
+
+    // Index buffers use 16-bit indices; vertex buffers get no implicit stride.
+    return make_rcp<RenderBufferRHIImpl>(type, flags, sizeInBytes,
+        type == RenderBufferType::index ? sizeof(uint16_t) : 0);
+}
+void RenderContextRHIImpl::resizeGradientTexture(uint32_t width, uint32_t height)
+{
+    check(IsInRenderingThread());
+    if (width == 0 && height == 0)
+    {
+        m_gradiantTexture = nullptr;
+        return;
+    }
+
+    width = std::max(width, 1u);
+    height = std::max(height, 1u);
+
+    auto& commandList = GRHICommandList.GetImmediateCommandList();
+    FRHITextureCreateDesc Desc = FRHITextureCreateDesc::Create2D(TEXT("riveGradientTexture"),
+        {static_cast<int32>(width), static_cast<int32>(height)}, PF_R8G8B8A8);
+    Desc.AddFlags(ETextureCreateFlags::RenderTargetable | ETextureCreateFlags::ShaderResource);
+    Desc.SetClearValue(FClearValueBinding(FLinearColor::Red));
+    Desc.DetermineInititialState();
+    m_gradiantTexture = commandList.CreateTexture(Desc);
+
+    commandList.Transition(FRHITransitionInfo(m_gradiantTexture, ERHIAccess::Unknown, ERHIAccess::SRVGraphics));
+}
+
+void RenderContextRHIImpl::resizeTessellationTexture(uint32_t width, uint32_t height)
+{
+    check(IsInRenderingThread());
+    if (width == 0 && height == 0)
+    {
+        m_tesselationTexture = nullptr;
+        return;
+    }
+
+    width = std::max(width, 1u);
+    height = std::max(height, 1u);
+
+    auto& commandList = GRHICommandList.GetImmediateCommandList();
+    FRHITextureCreateDesc Desc = FRHITextureCreateDesc::Create2D(TEXT("riveTessTexture"),
+        {static_cast<int32>(width), static_cast<int32>(height)}, PF_R32G32B32A32_UINT);
+    Desc.AddFlags(ETextureCreateFlags::RenderTargetable | ETextureCreateFlags::ShaderResource);
+    Desc.DetermineInititialState();
+    m_tesselationTexture = commandList.CreateTexture(Desc);
+
+    commandList.Transition(FRHITransitionInfo(m_tesselationTexture, ERHIAccess::Unknown, ERHIAccess::SRVGraphics));
+
+    FRHITextureSRVCreateInfo Info(0, 1, 0, 1, EPixelFormat::PF_R32G32B32A32_UINT);
+    m_tessSRV = commandList.CreateShaderResourceView(m_tesselationTexture, Info);
+}
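Both helpers leave their texture in SRVGraphics, and flush() below brackets every internal pass with explicit transitions. The recurring pattern, restated for clarity (`Tex` stands for either texture; the "idle in SRVGraphics" invariant is assumed from the code above):

    CommandList.Transition(FRHITransitionInfo(Tex, ERHIAccess::SRVGraphics, ERHIAccess::RTV));
    // ... BeginRenderPass / draw / EndRenderPass ...
    CommandList.Transition(FRHITransitionInfo(Tex, ERHIAccess::RTV, ERHIAccess::SRVGraphics));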
+void RenderContextRHIImpl::flush(const FlushDescriptor& desc)
+{
+    check(IsInRenderingThread());
+
+    auto renderTarget = static_cast<RenderTargetRHI*>(desc.renderTarget);
+    FTextureRHIRef DestTexture = renderTarget->texture();
+
+    FRHICommandList& CommandList = GRHICommandList.GetImmediateCommandList();
+    auto ShaderMap = GetGlobalShaderMap(GMaxRHIFeatureLevel);
+
+    SYNC_BUFFER_WITH_OFFSET(m_flushUniformBuffer, CommandList, desc.flushUniformDataOffsetInBytes);
+    if (desc.pathCount > 0)
+    {
+        check(m_pathBuffer);
+        check(m_paintBuffer);
+        check(m_paintAuxBuffer);
+
+        m_pathBuffer->Sync<PathData>(CommandList, desc.firstPath, desc.pathCount);
+        m_paintBuffer->Sync<PaintData>(CommandList, desc.firstPaint, desc.pathCount);
+        m_paintAuxBuffer->Sync<PaintAuxData>(CommandList, desc.firstPaintAux, desc.pathCount);
+    }
+
+    if (desc.contourCount > 0)
+    {
+        check(m_contourBuffer);
+        m_contourBuffer->Sync<ContourData>(CommandList, desc.firstContour, desc.contourCount);
+    }
+
+    SYNC_BUFFER(m_gradSpanBuffer, CommandList);
+    SYNC_BUFFER(m_tessSpanBuffer, CommandList);
+    SYNC_BUFFER(m_triangleBuffer, CommandList);
+
+    FGraphicsPipelineStateInitializer GraphicsPSOInit;
+    GraphicsPSOInit.BlendState = TStaticBlendState<>::GetRHI();
+    GraphicsPSOInit.RasterizerState = TStaticRasterizerState<>::GetRHI();
+    GraphicsPSOInit.DepthStencilState = TStaticDepthStencilState<false, CF_Always>::GetRHI();
+    FRHIBatchedShaderParameters& BatchedShaderParameters = CommandList.GetScratchShaderParameters();
+
+    CommandList.ClearUAVUint(renderTarget->coverageUAV(),
+        FUintVector4(desc.coverageClearValue, desc.coverageClearValue, desc.coverageClearValue, desc.coverageClearValue));
+    if (desc.combinedShaderFeatures & gpu::ShaderFeatures::ENABLE_CLIPPING)
+    {
+        CommandList.ClearUAVUint(renderTarget->clipUAV(), FUintVector4(0));
+    }
+
+    if (desc.complexGradSpanCount > 0)
+    {
+        check(m_gradiantTexture);
+        CommandList.Transition(FRHITransitionInfo(m_gradiantTexture, ERHIAccess::SRVGraphics, ERHIAccess::RTV));
+        GraphicsPSOInit.PrimitiveType = PT_TriangleStrip;
+
+        FRHIRenderPassInfo Info(m_gradiantTexture, ERenderTargetActions::Clear_Store);
+        CommandList.BeginRenderPass(Info, TEXT("Rive_Render_Gradient"));
+        CommandList.SetViewport(0, desc.complexGradRowsTop, 0,
+            kGradTextureWidth, desc.complexGradRowsTop + desc.complexGradRowsHeight, 1.0);
+        CommandList.ApplyCachedRenderTargets(GraphicsPSOInit);
+
+        TShaderMapRef<FRiveGradientVertexShader> VertexShader(ShaderMap);
+        TShaderMapRef<FRiveGradientPixelShader> PixelShader(ShaderMap);
+
+        BindShaders(CommandList, GraphicsPSOInit, VertexShader, PixelShader,
+            VertexDeclarations[static_cast<int32>(EVertexDeclarations::Gradient)]);
+
+        FRiveGradientVertexShader::FParameters VertexParameters;
+        FRiveGradientPixelShader::FParameters PixelParameters;
+
+        VertexParameters.FlushUniforms = m_flushUniformBuffer->contents();
+        PixelParameters.FlushUniforms = m_flushUniformBuffer->contents();
+
+        SetParameters(CommandList, BatchedShaderParameters, VertexShader, VertexParameters);
+        SetParameters(CommandList, BatchedShaderParameters, PixelShader, PixelParameters);
+
+        CommandList.SetStreamSource(0, m_gradSpanBuffer->contents(), desc.firstComplexGradSpan * sizeof(GradientSpan));
+
+        CommandList.DrawPrimitive(0, 2, desc.complexGradSpanCount);
+
+        CommandList.EndRenderPass();
+        CommandList.Transition(FRHITransitionInfo(m_gradiantTexture, ERHIAccess::RTV, ERHIAccess::SRVGraphics));
+    }
+
+    if (desc.simpleGradTexelsHeight > 0)
+    {
+        assert(desc.simpleGradTexelsHeight * desc.simpleGradTexelsWidth * 4 <=
+            simpleColorRampsBufferRing()->capacityInBytes());
+
+        CommandList.Transition(FRHITransitionInfo(m_gradiantTexture, ERHIAccess::SRVGraphics, ERHIAccess::CopyDest));
+        CommandList.UpdateTexture2D(m_gradiantTexture, 0,
+            {0, 0, 0, 0, desc.simpleGradTexelsWidth, desc.simpleGradTexelsHeight},
+            kGradTextureWidth * 4,
+            m_simpleColorRampsBuffer->contents() + desc.simpleGradDataOffsetInBytes);
+        CommandList.Transition(FRHITransitionInfo(m_gradiantTexture, ERHIAccess::CopyDest, ERHIAccess::SRVGraphics));
+    }
+    if (desc.tessVertexSpanCount > 0)
+    {
+        check(m_tesselationTexture);
+        CommandList.Transition(FRHITransitionInfo(m_tesselationTexture, ERHIAccess::SRVGraphics, ERHIAccess::RTV));
+        FRHIRenderPassInfo Info(m_tesselationTexture, ERenderTargetActions::DontLoad_Store);
+        CommandList.BeginRenderPass(Info, TEXT("RiveTessUpdate"));
+        CommandList.ApplyCachedRenderTargets(GraphicsPSOInit);
+
+        GraphicsPSOInit.RasterizerState = TStaticRasterizerState<>::GetRHI();
+        GraphicsPSOInit.PrimitiveType = PT_TriangleList;
+
+        TShaderMapRef<FRiveTessVertexShader> VertexShader(ShaderMap);
+        TShaderMapRef<FRiveTessPixelShader> PixelShader(ShaderMap);
+
+        BindShaders(CommandList, GraphicsPSOInit, VertexShader, PixelShader,
+            VertexDeclarations[static_cast<int32>(EVertexDeclarations::Tessellation)]);
+
+        CommandList.SetStreamSource(0, m_tessSpanBuffer->contents(), desc.firstTessVertexSpan * sizeof(TessVertexSpan));
+
+        FRiveTessPixelShader::FParameters PixelParameters;
+        FRiveTessVertexShader::FParameters VertexParameters;
+
+        PixelParameters.FlushUniforms = m_flushUniformBuffer->contents();
+        VertexParameters.FlushUniforms = m_flushUniformBuffer->contents();
+        VertexParameters.GLSL_pathBuffer_raw = m_pathBuffer->srv();
+        VertexParameters.GLSL_contourBuffer_raw = m_contourBuffer->srv();
+
+        SetParameters(CommandList, BatchedShaderParameters, VertexShader, VertexParameters);
+        SetParameters(CommandList, BatchedShaderParameters, PixelShader, PixelParameters);
+
+        CommandList.SetViewport(0, 0, 0,
+            static_cast<float>(kTessTextureWidth), static_cast<float>(desc.tessDataHeight), 1);
+
+        const size_t numTessVerts = (m_tessSpanBuffer->capacityInBytes() / sizeof(TessVertexSpan)) - desc.firstTessVertexSpan;
+        CommandList.DrawIndexedPrimitive(m_tessSpanIndexBuffer, 0, desc.firstTessVertexSpan,
+            numTessVerts, 0, std::size(kTessSpanIndices) / 3,
+            desc.tessVertexSpanCount);
+        CommandList.EndRenderPass();
+        CommandList.Transition(FRHITransitionInfo(m_tesselationTexture, ERHIAccess::RTV, ERHIAccess::SRVGraphics));
+    }
+
+    ERenderTargetActions loadAction = ERenderTargetActions::Load_Store;
+    switch (desc.colorLoadAction)
+    {
+        case LoadAction::clear:
+        {
+            float clearColor4f[4];
+            UnpackColorToRGBA32F(desc.clearColor, clearColor4f);
+            // The clear is issued as a UAV clear, so the render pass still loads.
+            CommandList.ClearUAVFloat(renderTarget->targetUAV(),
+                FVector4f(clearColor4f[0], clearColor4f[1], clearColor4f[2], clearColor4f[3]));
+            loadAction = ERenderTargetActions::Load_Store;
+            break;
+        }
+        case LoadAction::preserveRenderTarget:
+            loadAction = ERenderTargetActions::Load_Store;
+            break;
+        case LoadAction::dontCare:
+            loadAction = ERenderTargetActions::DontLoad_Store;
+            break;
+    }
+
+    FRHIRenderPassInfo Info;
+    if (!(desc.combinedShaderFeatures & ShaderFeatures::ENABLE_ADVANCED_BLEND))
+    {
+        Info.ColorRenderTargets[0].RenderTarget = DestTexture;
+        Info.ColorRenderTargets[0].Action = loadAction;
+        CommandList.Transition(FRHITransitionInfo(DestTexture, ERHIAccess::UAVGraphics, ERHIAccess::RTV));
+    }
+    else
+    {
+        Info.ResolveRect = FResolveRect(0, 0, renderTarget->width(), renderTarget->height());
+    }
+
+    CommandList.BeginRenderPass(Info, TEXT("Rive_Render_Flush"));
+    CommandList.SetViewport(0, 0, 0, renderTarget->width(), renderTarget->height(), 1.0);
+
+    if (!(desc.combinedShaderFeatures & ShaderFeatures::ENABLE_ADVANCED_BLEND))
+    {
+        // FIXED_FUNCTION_BLEND: premultiplied source-over on the target.
+        GraphicsPSOInit.BlendState = TStaticBlendState<CW_RGBA, BO_Add, BF_One, BF_InverseSourceAlpha,
+            BO_Add, BF_One, BF_InverseSourceAlpha>::GetRHI();
+    }
+    else
+    {
+        // Advanced blend resolves through UAVs, so no fixed-function blending.
+        GraphicsPSOInit.BlendState = TStaticBlendState<>::GetRHI();
+    }
+
+    GraphicsPSOInit.RasterizerState = GetStaticRasterizerState<false>(FM_Solid, CM_CCW);
+    CommandList.ApplyCachedRenderTargets(GraphicsPSOInit);
+
+    for (const DrawBatch& batch : *desc.drawList)
+    {
+        if (batch.elementCount == 0)
+        {
+            continue;
+        }
+
+        AtomicPixelPermutationDomain PixelPermutationDomain;
+        AtomicVertexPermutationDomain VertexPermutationDomain;
+
+        GetPermutationForFeatures(desc.combinedShaderFeatures, PixelPermutationDomain, VertexPermutationDomain);
+
+        CommandList.Transition(FRHITransitionInfo(renderTarget->coverageUAV(), ERHIAccess::UAVGraphics, ERHIAccess::UAVGraphics));
+        if (desc.combinedShaderFeatures & ShaderFeatures::ENABLE_CLIPPING)
+            CommandList.Transition(FRHITransitionInfo(renderTarget->clipUAV(), ERHIAccess::UAVGraphics, ERHIAccess::UAVGraphics));
+        if (desc.combinedShaderFeatures & ShaderFeatures::ENABLE_ADVANCED_BLEND)
+            CommandList.Transition(FRHITransitionInfo(renderTarget->targetUAV(), ERHIAccess::UAVGraphics, ERHIAccess::UAVGraphics));
+
+        switch (batch.drawType)
+        {
+            case DrawType::midpointFanPatches:
+            case DrawType::outerCurvePatches:
+            {
+                GraphicsPSOInit.RasterizerState = GetStaticRasterizerState<false>(FM_Solid, CM_CCW);
+                GraphicsPSOInit.PrimitiveType = EPrimitiveType::PT_TriangleList;
+
+                TShaderMapRef<FRivePathVertexShader> VertexShader(ShaderMap, VertexPermutationDomain);
+                TShaderMapRef<FRivePathPixelShader> PixelShader(ShaderMap, PixelPermutationDomain);
+
+                BindShaders(CommandList, GraphicsPSOInit, VertexShader, PixelShader,
+                    VertexDeclarations[static_cast<int32>(EVertexDeclarations::Paths)]);
+
+                FRivePathPixelShader::FParameters PixelParameters;
+                FRivePathVertexShader::FParameters VertexParameters;
+
+                PixelParameters.FlushUniforms = m_flushUniformBuffer->contents();
+                VertexParameters.FlushUniforms = m_flushUniformBuffer->contents();
+
+                PixelParameters.gradSampler = m_linearSampler;
+                PixelParameters.GLSL_gradTexture_raw = m_gradiantTexture;
+                PixelParameters.GLSL_paintAuxBuffer_raw = m_paintAuxBuffer->srv();
+                PixelParameters.GLSL_paintBuffer_raw = m_paintBuffer->srv();
+                PixelParameters.coverageCountBuffer = renderTarget->coverageUAV();
+                PixelParameters.clipBuffer = renderTarget->clipUAV();
+                PixelParameters.colorBuffer = renderTarget->targetUAV();
+                VertexParameters.GLSL_tessVertexTexture_raw = m_tessSRV;
+                VertexParameters.GLSL_pathBuffer_raw = m_pathBuffer->srv();
+                VertexParameters.GLSL_contourBuffer_raw = m_contourBuffer->srv();
+                VertexParameters.baseInstance = batch.baseElement;
+
+                SetParameters(CommandList, BatchedShaderParameters, VertexShader, VertexParameters);
+                SetParameters(CommandList, BatchedShaderParameters, PixelShader, PixelParameters);
+
+                CommandList.SetStreamSource(0, m_patchVertexBuffer, 0);
+                CommandList.DrawIndexedPrimitive(m_patchIndexBuffer, 0,
+                    0, kPatchVertexBufferCount,
+                    PatchBaseIndex(batch.drawType),
+                    PatchIndexCount(batch.drawType) / 3,
+                    batch.elementCount);
+                break;
+            }
+            case DrawType::interiorTriangulation:
+            {
+                GraphicsPSOInit.RasterizerState = GetStaticRasterizerState<false>(FM_Solid, CM_CCW);
+                GraphicsPSOInit.PrimitiveType = EPrimitiveType::PT_TriangleList;
+
+                TShaderMapRef<FRiveInteriorTrianglesVertexShader> VertexShader(ShaderMap, VertexPermutationDomain);
+                TShaderMapRef<FRiveInteriorTrianglesPixelShader> PixelShader(ShaderMap, PixelPermutationDomain);
+
+                BindShaders(CommandList, GraphicsPSOInit, VertexShader, PixelShader,
+                    VertexDeclarations[static_cast<int32>(EVertexDeclarations::InteriorTriangles)]);
+
+                FRiveInteriorTrianglesVertexShader::FParameters VertexParameters;
+                FRiveInteriorTrianglesPixelShader::FParameters PixelParameters;
+
+                PixelParameters.FlushUniforms = m_flushUniformBuffer->contents();
+                VertexParameters.FlushUniforms = m_flushUniformBuffer->contents();
+
+                PixelParameters.gradSampler = m_linearSampler;
+                PixelParameters.GLSL_gradTexture_raw = m_gradiantTexture;
+                PixelParameters.GLSL_paintAuxBuffer_raw = m_paintAuxBuffer->srv();
+                PixelParameters.GLSL_paintBuffer_raw = m_paintBuffer->srv();
+                PixelParameters.coverageCountBuffer = renderTarget->coverageUAV();
+                PixelParameters.clipBuffer = renderTarget->clipUAV();
+                PixelParameters.colorBuffer = renderTarget->targetUAV();
+                VertexParameters.GLSL_pathBuffer_raw = m_pathBuffer->srv();
+
+                SetParameters(CommandList, BatchedShaderParameters, VertexShader, VertexParameters);
+                SetParameters(CommandList, BatchedShaderParameters, PixelShader, PixelParameters);
+
+                CommandList.SetStreamSource(0, m_triangleBuffer->contents(), 0);
+                CommandList.DrawPrimitive(batch.baseElement, batch.elementCount / 3, 1);
+                break;
+            }
+            case DrawType::imageRect:
+            {
+                SYNC_BUFFER_WITH_OFFSET(m_imageDrawUniformBuffer, CommandList, batch.imageDrawDataOffset);
+                GraphicsPSOInit.RasterizerState = TStaticRasterizerState<>::GetRHI();
+                GraphicsPSOInit.PrimitiveType = EPrimitiveType::PT_TriangleList;
+
+                TShaderMapRef<FRiveImageRectVertexShader> VertexShader(ShaderMap, VertexPermutationDomain);
+                TShaderMapRef<FRiveImageRectPixelShader> PixelShader(ShaderMap, PixelPermutationDomain);
+
+                BindShaders(CommandList, GraphicsPSOInit, VertexShader, PixelShader,
+                    VertexDeclarations[static_cast<int32>(EVertexDeclarations::ImageRect)]);
+
+                auto imageTexture = static_cast<const TextureRHIImpl*>(batch.imageTexture);
+
+                FRiveImageRectVertexShader::FParameters
VertexParameters;
+                FRiveImageRectPixelShader::FParameters PixelParameters;
+
+                VertexParameters.FlushUniforms = m_flushUniformBuffer->contents();
+                VertexParameters.ImageDrawUniforms = m_imageDrawUniformBuffer->contents();
+
+                PixelParameters.FlushUniforms = m_flushUniformBuffer->contents();
+                PixelParameters.ImageDrawUniforms = m_imageDrawUniformBuffer->contents();
+
+                PixelParameters.GLSL_gradTexture_raw = m_gradiantTexture;
+                PixelParameters.GLSL_imageTexture_raw = imageTexture->contents();
+                PixelParameters.gradSampler = m_linearSampler;
+                PixelParameters.imageSampler = m_mipmapSampler;
+                PixelParameters.GLSL_paintAuxBuffer_raw = m_paintAuxBuffer->srv();
+                PixelParameters.GLSL_paintBuffer_raw = m_paintBuffer->srv();
+                PixelParameters.coverageCountBuffer = renderTarget->coverageUAV();
+                PixelParameters.clipBuffer = renderTarget->clipUAV();
+                PixelParameters.colorBuffer = renderTarget->targetUAV();
+
+                SetParameters(CommandList, BatchedShaderParameters, VertexShader, VertexParameters);
+                SetParameters(CommandList, BatchedShaderParameters, PixelShader, PixelParameters);
+
+                CommandList.SetStreamSource(0, m_imageRectVertexBuffer, 0);
+                CommandList.DrawIndexedPrimitive(m_imageRectIndexBuffer, 0, 0,
+                    std::size(kImageRectVertices), 0, std::size(kImageRectIndices) / 3, 1);
+                break;
+            }
+            case DrawType::imageMesh:
+            {
+                SYNC_BUFFER_WITH_OFFSET(m_imageDrawUniformBuffer, CommandList, batch.imageDrawDataOffset);
+                GraphicsPSOInit.RasterizerState = TStaticRasterizerState<>::GetRHI();
+                GraphicsPSOInit.PrimitiveType = PT_TriangleList;
+
+                LITE_RTTI_CAST_OR_RETURN(IndexBuffer, const RenderBufferRHIImpl*, batch.indexBuffer);
+                LITE_RTTI_CAST_OR_RETURN(VertexBuffer, const RenderBufferRHIImpl*, batch.vertexBuffer);
+                LITE_RTTI_CAST_OR_RETURN(UVBuffer, const RenderBufferRHIImpl*, batch.uvBuffer);
+
+                auto imageTexture = static_cast<const TextureRHIImpl*>(batch.imageTexture);
+
+                SYNC_BUFFER(IndexBuffer, CommandList)
+                SYNC_BUFFER(VertexBuffer, CommandList)
+                SYNC_BUFFER(UVBuffer, CommandList)
+
+                TShaderMapRef<FRiveImageMeshVertexShader> VertexShader(ShaderMap, VertexPermutationDomain);
+                TShaderMapRef<FRiveImageMeshPixelShader> PixelShader(ShaderMap, PixelPermutationDomain);
+
+                BindShaders(CommandList, GraphicsPSOInit, VertexShader, PixelShader,
+                    VertexDeclarations[static_cast<int32>(EVertexDeclarations::ImageMesh)]);
+
+                CommandList.SetStreamSource(0, VertexBuffer->contents(), 0);
+                CommandList.SetStreamSource(1, UVBuffer->contents(), 0);
+
+                FRiveImageMeshVertexShader::FParameters VertexParameters;
+                FRiveImageMeshPixelShader::FParameters PixelParameters;
+
+                VertexParameters.FlushUniforms = m_flushUniformBuffer->contents();
+                VertexParameters.ImageDrawUniforms = m_imageDrawUniformBuffer->contents();
+
+                PixelParameters.FlushUniforms = m_flushUniformBuffer->contents();
+                PixelParameters.ImageDrawUniforms = m_imageDrawUniformBuffer->contents();
+
+                PixelParameters.GLSL_gradTexture_raw = m_gradiantTexture;
+                PixelParameters.GLSL_imageTexture_raw = imageTexture->contents();
+                PixelParameters.gradSampler = m_linearSampler;
+                PixelParameters.imageSampler = m_mipmapSampler;
+                PixelParameters.GLSL_paintAuxBuffer_raw = m_paintAuxBuffer->srv();
+                PixelParameters.GLSL_paintBuffer_raw = m_paintBuffer->srv();
+                PixelParameters.coverageCountBuffer = renderTarget->coverageUAV();
+                PixelParameters.clipBuffer = renderTarget->clipUAV();
+                PixelParameters.colorBuffer = renderTarget->targetUAV();
+
+                SetParameters(CommandList, BatchedShaderParameters, VertexShader, VertexParameters);
+                SetParameters(CommandList, BatchedShaderParameters, PixelShader, PixelParameters);
+
+                CommandList.DrawIndexedPrimitive(IndexBuffer->contents(), 0, 0,
+                    VertexBuffer->sizeInBytes() / sizeof(Vec2D), 0, batch.elementCount / 3, 1);
+                break;
+            }
+            case DrawType::gpuAtomicResolve:
+            {
+                GraphicsPSOInit.RasterizerState = TStaticRasterizerState<>::GetRHI();
+                GraphicsPSOInit.PrimitiveType = PT_TriangleStrip;
+
+                TShaderMapRef<FRiveAtomiResolveVertexShader> VertexShader(ShaderMap, VertexPermutationDomain);
+                TShaderMapRef<FRiveAtomiResolvePixelShader> PixelShader(ShaderMap, PixelPermutationDomain);
+
+                BindShaders(CommandList, GraphicsPSOInit, VertexShader, PixelShader,
+                    VertexDeclarations[static_cast<int32>(EVertexDeclarations::Resolve)]);
+
+                FRiveAtomiResolveVertexShader::FParameters VertexParameters;
+                FRiveAtomiResolvePixelShader::FParameters PixelParameters;
+
+                PixelParameters.GLSL_gradTexture_raw = m_gradiantTexture;
+                PixelParameters.gradSampler = m_linearSampler;
+                PixelParameters.GLSL_paintAuxBuffer_raw = m_paintAuxBuffer->srv();
+                PixelParameters.GLSL_paintBuffer_raw = m_paintBuffer->srv();
+                PixelParameters.coverageCountBuffer = renderTarget->coverageUAV();
+                PixelParameters.clipBuffer = renderTarget->clipUAV();
+                PixelParameters.colorBuffer = renderTarget->targetUAV();
+
+                VertexParameters.FlushUniforms = m_flushUniformBuffer->contents();
+
+                SetParameters(CommandList, BatchedShaderParameters, VertexShader, VertexParameters);
+                SetParameters(CommandList, BatchedShaderParameters, PixelShader, PixelParameters);
+
+                CommandList.DrawPrimitive(0, 2, 1);
+                break;
+            }
+            case DrawType::gpuAtomicInitialize:
+            case DrawType::stencilClipReset:
+                RIVE_UNREACHABLE();
+        }
+    }
+
+    CommandList.EndRenderPass();
+    if (desc.combinedShaderFeatures & ShaderFeatures::ENABLE_ADVANCED_BLEND)
+    {
+        CommandList.Transition(FRHITransitionInfo(DestTexture, ERHIAccess::UAVGraphics, ERHIAccess::UAVGraphics));
+    }
+    else
+    {
+        // needed for fixed function blend mode
+        CommandList.Transition(FRHITransitionInfo(DestTexture, ERHIAccess::RTV, ERHIAccess::UAVGraphics));
+    }
+}
diff --git a/Source/RiveRenderer/Private/Platform/pls_render_context_rhi_impl.hpp b/Source/RiveRenderer/Private/Platform/pls_render_context_rhi_impl.hpp
new file mode 100644
index 00000000..a4f819cc
--- /dev/null
+++ b/Source/RiveRenderer/Private/Platform/pls_render_context_rhi_impl.hpp
@@ -0,0 +1,239 @@
+#pragma once
+#include <chrono>
+
+#include "Shaders/ShaderPipelineManager.h"
+
+THIRD_PARTY_INCLUDES_START
+#undef PI
+#include "rive/renderer/rive_renderer.hpp"
+#include "rive/renderer/rive_render_image.hpp"
+#include "rive/renderer/buffer_ring.hpp"
+THIRD_PARTY_INCLUDES_END
+
+class RIVERENDERER_API RenderTargetRHI : public rive::gpu::RenderTarget
+{
+public:
+    RenderTargetRHI(FRHICommandList& RHICmdList, const FTexture2DRHIRef& InTextureTarget);
+    virtual ~RenderTargetRHI() override {}
+
+    FTexture2DRHIRef texture() const { return m_textureTarget; }
+    FUnorderedAccessViewRHIRef targetUAV() const { return m_targetUAV; }
+    FUnorderedAccessViewRHIRef coverageUAV() const { return m_coverageUAV; }
+    FUnorderedAccessViewRHIRef clipUAV() const { return m_clipUAV; }
+    FUnorderedAccessViewRHIRef scratchColorUAV() const { return m_scratchColorUAV; }
+
+private:
+    FTexture2DRHIRef m_scratchColorTexture;
+    FTexture2DRHIRef m_textureTarget;
+    FTexture2DRHIRef m_atomicCoverageTexture;
+    FTexture2DRHIRef m_clipTexture;
+
+    FUnorderedAccessViewRHIRef m_coverageUAV;
+    FUnorderedAccessViewRHIRef m_clipUAV;
+    FUnorderedAccessViewRHIRef m_scratchColorUAV;
+    FUnorderedAccessViewRHIRef m_targetUAV;
+};
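RenderTargetRHI bundles the destination texture with the coverage/clip/scratch UAVs the atomic shaders write. A hypothetical wrap of an existing RHI texture on the render thread (`ContextImpl` stands for the RenderContextRHIImpl instance; everything else is from this header):

    rive::rcp<RenderTargetRHI> Target =
        ContextImpl->makeRenderTarget(RHICmdList, Texture2DRHI);
    // Target->texture(), targetUAV(), coverageUAV(), clipUAV() are then
    // consumed by flush() as shown above.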
+class StructuredBufferRingRHIImpl;
+
+class BufferRingRHIImpl final : public rive::gpu::BufferRing
+{
+public:
+    BufferRingRHIImpl(EBufferUsageFlags flags, size_t in_sizeInBytes, size_t stride);
+    void Sync(FRHICommandList& commandList) const;
+    FBufferRHIRef contents() const;
+
+protected:
+    virtual void* onMapBuffer(int bufferIdx, size_t mapSizeInBytes) override;
+    virtual void onUnmapAndSubmitBuffer(int bufferIdx, size_t mapSizeInBytes) override;
+
+private:
+    FBufferRHIRef m_buffer;
+    EBufferUsageFlags m_flags;
+};
+
+template <typename UniformBufferType>
+class UniformBufferRHIImpl : public rive::gpu::BufferRing
+{
+public:
+    UniformBufferRHIImpl(size_t in_sizeInBytes) : BufferRing(in_sizeInBytes)
+    {
+        // m_buffer = TUniformBufferRef<UniformBufferType>::CreateEmptyUniformBufferImmediate(UniformBuffer_MultiFrame);
+    }
+
+    void Sync(FRHICommandList& commandList, int offset)
+    {
+        UniformBufferType* Buffer = reinterpret_cast<UniformBufferType*>(shadowBuffer() + offset);
+        m_buffer = TUniformBufferRef<UniformBufferType>::CreateUniformBufferImmediate(*Buffer, UniformBuffer_SingleFrame);
+    }
+
+    TUniformBufferRef<UniformBufferType> contents() const
+    {
+        return m_buffer;
+    }
+
+protected:
+    virtual void* onMapBuffer(int bufferIdx, size_t mapSizeInBytes) override
+    {
+        return shadowBuffer();
+    }
+
+    virtual void onUnmapAndSubmitBuffer(int bufferIdx, size_t mapSizeInBytes) override
+    {
+    }
+
+private:
+    TUniformBufferRef<UniformBufferType> m_buffer;
+};
+
+class RenderBufferRHIImpl final : public rive::lite_rtti_override<rive::RenderBuffer, RenderBufferRHIImpl>
+{
+public:
+    RenderBufferRHIImpl(rive::RenderBufferType in_type,
+        rive::RenderBufferFlags in_flags, size_t in_sizeInBytes, size_t stride);
+    void Sync(FRHICommandList& commandList) const;
+    FBufferRHIRef contents() const;
+
+protected:
+    virtual void* onMap() override;
+    virtual void onUnmap() override;
+
+private:
+    BufferRingRHIImpl m_buffer;
+    void* m_mappedBuffer;
+};
+
+class StructuredBufferRingRHIImpl final : public rive::gpu::BufferRing
+{
+public:
+    StructuredBufferRingRHIImpl(EBufferUsageFlags flags, size_t in_sizeInBytes, size_t elementSize);
+
+    template <typename HighLevelStruct>
+    void Sync(FRHICommandList& commandList, size_t elementOffset, size_t elementCount)
+    {
+        auto data = commandList.LockBuffer(m_buffer, 0, elementCount * sizeof(HighLevelStruct), RLM_WriteOnly_NoOverwrite);
+        memcpy(data, shadowBuffer() + (elementOffset * sizeof(HighLevelStruct)), elementCount * sizeof(HighLevelStruct));
+        commandList.UnlockBuffer(m_buffer);
+    }
+
+    FBufferRHIRef contents() const;
+    FShaderResourceViewRHIRef srv() const;
+
+protected:
+    virtual void* onMapBuffer(int bufferIdx, size_t mapSizeInBytes) override;
+    virtual void onUnmapAndSubmitBuffer(int bufferIdx, size_t mapSizeInBytes) override;
+
+private:
+    FBufferRHIRef m_buffer;
+    EBufferUsageFlags m_flags;
+    FShaderResourceViewRHIRef m_srv;
+    size_t m_elementSize;
+    size_t m_lastMapSizeInBytes;
+};
+
+enum class EVertexDeclarations : int32
+{
+    Tessellation,
+    Gradient,
+    Paths,
+    InteriorTriangles,
+    ImageRect,
+    ImageMesh,
+    Resolve,
+    NumVertexDeclarations
+};
+class RIVERENDERER_API RenderContextRHIImpl : public rive::gpu::RenderContextImpl
+{
+public:
+    static std::unique_ptr<rive::gpu::RenderContext> MakeContext(FRHICommandListImmediate& CommandListImmediate);
+
+    RenderContextRHIImpl(FRHICommandListImmediate& CommandListImmediate);
+
+    rive::rcp<RenderTargetRHI> makeRenderTarget(FRHICommandListImmediate& RHICmdList, const FTexture2DRHIRef& InTargetTexture);
+
+    virtual double secondsNow() const override
+    {
+        auto elapsed = std::chrono::steady_clock::now() - m_localEpoch;
+        return std::chrono::duration<double>(elapsed).count();
+    }
+
+    virtual rive::rcp<rive::gpu::Texture> decodeImageTexture(rive::Span<const uint8_t> encodedBytes) override;
+
+    virtual void resizeFlushUniformBuffer(size_t sizeInBytes) override;
+    virtual void resizeImageDrawUniformBuffer(size_t sizeInBytes) override;
+    virtual void resizePathBuffer(size_t sizeInBytes, rive::gpu::StorageBufferStructure) override;
+    virtual void resizePaintBuffer(size_t sizeInBytes, rive::gpu::StorageBufferStructure) override;
+    virtual void resizePaintAuxBuffer(size_t sizeInBytes, rive::gpu::StorageBufferStructure) override;
+    virtual void resizeContourBuffer(size_t sizeInBytes, rive::gpu::StorageBufferStructure) override;
+    virtual void resizeSimpleColorRampsBuffer(size_t sizeInBytes) override;
+    virtual void resizeGradSpanBuffer(size_t sizeInBytes) override;
+    virtual void resizeTessVertexSpanBuffer(size_t sizeInBytes) override;
+    virtual void resizeTriangleVertexBuffer(size_t sizeInBytes) override;
+
+    virtual void* mapFlushUniformBuffer(size_t mapSizeInBytes) override;
+    virtual void* mapImageDrawUniformBuffer(size_t mapSizeInBytes) override;
+    virtual void* mapPathBuffer(size_t mapSizeInBytes) override;
+    virtual void* mapPaintBuffer(size_t mapSizeInBytes) override;
+    virtual void* mapPaintAuxBuffer(size_t mapSizeInBytes) override;
+    virtual void* mapContourBuffer(size_t mapSizeInBytes) override;
+    virtual void* mapSimpleColorRampsBuffer(size_t mapSizeInBytes) override;
+    virtual void* mapGradSpanBuffer(size_t mapSizeInBytes) override;
+    virtual void* mapTessVertexSpanBuffer(size_t mapSizeInBytes) override;
+    virtual void* mapTriangleVertexBuffer(size_t mapSizeInBytes) override;
+
+    virtual void unmapFlushUniformBuffer() override;
+    virtual void unmapImageDrawUniformBuffer() override;
+    virtual void unmapPathBuffer() override;
+    virtual void unmapPaintBuffer() override;
+    virtual void unmapPaintAuxBuffer() override;
+    virtual void unmapContourBuffer() override;
+    virtual void unmapSimpleColorRampsBuffer() override;
+    virtual void unmapGradSpanBuffer() override;
+    virtual void unmapTessVertexSpanBuffer() override;
+    virtual void unmapTriangleVertexBuffer() override;
+
+    virtual rive::rcp<rive::RenderBuffer> makeRenderBuffer(rive::RenderBufferType, rive::RenderBufferFlags, size_t) override;
+    virtual void resizeGradientTexture(uint32_t width, uint32_t height) override;
+    virtual void resizeTessellationTexture(uint32_t width, uint32_t height) override;
+
+    virtual void flush(const rive::gpu::FlushDescriptor&) override;
+
+private:
+    FTextureRHIRef m_gradiantTexture;
+    FTextureRHIRef m_tesselationTexture;
+
+    FBufferRHIRef m_patchVertexBuffer;
+    FBufferRHIRef m_patchIndexBuffer;
+    FBufferRHIRef m_imageRectVertexBuffer;
+    FBufferRHIRef m_imageRectIndexBuffer;
+    FBufferRHIRef m_tessSpanIndexBuffer;
+
+    FShaderResourceViewRHIRef m_tessSRV;
+
+    FSamplerStateRHIRef m_linearSampler;
+    FSamplerStateRHIRef m_mipmapSampler;
+
+    FRHIVertexDeclaration* VertexDeclarations[static_cast<int32>(EVertexDeclarations::NumVertexDeclarations)];
+
+    std::unique_ptr<UniformBufferRHIImpl<FFlushUniforms>> m_flushUniformBuffer;
+    std::unique_ptr<UniformBufferRHIImpl<FImageDrawUniforms>> m_imageDrawUniformBuffer;
+    std::unique_ptr<StructuredBufferRingRHIImpl> m_pathBuffer;
+    std::unique_ptr<StructuredBufferRingRHIImpl> m_paintBuffer;
+    std::unique_ptr<StructuredBufferRingRHIImpl> m_paintAuxBuffer;
+    std::unique_ptr<StructuredBufferRingRHIImpl> m_contourBuffer;
+    std::unique_ptr<rive::gpu::HeapBufferRing> m_simpleColorRampsBuffer;
+    std::unique_ptr<BufferRingRHIImpl> m_gradSpanBuffer;
+    std::unique_ptr<BufferRingRHIImpl> m_tessSpanBuffer;
+    std::unique_ptr<BufferRingRHIImpl> m_triangleBuffer;
+
+    std::chrono::steady_clock::time_point m_localEpoch = std::chrono::steady_clock::now();
+};
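The uniform rings shadow rive's writes on the CPU and only create an RHI uniform buffer at sync time. A sketch of the per-flush interaction, assuming (as the names suggest) that the SYNC_BUFFER_WITH_OFFSET macro forwards to Sync:

    // inside flush():
    m_flushUniformBuffer->Sync(CommandList, desc.flushUniformDataOffsetInBytes);
    // contents() now returns a single-frame TUniformBufferRef<FFlushUniforms>
    VertexParameters.FlushUniforms = m_flushUniformBuffer->contents();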
"rive/artboard.hpp" #include "rive/renderer/rive_renderer.hpp" +#include "rive/renderer/render_target.hpp" + THIRD_PARTY_INCLUDES_END #if PLATFORM_APPLE @@ -152,10 +154,29 @@ FMatrix FRiveRenderTarget::GetTransformMatrix() const return CurrentMatrix; } +void FRiveRenderTarget::RegisterRenderCommand(RiveRenderFunction RenderFunction) +{ + FScopeLock Lock(&RiveRenderer->GetThreadDataCS()); + ENQUEUE_RENDER_COMMAND(FRiveRenderTarget_CustomRenderCommand)( + [this, RenderFunction = std::move(RenderFunction)](FRHICommandListImmediate& RHICmdList) + { + auto renderer = BeginFrame(); + if(!renderer) + { + return; + } + + rive::gpu::RenderContext* factory = RiveRenderer->GetRenderContext(); + RenderFunction(factory, renderer.get()); + + EndFrame(); + }); +} + std::unique_ptr FRiveRenderTarget::BeginFrame() { - rive::gpu::RenderContext* RenderContext = RiveRenderer->GetRenderContext(); - if (RenderContext == nullptr) + rive::gpu::RenderContext* PLSRenderContextPtr = RiveRenderer->GetRenderContext(); + if (PLSRenderContextPtr == nullptr) { return nullptr; } @@ -179,8 +200,8 @@ std::unique_ptr FRiveRenderTarget::BeginFrame() RIVE_DEBUG_VERBOSE("FRiveRenderTargetOpenGL RenderContext->beginFrame %p", RenderContext); #endif - RenderContext->beginFrame(std::move(FrameDescriptor)); - return std::make_unique(RenderContext); + PLSRenderContextPtr->beginFrame(std::move(FrameDescriptor)); + return std::make_unique(PLSRenderContextPtr); } void FRiveRenderTarget::EndFrame() const diff --git a/Source/RiveRenderer/Private/RiveRenderTarget.h b/Source/RiveRenderer/Private/RiveRenderTarget.h index 2a8e5114..1218b17b 100644 --- a/Source/RiveRenderer/Private/RiveRenderTarget.h +++ b/Source/RiveRenderer/Private/RiveRenderTarget.h @@ -12,6 +12,12 @@ THIRD_PARTY_INCLUDES_START #include "rive/refcnt.hpp" THIRD_PARTY_INCLUDES_END + +namespace rive +{ + class RiveRenderer; +} + #endif // WITH_RIVE class UTexture2DDynamic; @@ -55,6 +61,7 @@ class FRiveRenderTarget : public IRiveRenderTarget virtual void Align(const FBox2f& InBox, ERiveFitType InFit, const FVector2f& InAlignment, rive::Artboard* InArtboard) override; virtual void Align(ERiveFitType InFit, const FVector2f& InAlignment, rive::Artboard* InArtboard) override; virtual FMatrix GetTransformMatrix() const override; + virtual void RegisterRenderCommand(RiveRenderFunction RenderFunction)override; protected: virtual rive::rcp GetRenderTarget() const = 0; diff --git a/Source/RiveRenderer/Private/RiveRenderer.h b/Source/RiveRenderer/Private/RiveRenderer.h index da63402b..c89b0c70 100644 --- a/Source/RiveRenderer/Private/RiveRenderer.h +++ b/Source/RiveRenderer/Private/RiveRenderer.h @@ -9,6 +9,11 @@ #if WITH_RIVE +namespace rive::gpu +{ + class Renderer; + class RenderContext; +} #endif // WITH_RIVE diff --git a/Source/RiveRenderer/Private/RiveRendererModule.cpp b/Source/RiveRenderer/Private/RiveRendererModule.cpp index 1a9d0670..1728f969 100644 --- a/Source/RiveRenderer/Private/RiveRendererModule.cpp +++ b/Source/RiveRenderer/Private/RiveRendererModule.cpp @@ -1,8 +1,11 @@ // Copyright Rive, Inc. All rights reserved. 
#include "RiveRendererModule.h" + #include "RiveRenderer.h" #include "Logs/RiveRendererLog.h" +#include "Platform/RiveRendererRHI.h" +#include "RiveRendererSettings.h" #if PLATFORM_WINDOWS #include "Platform/RiveRendererD3D11.h" @@ -21,23 +24,74 @@ void FRiveRendererModule::StartupModule() check(GDynamicRHI); // Create Platform Specific Renderer RiveRenderer = nullptr; + + const URiveRendererSettings* PluginSettings = GetDefault(); + if (PluginSettings->bEnableRHITechPreview) + { + UE_LOG(LogRiveRenderer, Warning, TEXT("Rive running on RHI Native Tech Preview !")) + RiveRenderer = MakeShared(); + } + else + { + StartupLegacyRiveRenderer(); + } + + // OnBeginFrameHandle = FCoreDelegates::OnFEngineLoopInitComplete.AddLambda([this]() // Crashes sometimes on Android when on GameThread + OnBeginFrameHandle = FCoreDelegates::OnBeginFrame.AddLambda([this]() + { + if (RiveRenderer.IsValid()) + { + RiveRenderer->Initialize(); + OnRendererInitializedDelegate.Broadcast(); + } + FCoreDelegates::OnBeginFrame.Remove(OnBeginFrameHandle); + }); +} + +void FRiveRendererModule::ShutdownModule() +{ + if (RiveRenderer) + { + RiveRenderer.Reset(); + } +} + +IRiveRenderer* FRiveRendererModule::GetRenderer() +{ + return RiveRenderer.Get(); +} + +void FRiveRendererModule::CallOrRegister_OnRendererInitialized(FSimpleMulticastDelegate::FDelegate&& Delegate) +{ + if (RiveRenderer.IsValid() && RiveRenderer->IsInitialized()) + { + Delegate.Execute(); + } + else + { + OnRendererInitializedDelegate.Add(MoveTemp(Delegate)); + } +} + +void FRiveRendererModule::StartupLegacyRiveRenderer() +{ switch (RHIGetInterfaceType()) { #if PLATFORM_WINDOWS - case ERHIInterfaceType::D3D11: + case ERHIInterfaceType::D3D11: { UE_LOG(LogRiveRenderer, Display, TEXT("Rive running on RHI 'D3D11'")) RiveRenderer = MakeShared(); break; } - case ERHIInterfaceType::D3D12: + case ERHIInterfaceType::D3D12: { UE_LOG(LogRiveRenderer, Error, TEXT("Rive is NOT compatible with RHI 'D3D12'")) break; } #endif // PLATFORM_WINDOWS #if PLATFORM_ANDROID - case ERHIInterfaceType::OpenGL: + case ERHIInterfaceType::OpenGL: { UE_LOG(LogRiveRenderer, Display, TEXT("Rive running on RHI 'OpenGL'")) RiveRenderer = MakeShared(); @@ -57,54 +111,19 @@ void FRiveRendererModule::StartupModule() break; } #endif // PLATFORM_APPLE - case ERHIInterfaceType::Vulkan: + case ERHIInterfaceType::Vulkan: { UE_LOG(LogRiveRenderer, Error, TEXT("Rive is NOT compatible with RHI 'Vulkan'")) break; } - default: - if(!IsRunningCommandlet()) - { - UE_LOG(LogRiveRenderer, Error, TEXT("Rive is NOT compatible with the current unknown RHI")) - } + default: + if(!IsRunningCommandlet()) + { + UE_LOG(LogRiveRenderer, Error, TEXT("Rive is NOT compatible with the current unknown RHI")) + } break; } - // OnBeginFrameHandle = FCoreDelegates::OnFEngineLoopInitComplete.AddLambda([this]() // Crashes sometimes on Android when on GameThread - OnBeginFrameHandle = FCoreDelegates::OnBeginFrame.AddLambda([this]() - { - if (RiveRenderer.IsValid()) - { - RiveRenderer->Initialize(); - OnRendererInitializedDelegate.Broadcast(); - } - FCoreDelegates::OnBeginFrame.Remove(OnBeginFrameHandle); - }); -} - -void FRiveRendererModule::ShutdownModule() -{ - if (RiveRenderer) - { - RiveRenderer.Reset(); - } -} - -IRiveRenderer* FRiveRendererModule::GetRenderer() -{ - return RiveRenderer.Get(); -} - -void FRiveRendererModule::CallOrRegister_OnRendererInitialized(FSimpleMulticastDelegate::FDelegate&& Delegate) -{ - if (RiveRenderer.IsValid() && RiveRenderer->IsInitialized()) - { - Delegate.Execute(); - } - else - { - 
-		OnRendererInitializedDelegate.Add(MoveTemp(Delegate));
-	}
 }
 
 #undef LOCTEXT_NAMESPACE
diff --git a/Source/RiveRenderer/Private/RiveRendererModule.h b/Source/RiveRenderer/Private/RiveRendererModule.h
index c4094c9c..216afcee 100644
--- a/Source/RiveRenderer/Private/RiveRendererModule.h
+++ b/Source/RiveRenderer/Private/RiveRendererModule.h
@@ -29,6 +29,9 @@ class FRiveRendererModule : public IRiveRendererModule
 	 * Attribute(s)
 	 */
 
+private:
+	void StartupLegacyRiveRenderer();
+
 private:
 	TSharedPtr<IRiveRenderer> RiveRenderer;
 	FSimpleMulticastDelegate OnRendererInitializedDelegate;
diff --git a/Source/RiveRenderer/Private/RiveRendererSettings.cpp b/Source/RiveRenderer/Private/RiveRendererSettings.cpp
new file mode 100644
index 00000000..02b28664
--- /dev/null
+++ b/Source/RiveRenderer/Private/RiveRendererSettings.cpp
@@ -0,0 +1,9 @@
+// Fill out your copyright notice in the Description page of Project Settings.
+
+#include "RiveRendererSettings.h"
+
+URiveRendererSettings::URiveRendererSettings() : bEnableRHITechPreview(false)
+{
+}
diff --git a/Source/RiveRenderer/Private/Shaders/ShaderPipelineManager.cpp b/Source/RiveRenderer/Private/Shaders/ShaderPipelineManager.cpp
new file mode 100644
index 00000000..b47f003e
--- /dev/null
+++ b/Source/RiveRenderer/Private/Shaders/ShaderPipelineManager.cpp
@@ -0,0 +1,125 @@
+#include "ShaderPipelineManager.h"
+
+#include
+
+#include
+#include "GlobalShader.h"
+
+#include "CommonRenderResources.h"
+#include "ResolveShader.h"
+#include "Platform/pls_render_context_rhi_impl.hpp"
+#include "ShaderCompilerCore.h"
+
+THIRD_PARTY_INCLUDES_START
+#include "rive/shaders/out/generated/shaders/constants.glsl.hpp"
+THIRD_PARTY_INCLUDES_END
+
+void FRiveGradientPixelShader::ModifyCompilationEnvironment(const FShaderPermutationParameters& Params, FShaderCompilerEnvironment& Environment)
+{
+    Environment.SetDefine(TEXT("FRAGMENT"), TEXT("1"));
+    Environment.CompilerFlags.Add(CFLAG_AllowTypedUAVLoads);
+}
+
+void FRiveGradientVertexShader::ModifyCompilationEnvironment(const FShaderPermutationParameters& Params, FShaderCompilerEnvironment& Environment)
+{
+    Environment.SetDefine(TEXT("VERTEX"), TEXT("1"));
+    Environment.CompilerFlags.Add(CFLAG_AllowTypedUAVLoads);
+}
+
+void FRiveTessPixelShader::ModifyCompilationEnvironment(const FShaderPermutationParameters& Params, FShaderCompilerEnvironment& Environment)
+{
+    Environment.SetDefine(TEXT("FRAGMENT"), TEXT("1"));
+    Environment.CompilerFlags.Add(CFLAG_AllowTypedUAVLoads);
+}
+
+void FRiveTessVertexShader::ModifyCompilationEnvironment(const FShaderPermutationParameters& Params, FShaderCompilerEnvironment& Environment)
+{
+    Environment.SetDefine(TEXT("VERTEX"), TEXT("1"));
+    Environment.CompilerFlags.Add(CFLAG_AllowTypedUAVLoads);
+}
+
+void FRivePathPixelShader::ModifyCompilationEnvironment(const FShaderPermutationParameters& Params, FShaderCompilerEnvironment& Environment)
+{
+    Environment.SetDefine(TEXT("FRAGMENT"), TEXT("1"));
+    Environment.CompilerFlags.Add(CFLAG_AllowTypedUAVLoads);
+}
+
+void FRivePathVertexShader::ModifyCompilationEnvironment(const FShaderPermutationParameters& Params, FShaderCompilerEnvironment& Environment)
+{
+    Environment.SetDefine(TEXT("VERTEX"), TEXT("1"));
+    Environment.CompilerFlags.Add(CFLAG_AllowTypedUAVLoads);
+}
+
+void FRiveInteriorTrianglesPixelShader::ModifyCompilationEnvironment(const FShaderPermutationParameters& Params, FShaderCompilerEnvironment& Environment)
+{
+    Environment.SetDefine(TEXT("FRAGMENT"), TEXT("1"));
+    Environment.CompilerFlags.Add(CFLAG_AllowTypedUAVLoads);
+}
+
+void FRiveInteriorTrianglesVertexShader::ModifyCompilationEnvironment(const FShaderPermutationParameters& Params, FShaderCompilerEnvironment& Environment)
+{
+    Environment.SetDefine(TEXT("VERTEX"), TEXT("1"));
+    Environment.CompilerFlags.Add(CFLAG_AllowTypedUAVLoads);
+}
+
+void FRiveImageRectPixelShader::ModifyCompilationEnvironment(const FShaderPermutationParameters& Params, FShaderCompilerEnvironment& Environment)
+{
+    Environment.SetDefine(TEXT("FRAGMENT"), TEXT("1"));
+    Environment.CompilerFlags.Add(CFLAG_AllowTypedUAVLoads);
+}
+
+void FRiveImageRectVertexShader::ModifyCompilationEnvironment(const FShaderPermutationParameters& Params, FShaderCompilerEnvironment& Environment)
+{
+    Environment.SetDefine(TEXT("VERTEX"), TEXT("1"));
+    Environment.CompilerFlags.Add(CFLAG_AllowTypedUAVLoads);
+}
+
+void FRiveImageMeshPixelShader::ModifyCompilationEnvironment(const FShaderPermutationParameters& Params, FShaderCompilerEnvironment& Environment)
+{
+    Environment.SetDefine(TEXT("FRAGMENT"), TEXT("1"));
+    Environment.CompilerFlags.Add(CFLAG_AllowTypedUAVLoads);
+}
+
+void FRiveImageMeshVertexShader::ModifyCompilationEnvironment(const FShaderPermutationParameters& Params, FShaderCompilerEnvironment& Environment)
+{
+    Environment.SetDefine(TEXT("VERTEX"), TEXT("1"));
+    Environment.CompilerFlags.Add(CFLAG_AllowTypedUAVLoads);
+}
+
+void FRiveAtomiResolvePixelShader::ModifyCompilationEnvironment(const FShaderPermutationParameters& Params, FShaderCompilerEnvironment& Environment)
+{
+    Environment.SetDefine(TEXT("FRAGMENT"), TEXT("1"));
+    Environment.CompilerFlags.Add(CFLAG_AllowTypedUAVLoads);
+}
+
+void FRiveAtomiResolveVertexShader::ModifyCompilationEnvironment(const FShaderPermutationParameters& Params, FShaderCompilerEnvironment& Environment)
+{
+    Environment.SetDefine(TEXT("VERTEX"), TEXT("1"));
+    Environment.CompilerFlags.Add(CFLAG_AllowTypedUAVLoads);
+}
+
+IMPLEMENT_GLOBAL_SHADER(FRiveGradientPixelShader, "/Plugin/Rive/Private/Rive/color_ramp.usf", GLSL_colorRampFragmentMain, SF_Pixel);
+IMPLEMENT_GLOBAL_SHADER(FRiveGradientVertexShader, "/Plugin/Rive/Private/Rive/color_ramp.usf", GLSL_colorRampVertexMain, SF_Vertex);
+
+IMPLEMENT_GLOBAL_SHADER(FRiveTessPixelShader, "/Plugin/Rive/Private/Rive/tessellate.usf", GLSL_tessellateFragmentMain, SF_Pixel);
+IMPLEMENT_GLOBAL_SHADER(FRiveTessVertexShader, "/Plugin/Rive/Private/Rive/tessellate.usf", GLSL_tessellateVertexMain, SF_Vertex);
+
+IMPLEMENT_GLOBAL_SHADER(FRivePathPixelShader, "/Plugin/Rive/Private/Rive/atomic_draw_path.usf", GLSL_drawFragmentMain, SF_Pixel);
+IMPLEMENT_GLOBAL_SHADER(FRivePathVertexShader, "/Plugin/Rive/Private/Rive/atomic_draw_path.usf", GLSL_drawVertexMain, SF_Vertex);
+
+IMPLEMENT_GLOBAL_SHADER(FRiveInteriorTrianglesPixelShader, "/Plugin/Rive/Private/Rive/atomic_draw_interior_triangles.usf", GLSL_drawFragmentMain, SF_Pixel);
+IMPLEMENT_GLOBAL_SHADER(FRiveInteriorTrianglesVertexShader, "/Plugin/Rive/Private/Rive/atomic_draw_interior_triangles.usf", GLSL_drawVertexMain, SF_Vertex);
+
+IMPLEMENT_GLOBAL_SHADER(FRiveImageRectPixelShader, "/Plugin/Rive/Private/Rive/atomic_draw_image_rect.usf", GLSL_drawFragmentMain, SF_Pixel);
+IMPLEMENT_GLOBAL_SHADER(FRiveImageRectVertexShader, "/Plugin/Rive/Private/Rive/atomic_draw_image_rect.usf", GLSL_drawVertexMain, SF_Vertex);
+
+IMPLEMENT_GLOBAL_SHADER(FRiveImageMeshPixelShader, "/Plugin/Rive/Private/Rive/atomic_draw_image_mesh.usf", GLSL_drawFragmentMain, SF_Pixel);
+IMPLEMENT_GLOBAL_SHADER(FRiveImageMeshVertexShader, "/Plugin/Rive/Private/Rive/atomic_draw_image_mesh.usf", GLSL_drawVertexMain, SF_Vertex);
+
+IMPLEMENT_GLOBAL_SHADER(FRiveAtomiResolvePixelShader, "/Plugin/Rive/Private/Rive/atomic_resolve_pls.usf", GLSL_drawFragmentMain, SF_Pixel);
+IMPLEMENT_GLOBAL_SHADER(FRiveAtomiResolveVertexShader, "/Plugin/Rive/Private/Rive/atomic_resolve_pls.usf", GLSL_drawVertexMain, SF_Vertex);
+
+IMPLEMENT_GLOBAL_SHADER_PARAMETER_STRUCT(FFlushUniforms, "uniforms");
+IMPLEMENT_GLOBAL_SHADER_PARAMETER_STRUCT(FImageDrawUniforms, "imageDrawUniforms");
\ No newline at end of file
diff --git a/Source/RiveRenderer/Private/Shaders/ShaderPipelineManager.h b/Source/RiveRenderer/Private/Shaders/ShaderPipelineManager.h
new file mode 100644
index 00000000..9ad3102e
--- /dev/null
+++ b/Source/RiveRenderer/Private/Shaders/ShaderPipelineManager.h
@@ -0,0 +1,358 @@
+#pragma once
+#include "ShaderParameterStruct.h"
+#include "RenderResource.h"
+#include "RHI.h"
+#include "GlobalShader.h"
+
+#include "HLSLTypeAliases.h"
+#include "rive/shaders/out/generated/shaders/rhi.exports.h"
+
+namespace rive::gpu
+{
+struct DrawBatch;
+struct FlushDescriptor;
+}
+
+// shader permutation params
+// Whole
+class FEnableClip : SHADER_PERMUTATION_BOOL("ENABLE_CLIPPING");
+class FEnableClipRect : SHADER_PERMUTATION_BOOL("ENABLE_CLIP_RECT");
+class FEnableAdvanceBlend : SHADER_PERMUTATION_BOOL("ENABLE_ADVANCED_BLEND");
+
+// FragmentOnly
+class FEnableFixedFunctionColorBlend : SHADER_PERMUTATION_BOOL("FIXED_FUNCTION_COLOR_BLEND");
+class FEnableHSLBlendMode : SHADER_PERMUTATION_BOOL("ENABLE_HSL_BLEND_MODES");
+class FEnableNestedClip : SHADER_PERMUTATION_BOOL("ENABLE_NESTED_CLIPPING");
+class FEnableEvenOdd : SHADER_PERMUTATION_BOOL("ENABLE_EVEN_ODD");
+
+typedef TShaderPermutationDomain<FEnableClip, FEnableClipRect, FEnableAdvanceBlend,
+    FEnableFixedFunctionColorBlend, FEnableHSLBlendMode, FEnableNestedClip, FEnableEvenOdd> AtomicPixelPermutationDomain;
+typedef TShaderPermutationDomain<FEnableClip, FEnableClipRect, FEnableAdvanceBlend> AtomicVertexPermutationDomain;
+
+#define USE_ATOMIC_PIXEL_PERMUTATIONS \
+    using FPermutationDomain = AtomicPixelPermutationDomain;
+
+#define USE_ATOMIC_VERTEX_PERMUTATIONS \
+    using FPermutationDomain = AtomicVertexPermutationDomain;
+
+BEGIN_GLOBAL_SHADER_PARAMETER_STRUCT(FFlushUniforms, )
+    SHADER_PARAMETER(float, gradInverseViewportY)
+    SHADER_PARAMETER(float, tessInverseViewportY)
+    SHADER_PARAMETER(float, renderTargetInverseViewportX)
+    SHADER_PARAMETER(float, renderTargetInverseViewportY)
+    SHADER_PARAMETER(UE::HLSL::uint, renderTargetWidth)
+    SHADER_PARAMETER(UE::HLSL::uint, renderTargetHeight)
+    SHADER_PARAMETER(UE::HLSL::uint, colorClearValue)    // Only used if clears are implemented as draws.
+    SHADER_PARAMETER(UE::HLSL::uint, coverageClearValue) // Only used if clears are implemented as draws.
+    SHADER_PARAMETER(UE::HLSL::int4, renderTargetUpdateBounds) // drawBounds, or renderTargetBounds if there is a clear. (LTRB.)
+    SHADER_PARAMETER(UE::HLSL::uint, pathIDGranularity)  // Spacing between adjacent path IDs (1 if IEEE compliant).
+ SHADER_PARAMETER(float, vertexDiscardValue) +END_GLOBAL_SHADER_PARAMETER_STRUCT(); + +BEGIN_GLOBAL_SHADER_PARAMETER_STRUCT(FImageDrawUniforms, ) + SHADER_PARAMETER(UE::HLSL::float4, viewMatrix) + SHADER_PARAMETER(UE::HLSL::float2, translate) + SHADER_PARAMETER(float, opacity) + SHADER_PARAMETER(float, padding) + SHADER_PARAMETER(UE::HLSL::float4, clipRectInverseMatrix) + SHADER_PARAMETER(UE::HLSL::float2, clipRectInverseTranslate) + SHADER_PARAMETER(UE::HLSL::uint, clipID) + SHADER_PARAMETER(UE::HLSL::uint, blendMode) + SHADER_PARAMETER(UE::HLSL::uint, zIndex) +END_GLOBAL_SHADER_PARAMETER_STRUCT() + +class FRiveGradientPixelShader : public FGlobalShader +{ +public: + + DECLARE_GLOBAL_SHADER(FRiveGradientPixelShader); + SHADER_USE_PARAMETER_STRUCT(FRiveGradientPixelShader, FGlobalShader); + + BEGIN_SHADER_PARAMETER_STRUCT(FParameters, ) + SHADER_PARAMETER_STRUCT_REF(FFlushUniforms, FlushUniforms) + END_SHADER_PARAMETER_STRUCT() + + static void ModifyCompilationEnvironment(const FShaderPermutationParameters&, FShaderCompilerEnvironment&); +}; + +class FRiveGradientVertexShader : public FGlobalShader +{ +public: + + DECLARE_GLOBAL_SHADER(FRiveGradientVertexShader); + SHADER_USE_PARAMETER_STRUCT(FRiveGradientVertexShader, FGlobalShader); + + BEGIN_SHADER_PARAMETER_STRUCT(FParameters, ) + SHADER_PARAMETER_STRUCT_REF(FFlushUniforms, FlushUniforms) + END_SHADER_PARAMETER_STRUCT() + + static void ModifyCompilationEnvironment(const FShaderPermutationParameters&, FShaderCompilerEnvironment&); +}; + +class FRiveTessPixelShader : public FGlobalShader +{ +public: + + DECLARE_GLOBAL_SHADER(FRiveTessPixelShader); + SHADER_USE_PARAMETER_STRUCT(FRiveTessPixelShader, FGlobalShader); + BEGIN_SHADER_PARAMETER_STRUCT(FParameters, ) + SHADER_PARAMETER_STRUCT_REF(FFlushUniforms, FlushUniforms) + END_SHADER_PARAMETER_STRUCT() + + static void ModifyCompilationEnvironment(const FShaderPermutationParameters&, FShaderCompilerEnvironment&); +}; + +class FRiveTessVertexShader : public FGlobalShader +{ +public: + + DECLARE_GLOBAL_SHADER(FRiveTessVertexShader); + SHADER_USE_PARAMETER_STRUCT(FRiveTessVertexShader, FGlobalShader); + + BEGIN_SHADER_PARAMETER_STRUCT(FParameters, ) + SHADER_PARAMETER_STRUCT_REF(FFlushUniforms, FlushUniforms) + SHADER_PARAMETER_SRV(Buffer, GLSL_pathBuffer_raw) + SHADER_PARAMETER_SRV(Buffer, GLSL_contourBuffer_raw) + END_SHADER_PARAMETER_STRUCT() + + static void ModifyCompilationEnvironment(const FShaderPermutationParameters&, FShaderCompilerEnvironment&); +}; + +class FRivePathPixelShader : public FGlobalShader +{ +public: + + DECLARE_GLOBAL_SHADER(FRivePathPixelShader); + SHADER_USE_PARAMETER_STRUCT(FRivePathPixelShader, FGlobalShader); + BEGIN_SHADER_PARAMETER_STRUCT(FParameters, ) + SHADER_PARAMETER_STRUCT_REF(FFlushUniforms, FlushUniforms) + SHADER_PARAMETER_TEXTURE(Texture2D, GLSL_gradTexture_raw) + SHADER_PARAMETER_UAV(Texture2D, coverageCountBuffer) + SHADER_PARAMETER_UAV(Texture2D, clipBuffer) + SHADER_PARAMETER_UAV(Texture2D, colorBuffer) + SHADER_PARAMETER_SAMPLER(SamplerState, gradSampler) + + SHADER_PARAMETER_SRV(Buffer, GLSL_paintBuffer_raw) + SHADER_PARAMETER_SRV(Buffer, GLSL_paintAuxBuffer_raw) + END_SHADER_PARAMETER_STRUCT() + + USE_ATOMIC_PIXEL_PERMUTATIONS + + static void ModifyCompilationEnvironment(const FShaderPermutationParameters&, FShaderCompilerEnvironment&); + static bool ShouldCompilePermutation(const FShaderPermutationParameters& Parameters) + {return true;} + +}; + +class FRivePathVertexShader : public FGlobalShader +{ +public: + + 
DECLARE_GLOBAL_SHADER(FRivePathVertexShader); + SHADER_USE_PARAMETER_STRUCT(FRivePathVertexShader, FGlobalShader); + BEGIN_SHADER_PARAMETER_STRUCT(FParameters, ) + SHADER_PARAMETER_STRUCT_REF(FFlushUniforms, FlushUniforms) + + SHADER_PARAMETER_SRV(Texture2D, GLSL_tessVertexTexture_raw) + SHADER_PARAMETER_SRV(Buffer, GLSL_pathBuffer_raw) + SHADER_PARAMETER_SRV(Buffer, GLSL_contourBuffer_raw) + SHADER_PARAMETER(unsigned int, baseInstance) + + END_SHADER_PARAMETER_STRUCT() + + USE_ATOMIC_VERTEX_PERMUTATIONS + + static void ModifyCompilationEnvironment(const FShaderPermutationParameters&, FShaderCompilerEnvironment&); + static bool ShouldCompilePermutation(const FShaderPermutationParameters& Parameters) + {return true;} + +}; + +class FRiveInteriorTrianglesPixelShader : public FGlobalShader +{ +public: + + DECLARE_GLOBAL_SHADER(FRiveInteriorTrianglesPixelShader); + SHADER_USE_PARAMETER_STRUCT(FRiveInteriorTrianglesPixelShader, FGlobalShader); + BEGIN_SHADER_PARAMETER_STRUCT(FParameters, ) + SHADER_PARAMETER_STRUCT_REF(FFlushUniforms, FlushUniforms) + + SHADER_PARAMETER_TEXTURE(Texture2D, GLSL_gradTexture_raw) + SHADER_PARAMETER_UAV(Texture2D, coverageCountBuffer) + SHADER_PARAMETER_UAV(Texture2D, clipBuffer) + SHADER_PARAMETER_UAV(Texture2D, colorBuffer) + + SHADER_PARAMETER_SAMPLER(SamplerState, gradSampler) + + SHADER_PARAMETER_SRV(Buffer, GLSL_paintBuffer_raw) + SHADER_PARAMETER_SRV(Buffer, GLSL_paintAuxBuffer_raw) + + END_SHADER_PARAMETER_STRUCT() + + USE_ATOMIC_PIXEL_PERMUTATIONS + + static void ModifyCompilationEnvironment(const FShaderPermutationParameters&, FShaderCompilerEnvironment&); + static bool ShouldCompilePermutation(const FShaderPermutationParameters& Parameters) + {return true;} + +}; + +class FRiveInteriorTrianglesVertexShader : public FGlobalShader +{ +public: + DECLARE_GLOBAL_SHADER(FRiveInteriorTrianglesVertexShader); + SHADER_USE_PARAMETER_STRUCT(FRiveInteriorTrianglesVertexShader, FGlobalShader); + BEGIN_SHADER_PARAMETER_STRUCT(FParameters, ) + SHADER_PARAMETER_STRUCT_REF(FFlushUniforms, FlushUniforms) + SHADER_PARAMETER_SRV(Buffer, GLSL_pathBuffer_raw) + END_SHADER_PARAMETER_STRUCT() + + USE_ATOMIC_VERTEX_PERMUTATIONS + + static void ModifyCompilationEnvironment(const FShaderPermutationParameters&, FShaderCompilerEnvironment&); + static bool ShouldCompilePermutation(const FShaderPermutationParameters& Parameters) + {return true;} + +}; + +class FRiveImageRectPixelShader : public FGlobalShader +{ +public: + + DECLARE_GLOBAL_SHADER(FRiveImageRectPixelShader); + SHADER_USE_PARAMETER_STRUCT(FRiveImageRectPixelShader, FGlobalShader); + + BEGIN_SHADER_PARAMETER_STRUCT(FParameters, ) + SHADER_PARAMETER_STRUCT_REF(FFlushUniforms, FlushUniforms) + SHADER_PARAMETER_STRUCT_REF(FImageDrawUniforms, ImageDrawUniforms) + + SHADER_PARAMETER_TEXTURE(Texture2D, GLSL_gradTexture_raw) + SHADER_PARAMETER_TEXTURE(Texture2D, GLSL_imageTexture_raw) + + SHADER_PARAMETER_UAV(Texture2D, coverageCountBuffer) + SHADER_PARAMETER_UAV(Texture2D, clipBuffer) + SHADER_PARAMETER_UAV(Texture2D, colorBuffer) + SHADER_PARAMETER_SAMPLER(SamplerState, gradSampler) + SHADER_PARAMETER_SAMPLER(SamplerState, imageSampler) + + SHADER_PARAMETER_SRV(Buffer, GLSL_paintBuffer_raw) + SHADER_PARAMETER_SRV(Buffer, GLSL_paintAuxBuffer_raw) + END_SHADER_PARAMETER_STRUCT() + + USE_ATOMIC_PIXEL_PERMUTATIONS + + static void ModifyCompilationEnvironment(const FShaderPermutationParameters&, FShaderCompilerEnvironment&); + static bool ShouldCompilePermutation(const FShaderPermutationParameters& Parameters) + {return true;} +}; 
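The USE_ATOMIC_*_PERMUTATIONS macros give each shader class the shared permutation domain. A sketch of how a caller selects a permutation before fetching shaders, mirroring the GetPermutationForFeatures call in flush() (the specific flag choices here are illustrative):

    AtomicPixelPermutationDomain PixelPermutation;
    PixelPermutation.Set<FEnableClip>(true);
    PixelPermutation.Set<FEnableAdvanceBlend>(false);
    TShaderMapRef<FRivePathPixelShader> PixelShader(ShaderMap, PixelPermutation);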
+ +class FRiveImageRectVertexShader : public FGlobalShader +{ +public: + + DECLARE_GLOBAL_SHADER( FRiveImageRectVertexShader); + SHADER_USE_PARAMETER_STRUCT(FRiveImageRectVertexShader, FGlobalShader); + + BEGIN_SHADER_PARAMETER_STRUCT(FParameters, ) + SHADER_PARAMETER_STRUCT_REF(FFlushUniforms, FlushUniforms) + SHADER_PARAMETER_STRUCT_REF(FImageDrawUniforms, ImageDrawUniforms) + END_SHADER_PARAMETER_STRUCT() + + USE_ATOMIC_VERTEX_PERMUTATIONS + + static void ModifyCompilationEnvironment(const FShaderPermutationParameters&, FShaderCompilerEnvironment&); + static bool ShouldCompilePermutation(const FShaderPermutationParameters& Parameters) + {return true;} +}; + +class FRiveImageMeshPixelShader : public FGlobalShader +{ +public: + + DECLARE_GLOBAL_SHADER(FRiveImageMeshPixelShader); + SHADER_USE_PARAMETER_STRUCT(FRiveImageMeshPixelShader, FGlobalShader); + + BEGIN_SHADER_PARAMETER_STRUCT(FParameters, ) + SHADER_PARAMETER_STRUCT_REF(FFlushUniforms, FlushUniforms) + SHADER_PARAMETER_STRUCT_REF(FImageDrawUniforms, ImageDrawUniforms) + + SHADER_PARAMETER_TEXTURE(Texture2D, GLSL_gradTexture_raw) + SHADER_PARAMETER_TEXTURE(Texture2D, GLSL_imageTexture_raw) + + SHADER_PARAMETER_UAV(Texture2D, coverageCountBuffer) + SHADER_PARAMETER_UAV(Texture2D, clipBuffer) + SHADER_PARAMETER_UAV(Texture2D, colorBuffer) + + SHADER_PARAMETER_SAMPLER(SamplerState, gradSampler) + SHADER_PARAMETER_SAMPLER(SamplerState, imageSampler) + + SHADER_PARAMETER_SRV(Buffer, GLSL_paintBuffer_raw) + SHADER_PARAMETER_SRV(Buffer, GLSL_paintAuxBuffer_raw) + END_SHADER_PARAMETER_STRUCT() + + USE_ATOMIC_PIXEL_PERMUTATIONS + + static void ModifyCompilationEnvironment(const FShaderPermutationParameters&, FShaderCompilerEnvironment&); + static bool ShouldCompilePermutation(const FShaderPermutationParameters& Parameters) + {return true;} +}; + +class FRiveImageMeshVertexShader : public FGlobalShader +{ +public: + + DECLARE_GLOBAL_SHADER( FRiveImageMeshVertexShader); + SHADER_USE_PARAMETER_STRUCT(FRiveImageMeshVertexShader, FGlobalShader); + + BEGIN_SHADER_PARAMETER_STRUCT(FParameters, ) + SHADER_PARAMETER_STRUCT_REF(FFlushUniforms, FlushUniforms) + SHADER_PARAMETER_STRUCT_REF(FImageDrawUniforms, ImageDrawUniforms) + END_SHADER_PARAMETER_STRUCT() + + USE_ATOMIC_VERTEX_PERMUTATIONS + + static void ModifyCompilationEnvironment(const FShaderPermutationParameters&, FShaderCompilerEnvironment&); + static bool ShouldCompilePermutation(const FShaderPermutationParameters& Parameters) + {return true;} +}; + +class FRiveAtomiResolvePixelShader : public FGlobalShader +{ +public: + DECLARE_GLOBAL_SHADER( FRiveAtomiResolvePixelShader); + SHADER_USE_PARAMETER_STRUCT(FRiveAtomiResolvePixelShader, FGlobalShader); + + BEGIN_SHADER_PARAMETER_STRUCT(FParameters, ) + + SHADER_PARAMETER_TEXTURE(Texture2D, GLSL_gradTexture_raw) + SHADER_PARAMETER_SAMPLER(SamplerState, gradSampler) + SHADER_PARAMETER_SRV(Buffer, GLSL_paintBuffer_raw) + SHADER_PARAMETER_SRV(Buffer, GLSL_paintAuxBuffer_raw) + SHADER_PARAMETER_UAV(Texture2D, coverageCountBuffer) + SHADER_PARAMETER_UAV(Texture2D, colorBuffer) + SHADER_PARAMETER_UAV(Texture2D, clipBuffer) + END_SHADER_PARAMETER_STRUCT() + + USE_ATOMIC_PIXEL_PERMUTATIONS + + static void ModifyCompilationEnvironment(const FShaderPermutationParameters&, FShaderCompilerEnvironment&); +}; + +class FRiveAtomiResolveVertexShader : public FGlobalShader +{ +public: + DECLARE_GLOBAL_SHADER( FRiveAtomiResolveVertexShader); + SHADER_USE_PARAMETER_STRUCT(FRiveAtomiResolveVertexShader, FGlobalShader); + + BEGIN_SHADER_PARAMETER_STRUCT(FParameters, 
) + SHADER_PARAMETER_STRUCT_REF(FFlushUniforms, FlushUniforms) + END_SHADER_PARAMETER_STRUCT() + + USE_ATOMIC_VERTEX_PERMUTATIONS + + static void ModifyCompilationEnvironment(const FShaderPermutationParameters&, FShaderCompilerEnvironment&); + static bool ShouldCompilePermutation(const FShaderPermutationParameters& Parameters) + {return true;} +}; diff --git a/Source/RiveRenderer/Public/IRiveRenderTarget.h b/Source/RiveRenderer/Public/IRiveRenderTarget.h index a4b458d1..fb996603 100644 --- a/Source/RiveRenderer/Public/IRiveRenderTarget.h +++ b/Source/RiveRenderer/Public/IRiveRenderTarget.h @@ -17,9 +17,16 @@ namespace rive { class Artboard; class Renderer; + + namespace renderer + { + class rendererRenderContext; + } } #endif // WITH_RIVE + +using RiveRenderFunction=TUniqueFunction; class IRiveRenderTarget : public TSharedFromThis<IRiveRenderTarget> { /** @@ -47,6 +54,7 @@ class IRiveRenderTarget : public TSharedFromThis<IRiveRenderTarget> virtual void Draw(rive::Artboard* InArtboard) = 0; virtual void Align(const FBox2f& InBox, ERiveFitType InFit, const FVector2f& InAlignment, rive::Artboard* InArtboard) = 0; virtual void Align(ERiveFitType InFit, const FVector2f& InAlignment, rive::Artboard* InArtboard) = 0; + virtual void RegisterRenderCommand(RiveRenderFunction RenderFunction) =0; /** Returns the transformation Matrix from the start of the Render Queue up to now */ virtual FMatrix GetTransformMatrix() const = 0; diff --git a/Source/RiveRenderer/Public/IRiveRendererModule.h b/Source/RiveRenderer/Public/IRiveRendererModule.h index 89bb0be8..65a184a1 100644 --- a/Source/RiveRenderer/Public/IRiveRendererModule.h +++ b/Source/RiveRenderer/Public/IRiveRendererModule.h @@ -35,7 +35,7 @@ class IRiveRendererModule : public IModuleInterface return FModuleManager::Get().IsModuleLoaded(ModuleName); } - virtual IRiveRenderer* GetRenderer() = 0; + virtual class IRiveRenderer* GetRenderer() = 0; virtual void CallOrRegister_OnRendererInitialized(FSimpleMulticastDelegate::FDelegate&& Delegate) = 0; diff --git a/Source/RiveRenderer/Public/RiveRendererSettings.h b/Source/RiveRenderer/Public/RiveRendererSettings.h new file mode 100644 index 00000000..9a376d6a --- /dev/null +++ b/Source/RiveRenderer/Public/RiveRendererSettings.h @@ -0,0 +1,26 @@ +// Fill out your copyright notice in the Description page of Project Settings.
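The RegisterRenderCommand() hook above lets game code enqueue arbitrary RHI work into a render target's command queue, and the new developer settings class that follows gates the experimental RHI path. A minimal usage sketch; the template argument of RiveRenderFunction was lost in this listing, so a void(FRHICommandListImmediate&) signature is assumed here purely for illustration:

#include "IRiveRenderTarget.h"
#include "RiveRendererSettings.h"
#include "RHICommandList.h"

void EnqueueTechPreviewDraw(const TSharedRef<IRiveRenderTarget>& RenderTarget)
{
    // Only exercise the experimental path when the project has opted in via
    // Project Settings -> Rive -> Enable RHI Technical Preview.
    if (!GetDefault<URiveRendererSettings>()->bEnableRHITechPreview)
    {
        return;
    }

    // Assumed signature: the function runs on the render thread when the
    // target's render queue is flushed.
    RenderTarget->RegisterRenderCommand([](FRHICommandListImmediate& RHICmdList)
    {
        // Issue custom RHI work against this target here.
    });
}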
+ +#pragma once + +#include "CoreMinimal.h" +#include "Engine/DeveloperSettings.h" +#include "RiveRendererSettings.generated.h" + +/** + * + */ +UCLASS(Config=Engine, DefaultConfig) +class RIVERENDERER_API URiveRendererSettings : public UDeveloperSettings +{ + GENERATED_BODY() +public: + /** + * + */ + URiveRendererSettings(); + + UPROPERTY(EditAnywhere, config, Category = "Rive Experimental Settings", DisplayName="Enable RHI Technical Preview") + bool bEnableRHITechPreview; + + virtual FName GetCategoryName() const override { return FName(TEXT("Rive")); } +}; diff --git a/Source/RiveRenderer/RiveRenderer.Build.cs b/Source/RiveRenderer/RiveRenderer.Build.cs index 1766e4e9..89282fda 100644 --- a/Source/RiveRenderer/RiveRenderer.Build.cs +++ b/Source/RiveRenderer/RiveRenderer.Build.cs @@ -27,7 +27,7 @@ public RiveRenderer(ReadOnlyTargetRules Target) : base(Target) PublicDependencyModuleNames.AddRange( new string[] { - "Core" + "Core", "RiveLibrary" } ); @@ -36,11 +36,13 @@ public RiveRenderer(ReadOnlyTargetRules Target) : base(Target) new string[] { "CoreUObject", + "DeveloperSettings", "Engine", "RHI", "RenderCore", "Renderer", "RiveLibrary", + "ImageWrapper" } ); diff --git a/Source/ThirdParty/RiveLibrary/Includes/rive/animation/transition_comparator.hpp b/Source/ThirdParty/RiveLibrary/Includes/rive/animation/transition_comparator.hpp index 38aab796..d28deeeb 100644 --- a/Source/ThirdParty/RiveLibrary/Includes/rive/animation/transition_comparator.hpp +++ b/Source/ThirdParty/RiveLibrary/Includes/rive/animation/transition_comparator.hpp @@ -22,6 +22,7 @@ class TransitionComparator : public TransitionComparatorBase bool compareEnums(uint16_t left, uint16_t right, TransitionConditionOp op); bool compareColors(int left, int right, TransitionConditionOp op); bool compareStrings(std::string left, std::string right, TransitionConditionOp op); + bool compareTriggers(uint32_t left, uint32_t right, TransitionConditionOp op); }; } // namespace rive diff --git a/Source/ThirdParty/RiveLibrary/Includes/rive/animation/transition_value_trigger_comparator.hpp b/Source/ThirdParty/RiveLibrary/Includes/rive/animation/transition_value_trigger_comparator.hpp new file mode 100644 index 00000000..3140600b --- /dev/null +++ b/Source/ThirdParty/RiveLibrary/Includes/rive/animation/transition_value_trigger_comparator.hpp @@ -0,0 +1,13 @@ +#ifndef _RIVE_TRANSITION_VALUE_TRIGGER_COMPARATOR_HPP_ +#define _RIVE_TRANSITION_VALUE_TRIGGER_COMPARATOR_HPP_ +#include "rive/generated/animation/transition_value_trigger_comparator_base.hpp" +#include +namespace rive +{ +class TransitionValueTriggerComparator : public TransitionValueTriggerComparatorBase +{ +public: +}; +} // namespace rive + +#endif \ No newline at end of file diff --git a/Source/ThirdParty/RiveLibrary/Includes/rive/data_bind/bindable_property_trigger.hpp b/Source/ThirdParty/RiveLibrary/Includes/rive/data_bind/bindable_property_trigger.hpp new file mode 100644 index 00000000..4bbeb9f9 --- /dev/null +++ b/Source/ThirdParty/RiveLibrary/Includes/rive/data_bind/bindable_property_trigger.hpp @@ -0,0 +1,13 @@ +#ifndef _RIVE_BINDABLE_PROPERTY_TRIGGER_HPP_ +#define _RIVE_BINDABLE_PROPERTY_TRIGGER_HPP_ +#include "rive/generated/data_bind/bindable_property_trigger_base.hpp" +#include +namespace rive +{ +class BindablePropertyTrigger : public BindablePropertyTriggerBase +{ +public: +}; +} // namespace rive + +#endif \ No newline at end of file diff --git a/Source/ThirdParty/RiveLibrary/Includes/rive/data_bind/context/context_value.hpp 
b/Source/ThirdParty/RiveLibrary/Includes/rive/data_bind/context/context_value.hpp index c26730b2..f9362599 100644 --- a/Source/ThirdParty/RiveLibrary/Includes/rive/data_bind/context/context_value.hpp +++ b/Source/ThirdParty/RiveLibrary/Includes/rive/data_bind/context/context_value.hpp @@ -27,7 +27,7 @@ class DataBindContextValue { return dataValue->as<T>()->value(); } - return (new T())->value(); + return T::defaultValue; }; template <typename T, typename U> U getReverseDataValue(DataValue* input) { @@ -36,7 +36,7 @@ class DataBindContextValue { return dataValue->as<T>()->value(); } - return (new T())->value(); + return T::defaultValue; }; template <typename T, typename U> U calculateValue(DataValue* input, bool isMainDirection) diff --git a/Source/ThirdParty/RiveLibrary/Includes/rive/data_bind/context/context_value_trigger.hpp b/Source/ThirdParty/RiveLibrary/Includes/rive/data_bind/context/context_value_trigger.hpp new file mode 100644 index 00000000..f831ab1f --- /dev/null +++ b/Source/ThirdParty/RiveLibrary/Includes/rive/data_bind/context/context_value_trigger.hpp @@ -0,0 +1,16 @@ +#ifndef _RIVE_DATA_BIND_CONTEXT_VALUE_TRIGGER_HPP_ +#define _RIVE_DATA_BIND_CONTEXT_VALUE_TRIGGER_HPP_ +#include "rive/data_bind/context/context_value.hpp" +namespace rive +{ +class DataBindContextValueTrigger : public DataBindContextValue +{ + +public: + DataBindContextValueTrigger(ViewModelInstanceValue* source, DataConverter* converter); + void apply(Core* component, uint32_t propertyKey, bool isMainDirection) override; + DataValue* getTargetValue(Core* target, uint32_t propertyKey) override; +}; +} // namespace rive + +#endif \ No newline at end of file diff --git a/Source/ThirdParty/RiveLibrary/Includes/rive/data_bind/converters/data_converter_trigger.hpp b/Source/ThirdParty/RiveLibrary/Includes/rive/data_bind/converters/data_converter_trigger.hpp new file mode 100644 index 00000000..05ae9eeb --- /dev/null +++ b/Source/ThirdParty/RiveLibrary/Includes/rive/data_bind/converters/data_converter_trigger.hpp @@ -0,0 +1,15 @@ +#ifndef _RIVE_DATA_CONVERTER_TRIGGER_HPP_ +#define _RIVE_DATA_CONVERTER_TRIGGER_HPP_ +#include "rive/generated/data_bind/converters/data_converter_trigger_base.hpp" +#include +namespace rive +{ +class DataConverterTrigger : public DataConverterTriggerBase +{ +public: + DataValue* convert(DataValue* value) override; + DataType outputType() override { return DataType::trigger; }; +}; +} // namespace rive + +#endif \ No newline at end of file diff --git a/Source/ThirdParty/RiveLibrary/Includes/rive/data_bind/data_context.hpp b/Source/ThirdParty/RiveLibrary/Includes/rive/data_bind/data_context.hpp index 5bc65282..30e16886 100644 --- a/Source/ThirdParty/RiveLibrary/Includes/rive/data_bind/data_context.hpp +++ b/Source/ThirdParty/RiveLibrary/Includes/rive/data_bind/data_context.hpp @@ -9,17 +9,13 @@ class DataContext { private: DataContext* m_Parent = nullptr; - std::vector<ViewModelInstance*> m_ViewModelInstances; ViewModelInstance* m_ViewModelInstance; public: - DataContext(); DataContext(ViewModelInstance* viewModelInstance); - ~DataContext(); DataContext* parent() { return m_Parent; } void parent(DataContext* value) { m_Parent = value; } - void addViewModelInstance(ViewModelInstance* value); ViewModelInstanceValue* getViewModelProperty(const std::vector<uint32_t> path) const; ViewModelInstance* getViewModelInstance(const std::vector<uint32_t> path) const; void viewModelInstance(ViewModelInstance* value); diff --git a/Source/ThirdParty/RiveLibrary/Includes/rive/data_bind/data_values/data_type.hpp index
c1740b5d..9c914b7a 100644 --- a/Source/ThirdParty/RiveLibrary/Includes/rive/data_bind/data_values/data_type.hpp +++ b/Source/ThirdParty/RiveLibrary/Includes/rive/data_bind/data_values/data_type.hpp @@ -24,7 +24,10 @@ enum class DataType : unsigned int list = 5, /// Enum. - enumType = 6 + enumType = 6, + + /// Trigger. + trigger = 7 }; } // namespace rive #endif \ No newline at end of file diff --git a/Source/ThirdParty/RiveLibrary/Includes/rive/data_bind/data_values/data_value_boolean.hpp b/Source/ThirdParty/RiveLibrary/Includes/rive/data_bind/data_values/data_value_boolean.hpp index 4fd85750..7c688535 100644 --- a/Source/ThirdParty/RiveLibrary/Includes/rive/data_bind/data_values/data_value_boolean.hpp +++ b/Source/ThirdParty/RiveLibrary/Includes/rive/data_bind/data_values/data_value_boolean.hpp @@ -17,6 +17,7 @@ class DataValueBoolean : public DataValue bool isTypeOf(DataType typeKey) const override { return typeKey == DataType::boolean; } bool value() { return m_value; }; void value(bool value) { m_value = value; }; + static const bool defaultValue = false; }; } // namespace rive diff --git a/Source/ThirdParty/RiveLibrary/Includes/rive/data_bind/data_values/data_value_color.hpp b/Source/ThirdParty/RiveLibrary/Includes/rive/data_bind/data_values/data_value_color.hpp index fdc88b5b..a2cda5ea 100644 --- a/Source/ThirdParty/RiveLibrary/Includes/rive/data_bind/data_values/data_value_color.hpp +++ b/Source/ThirdParty/RiveLibrary/Includes/rive/data_bind/data_values/data_value_color.hpp @@ -8,7 +8,7 @@ namespace rive class DataValueColor : public DataValue { private: - int m_value = false; + int m_value = 0; public: DataValueColor(int value) : m_value(value){}; @@ -17,6 +17,7 @@ class DataValueColor : public DataValue bool isTypeOf(DataType typeKey) const override { return typeKey == DataType::color; } int value() { return m_value; }; void value(int value) { m_value = value; }; + static const int defaultValue = 0; }; } // namespace rive diff --git a/Source/ThirdParty/RiveLibrary/Includes/rive/data_bind/data_values/data_value_enum.hpp b/Source/ThirdParty/RiveLibrary/Includes/rive/data_bind/data_values/data_value_enum.hpp index 9c2dbe5f..4c49fc81 100644 --- a/Source/ThirdParty/RiveLibrary/Includes/rive/data_bind/data_values/data_value_enum.hpp +++ b/Source/ThirdParty/RiveLibrary/Includes/rive/data_bind/data_values/data_value_enum.hpp @@ -21,6 +21,7 @@ class DataValueEnum : public DataValue void value(uint32_t value) { m_value = value; }; DataEnum* dataEnum() { return m_dataEnum; }; void dataEnum(DataEnum* value) { m_dataEnum = value; }; + static const uint32_t defaultValue = 0; }; } // namespace rive #endif \ No newline at end of file diff --git a/Source/ThirdParty/RiveLibrary/Includes/rive/data_bind/data_values/data_value_number.hpp b/Source/ThirdParty/RiveLibrary/Includes/rive/data_bind/data_values/data_value_number.hpp index 90a3b5dd..63b7b759 100644 --- a/Source/ThirdParty/RiveLibrary/Includes/rive/data_bind/data_values/data_value_number.hpp +++ b/Source/ThirdParty/RiveLibrary/Includes/rive/data_bind/data_values/data_value_number.hpp @@ -17,6 +17,7 @@ class DataValueNumber : public DataValue bool isTypeOf(DataType typeKey) const override { return typeKey == DataType::number; } float value() { return m_value; }; void value(float value) { m_value = value; }; + constexpr static const float defaultValue = 0; }; } // namespace rive diff --git a/Source/ThirdParty/RiveLibrary/Includes/rive/data_bind/data_values/data_value_string.hpp 
b/Source/ThirdParty/RiveLibrary/Includes/rive/data_bind/data_values/data_value_string.hpp index 7bf2676c..1369c94d 100644 --- a/Source/ThirdParty/RiveLibrary/Includes/rive/data_bind/data_values/data_value_string.hpp +++ b/Source/ThirdParty/RiveLibrary/Includes/rive/data_bind/data_values/data_value_string.hpp @@ -17,6 +17,7 @@ class DataValueString : public DataValue bool isTypeOf(DataType typeKey) const override { return typeKey == DataType::string; }; std::string value() { return m_value; }; void value(std::string value) { m_value = value; }; + constexpr static const char* defaultValue = ""; }; } // namespace rive #endif \ No newline at end of file diff --git a/Source/ThirdParty/RiveLibrary/Includes/rive/data_bind/data_values/data_value_trigger.hpp b/Source/ThirdParty/RiveLibrary/Includes/rive/data_bind/data_values/data_value_trigger.hpp new file mode 100644 index 00000000..959a453e --- /dev/null +++ b/Source/ThirdParty/RiveLibrary/Includes/rive/data_bind/data_values/data_value_trigger.hpp @@ -0,0 +1,24 @@ +#ifndef _RIVE_DATA_VALUE_TRIGGER_HPP_ +#define _RIVE_DATA_VALUE_TRIGGER_HPP_ +#include "rive/data_bind/data_values/data_value.hpp" + +#include +namespace rive +{ +class DataValueTrigger : public DataValue +{ +private: + uint32_t m_value = 0; + +public: + DataValueTrigger(uint32_t value) : m_value(value){}; + DataValueTrigger(){}; + static const DataType typeKey = DataType::trigger; + bool isTypeOf(DataType typeKey) const override { return typeKey == DataType::trigger; } + uint32_t value() { return m_value; }; + void value(uint32_t value) { m_value = value; }; + constexpr static const uint32_t defaultValue = 0; +}; +} // namespace rive + +#endif \ No newline at end of file diff --git a/Source/ThirdParty/RiveLibrary/Includes/rive/generated/animation/transition_value_trigger_comparator_base.hpp b/Source/ThirdParty/RiveLibrary/Includes/rive/generated/animation/transition_value_trigger_comparator_base.hpp new file mode 100644 index 00000000..43bdb43b --- /dev/null +++ b/Source/ThirdParty/RiveLibrary/Includes/rive/generated/animation/transition_value_trigger_comparator_base.hpp @@ -0,0 +1,72 @@ +#ifndef _RIVE_TRANSITION_VALUE_TRIGGER_COMPARATOR_BASE_HPP_ +#define _RIVE_TRANSITION_VALUE_TRIGGER_COMPARATOR_BASE_HPP_ +#include "rive/animation/transition_value_comparator.hpp" +#include "rive/core/field_types/core_uint_type.hpp" +namespace rive +{ +class TransitionValueTriggerComparatorBase : public TransitionValueComparator +{ +protected: + typedef TransitionValueComparator Super; + +public: + static const uint16_t typeKey = 505; + + /// Helper to quickly determine if a core object extends another without RTTI + /// at runtime. 
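The static defaultValue members added across the data_value_*.hpp headers above pair with the context_value.hpp change earlier in this patch: the old fallback `(new T())->value()` heap-allocated a throwaway object (and leaked it) just to read a default. A simplified model of the templated accessor after the change; the real signature and DataValue's templated is<>/as<> helpers live in rive's headers, and the names here are illustrative:

// Simplified sketch of DataBindContextValue::getDataValue<T, U>() after this
// patch; assumes DataValue exposes templated is<>/as<> helpers.
template <typename T, typename U> U getDataValueSketch(rive::DataValue* input)
{
    if (input != nullptr && input->is<T>())
    {
        return input->as<T>()->value();
    }
    // Static default: no allocation, no leak.
    return T::defaultValue; // 0 for DataValueNumber, "" for DataValueString, ...
}

// e.g. float number = getDataValueSketch<rive::DataValueNumber, float>(input);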
+ bool isTypeOf(uint16_t typeKey) const override + { + switch (typeKey) + { + case TransitionValueTriggerComparatorBase::typeKey: + case TransitionValueComparatorBase::typeKey: + case TransitionComparatorBase::typeKey: + return true; + default: + return false; + } + } + + uint16_t coreType() const override { return typeKey; } + + static const uint16_t valuePropertyKey = 689; + +private: + uint32_t m_Value = 0; + +public: + inline uint32_t value() const { return m_Value; } + void value(uint32_t value) + { + if (m_Value == value) + { + return; + } + m_Value = value; + valueChanged(); + } + + Core* clone() const override; + void copy(const TransitionValueTriggerComparatorBase& object) + { + m_Value = object.m_Value; + TransitionValueComparator::copy(object); + } + + bool deserialize(uint16_t propertyKey, BinaryReader& reader) override + { + switch (propertyKey) + { + case valuePropertyKey: + m_Value = CoreUintType::deserialize(reader); + return true; + } + return TransitionValueComparator::deserialize(propertyKey, reader); + } + +protected: + virtual void valueChanged() {} +}; +} // namespace rive + +#endif \ No newline at end of file diff --git a/Source/ThirdParty/RiveLibrary/Includes/rive/generated/core_registry.hpp b/Source/ThirdParty/RiveLibrary/Includes/rive/generated/core_registry.hpp index 5e3b2275..736d758e 100644 --- a/Source/ThirdParty/RiveLibrary/Includes/rive/generated/core_registry.hpp +++ b/Source/ThirdParty/RiveLibrary/Includes/rive/generated/core_registry.hpp @@ -76,6 +76,7 @@ #include "rive/animation/transition_value_enum_comparator.hpp" #include "rive/animation/transition_value_number_comparator.hpp" #include "rive/animation/transition_value_string_comparator.hpp" +#include "rive/animation/transition_value_trigger_comparator.hpp" #include "rive/animation/transition_viewmodel_condition.hpp" #include "rive/artboard.hpp" #include "rive/assets/asset.hpp" @@ -120,12 +121,14 @@ #include "rive/data_bind/bindable_property_enum.hpp" #include "rive/data_bind/bindable_property_number.hpp" #include "rive/data_bind/bindable_property_string.hpp" +#include "rive/data_bind/bindable_property_trigger.hpp" #include "rive/data_bind/converters/data_converter.hpp" #include "rive/data_bind/converters/data_converter_group.hpp" #include "rive/data_bind/converters/data_converter_group_item.hpp" #include "rive/data_bind/converters/data_converter_operation.hpp" #include "rive/data_bind/converters/data_converter_rounder.hpp" #include "rive/data_bind/converters/data_converter_to_string.hpp" +#include "rive/data_bind/converters/data_converter_trigger.hpp" #include "rive/data_bind/data_bind.hpp" #include "rive/data_bind/data_bind_context.hpp" #include "rive/draw_rules.hpp" @@ -199,6 +202,7 @@ #include "rive/viewmodel/viewmodel_instance_list_item.hpp" #include "rive/viewmodel/viewmodel_instance_number.hpp" #include "rive/viewmodel/viewmodel_instance_string.hpp" +#include "rive/viewmodel/viewmodel_instance_trigger.hpp" #include "rive/viewmodel/viewmodel_instance_value.hpp" #include "rive/viewmodel/viewmodel_instance_viewmodel.hpp" #include "rive/viewmodel/viewmodel_property.hpp" @@ -208,6 +212,7 @@ #include "rive/viewmodel/viewmodel_property_list.hpp" #include "rive/viewmodel/viewmodel_property_number.hpp" #include "rive/viewmodel/viewmodel_property_string.hpp" +#include "rive/viewmodel/viewmodel_property_trigger.hpp" #include "rive/viewmodel/viewmodel_property_viewmodel.hpp" #include "rive/world_transform_component.hpp" namespace rive @@ -255,10 +260,14 @@ class CoreRegistry return new 
ViewModelInstanceList(); case ViewModelInstanceNumberBase::typeKey: return new ViewModelInstanceNumber(); + case ViewModelInstanceTriggerBase::typeKey: + return new ViewModelInstanceTrigger(); case ViewModelPropertyStringBase::typeKey: return new ViewModelPropertyString(); case ViewModelInstanceViewModelBase::typeKey: return new ViewModelInstanceViewModel(); + case ViewModelPropertyTriggerBase::typeKey: + return new ViewModelPropertyTrigger(); case DataEnumValueBase::typeKey: return new DataEnumValue(); case DrawTargetBase::typeKey: @@ -299,6 +308,8 @@ class CoreRegistry return new NSlicer(); case ListenerFireEventBase::typeKey: return new ListenerFireEvent(); + case TransitionValueTriggerComparatorBase::typeKey: + return new TransitionValueTriggerComparator(); case KeyFrameUintBase::typeKey: return new KeyFrameUint(); case NestedSimpleAnimationBase::typeKey: @@ -471,6 +482,8 @@ class CoreRegistry return new Backboard(); case OpenUrlEventBase::typeKey: return new OpenUrlEvent(); + case BindablePropertyTriggerBase::typeKey: + return new BindablePropertyTrigger(); case BindablePropertyBooleanBase::typeKey: return new BindablePropertyBoolean(); case DataBindBase::typeKey: @@ -481,6 +494,8 @@ class CoreRegistry return new DataConverterGroup(); case DataConverterRounderBase::typeKey: return new DataConverterRounder(); + case DataConverterTriggerBase::typeKey: + return new DataConverterTrigger(); case DataConverterOperationBase::typeKey: return new DataConverterOperation(); case DataConverterToStringBase::typeKey: @@ -678,6 +693,9 @@ class CoreRegistry case ViewModelPropertyEnumBase::enumIdPropertyKey: object->as<ViewModelPropertyEnumBase>()->enumId(value); break; + case ViewModelInstanceTriggerBase::propertyValuePropertyKey: + object->as<ViewModelInstanceTriggerBase>()->propertyValue(value); + break; case ViewModelInstanceViewModelBase::propertyValuePropertyKey: object->as<ViewModelInstanceViewModelBase>()->propertyValue(value); break; @@ -864,6 +882,9 @@ class CoreRegistry case LayerStateBase::flagsPropertyKey: object->as<LayerStateBase>()->flags(value); break; + case TransitionValueTriggerComparatorBase::valuePropertyKey: + object->as<TransitionValueTriggerComparatorBase>()->value(value); + break; case KeyFrameBase::framePropertyKey: object->as<KeyFrameBase>()->frame(value); break; @@ -1047,6 +1068,9 @@ class CoreRegistry case OpenUrlEventBase::targetValuePropertyKey: object->as<OpenUrlEventBase>()->targetValue(value); break; + case BindablePropertyTriggerBase::propertyValuePropertyKey: + object->as<BindablePropertyTriggerBase>()->propertyValue(value); + break; case DataBindBase::propertyKeyPropertyKey: object->as<DataBindBase>()->propertyKey(value); break; @@ -1137,6 +1161,9 @@ class CoreRegistry case TextBase::wrapValuePropertyKey: object->as<TextBase>()->wrapValue(value); break; + case TextBase::verticalAlignValuePropertyKey: + object->as<TextBase>()->verticalAlignValue(value); + break; case TextValueRunBase::styleIdPropertyKey: object->as<TextValueRunBase>()->styleId(value); break; @@ -1854,6 +1881,8 @@ class CoreRegistry return object->as<ViewModelInstanceBase>()->viewModelId(); case ViewModelPropertyEnumBase::enumIdPropertyKey: return object->as<ViewModelPropertyEnumBase>()->enumId(); + case ViewModelInstanceTriggerBase::propertyValuePropertyKey: + return object->as<ViewModelInstanceTriggerBase>()->propertyValue(); case ViewModelInstanceViewModelBase::propertyValuePropertyKey: return object->as<ViewModelInstanceViewModelBase>()->propertyValue(); case DrawTargetBase::drawableIdPropertyKey: @@ -1978,6 +2007,8 @@ return object->as<ListenerFireEventBase>()->eventId(); case LayerStateBase::flagsPropertyKey: return object->as<LayerStateBase>()->flags(); + case TransitionValueTriggerComparatorBase::valuePropertyKey: + return object->as<TransitionValueTriggerComparatorBase>()->value(); case KeyFrameBase::framePropertyKey: return object->as<KeyFrameBase>()->frame(); case InterpolatingKeyFrameBase::interpolationTypePropertyKey:
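With the new switch cases above in place, a trigger fire can be routed through the registry like any other uint property. A usage sketch; the setUint entry point follows the pattern of the registry's existing setters and is shown for illustration, as the runtime's actual trigger entry points may differ:

#include "rive/generated/core_registry.hpp"
#include "rive/viewmodel/viewmodel_instance_trigger.hpp"

// Sketch: firing a view-model trigger through the registry's uint dispatch.
// Bumping the propertyValue counter lets bound listeners observe a change.
void fireTriggerSketch(rive::ViewModelInstanceTrigger* trigger)
{
    uint32_t next = trigger->propertyValue() + 1;
    rive::CoreRegistry::setUint(
        trigger,
        rive::ViewModelInstanceTriggerBase::propertyValuePropertyKey,
        next);
}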
@@ -2100,6 +2131,8 @@ return object->as<JoystickBase>()->handleSourceId(); case OpenUrlEventBase::targetValuePropertyKey: return object->as<OpenUrlEventBase>()->targetValue(); + case BindablePropertyTriggerBase::propertyValuePropertyKey: + return object->as<BindablePropertyTriggerBase>()->propertyValue(); case DataBindBase::propertyKeyPropertyKey: return object->as<DataBindBase>()->propertyKey(); case DataBindBase::flagsPropertyKey: @@ -2160,6 +2193,8 @@ return object->as<TextBase>()->originValue(); case TextBase::wrapValuePropertyKey: return object->as<TextBase>()->wrapValue(); + case TextBase::verticalAlignValuePropertyKey: + return object->as<TextBase>()->verticalAlignValue(); case TextValueRunBase::styleIdPropertyKey: return object->as<TextValueRunBase>()->styleId(); case FileAssetBase::assetIdPropertyKey: @@ -2625,6 +2660,7 @@ class CoreRegistry case ComponentBase::parentIdPropertyKey: case ViewModelInstanceBase::viewModelIdPropertyKey: case ViewModelPropertyEnumBase::enumIdPropertyKey: + case ViewModelInstanceTriggerBase::propertyValuePropertyKey: case ViewModelInstanceViewModelBase::propertyValuePropertyKey: case DrawTargetBase::drawableIdPropertyKey: case DrawTargetBase::placementValuePropertyKey: @@ -2687,6 +2723,7 @@ class CoreRegistry case LayoutComponentStyleBase::maxHeightUnitsValuePropertyKey: case ListenerFireEventBase::eventIdPropertyKey: case LayerStateBase::flagsPropertyKey: + case TransitionValueTriggerComparatorBase::valuePropertyKey: case KeyFrameBase::framePropertyKey: case InterpolatingKeyFrameBase::interpolationTypePropertyKey: case InterpolatingKeyFrameBase::interpolatorIdPropertyKey: @@ -2748,6 +2785,7 @@ class CoreRegistry case JoystickBase::joystickFlagsPropertyKey: case JoystickBase::handleSourceIdPropertyKey: case OpenUrlEventBase::targetValuePropertyKey: + case BindablePropertyTriggerBase::propertyValuePropertyKey: case DataBindBase::propertyKeyPropertyKey: case DataBindBase::flagsPropertyKey: case DataBindBase::converterIdPropertyKey: @@ -2778,6 +2816,7 @@ class CoreRegistry case TextBase::overflowValuePropertyKey: case TextBase::originValuePropertyKey: case TextBase::wrapValuePropertyKey: + case TextBase::verticalAlignValuePropertyKey: case TextValueRunBase::styleIdPropertyKey: case FileAssetBase::assetIdPropertyKey: case AudioEventBase::assetIdPropertyKey: @@ -3089,6 +3128,8 @@ return object->is<ViewModelInstanceBase>(); case ViewModelPropertyEnumBase::enumIdPropertyKey: return object->is<ViewModelPropertyEnumBase>(); + case ViewModelInstanceTriggerBase::propertyValuePropertyKey: + return object->is<ViewModelInstanceTriggerBase>(); case ViewModelInstanceViewModelBase::propertyValuePropertyKey: return object->is<ViewModelInstanceViewModelBase>(); case DrawTargetBase::drawableIdPropertyKey: @@ -3213,6 +3254,8 @@ return object->is<ListenerFireEventBase>(); case LayerStateBase::flagsPropertyKey: return object->is<LayerStateBase>(); + case TransitionValueTriggerComparatorBase::valuePropertyKey: + return object->is<TransitionValueTriggerComparatorBase>(); case KeyFrameBase::framePropertyKey: return object->is<KeyFrameBase>(); case InterpolatingKeyFrameBase::interpolationTypePropertyKey: @@ -3335,6 +3378,8 @@ return object->is<JoystickBase>(); case OpenUrlEventBase::targetValuePropertyKey: return object->is<OpenUrlEventBase>(); + case BindablePropertyTriggerBase::propertyValuePropertyKey: + return object->is<BindablePropertyTriggerBase>(); case DataBindBase::propertyKeyPropertyKey: return object->is<DataBindBase>(); case DataBindBase::flagsPropertyKey: @@ -3395,6 +3440,8 @@ return object->is<TextBase>(); case TextBase::wrapValuePropertyKey: return object->is<TextBase>(); + case TextBase::verticalAlignValuePropertyKey: + return object->is<TextBase>(); case TextValueRunBase::styleIdPropertyKey: return object->is<TextValueRunBase>(); case FileAssetBase::assetIdPropertyKey: diff --git
a/Source/ThirdParty/RiveLibrary/Includes/rive/generated/data_bind/bindable_property_trigger_base.hpp b/Source/ThirdParty/RiveLibrary/Includes/rive/generated/data_bind/bindable_property_trigger_base.hpp new file mode 100644 index 00000000..f5e1b3a5 --- /dev/null +++ b/Source/ThirdParty/RiveLibrary/Includes/rive/generated/data_bind/bindable_property_trigger_base.hpp @@ -0,0 +1,71 @@ +#ifndef _RIVE_BINDABLE_PROPERTY_TRIGGER_BASE_HPP_ +#define _RIVE_BINDABLE_PROPERTY_TRIGGER_BASE_HPP_ +#include "rive/core/field_types/core_uint_type.hpp" +#include "rive/data_bind/bindable_property.hpp" +namespace rive +{ +class BindablePropertyTriggerBase : public BindableProperty +{ +protected: + typedef BindableProperty Super; + +public: + static const uint16_t typeKey = 503; + + /// Helper to quickly determine if a core object extends another without RTTI + /// at runtime. + bool isTypeOf(uint16_t typeKey) const override + { + switch (typeKey) + { + case BindablePropertyTriggerBase::typeKey: + case BindablePropertyBase::typeKey: + return true; + default: + return false; + } + } + + uint16_t coreType() const override { return typeKey; } + + static const uint16_t propertyValuePropertyKey = 686; + +private: + uint32_t m_PropertyValue = 0; + +public: + inline uint32_t propertyValue() const { return m_PropertyValue; } + void propertyValue(uint32_t value) + { + if (m_PropertyValue == value) + { + return; + } + m_PropertyValue = value; + propertyValueChanged(); + } + + Core* clone() const override; + void copy(const BindablePropertyTriggerBase& object) + { + m_PropertyValue = object.m_PropertyValue; + BindableProperty::copy(object); + } + + bool deserialize(uint16_t propertyKey, BinaryReader& reader) override + { + switch (propertyKey) + { + case propertyValuePropertyKey: + m_PropertyValue = CoreUintType::deserialize(reader); + return true; + } + return BindableProperty::deserialize(propertyKey, reader); + } + +protected: + virtual void propertyValueChanged() {} +}; +} // namespace rive + +#endif \ No newline at end of file diff --git a/Source/ThirdParty/RiveLibrary/Includes/rive/generated/data_bind/converters/data_converter_trigger_base.hpp b/Source/ThirdParty/RiveLibrary/Includes/rive/generated/data_bind/converters/data_converter_trigger_base.hpp new file mode 100644 index 00000000..a9cc60cf --- /dev/null +++ b/Source/ThirdParty/RiveLibrary/Includes/rive/generated/data_bind/converters/data_converter_trigger_base.hpp @@ -0,0 +1,36 @@ +#ifndef _RIVE_DATA_CONVERTER_TRIGGER_BASE_HPP_ +#define _RIVE_DATA_CONVERTER_TRIGGER_BASE_HPP_ +#include "rive/data_bind/converters/data_converter.hpp" +namespace rive +{ +class DataConverterTriggerBase : public DataConverter +{ +protected: + typedef DataConverter Super; + +public: + static const uint16_t typeKey = 504; + + /// Helper to quickly determine if a core object extends another without RTTI + /// at runtime. 
+ bool isTypeOf(uint16_t typeKey) const override + { + switch (typeKey) + { + case DataConverterTriggerBase::typeKey: + case DataConverterBase::typeKey: + return true; + default: + return false; + } + } + + uint16_t coreType() const override { return typeKey; } + + Core* clone() const override; + +protected: +}; +} // namespace rive + +#endif \ No newline at end of file diff --git a/Source/ThirdParty/RiveLibrary/Includes/rive/generated/text/text_base.hpp b/Source/ThirdParty/RiveLibrary/Includes/rive/generated/text/text_base.hpp index 0147f30b..da7ca721 100644 --- a/Source/ThirdParty/RiveLibrary/Includes/rive/generated/text/text_base.hpp +++ b/Source/ThirdParty/RiveLibrary/Includes/rive/generated/text/text_base.hpp @@ -44,6 +44,7 @@ class TextBase : public Drawable static const uint16_t paragraphSpacingPropertyKey = 371; static const uint16_t originValuePropertyKey = 377; static const uint16_t wrapValuePropertyKey = 683; + static const uint16_t verticalAlignValuePropertyKey = 685; private: uint32_t m_AlignValue = 0; @@ -56,6 +57,7 @@ class TextBase : public Drawable float m_ParagraphSpacing = 0.0f; uint32_t m_OriginValue = 0; uint32_t m_WrapValue = 0; + uint32_t m_VerticalAlignValue = 0; public: inline uint32_t alignValue() const { return m_AlignValue; } @@ -168,6 +170,17 @@ class TextBase : public Drawable wrapValueChanged(); } + inline uint32_t verticalAlignValue() const { return m_VerticalAlignValue; } + void verticalAlignValue(uint32_t value) + { + if (m_VerticalAlignValue == value) + { + return; + } + m_VerticalAlignValue = value; + verticalAlignValueChanged(); + } + Core* clone() const override; void copy(const TextBase& object) { @@ -181,6 +194,7 @@ class TextBase : public Drawable m_ParagraphSpacing = object.m_ParagraphSpacing; m_OriginValue = object.m_OriginValue; m_WrapValue = object.m_WrapValue; + m_VerticalAlignValue = object.m_VerticalAlignValue; Drawable::copy(object); } @@ -218,6 +232,9 @@ class TextBase : public Drawable case wrapValuePropertyKey: m_WrapValue = CoreUintType::deserialize(reader); return true; + case verticalAlignValuePropertyKey: + m_VerticalAlignValue = CoreUintType::deserialize(reader); + return true; } return Drawable::deserialize(propertyKey, reader); } @@ -233,6 +250,7 @@ class TextBase : public Drawable virtual void paragraphSpacingChanged() {} virtual void originValueChanged() {} virtual void wrapValueChanged() {} + virtual void verticalAlignValueChanged() {} }; } // namespace rive diff --git a/Source/ThirdParty/RiveLibrary/Includes/rive/generated/viewmodel/viewmodel_instance_trigger_base.hpp b/Source/ThirdParty/RiveLibrary/Includes/rive/generated/viewmodel/viewmodel_instance_trigger_base.hpp new file mode 100644 index 00000000..35737ea1 --- /dev/null +++ b/Source/ThirdParty/RiveLibrary/Includes/rive/generated/viewmodel/viewmodel_instance_trigger_base.hpp @@ -0,0 +1,71 @@ +#ifndef _RIVE_VIEW_MODEL_INSTANCE_TRIGGER_BASE_HPP_ +#define _RIVE_VIEW_MODEL_INSTANCE_TRIGGER_BASE_HPP_ +#include "rive/core/field_types/core_uint_type.hpp" +#include "rive/viewmodel/viewmodel_instance_value.hpp" +namespace rive +{ +class ViewModelInstanceTriggerBase : public ViewModelInstanceValue +{ +protected: + typedef ViewModelInstanceValue Super; + +public: + static const uint16_t typeKey = 501; + + /// Helper to quickly determine if a core object extends another without RTTI + /// at runtime. 
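For orientation, these generated Base classes all follow the same contract: isTypeOf() walks the ancestor typeKeys (so Core::is<T>() works without RTTI), and deserialize() claims its own property keys before deferring to the superclass. Consuming code can therefore downcast safely, for example against the new TextBase field above (illustrative only, not part of the patch):

#include "rive/generated/text/text_base.hpp"

// Sketch: RTTI-free downcast via the generated typeKey chain.
void setVerticalAlignSketch(rive::Core* object, uint32_t align)
{
    if (object->is<rive::TextBase>()) // isTypeOf(TextBase::typeKey) under the hood
    {
        object->as<rive::TextBase>()->verticalAlignValue(align);
    }
}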
+ bool isTypeOf(uint16_t typeKey) const override + { + switch (typeKey) + { + case ViewModelInstanceTriggerBase::typeKey: + case ViewModelInstanceValueBase::typeKey: + return true; + default: + return false; + } + } + + uint16_t coreType() const override { return typeKey; } + + static const uint16_t propertyValuePropertyKey = 687; + +private: + uint32_t m_PropertyValue = 0; + +public: + inline uint32_t propertyValue() const { return m_PropertyValue; } + void propertyValue(uint32_t value) + { + if (m_PropertyValue == value) + { + return; + } + m_PropertyValue = value; + propertyValueChanged(); + } + + Core* clone() const override; + void copy(const ViewModelInstanceTriggerBase& object) + { + m_PropertyValue = object.m_PropertyValue; + ViewModelInstanceValue::copy(object); + } + + bool deserialize(uint16_t propertyKey, BinaryReader& reader) override + { + switch (propertyKey) + { + case propertyValuePropertyKey: + m_PropertyValue = CoreUintType::deserialize(reader); + return true; + } + return ViewModelInstanceValue::deserialize(propertyKey, reader); + } + +protected: + virtual void propertyValueChanged() {} +}; +} // namespace rive + +#endif \ No newline at end of file diff --git a/Source/ThirdParty/RiveLibrary/Includes/rive/generated/viewmodel/viewmodel_property_trigger_base.hpp b/Source/ThirdParty/RiveLibrary/Includes/rive/generated/viewmodel/viewmodel_property_trigger_base.hpp new file mode 100644 index 00000000..4a0d2014 --- /dev/null +++ b/Source/ThirdParty/RiveLibrary/Includes/rive/generated/viewmodel/viewmodel_property_trigger_base.hpp @@ -0,0 +1,37 @@ +#ifndef _RIVE_VIEW_MODEL_PROPERTY_TRIGGER_BASE_HPP_ +#define _RIVE_VIEW_MODEL_PROPERTY_TRIGGER_BASE_HPP_ +#include "rive/viewmodel/viewmodel_property.hpp" +namespace rive +{ +class ViewModelPropertyTriggerBase : public ViewModelProperty +{ +protected: + typedef ViewModelProperty Super; + +public: + static const uint16_t typeKey = 502; + + /// Helper to quickly determine if a core object extends another without RTTI + /// at runtime. 
+ bool isTypeOf(uint16_t typeKey) const override + { + switch (typeKey) + { + case ViewModelPropertyTriggerBase::typeKey: + case ViewModelPropertyBase::typeKey: + case ViewModelComponentBase::typeKey: + return true; + default: + return false; + } + } + + uint16_t coreType() const override { return typeKey; } + + Core* clone() const override; + +protected: +}; +} // namespace rive + +#endif \ No newline at end of file diff --git a/Source/ThirdParty/RiveLibrary/Includes/rive/math/math_types.hpp b/Source/ThirdParty/RiveLibrary/Includes/rive/math/math_types.hpp index b34adc35..cb21f65b 100644 --- a/Source/ThirdParty/RiveLibrary/Includes/rive/math/math_types.hpp +++ b/Source/ThirdParty/RiveLibrary/Includes/rive/math/math_types.hpp @@ -131,6 +131,10 @@ RIVE_ALWAYS_INLINE static float clamp(float x, float lo, float hi) { return fminf(fmaxf(lo, x), hi); } + +inline float degrees_to_radians(float degrees) { return degrees * PI / 180.0f; } + +RIVE_ALWAYS_INLINE static float degreesToRadians(float degrees) { return degrees * (PI / 180.0f); } } // namespace math template <typename T> T lerp(const T& a, const T& b, float t) { return a + (b - a) * t; } diff --git a/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/.makecommand b/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/.makecommand new file mode 100644 index 00000000..01a3fe72 --- /dev/null +++ b/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/.makecommand @@ -0,0 +1 @@ +make -C C:/Git/rive/packages/runtime/renderer/src/shaders -j22 OUT=C:/Git/rive/packages/runtime/renderer/out/debug/include/generated/shaders FLAGS="--human-readable --minified-extension=ush" diff --git a/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/advanced_blend.exports.h b/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/advanced_blend.exports.h new file mode 100644 index 00000000..2d88d890 --- /dev/null +++ b/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/advanced_blend.exports.h @@ -0,0 +1,178 @@ +#pragma once + +#define GLSL_CLEAR_CLIP "_EXPORTED_CLEAR_CLIP" +#define GLSL_CLEAR_CLIP_raw _EXPORTED_CLEAR_CLIP +#define GLSL_CLEAR_COLOR "_EXPORTED_CLEAR_COLOR" +#define GLSL_CLEAR_COLOR_raw _EXPORTED_CLEAR_COLOR +#define GLSL_CLEAR_COVERAGE "_EXPORTED_CLEAR_COVERAGE" +#define GLSL_CLEAR_COVERAGE_raw _EXPORTED_CLEAR_COVERAGE +#define GLSL_COALESCED_PLS_RESOLVE_AND_TRANSFER "_EXPORTED_COALESCED_PLS_RESOLVE_AND_TRANSFER" +#define GLSL_COALESCED_PLS_RESOLVE_AND_TRANSFER_raw _EXPORTED_COALESCED_PLS_RESOLVE_AND_TRANSFER +#define GLSL_COLOR_PLANE_IDX_OVERRIDE "_EXPORTED_COLOR_PLANE_IDX_OVERRIDE" +#define GLSL_COLOR_PLANE_IDX_OVERRIDE_raw _EXPORTED_COLOR_PLANE_IDX_OVERRIDE +#define GLSL_DISABLE_SHADER_STORAGE_BUFFERS "_EXPORTED_DISABLE_SHADER_STORAGE_BUFFERS" +#define GLSL_DISABLE_SHADER_STORAGE_BUFFERS_raw _EXPORTED_DISABLE_SHADER_STORAGE_BUFFERS +#define GLSL_DRAW_IMAGE "_EXPORTED_DRAW_IMAGE" +#define GLSL_DRAW_IMAGE_raw _EXPORTED_DRAW_IMAGE +#define GLSL_DRAW_IMAGE_MESH "_EXPORTED_DRAW_IMAGE_MESH" +#define GLSL_DRAW_IMAGE_MESH_raw _EXPORTED_DRAW_IMAGE_MESH +#define GLSL_DRAW_IMAGE_RECT "_EXPORTED_DRAW_IMAGE_RECT" +#define GLSL_DRAW_IMAGE_RECT_raw _EXPORTED_DRAW_IMAGE_RECT +#define GLSL_DRAW_INTERIOR_TRIANGLES "_EXPORTED_DRAW_INTERIOR_TRIANGLES" +#define GLSL_DRAW_INTERIOR_TRIANGLES_raw _EXPORTED_DRAW_INTERIOR_TRIANGLES +#define GLSL_DRAW_PATH "_EXPORTED_DRAW_PATH" +#define GLSL_DRAW_PATH_raw _EXPORTED_DRAW_PATH +#define
GLSL_DRAW_RENDER_TARGET_UPDATE_BOUNDS "_EXPORTED_DRAW_RENDER_TARGET_UPDATE_BOUNDS" +#define GLSL_DRAW_RENDER_TARGET_UPDATE_BOUNDS_raw _EXPORTED_DRAW_RENDER_TARGET_UPDATE_BOUNDS +#define GLSL_ENABLE_ADVANCED_BLEND "_EXPORTED_ENABLE_ADVANCED_BLEND" +#define GLSL_ENABLE_ADVANCED_BLEND_raw _EXPORTED_ENABLE_ADVANCED_BLEND +#define GLSL_ENABLE_BINDLESS_TEXTURES "_EXPORTED_ENABLE_BINDLESS_TEXTURES" +#define GLSL_ENABLE_BINDLESS_TEXTURES_raw _EXPORTED_ENABLE_BINDLESS_TEXTURES +#define GLSL_ENABLE_CLIPPING "_EXPORTED_ENABLE_CLIPPING" +#define GLSL_ENABLE_CLIPPING_raw _EXPORTED_ENABLE_CLIPPING +#define GLSL_ENABLE_CLIP_RECT "_EXPORTED_ENABLE_CLIP_RECT" +#define GLSL_ENABLE_CLIP_RECT_raw _EXPORTED_ENABLE_CLIP_RECT +#define GLSL_ENABLE_EVEN_ODD "_EXPORTED_ENABLE_EVEN_ODD" +#define GLSL_ENABLE_EVEN_ODD_raw _EXPORTED_ENABLE_EVEN_ODD +#define GLSL_ENABLE_HSL_BLEND_MODES "_EXPORTED_ENABLE_HSL_BLEND_MODES" +#define GLSL_ENABLE_HSL_BLEND_MODES_raw _EXPORTED_ENABLE_HSL_BLEND_MODES +#define GLSL_ENABLE_INSTANCE_INDEX "_EXPORTED_ENABLE_INSTANCE_INDEX" +#define GLSL_ENABLE_INSTANCE_INDEX_raw _EXPORTED_ENABLE_INSTANCE_INDEX +#define GLSL_ENABLE_KHR_BLEND "_EXPORTED_ENABLE_KHR_BLEND" +#define GLSL_ENABLE_KHR_BLEND_raw _EXPORTED_ENABLE_KHR_BLEND +#define GLSL_ENABLE_MIN_16_PRECISION "_EXPORTED_ENABLE_MIN_16_PRECISION" +#define GLSL_ENABLE_MIN_16_PRECISION_raw _EXPORTED_ENABLE_MIN_16_PRECISION +#define GLSL_ENABLE_NESTED_CLIPPING "_EXPORTED_ENABLE_NESTED_CLIPPING" +#define GLSL_ENABLE_NESTED_CLIPPING_raw _EXPORTED_ENABLE_NESTED_CLIPPING +#define GLSL_ENABLE_RASTERIZER_ORDERED_VIEWS "_EXPORTED_ENABLE_RASTERIZER_ORDERED_VIEWS" +#define GLSL_ENABLE_RASTERIZER_ORDERED_VIEWS_raw _EXPORTED_ENABLE_RASTERIZER_ORDERED_VIEWS +#define GLSL_ENABLE_SPIRV_CROSS_BASE_INSTANCE "_EXPORTED_ENABLE_SPIRV_CROSS_BASE_INSTANCE" +#define GLSL_ENABLE_SPIRV_CROSS_BASE_INSTANCE_raw _EXPORTED_ENABLE_SPIRV_CROSS_BASE_INSTANCE +#define GLSL_ENABLE_TYPED_UAV_LOAD_STORE "_EXPORTED_ENABLE_TYPED_UAV_LOAD_STORE" +#define GLSL_ENABLE_TYPED_UAV_LOAD_STORE_raw _EXPORTED_ENABLE_TYPED_UAV_LOAD_STORE +#define GLSL_FIXED_FUNCTION_COLOR_BLEND "_EXPORTED_FIXED_FUNCTION_COLOR_BLEND" +#define GLSL_FIXED_FUNCTION_COLOR_BLEND_raw _EXPORTED_FIXED_FUNCTION_COLOR_BLEND +#define GLSL_FRAGMENT "_EXPORTED_FRAGMENT" +#define GLSL_FRAGMENT_raw _EXPORTED_FRAGMENT +#define GLSL_FlushUniforms "_EXPORTED_FlushUniforms" +#define GLSL_FlushUniforms_raw _EXPORTED_FlushUniforms +#define GLSL_GLSL_VERSION "_EXPORTED_GLSL_VERSION" +#define GLSL_GLSL_VERSION_raw _EXPORTED_GLSL_VERSION +#define GLSL_INITIALIZE_PLS "_EXPORTED_INITIALIZE_PLS" +#define GLSL_INITIALIZE_PLS_raw _EXPORTED_INITIALIZE_PLS +#define GLSL_ImageDrawUniforms "_EXPORTED_ImageDrawUniforms" +#define GLSL_ImageDrawUniforms_raw _EXPORTED_ImageDrawUniforms +#define GLSL_LOAD_COLOR "_EXPORTED_LOAD_COLOR" +#define GLSL_LOAD_COLOR_raw _EXPORTED_LOAD_COLOR +#define GLSL_OPTIONALLY_FLAT "_EXPORTED_OPTIONALLY_FLAT" +#define GLSL_OPTIONALLY_FLAT_raw _EXPORTED_OPTIONALLY_FLAT +#define GLSL_PLS_IMPL_ANGLE "_EXPORTED_PLS_IMPL_ANGLE" +#define GLSL_PLS_IMPL_ANGLE_raw _EXPORTED_PLS_IMPL_ANGLE +#define GLSL_PLS_IMPL_DEVICE_BUFFER "_EXPORTED_PLS_IMPL_DEVICE_BUFFER" +#define GLSL_PLS_IMPL_DEVICE_BUFFER_raw _EXPORTED_PLS_IMPL_DEVICE_BUFFER +#define GLSL_PLS_IMPL_DEVICE_BUFFER_RASTER_ORDERED "_EXPORTED_PLS_IMPL_DEVICE_BUFFER_RASTER_ORDERED" +#define GLSL_PLS_IMPL_DEVICE_BUFFER_RASTER_ORDERED_raw _EXPORTED_PLS_IMPL_DEVICE_BUFFER_RASTER_ORDERED +#define GLSL_PLS_IMPL_EXT_NATIVE "_EXPORTED_PLS_IMPL_EXT_NATIVE" +#define 
GLSL_PLS_IMPL_EXT_NATIVE_raw _EXPORTED_PLS_IMPL_EXT_NATIVE +#define GLSL_PLS_IMPL_FRAMEBUFFER_FETCH "_EXPORTED_PLS_IMPL_FRAMEBUFFER_FETCH" +#define GLSL_PLS_IMPL_FRAMEBUFFER_FETCH_raw _EXPORTED_PLS_IMPL_FRAMEBUFFER_FETCH +#define GLSL_PLS_IMPL_NONE "_EXPORTED_PLS_IMPL_NONE" +#define GLSL_PLS_IMPL_NONE_raw _EXPORTED_PLS_IMPL_NONE +#define GLSL_PLS_IMPL_STORAGE_TEXTURE "_EXPORTED_PLS_IMPL_STORAGE_TEXTURE" +#define GLSL_PLS_IMPL_STORAGE_TEXTURE_raw _EXPORTED_PLS_IMPL_STORAGE_TEXTURE +#define GLSL_PLS_IMPL_SUBPASS_LOAD "_EXPORTED_PLS_IMPL_SUBPASS_LOAD" +#define GLSL_PLS_IMPL_SUBPASS_LOAD_raw _EXPORTED_PLS_IMPL_SUBPASS_LOAD +#define GLSL_RESOLVE_PLS "_EXPORTED_RESOLVE_PLS" +#define GLSL_RESOLVE_PLS_raw _EXPORTED_RESOLVE_PLS +#define GLSL_STORE_COLOR "_EXPORTED_STORE_COLOR" +#define GLSL_STORE_COLOR_raw _EXPORTED_STORE_COLOR +#define GLSL_STORE_COLOR_CLEAR "_EXPORTED_STORE_COLOR_CLEAR" +#define GLSL_STORE_COLOR_CLEAR_raw _EXPORTED_STORE_COLOR_CLEAR +#define GLSL_SWIZZLE_COLOR_BGRA_TO_RGBA "_EXPORTED_SWIZZLE_COLOR_BGRA_TO_RGBA" +#define GLSL_SWIZZLE_COLOR_BGRA_TO_RGBA_raw _EXPORTED_SWIZZLE_COLOR_BGRA_TO_RGBA +#define GLSL_TARGET_VULKAN "_EXPORTED_TARGET_VULKAN" +#define GLSL_TARGET_VULKAN_raw _EXPORTED_TARGET_VULKAN +#define GLSL_USE_GENERATED_UNIFORMS "_EXPORTED_USE_GENERATED_UNIFORMS" +#define GLSL_USE_GENERATED_UNIFORMS_raw _EXPORTED_USE_GENERATED_UNIFORMS +#define GLSL_USING_DEPTH_STENCIL "_EXPORTED_USING_DEPTH_STENCIL" +#define GLSL_USING_DEPTH_STENCIL_raw _EXPORTED_USING_DEPTH_STENCIL +#define GLSL_USING_PLS_STORAGE_TEXTURES "_EXPORTED_USING_PLS_STORAGE_TEXTURES" +#define GLSL_USING_PLS_STORAGE_TEXTURES_raw _EXPORTED_USING_PLS_STORAGE_TEXTURES +#define GLSL_VERTEX "_EXPORTED_VERTEX" +#define GLSL_VERTEX_raw _EXPORTED_VERTEX +#define GLSL_a_args "_EXPORTED_a_args" +#define GLSL_a_args_raw _EXPORTED_a_args +#define GLSL_a_args_a "_EXPORTED_a_args_a" +#define GLSL_a_args_a_raw _EXPORTED_a_args_a +#define GLSL_a_args_b "_EXPORTED_a_args_b" +#define GLSL_a_args_b_raw _EXPORTED_a_args_b +#define GLSL_a_args_c "_EXPORTED_a_args_c" +#define GLSL_a_args_c_raw _EXPORTED_a_args_c +#define GLSL_a_args_d "_EXPORTED_a_args_d" +#define GLSL_a_args_d_raw _EXPORTED_a_args_d +#define GLSL_a_imageRectVertex "_EXPORTED_a_imageRectVertex" +#define GLSL_a_imageRectVertex_raw _EXPORTED_a_imageRectVertex +#define GLSL_a_joinTan_and_ys "_EXPORTED_a_joinTan_and_ys" +#define GLSL_a_joinTan_and_ys_raw _EXPORTED_a_joinTan_and_ys +#define GLSL_a_mirroredVertexData "_EXPORTED_a_mirroredVertexData" +#define GLSL_a_mirroredVertexData_raw _EXPORTED_a_mirroredVertexData +#define GLSL_a_p0p1_ "_EXPORTED_a_p0p1_" +#define GLSL_a_p0p1__raw _EXPORTED_a_p0p1_ +#define GLSL_a_p2p3_ "_EXPORTED_a_p2p3_" +#define GLSL_a_p2p3__raw _EXPORTED_a_p2p3_ +#define GLSL_a_patchVertexData "_EXPORTED_a_patchVertexData" +#define GLSL_a_patchVertexData_raw _EXPORTED_a_patchVertexData +#define GLSL_a_position "_EXPORTED_a_position" +#define GLSL_a_position_raw _EXPORTED_a_position +#define GLSL_a_span "_EXPORTED_a_span" +#define GLSL_a_span_raw _EXPORTED_a_span +#define GLSL_a_span_a "_EXPORTED_a_span_a" +#define GLSL_a_span_a_raw _EXPORTED_a_span_a +#define GLSL_a_span_b "_EXPORTED_a_span_b" +#define GLSL_a_span_b_raw _EXPORTED_a_span_b +#define GLSL_a_span_c "_EXPORTED_a_span_c" +#define GLSL_a_span_c_raw _EXPORTED_a_span_c +#define GLSL_a_span_d "_EXPORTED_a_span_d" +#define GLSL_a_span_d_raw _EXPORTED_a_span_d +#define GLSL_a_texCoord "_EXPORTED_a_texCoord" +#define GLSL_a_texCoord_raw _EXPORTED_a_texCoord +#define GLSL_a_triangleVertex 
"_EXPORTED_a_triangleVertex" +#define GLSL_a_triangleVertex_raw _EXPORTED_a_triangleVertex +#define GLSL_blitFragmentMain "_EXPORTED_blitFragmentMain" +#define GLSL_blitFragmentMain_raw _EXPORTED_blitFragmentMain +#define GLSL_blitTextureSource "_EXPORTED_blitTextureSource" +#define GLSL_blitTextureSource_raw _EXPORTED_blitTextureSource +#define GLSL_blitVertexMain "_EXPORTED_blitVertexMain" +#define GLSL_blitVertexMain_raw _EXPORTED_blitVertexMain +#define GLSL_clearColor "_EXPORTED_clearColor" +#define GLSL_clearColor_raw _EXPORTED_clearColor +#define GLSL_colorRampFragmentMain "_EXPORTED_colorRampFragmentMain" +#define GLSL_colorRampFragmentMain_raw _EXPORTED_colorRampFragmentMain +#define GLSL_colorRampVertexMain "_EXPORTED_colorRampVertexMain" +#define GLSL_colorRampVertexMain_raw _EXPORTED_colorRampVertexMain +#define GLSL_contourBuffer "_EXPORTED_contourBuffer" +#define GLSL_contourBuffer_raw _EXPORTED_contourBuffer +#define GLSL_drawFragmentMain "_EXPORTED_drawFragmentMain" +#define GLSL_drawFragmentMain_raw _EXPORTED_drawFragmentMain +#define GLSL_drawVertexMain "_EXPORTED_drawVertexMain" +#define GLSL_drawVertexMain_raw _EXPORTED_drawVertexMain +#define GLSL_dstColorTexture "_EXPORTED_dstColorTexture" +#define GLSL_dstColorTexture_raw _EXPORTED_dstColorTexture +#define GLSL_gradTexture "_EXPORTED_gradTexture" +#define GLSL_gradTexture_raw _EXPORTED_gradTexture +#define GLSL_imageTexture "_EXPORTED_imageTexture" +#define GLSL_imageTexture_raw _EXPORTED_imageTexture +#define GLSL_paintAuxBuffer "_EXPORTED_paintAuxBuffer" +#define GLSL_paintAuxBuffer_raw _EXPORTED_paintAuxBuffer +#define GLSL_paintBuffer "_EXPORTED_paintBuffer" +#define GLSL_paintBuffer_raw _EXPORTED_paintBuffer +#define GLSL_pathBuffer "_EXPORTED_pathBuffer" +#define GLSL_pathBuffer_raw _EXPORTED_pathBuffer +#define GLSL_stencilVertexMain "_EXPORTED_stencilVertexMain" +#define GLSL_stencilVertexMain_raw _EXPORTED_stencilVertexMain +#define GLSL_tessVertexTexture "_EXPORTED_tessVertexTexture" +#define GLSL_tessVertexTexture_raw _EXPORTED_tessVertexTexture +#define GLSL_tessellateFragmentMain "_EXPORTED_tessellateFragmentMain" +#define GLSL_tessellateFragmentMain_raw _EXPORTED_tessellateFragmentMain +#define GLSL_tessellateVertexMain "_EXPORTED_tessellateVertexMain" +#define GLSL_tessellateVertexMain_raw _EXPORTED_tessellateVertexMain diff --git a/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/advanced_blend.glsl.hpp b/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/advanced_blend.glsl.hpp new file mode 100644 index 00000000..cb9897af --- /dev/null +++ b/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/advanced_blend.glsl.hpp @@ -0,0 +1,281 @@ +#pragma once + +#include "advanced_blend.exports.h" + +namespace rive { +namespace gpu { +namespace glsl { +const char advanced_blend[] = R"===(/* + * Copyright 2022 Rive + */ + +// From the KHR_blend_equation_advanced spec: +// +// The advanced blend equations are those listed in tables X.1 and X.2. When +// using one of these equations, blending is performed according to the +// following equations: +// +// R = f(Rs',Rd')*p0(As,Ad) + Y*Rs'*p1(As,Ad) + Z*Rd'*p2(As,Ad) +// G = f(Gs',Gd')*p0(As,Ad) + Y*Gs'*p1(As,Ad) + Z*Gd'*p2(As,Ad) +// B = f(Bs',Bd')*p0(As,Ad) + Y*Bs'*p1(As,Ad) + Z*Bd'*p2(As,Ad) +// A = X*p0(As,Ad) + Y*p1(As,Ad) + Z*p2(As,Ad) +// +// where the function f and terms X, Y, and Z are specified in the table. 
+// The R, G, and B components of the source color used for blending are +// considered to have been premultiplied by the A component prior to +// blending. The base source color (Rs',Gs',Bs') is obtained by dividing +// through by the A component: +// +// (Rs', Gs', Bs') = +// (0, 0, 0), if As == 0 +// (Rs/As, Gs/As, Bs/As), otherwise +// +// The destination color components are always considered to have been +// premultiplied by the destination A component and the base destination +// color (Rd', Gd', Bd') is obtained by dividing through by the A component: +// +// (Rd', Gd', Bd') = +// (0, 0, 0), if Ad == 0 +// (Rd/Ad, Gd/Ad, Bd/Ad), otherwise +// +// When blending using advanced blend equations, we expect that the R, G, and +// B components of premultiplied source and destination color inputs be +// stored as the product of non-premultiplied R, G, and B components and the +// A component of the color. If any R, G, or B component of a premultiplied +// input color is non-zero and the A component is zero, the color is +// considered ill-formed, and the corresponding component of the blend result +// will be undefined. +// +// The weighting functions p0, p1, and p2 are defined as follows: +// +// p0(As,Ad) = As*Ad +// p1(As,Ad) = As*(1-Ad) +// p2(As,Ad) = Ad*(1-As) +// +// In these functions, the A components of the source and destination colors +// are taken to indicate the portion of the pixel covered by the fragment +// (source) and the fragments previously accumulated in the pixel +// (destination). The functions p0, p1, and p2 approximate the relative +// portion of the pixel covered by the intersection of the source and +// destination, covered only by the source, and covered only by the +// destination, respectively. The equations defined here assume that there +// is no correlation between the source and destination coverage. +// + +#ifdef _EXPORTED_FRAGMENT + +#ifdef _EXPORTED_ENABLE_KHR_BLEND +layout( +#ifdef _EXPORTED_ENABLE_HSL_BLEND_MODES + blend_support_all_equations +#else + blend_support_multiply, + blend_support_screen, + blend_support_overlay, + blend_support_darken, + blend_support_lighten, + blend_support_colordodge, + blend_support_colorburn, + blend_support_hardlight, + blend_support_softlight, + blend_support_difference, + blend_support_exclusion +#endif + ) out; +#endif // ENABLE_KHR_BLEND + +#ifdef _EXPORTED_ENABLE_ADVANCED_BLEND +#ifdef _EXPORTED_ENABLE_HSL_BLEND_MODES +// When using one of the HSL blend equations in table X.2 as the blend equation, the RGB color +// components produced by the function f() are effectively obtained by converting both the +// non-premultiplied source and destination colors to the HSL (hue, saturation, luminosity) color +// space, generating a new HSL color by selecting H, S, and L components from the source or +// destination according to the blend equation, and then converting the result back to RGB. The HSL +// blend equations are only well defined when the values of the input color components are in the +// range [0..1]. +half minv3(half3 c) { return min(min(c.x, c.y), c.z); } +half maxv3(half3 c) { return max(max(c.x, c.y), c.z); } +half lumv3(half3 c) { return dot(c, make_half3(.30, .59, .11)); } +half satv3(half3 c) { return maxv3(c) - minv3(c); } + +// If any color components are outside [0,1], adjust the color to get the components in range. 
+half3 clip_color(half3 color) +{ + half lum = lumv3(color); + half mincol = minv3(color); + half maxcol = maxv3(color); + if (mincol < .0) + color = lum + ((color - lum) * lum) / (lum - mincol); + if (maxcol > 1.) + color = lum + ((color - lum) * (1. - lum)) / (maxcol - lum); + return color; +} + +// Take the base RGB color <cbase> and override its luminosity with that of the RGB color <clum>. +half3 set_lum(half3 cbase, half3 clum) +{ + half lbase = lumv3(cbase); + half llum = lumv3(clum); + half ldiff = llum - lbase; + half3 color = cbase + make_half3(ldiff); + return clip_color(color); +} + +// Take the base RGB color <cbase> and override its saturation with that of the RGB color <csat>. +// Then override the luminosity of the result with that of the RGB color <clum>. +half3 set_lum_sat(half3 cbase, half3 csat, half3 clum) +{ + half minbase = minv3(cbase); + half sbase = satv3(cbase); + half ssat = satv3(csat); + half3 color; + if (sbase > .0) + { + // Equivalent (modulo rounding errors) to setting the smallest (R,G,B) component to 0, the + // largest to <ssat>, and interpolating the "middle" component based on its original value + // relative to the smallest/largest. + color = (cbase - minbase) * ssat / sbase; + } + else + { + color = make_half3(.0); + } + return set_lum(color, clum); +} +#endif // ENABLE_HSL_BLEND_MODES + +half4 advanced_blend(half4 src, half4 dst, ushort mode) +{ + // The function f() operates on un-multiplied rgb values and dictates the look of the advanced + // blend equations. + half3 f = make_half3(.0); + switch (mode) + { + case BLEND_MODE_MULTIPLY: + f = src.xyz * dst.xyz; + break; + case BLEND_MODE_SCREEN: + f = src.xyz + dst.xyz - src.xyz * dst.xyz; + break; + case BLEND_MODE_OVERLAY: + { + for (int i = 0; i < 3; ++i) + { + if (dst[i] <= .5) + f[i] = 2. * src[i] * dst[i]; + else + f[i] = 1. - 2. * (1. - src[i]) * (1. - dst[i]); + } + break; + } + case BLEND_MODE_DARKEN: + f = min(src.xyz, dst.xyz); + break; + case BLEND_MODE_LIGHTEN: + f = max(src.xyz, dst.xyz); + break; + case BLEND_MODE_COLORDODGE: + // ES3 spec, 4.5.1 Range and Precision: dividing a non-zero by 0 results in the + // appropriately signed IEEE Inf. + f = mix(min(dst.xyz / (1. - src.xyz), make_half3(1.)), + make_half3(.0), + lessThanEqual(dst.xyz, make_half3(.0))); + break; + case BLEND_MODE_COLORBURN: + // ES3 spec, 4.5.1 Range and Precision: dividing a non-zero by 0 results in the + // appropriately signed IEEE Inf. + f = mix(1. - min((1. - dst.xyz) / src.xyz, 1.), + make_half3(1., 1., 1.), + greaterThanEqual(dst.xyz, make_half3(1.))); + break; + case BLEND_MODE_HARDLIGHT: + { + for (int i = 0; i < 3; ++i) + { + if (src[i] <= .5) + f[i] = 2. * src[i] * dst[i]; + else + f[i] = 1. - 2. * (1. - src[i]) * (1. - dst[i]); + } + break; + } + case BLEND_MODE_SOFTLIGHT: + { + for (int i = 0; i < 3; ++i) + { + if (src[i] <= 0.5) + f[i] = dst[i] - (1. - 2. * src[i]) * dst[i] * (1. - dst[i]); + else if (dst[i] <= .25) + f[i] = + dst[i] + (2. * src[i] - 1.) * dst[i] * ((16. * dst[i] - 12.) * dst[i] + 3.); + else + f[i] = dst[i] + (2. * src[i] - 1.) * (sqrt(dst[i]) - dst[i]); + } + break; + } + case BLEND_MODE_DIFFERENCE: + f = abs(dst.xyz - src.xyz); + break; + case BLEND_MODE_EXCLUSION: + f = src.xyz + dst.xyz - 2. * src.xyz * dst.xyz; + break; +#ifdef _EXPORTED_ENABLE_HSL_BLEND_MODES + // The HSL blend equations are only well defined when the values of the input color + // components are in the range [0..1].
+ case BLEND_MODE_HUE: + if (_EXPORTED_ENABLE_HSL_BLEND_MODES) + { + src.xyz = clamp(src.xyz, make_half3(.0), make_half3(1.)); + f = set_lum_sat(src.xyz, dst.xyz, dst.xyz); + } + break; + case BLEND_MODE_SATURATION: + if (_EXPORTED_ENABLE_HSL_BLEND_MODES) + { + src.xyz = clamp(src.xyz, make_half3(.0), make_half3(1.)); + f = set_lum_sat(dst.xyz, src.xyz, dst.xyz); + } + break; + case BLEND_MODE_COLOR: + if (_EXPORTED_ENABLE_HSL_BLEND_MODES) + { + src.xyz = clamp(src.xyz, make_half3(.0), make_half3(1.)); + f = set_lum(src.xyz, dst.xyz); + } + break; + case BLEND_MODE_LUMINOSITY: + if (_EXPORTED_ENABLE_HSL_BLEND_MODES) + { + src.xyz = clamp(src.xyz, make_half3(.0), make_half3(1.)); + f = set_lum(dst.xyz, src.xyz); + } + break; +#endif + } + + // The weighting functions p0, p1, and p2 are defined as follows: + // + // p0(As,Ad) = As*Ad + // p1(As,Ad) = As*(1-Ad) + // p2(As,Ad) = Ad*(1-As) + // + half3 p = make_half3(src.w * dst.w, src.w * (1. - dst.w), (1. - src.w) * dst.w); + + // When using one of these equations, blending is performed according to the following + // equations: + // + // R = f(Rs',Rd')*p0(As,Ad) + Y*Rs'*p1(As,Ad) + Z*Rd'*p2(As,Ad) + // G = f(Gs',Gd')*p0(As,Ad) + Y*Gs'*p1(As,Ad) + Z*Gd'*p2(As,Ad) + // B = f(Bs',Bd')*p0(As,Ad) + Y*Bs'*p1(As,Ad) + Z*Bd'*p2(As,Ad) + // A = X*p0(As,Ad) + Y*p1(As,Ad) + Z*p2(As,Ad) + // + // NOTE: (X,Y,Z) always == (1,1,1), so it is ignored in this implementation. + return MUL(make_half3x4(f, 1., src.xyz, 1., dst.xyz, 1.), p); +} +#endif // ENABLE_ADVANCED_BLEND + +#endif // FRAGMENT +)==="; +} // namespace glsl +} // namespace gpu +} // namespace rive \ No newline at end of file diff --git a/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/advanced_blend.minified.ush b/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/advanced_blend.minified.ush new file mode 100644 index 00000000..77f69dff --- /dev/null +++ b/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/advanced_blend.minified.ush @@ -0,0 +1,270 @@ +/* + * Copyright 2022 Rive + */ + +// From the KHR_blend_equation_advanced spec: +// +// The advanced blend equations are those listed in tables X.1 and X.2. When +// using one of these equations, blending is performed according to the +// following equations: +// +// R = f(Rs',Rd')*p0(As,Ad) + Y*Rs'*p1(As,Ad) + Z*Rd'*p2(As,Ad) +// G = f(Gs',Gd')*p0(As,Ad) + Y*Gs'*p1(As,Ad) + Z*Gd'*p2(As,Ad) +// B = f(Bs',Bd')*p0(As,Ad) + Y*Bs'*p1(As,Ad) + Z*Bd'*p2(As,Ad) +// A = X*p0(As,Ad) + Y*p1(As,Ad) + Z*p2(As,Ad) +// +// where the function f and terms X, Y, and Z are specified in the table. +// The R, G, and B components of the source color used for blending are +// considered to have been premultiplied by the A component prior to +// blending. 
The base source color (Rs',Gs',Bs') is obtained by dividing
+// through by the A component:
+//
+//    (Rs', Gs', Bs') =
+//        (0, 0, 0),              if As == 0
+//        (Rs/As, Gs/As, Bs/As),  otherwise
+//
+// The destination color components are always considered to have been
+// premultiplied by the destination A component and the base destination
+// color (Rd', Gd', Bd') is obtained by dividing through by the A component:
+//
+//    (Rd', Gd', Bd') =
+//        (0, 0, 0),              if Ad == 0
+//        (Rd/Ad, Gd/Ad, Bd/Ad),  otherwise
+//
+// When blending using advanced blend equations, we expect that the R, G, and
+// B components of premultiplied source and destination color inputs be
+// stored as the product of non-premultiplied R, G, and B components and the
+// A component of the color. If any R, G, or B component of a premultiplied
+// input color is non-zero and the A component is zero, the color is
+// considered ill-formed, and the corresponding component of the blend result
+// will be undefined.
+//
+// The weighting functions p0, p1, and p2 are defined as follows:
+//
+//     p0(As,Ad) = As*Ad
+//     p1(As,Ad) = As*(1-Ad)
+//     p2(As,Ad) = Ad*(1-As)
+//
+// In these functions, the A components of the source and destination colors
+// are taken to indicate the portion of the pixel covered by the fragment
+// (source) and the fragments previously accumulated in the pixel
+// (destination). The functions p0, p1, and p2 approximate the relative
+// portion of the pixel covered by the intersection of the source and
+// destination, covered only by the source, and covered only by the
+// destination, respectively. The equations defined here assume that there
+// is no correlation between the source and destination coverage.
+//
+
+#ifdef FRAGMENT
+
+#ifdef ENABLE_KHR_BLEND
+layout(
+#ifdef ENABLE_HSL_BLEND_MODES
+    blend_support_all_equations
+#else
+    blend_support_multiply,
+    blend_support_screen,
+    blend_support_overlay,
+    blend_support_darken,
+    blend_support_lighten,
+    blend_support_colordodge,
+    blend_support_colorburn,
+    blend_support_hardlight,
+    blend_support_softlight,
+    blend_support_difference,
+    blend_support_exclusion
+#endif
+    ) out;
+#endif // ENABLE_KHR_BLEND
+
+#ifdef ENABLE_ADVANCED_BLEND
+#ifdef ENABLE_HSL_BLEND_MODES
+// When using one of the HSL blend equations in table X.2 as the blend equation, the RGB color
+// components produced by the function f() are effectively obtained by converting both the
+// non-premultiplied source and destination colors to the HSL (hue, saturation, luminosity) color
+// space, generating a new HSL color by selecting H, S, and L components from the source or
+// destination according to the blend equation, and then converting the result back to RGB. The HSL
+// blend equations are only well defined when the values of the input color components are in the
+// range [0..1].
+half minv3(half3 c) { return min(min(c.x, c.y), c.z); }
+half maxv3(half3 c) { return max(max(c.x, c.y), c.z); }
+half lumv3(half3 c) { return dot(c, make_half3(.30, .59, .11)); }
+half satv3(half3 c) { return maxv3(c) - minv3(c); }
+
+// If any color components are outside [0,1], adjust the color to get the components in range.
+half3 clip_color(half3 color)
+{
+    half lum = lumv3(color);
+    half mincol = minv3(color);
+    half maxcol = maxv3(color);
+    if (mincol < .0)
+        color = lum + ((color - lum) * lum) / (lum - mincol);
+    if (maxcol > 1.)
+        color = lum + ((color - lum) * (1. - lum)) / (maxcol - lum);
+    return color;
+}
+
+// Take the base RGB color 'cbase' and override its luminosity with that of the RGB color 'clum'.
+half3 set_lum(half3 cbase, half3 clum)
+{
+    half lbase = lumv3(cbase);
+    half llum = lumv3(clum);
+    half ldiff = llum - lbase;
+    half3 color = cbase + make_half3(ldiff);
+    return clip_color(color);
+}
+
+// Take the base RGB color 'cbase' and override its saturation with that of the RGB color 'csat'.
+// Then override the luminosity of the result with that of the RGB color 'clum'.
+half3 set_lum_sat(half3 cbase, half3 csat, half3 clum)
+{
+    half minbase = minv3(cbase);
+    half sbase = satv3(cbase);
+    half ssat = satv3(csat);
+    half3 color;
+    if (sbase > .0)
+    {
+        // Equivalent (modulo rounding errors) to setting the smallest (R,G,B) component to 0, the
+        // largest to 'ssat', and interpolating the "middle" component based on its original value
+        // relative to the smallest/largest.
+        color = (cbase - minbase) * ssat / sbase;
+    }
+    else
+    {
+        color = make_half3(.0);
+    }
+    return set_lum(color, clum);
+}
+#endif // ENABLE_HSL_BLEND_MODES
+
+half4 advanced_blend(half4 src, half4 dst, ushort mode)
+{
+    // The function f() operates on un-multiplied rgb values and dictates the look of the advanced
+    // blend equations.
+    half3 f = make_half3(.0);
+    switch (mode)
+    {
+        case BLEND_MODE_MULTIPLY:
+            f = src.xyz * dst.xyz;
+            break;
+        case BLEND_MODE_SCREEN:
+            f = src.xyz + dst.xyz - src.xyz * dst.xyz;
+            break;
+        case BLEND_MODE_OVERLAY:
+        {
+            for (int i = 0; i < 3; ++i)
+            {
+                if (dst[i] <= .5)
+                    f[i] = 2. * src[i] * dst[i];
+                else
+                    f[i] = 1. - 2. * (1. - src[i]) * (1. - dst[i]);
+            }
+            break;
+        }
+        case BLEND_MODE_DARKEN:
+            f = min(src.xyz, dst.xyz);
+            break;
+        case BLEND_MODE_LIGHTEN:
+            f = max(src.xyz, dst.xyz);
+            break;
+        case BLEND_MODE_COLORDODGE:
+            // ES3 spec, 4.5.1 Range and Precision: dividing a non-zero by 0 results in the
+            // appropriately signed IEEE Inf.
+            f = mix(min(dst.xyz / (1. - src.xyz), make_half3(1.)),
+                    make_half3(.0),
+                    lessThanEqual(dst.xyz, make_half3(.0)));
+            break;
+        case BLEND_MODE_COLORBURN:
+            // ES3 spec, 4.5.1 Range and Precision: dividing a non-zero by 0 results in the
+            // appropriately signed IEEE Inf.
+            f = mix(1. - min((1. - dst.xyz) / src.xyz, 1.),
+                    make_half3(1., 1., 1.),
+                    greaterThanEqual(dst.xyz, make_half3(1.)));
+            break;
+        case BLEND_MODE_HARDLIGHT:
+        {
+            for (int i = 0; i < 3; ++i)
+            {
+                if (src[i] <= .5)
+                    f[i] = 2. * src[i] * dst[i];
+                else
+                    f[i] = 1. - 2. * (1. - src[i]) * (1. - dst[i]);
+            }
+            break;
+        }
+        case BLEND_MODE_SOFTLIGHT:
+        {
+            for (int i = 0; i < 3; ++i)
+            {
+                if (src[i] <= 0.5)
+                    f[i] = dst[i] - (1. - 2. * src[i]) * dst[i] * (1. - dst[i]);
+                else if (dst[i] <= .25)
+                    f[i] =
+                        dst[i] + (2. * src[i] - 1.) * dst[i] * ((16. * dst[i] - 12.) * dst[i] + 3.);
+                else
+                    f[i] = dst[i] + (2. * src[i] - 1.) * (sqrt(dst[i]) - dst[i]);
+            }
+            break;
+        }
+        case BLEND_MODE_DIFFERENCE:
+            f = abs(dst.xyz - src.xyz);
+            break;
+        case BLEND_MODE_EXCLUSION:
+            f = src.xyz + dst.xyz - 2. * src.xyz * dst.xyz;
+            break;
+#ifdef ENABLE_HSL_BLEND_MODES
+        // The HSL blend equations are only well defined when the values of the input color
+        // components are in the range [0..1].
+ case BLEND_MODE_HUE: + if (ENABLE_HSL_BLEND_MODES) + { + src.xyz = clamp(src.xyz, make_half3(.0), make_half3(1.)); + f = set_lum_sat(src.xyz, dst.xyz, dst.xyz); + } + break; + case BLEND_MODE_SATURATION: + if (ENABLE_HSL_BLEND_MODES) + { + src.xyz = clamp(src.xyz, make_half3(.0), make_half3(1.)); + f = set_lum_sat(dst.xyz, src.xyz, dst.xyz); + } + break; + case BLEND_MODE_COLOR: + if (ENABLE_HSL_BLEND_MODES) + { + src.xyz = clamp(src.xyz, make_half3(.0), make_half3(1.)); + f = set_lum(src.xyz, dst.xyz); + } + break; + case BLEND_MODE_LUMINOSITY: + if (ENABLE_HSL_BLEND_MODES) + { + src.xyz = clamp(src.xyz, make_half3(.0), make_half3(1.)); + f = set_lum(dst.xyz, src.xyz); + } + break; +#endif + } + + // The weighting functions p0, p1, and p2 are defined as follows: + // + // p0(As,Ad) = As*Ad + // p1(As,Ad) = As*(1-Ad) + // p2(As,Ad) = Ad*(1-As) + // + half3 p = make_half3(src.w * dst.w, src.w * (1. - dst.w), (1. - src.w) * dst.w); + + // When using one of these equations, blending is performed according to the following + // equations: + // + // R = f(Rs',Rd')*p0(As,Ad) + Y*Rs'*p1(As,Ad) + Z*Rd'*p2(As,Ad) + // G = f(Gs',Gd')*p0(As,Ad) + Y*Gs'*p1(As,Ad) + Z*Gd'*p2(As,Ad) + // B = f(Bs',Bd')*p0(As,Ad) + Y*Bs'*p1(As,Ad) + Z*Bd'*p2(As,Ad) + // A = X*p0(As,Ad) + Y*p1(As,Ad) + Z*p2(As,Ad) + // + // NOTE: (X,Y,Z) always == (1,1,1), so it is ignored in this implementation. + return MUL(make_half3x4(f, 1., src.xyz, 1., dst.xyz, 1.), p); +} +#endif // ENABLE_ADVANCED_BLEND + +#endif // FRAGMENT diff --git a/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/atomic_draw.exports.h b/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/atomic_draw.exports.h new file mode 100644 index 00000000..2d88d890 --- /dev/null +++ b/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/atomic_draw.exports.h @@ -0,0 +1,178 @@ +#pragma once + +#define GLSL_CLEAR_CLIP "_EXPORTED_CLEAR_CLIP" +#define GLSL_CLEAR_CLIP_raw _EXPORTED_CLEAR_CLIP +#define GLSL_CLEAR_COLOR "_EXPORTED_CLEAR_COLOR" +#define GLSL_CLEAR_COLOR_raw _EXPORTED_CLEAR_COLOR +#define GLSL_CLEAR_COVERAGE "_EXPORTED_CLEAR_COVERAGE" +#define GLSL_CLEAR_COVERAGE_raw _EXPORTED_CLEAR_COVERAGE +#define GLSL_COALESCED_PLS_RESOLVE_AND_TRANSFER "_EXPORTED_COALESCED_PLS_RESOLVE_AND_TRANSFER" +#define GLSL_COALESCED_PLS_RESOLVE_AND_TRANSFER_raw _EXPORTED_COALESCED_PLS_RESOLVE_AND_TRANSFER +#define GLSL_COLOR_PLANE_IDX_OVERRIDE "_EXPORTED_COLOR_PLANE_IDX_OVERRIDE" +#define GLSL_COLOR_PLANE_IDX_OVERRIDE_raw _EXPORTED_COLOR_PLANE_IDX_OVERRIDE +#define GLSL_DISABLE_SHADER_STORAGE_BUFFERS "_EXPORTED_DISABLE_SHADER_STORAGE_BUFFERS" +#define GLSL_DISABLE_SHADER_STORAGE_BUFFERS_raw _EXPORTED_DISABLE_SHADER_STORAGE_BUFFERS +#define GLSL_DRAW_IMAGE "_EXPORTED_DRAW_IMAGE" +#define GLSL_DRAW_IMAGE_raw _EXPORTED_DRAW_IMAGE +#define GLSL_DRAW_IMAGE_MESH "_EXPORTED_DRAW_IMAGE_MESH" +#define GLSL_DRAW_IMAGE_MESH_raw _EXPORTED_DRAW_IMAGE_MESH +#define GLSL_DRAW_IMAGE_RECT "_EXPORTED_DRAW_IMAGE_RECT" +#define GLSL_DRAW_IMAGE_RECT_raw _EXPORTED_DRAW_IMAGE_RECT +#define GLSL_DRAW_INTERIOR_TRIANGLES "_EXPORTED_DRAW_INTERIOR_TRIANGLES" +#define GLSL_DRAW_INTERIOR_TRIANGLES_raw _EXPORTED_DRAW_INTERIOR_TRIANGLES +#define GLSL_DRAW_PATH "_EXPORTED_DRAW_PATH" +#define GLSL_DRAW_PATH_raw _EXPORTED_DRAW_PATH +#define GLSL_DRAW_RENDER_TARGET_UPDATE_BOUNDS "_EXPORTED_DRAW_RENDER_TARGET_UPDATE_BOUNDS" +#define GLSL_DRAW_RENDER_TARGET_UPDATE_BOUNDS_raw _EXPORTED_DRAW_RENDER_TARGET_UPDATE_BOUNDS +#define GLSL_ENABLE_ADVANCED_BLEND 
"_EXPORTED_ENABLE_ADVANCED_BLEND" +#define GLSL_ENABLE_ADVANCED_BLEND_raw _EXPORTED_ENABLE_ADVANCED_BLEND +#define GLSL_ENABLE_BINDLESS_TEXTURES "_EXPORTED_ENABLE_BINDLESS_TEXTURES" +#define GLSL_ENABLE_BINDLESS_TEXTURES_raw _EXPORTED_ENABLE_BINDLESS_TEXTURES +#define GLSL_ENABLE_CLIPPING "_EXPORTED_ENABLE_CLIPPING" +#define GLSL_ENABLE_CLIPPING_raw _EXPORTED_ENABLE_CLIPPING +#define GLSL_ENABLE_CLIP_RECT "_EXPORTED_ENABLE_CLIP_RECT" +#define GLSL_ENABLE_CLIP_RECT_raw _EXPORTED_ENABLE_CLIP_RECT +#define GLSL_ENABLE_EVEN_ODD "_EXPORTED_ENABLE_EVEN_ODD" +#define GLSL_ENABLE_EVEN_ODD_raw _EXPORTED_ENABLE_EVEN_ODD +#define GLSL_ENABLE_HSL_BLEND_MODES "_EXPORTED_ENABLE_HSL_BLEND_MODES" +#define GLSL_ENABLE_HSL_BLEND_MODES_raw _EXPORTED_ENABLE_HSL_BLEND_MODES +#define GLSL_ENABLE_INSTANCE_INDEX "_EXPORTED_ENABLE_INSTANCE_INDEX" +#define GLSL_ENABLE_INSTANCE_INDEX_raw _EXPORTED_ENABLE_INSTANCE_INDEX +#define GLSL_ENABLE_KHR_BLEND "_EXPORTED_ENABLE_KHR_BLEND" +#define GLSL_ENABLE_KHR_BLEND_raw _EXPORTED_ENABLE_KHR_BLEND +#define GLSL_ENABLE_MIN_16_PRECISION "_EXPORTED_ENABLE_MIN_16_PRECISION" +#define GLSL_ENABLE_MIN_16_PRECISION_raw _EXPORTED_ENABLE_MIN_16_PRECISION +#define GLSL_ENABLE_NESTED_CLIPPING "_EXPORTED_ENABLE_NESTED_CLIPPING" +#define GLSL_ENABLE_NESTED_CLIPPING_raw _EXPORTED_ENABLE_NESTED_CLIPPING +#define GLSL_ENABLE_RASTERIZER_ORDERED_VIEWS "_EXPORTED_ENABLE_RASTERIZER_ORDERED_VIEWS" +#define GLSL_ENABLE_RASTERIZER_ORDERED_VIEWS_raw _EXPORTED_ENABLE_RASTERIZER_ORDERED_VIEWS +#define GLSL_ENABLE_SPIRV_CROSS_BASE_INSTANCE "_EXPORTED_ENABLE_SPIRV_CROSS_BASE_INSTANCE" +#define GLSL_ENABLE_SPIRV_CROSS_BASE_INSTANCE_raw _EXPORTED_ENABLE_SPIRV_CROSS_BASE_INSTANCE +#define GLSL_ENABLE_TYPED_UAV_LOAD_STORE "_EXPORTED_ENABLE_TYPED_UAV_LOAD_STORE" +#define GLSL_ENABLE_TYPED_UAV_LOAD_STORE_raw _EXPORTED_ENABLE_TYPED_UAV_LOAD_STORE +#define GLSL_FIXED_FUNCTION_COLOR_BLEND "_EXPORTED_FIXED_FUNCTION_COLOR_BLEND" +#define GLSL_FIXED_FUNCTION_COLOR_BLEND_raw _EXPORTED_FIXED_FUNCTION_COLOR_BLEND +#define GLSL_FRAGMENT "_EXPORTED_FRAGMENT" +#define GLSL_FRAGMENT_raw _EXPORTED_FRAGMENT +#define GLSL_FlushUniforms "_EXPORTED_FlushUniforms" +#define GLSL_FlushUniforms_raw _EXPORTED_FlushUniforms +#define GLSL_GLSL_VERSION "_EXPORTED_GLSL_VERSION" +#define GLSL_GLSL_VERSION_raw _EXPORTED_GLSL_VERSION +#define GLSL_INITIALIZE_PLS "_EXPORTED_INITIALIZE_PLS" +#define GLSL_INITIALIZE_PLS_raw _EXPORTED_INITIALIZE_PLS +#define GLSL_ImageDrawUniforms "_EXPORTED_ImageDrawUniforms" +#define GLSL_ImageDrawUniforms_raw _EXPORTED_ImageDrawUniforms +#define GLSL_LOAD_COLOR "_EXPORTED_LOAD_COLOR" +#define GLSL_LOAD_COLOR_raw _EXPORTED_LOAD_COLOR +#define GLSL_OPTIONALLY_FLAT "_EXPORTED_OPTIONALLY_FLAT" +#define GLSL_OPTIONALLY_FLAT_raw _EXPORTED_OPTIONALLY_FLAT +#define GLSL_PLS_IMPL_ANGLE "_EXPORTED_PLS_IMPL_ANGLE" +#define GLSL_PLS_IMPL_ANGLE_raw _EXPORTED_PLS_IMPL_ANGLE +#define GLSL_PLS_IMPL_DEVICE_BUFFER "_EXPORTED_PLS_IMPL_DEVICE_BUFFER" +#define GLSL_PLS_IMPL_DEVICE_BUFFER_raw _EXPORTED_PLS_IMPL_DEVICE_BUFFER +#define GLSL_PLS_IMPL_DEVICE_BUFFER_RASTER_ORDERED "_EXPORTED_PLS_IMPL_DEVICE_BUFFER_RASTER_ORDERED" +#define GLSL_PLS_IMPL_DEVICE_BUFFER_RASTER_ORDERED_raw _EXPORTED_PLS_IMPL_DEVICE_BUFFER_RASTER_ORDERED +#define GLSL_PLS_IMPL_EXT_NATIVE "_EXPORTED_PLS_IMPL_EXT_NATIVE" +#define GLSL_PLS_IMPL_EXT_NATIVE_raw _EXPORTED_PLS_IMPL_EXT_NATIVE +#define GLSL_PLS_IMPL_FRAMEBUFFER_FETCH "_EXPORTED_PLS_IMPL_FRAMEBUFFER_FETCH" +#define GLSL_PLS_IMPL_FRAMEBUFFER_FETCH_raw _EXPORTED_PLS_IMPL_FRAMEBUFFER_FETCH +#define 
GLSL_PLS_IMPL_NONE "_EXPORTED_PLS_IMPL_NONE" +#define GLSL_PLS_IMPL_NONE_raw _EXPORTED_PLS_IMPL_NONE +#define GLSL_PLS_IMPL_STORAGE_TEXTURE "_EXPORTED_PLS_IMPL_STORAGE_TEXTURE" +#define GLSL_PLS_IMPL_STORAGE_TEXTURE_raw _EXPORTED_PLS_IMPL_STORAGE_TEXTURE +#define GLSL_PLS_IMPL_SUBPASS_LOAD "_EXPORTED_PLS_IMPL_SUBPASS_LOAD" +#define GLSL_PLS_IMPL_SUBPASS_LOAD_raw _EXPORTED_PLS_IMPL_SUBPASS_LOAD +#define GLSL_RESOLVE_PLS "_EXPORTED_RESOLVE_PLS" +#define GLSL_RESOLVE_PLS_raw _EXPORTED_RESOLVE_PLS +#define GLSL_STORE_COLOR "_EXPORTED_STORE_COLOR" +#define GLSL_STORE_COLOR_raw _EXPORTED_STORE_COLOR +#define GLSL_STORE_COLOR_CLEAR "_EXPORTED_STORE_COLOR_CLEAR" +#define GLSL_STORE_COLOR_CLEAR_raw _EXPORTED_STORE_COLOR_CLEAR +#define GLSL_SWIZZLE_COLOR_BGRA_TO_RGBA "_EXPORTED_SWIZZLE_COLOR_BGRA_TO_RGBA" +#define GLSL_SWIZZLE_COLOR_BGRA_TO_RGBA_raw _EXPORTED_SWIZZLE_COLOR_BGRA_TO_RGBA +#define GLSL_TARGET_VULKAN "_EXPORTED_TARGET_VULKAN" +#define GLSL_TARGET_VULKAN_raw _EXPORTED_TARGET_VULKAN +#define GLSL_USE_GENERATED_UNIFORMS "_EXPORTED_USE_GENERATED_UNIFORMS" +#define GLSL_USE_GENERATED_UNIFORMS_raw _EXPORTED_USE_GENERATED_UNIFORMS +#define GLSL_USING_DEPTH_STENCIL "_EXPORTED_USING_DEPTH_STENCIL" +#define GLSL_USING_DEPTH_STENCIL_raw _EXPORTED_USING_DEPTH_STENCIL +#define GLSL_USING_PLS_STORAGE_TEXTURES "_EXPORTED_USING_PLS_STORAGE_TEXTURES" +#define GLSL_USING_PLS_STORAGE_TEXTURES_raw _EXPORTED_USING_PLS_STORAGE_TEXTURES +#define GLSL_VERTEX "_EXPORTED_VERTEX" +#define GLSL_VERTEX_raw _EXPORTED_VERTEX +#define GLSL_a_args "_EXPORTED_a_args" +#define GLSL_a_args_raw _EXPORTED_a_args +#define GLSL_a_args_a "_EXPORTED_a_args_a" +#define GLSL_a_args_a_raw _EXPORTED_a_args_a +#define GLSL_a_args_b "_EXPORTED_a_args_b" +#define GLSL_a_args_b_raw _EXPORTED_a_args_b +#define GLSL_a_args_c "_EXPORTED_a_args_c" +#define GLSL_a_args_c_raw _EXPORTED_a_args_c +#define GLSL_a_args_d "_EXPORTED_a_args_d" +#define GLSL_a_args_d_raw _EXPORTED_a_args_d +#define GLSL_a_imageRectVertex "_EXPORTED_a_imageRectVertex" +#define GLSL_a_imageRectVertex_raw _EXPORTED_a_imageRectVertex +#define GLSL_a_joinTan_and_ys "_EXPORTED_a_joinTan_and_ys" +#define GLSL_a_joinTan_and_ys_raw _EXPORTED_a_joinTan_and_ys +#define GLSL_a_mirroredVertexData "_EXPORTED_a_mirroredVertexData" +#define GLSL_a_mirroredVertexData_raw _EXPORTED_a_mirroredVertexData +#define GLSL_a_p0p1_ "_EXPORTED_a_p0p1_" +#define GLSL_a_p0p1__raw _EXPORTED_a_p0p1_ +#define GLSL_a_p2p3_ "_EXPORTED_a_p2p3_" +#define GLSL_a_p2p3__raw _EXPORTED_a_p2p3_ +#define GLSL_a_patchVertexData "_EXPORTED_a_patchVertexData" +#define GLSL_a_patchVertexData_raw _EXPORTED_a_patchVertexData +#define GLSL_a_position "_EXPORTED_a_position" +#define GLSL_a_position_raw _EXPORTED_a_position +#define GLSL_a_span "_EXPORTED_a_span" +#define GLSL_a_span_raw _EXPORTED_a_span +#define GLSL_a_span_a "_EXPORTED_a_span_a" +#define GLSL_a_span_a_raw _EXPORTED_a_span_a +#define GLSL_a_span_b "_EXPORTED_a_span_b" +#define GLSL_a_span_b_raw _EXPORTED_a_span_b +#define GLSL_a_span_c "_EXPORTED_a_span_c" +#define GLSL_a_span_c_raw _EXPORTED_a_span_c +#define GLSL_a_span_d "_EXPORTED_a_span_d" +#define GLSL_a_span_d_raw _EXPORTED_a_span_d +#define GLSL_a_texCoord "_EXPORTED_a_texCoord" +#define GLSL_a_texCoord_raw _EXPORTED_a_texCoord +#define GLSL_a_triangleVertex "_EXPORTED_a_triangleVertex" +#define GLSL_a_triangleVertex_raw _EXPORTED_a_triangleVertex +#define GLSL_blitFragmentMain "_EXPORTED_blitFragmentMain" +#define GLSL_blitFragmentMain_raw _EXPORTED_blitFragmentMain +#define 
GLSL_blitTextureSource "_EXPORTED_blitTextureSource" +#define GLSL_blitTextureSource_raw _EXPORTED_blitTextureSource +#define GLSL_blitVertexMain "_EXPORTED_blitVertexMain" +#define GLSL_blitVertexMain_raw _EXPORTED_blitVertexMain +#define GLSL_clearColor "_EXPORTED_clearColor" +#define GLSL_clearColor_raw _EXPORTED_clearColor +#define GLSL_colorRampFragmentMain "_EXPORTED_colorRampFragmentMain" +#define GLSL_colorRampFragmentMain_raw _EXPORTED_colorRampFragmentMain +#define GLSL_colorRampVertexMain "_EXPORTED_colorRampVertexMain" +#define GLSL_colorRampVertexMain_raw _EXPORTED_colorRampVertexMain +#define GLSL_contourBuffer "_EXPORTED_contourBuffer" +#define GLSL_contourBuffer_raw _EXPORTED_contourBuffer +#define GLSL_drawFragmentMain "_EXPORTED_drawFragmentMain" +#define GLSL_drawFragmentMain_raw _EXPORTED_drawFragmentMain +#define GLSL_drawVertexMain "_EXPORTED_drawVertexMain" +#define GLSL_drawVertexMain_raw _EXPORTED_drawVertexMain +#define GLSL_dstColorTexture "_EXPORTED_dstColorTexture" +#define GLSL_dstColorTexture_raw _EXPORTED_dstColorTexture +#define GLSL_gradTexture "_EXPORTED_gradTexture" +#define GLSL_gradTexture_raw _EXPORTED_gradTexture +#define GLSL_imageTexture "_EXPORTED_imageTexture" +#define GLSL_imageTexture_raw _EXPORTED_imageTexture +#define GLSL_paintAuxBuffer "_EXPORTED_paintAuxBuffer" +#define GLSL_paintAuxBuffer_raw _EXPORTED_paintAuxBuffer +#define GLSL_paintBuffer "_EXPORTED_paintBuffer" +#define GLSL_paintBuffer_raw _EXPORTED_paintBuffer +#define GLSL_pathBuffer "_EXPORTED_pathBuffer" +#define GLSL_pathBuffer_raw _EXPORTED_pathBuffer +#define GLSL_stencilVertexMain "_EXPORTED_stencilVertexMain" +#define GLSL_stencilVertexMain_raw _EXPORTED_stencilVertexMain +#define GLSL_tessVertexTexture "_EXPORTED_tessVertexTexture" +#define GLSL_tessVertexTexture_raw _EXPORTED_tessVertexTexture +#define GLSL_tessellateFragmentMain "_EXPORTED_tessellateFragmentMain" +#define GLSL_tessellateFragmentMain_raw _EXPORTED_tessellateFragmentMain +#define GLSL_tessellateVertexMain "_EXPORTED_tessellateVertexMain" +#define GLSL_tessellateVertexMain_raw _EXPORTED_tessellateVertexMain diff --git a/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/atomic_draw.glsl.hpp b/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/atomic_draw.glsl.hpp new file mode 100644 index 00000000..4080bfb8 --- /dev/null +++ b/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/atomic_draw.glsl.hpp @@ -0,0 +1,746 @@ +#pragma once + +#include "atomic_draw.exports.h" + +namespace rive { +namespace gpu { +namespace glsl { +const char atomic_draw[] = R"===(/* + * Copyright 2023 Rive + */ + +#ifdef _EXPORTED_DRAW_PATH +#ifdef _EXPORTED_VERTEX +ATTR_BLOCK_BEGIN(Attrs) +ATTR(0, float4, _EXPORTED_a_patchVertexData); // [localVertexID, outset, fillCoverage, vertexType] +ATTR(1, float4, _EXPORTED_a_mirroredVertexData); +ATTR_BLOCK_END +#endif + +VARYING_BLOCK_BEGIN +NO_PERSPECTIVE VARYING(0, half2, v_edgeDistance); +FLAT VARYING(1, ushort, v_pathID); +VARYING_BLOCK_END + +#ifdef _EXPORTED_VERTEX +VERTEX_MAIN(_EXPORTED_drawVertexMain, Attrs, attrs, _vertexID, _instanceID) +{ + ATTR_UNPACK(_vertexID, attrs, _EXPORTED_a_patchVertexData, float4); + ATTR_UNPACK(_vertexID, attrs, _EXPORTED_a_mirroredVertexData, float4); + + VARYING_INIT(v_edgeDistance, half2); + VARYING_INIT(v_pathID, ushort); + + float4 pos; + float2 vertexPosition; + if (unpack_tessellated_path_vertex(_EXPORTED_a_patchVertexData, + _EXPORTED_a_mirroredVertexData, + _instanceID, + 
v_pathID, + vertexPosition, + v_edgeDistance VERTEX_CONTEXT_UNPACK)) + { + pos = RENDER_TARGET_COORD_TO_CLIP_COORD(vertexPosition); + } + else + { + pos = float4(uniforms.vertexDiscardValue, + uniforms.vertexDiscardValue, + uniforms.vertexDiscardValue, + uniforms.vertexDiscardValue); + } + + VARYING_PACK(v_edgeDistance); + VARYING_PACK(v_pathID); + EMIT_VERTEX(pos); +} +#endif // VERTEX +#endif // DRAW_PATH + +#ifdef _EXPORTED_DRAW_INTERIOR_TRIANGLES +#ifdef _EXPORTED_VERTEX +ATTR_BLOCK_BEGIN(Attrs) +ATTR(0, packed_float3, _EXPORTED_a_triangleVertex); +ATTR_BLOCK_END +#endif + +VARYING_BLOCK_BEGIN +_EXPORTED_OPTIONALLY_FLAT VARYING(0, half, v_windingWeight); +FLAT VARYING(1, ushort, v_pathID); +VARYING_BLOCK_END + +#ifdef _EXPORTED_VERTEX +VERTEX_MAIN(_EXPORTED_drawVertexMain, Attrs, attrs, _vertexID, _instanceID) +{ + ATTR_UNPACK(_vertexID, attrs, _EXPORTED_a_triangleVertex, float3); + + VARYING_INIT(v_windingWeight, half); + VARYING_INIT(v_pathID, ushort); + + float2 vertexPosition = unpack_interior_triangle_vertex(_EXPORTED_a_triangleVertex, + v_pathID, + v_windingWeight VERTEX_CONTEXT_UNPACK); + float4 pos = RENDER_TARGET_COORD_TO_CLIP_COORD(vertexPosition); + + VARYING_PACK(v_windingWeight); + VARYING_PACK(v_pathID); + EMIT_VERTEX(pos); +} +#endif // VERTEX +#endif // DRAW_INTERIOR_TRIANGLES + +#ifdef _EXPORTED_DRAW_IMAGE +#ifdef _EXPORTED_DRAW_IMAGE_RECT +#ifdef _EXPORTED_VERTEX +ATTR_BLOCK_BEGIN(Attrs) +ATTR(0, float4, _EXPORTED_a_imageRectVertex); +ATTR_BLOCK_END +#endif + +VARYING_BLOCK_BEGIN +NO_PERSPECTIVE VARYING(0, float2, v_texCoord); +NO_PERSPECTIVE VARYING(1, half, v_edgeCoverage); +#ifdef _EXPORTED_ENABLE_CLIP_RECT +NO_PERSPECTIVE VARYING(2, float4, v_clipRect); +#endif +VARYING_BLOCK_END + +#ifdef _EXPORTED_VERTEX +VERTEX_TEXTURE_BLOCK_BEGIN +VERTEX_TEXTURE_BLOCK_END + +VERTEX_STORAGE_BUFFER_BLOCK_BEGIN +VERTEX_STORAGE_BUFFER_BLOCK_END + +IMAGE_RECT_VERTEX_MAIN(_EXPORTED_drawVertexMain, Attrs, attrs, _vertexID, _instanceID) +{ + ATTR_UNPACK(_vertexID, attrs, _EXPORTED_a_imageRectVertex, float4); + + VARYING_INIT(v_texCoord, float2); + VARYING_INIT(v_edgeCoverage, half); +#ifdef _EXPORTED_ENABLE_CLIP_RECT + VARYING_INIT(v_clipRect, float4); +#endif + + bool isOuterVertex = _EXPORTED_a_imageRectVertex.z == .0 || _EXPORTED_a_imageRectVertex.w == .0; + v_edgeCoverage = isOuterVertex ? .0 : 1.; + + float2 vertexPosition = _EXPORTED_a_imageRectVertex.xy; + float2x2 M = make_float2x2(imageDrawUniforms.viewMatrix); + float2x2 MIT = transpose(inverse(M)); + if (!isOuterVertex) + { + // Inset the inner vertices to the point where coverage == 1. + // NOTE: if width/height ever change from 1, these equations need to be updated. + float aaRadiusX = AA_RADIUS * manhattan_width(MIT[1]) / dot(M[1], MIT[1]); + if (aaRadiusX >= .5) + { + vertexPosition.x = .5; + v_edgeCoverage *= cast_float_to_half(.5 / aaRadiusX); + } + else + { + vertexPosition.x += aaRadiusX * _EXPORTED_a_imageRectVertex.z; + } + float aaRadiusY = AA_RADIUS * manhattan_width(MIT[0]) / dot(M[0], MIT[0]); + if (aaRadiusY >= .5) + { + vertexPosition.y = .5; + v_edgeCoverage *= cast_float_to_half(.5 / aaRadiusY); + } + else + { + vertexPosition.y += aaRadiusY * _EXPORTED_a_imageRectVertex.w; + } + } + + v_texCoord = vertexPosition; + vertexPosition = MUL(M, vertexPosition) + imageDrawUniforms.translate; + + if (isOuterVertex) + { + // Outset the outer vertices to the point where coverage == 0. 
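+        // (MIT carries the rect-space outward direction _EXPORTED_a_imageRectVertex.zw into
+        // screen space, and the manhattan_width(n) / dot(n, n) rescale below sizes the step so
+        // that AA_RADIUS * n appears to span the antialiasing ramp in the same Manhattan metric
+        // the inset above uses.)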
+ float2 n = MUL(MIT, _EXPORTED_a_imageRectVertex.zw); + n *= manhattan_width(n) / dot(n, n); + vertexPosition += AA_RADIUS * n; + } + +#ifdef _EXPORTED_ENABLE_CLIP_RECT + if (_EXPORTED_ENABLE_CLIP_RECT) + { + v_clipRect = find_clip_rect_coverage_distances( + make_float2x2(imageDrawUniforms.clipRectInverseMatrix), + imageDrawUniforms.clipRectInverseTranslate, + vertexPosition); + } +#endif + + float4 pos = RENDER_TARGET_COORD_TO_CLIP_COORD(vertexPosition); + + VARYING_PACK(v_texCoord); + VARYING_PACK(v_edgeCoverage); +#ifdef _EXPORTED_ENABLE_CLIP_RECT + VARYING_PACK(v_clipRect); +#endif + EMIT_VERTEX(pos); +} +#endif // VERTEX + +#else // DRAW_IMAGE_RECT -> DRAW_IMAGE_MESH +#ifdef _EXPORTED_VERTEX +ATTR_BLOCK_BEGIN(PositionAttr) +ATTR(0, float2, _EXPORTED_a_position); +ATTR_BLOCK_END + +ATTR_BLOCK_BEGIN(UVAttr) +ATTR(1, float2, _EXPORTED_a_texCoord); +ATTR_BLOCK_END +#endif + +VARYING_BLOCK_BEGIN +NO_PERSPECTIVE VARYING(0, float2, v_texCoord); +#ifdef _EXPORTED_ENABLE_CLIP_RECT +NO_PERSPECTIVE VARYING(1, float4, v_clipRect); +#endif +VARYING_BLOCK_END + +#ifdef _EXPORTED_VERTEX +IMAGE_MESH_VERTEX_MAIN(_EXPORTED_drawVertexMain, PositionAttr, position, UVAttr, uv, _vertexID) +{ + ATTR_UNPACK(_vertexID, position, _EXPORTED_a_position, float2); + ATTR_UNPACK(_vertexID, uv, _EXPORTED_a_texCoord, float2); + + VARYING_INIT(v_texCoord, float2); +#ifdef _EXPORTED_ENABLE_CLIP_RECT + VARYING_INIT(v_clipRect, float4); +#endif + + float2x2 M = make_float2x2(imageDrawUniforms.viewMatrix); + float2 vertexPosition = MUL(M, _EXPORTED_a_position) + imageDrawUniforms.translate; + v_texCoord = _EXPORTED_a_texCoord; + +#ifdef _EXPORTED_ENABLE_CLIP_RECT + if (_EXPORTED_ENABLE_CLIP_RECT) + { + v_clipRect = find_clip_rect_coverage_distances( + make_float2x2(imageDrawUniforms.clipRectInverseMatrix), + imageDrawUniforms.clipRectInverseTranslate, + vertexPosition); + } +#endif + + float4 pos = RENDER_TARGET_COORD_TO_CLIP_COORD(vertexPosition); + + VARYING_PACK(v_texCoord); +#ifdef _EXPORTED_ENABLE_CLIP_RECT + VARYING_PACK(v_clipRect); +#endif + EMIT_VERTEX(pos); +} +#endif // VERTEX +#endif // DRAW_IMAGE_MESH +#endif // DRAW_IMAGE + +#ifdef _EXPORTED_DRAW_RENDER_TARGET_UPDATE_BOUNDS +#ifdef _EXPORTED_VERTEX +ATTR_BLOCK_BEGIN(Attrs) +ATTR_BLOCK_END +#endif // VERTEX + +VARYING_BLOCK_BEGIN +VARYING_BLOCK_END + +#ifdef _EXPORTED_VERTEX +VERTEX_TEXTURE_BLOCK_BEGIN +VERTEX_TEXTURE_BLOCK_END + +VERTEX_STORAGE_BUFFER_BLOCK_BEGIN +VERTEX_STORAGE_BUFFER_BLOCK_END + +VERTEX_MAIN(_EXPORTED_drawVertexMain, Attrs, attrs, _vertexID, _instanceID) +{ + int2 coord; + coord.x = (_vertexID & 1) == 0 ? uniforms.renderTargetUpdateBounds.x + : uniforms.renderTargetUpdateBounds.z; + coord.y = (_vertexID & 2) == 0 ? 
uniforms.renderTargetUpdateBounds.y + : uniforms.renderTargetUpdateBounds.w; + float4 pos = RENDER_TARGET_COORD_TO_CLIP_COORD(float2(coord)); + EMIT_VERTEX(pos); +} +#endif // VERTEX +#endif // DRAW_RENDER_TARGET_UPDATE_BOUNDS + +#ifdef _EXPORTED_ENABLE_BINDLESS_TEXTURES +#define NEEDS_IMAGE_TEXTURE +#endif +#ifdef _EXPORTED_DRAW_IMAGE +#define NEEDS_IMAGE_TEXTURE +#endif + +#ifdef _EXPORTED_FRAGMENT +FRAG_TEXTURE_BLOCK_BEGIN +TEXTURE_RGBA8(PER_FLUSH_BINDINGS_SET, GRAD_TEXTURE_IDX, _EXPORTED_gradTexture); +#ifdef NEEDS_IMAGE_TEXTURE +TEXTURE_RGBA8(PER_DRAW_BINDINGS_SET, IMAGE_TEXTURE_IDX, _EXPORTED_imageTexture); +#endif +FRAG_TEXTURE_BLOCK_END + +SAMPLER_LINEAR(GRAD_TEXTURE_IDX, gradSampler) +#ifdef NEEDS_IMAGE_TEXTURE +SAMPLER_MIPMAP(IMAGE_TEXTURE_IDX, imageSampler) +#endif + +PLS_BLOCK_BEGIN +// We only write the framebuffer as a storage texture when there are blend modes. Otherwise, we +// render to it as a normal color attachment. +#ifndef _EXPORTED_FIXED_FUNCTION_COLOR_BLEND +#ifdef _EXPORTED_COLOR_PLANE_IDX_OVERRIDE +// D3D11 doesn't let us bind the framebuffer UAV to slot 0 when there is a color output. +PLS_DECL4F(_EXPORTED_COLOR_PLANE_IDX_OVERRIDE, colorBuffer); +#else +PLS_DECL4F(COLOR_PLANE_IDX, colorBuffer); +#endif +#endif +#ifdef _EXPORTED_ENABLE_CLIPPING +PLS_DECLUI(CLIP_PLANE_IDX, clipBuffer); +#endif +PLS_DECLUI_ATOMIC(COVERAGE_PLANE_IDX, coverageCountBuffer); +PLS_BLOCK_END + +FRAG_STORAGE_BUFFER_BLOCK_BEGIN +STORAGE_BUFFER_U32x2(PAINT_BUFFER_IDX, PaintBuffer, _EXPORTED_paintBuffer); +STORAGE_BUFFER_F32x4(PAINT_AUX_BUFFER_IDX, PaintAuxBuffer, _EXPORTED_paintAuxBuffer); +FRAG_STORAGE_BUFFER_BLOCK_END + +uint to_fixed(float x) { return uint(x * FIXED_COVERAGE_FACTOR + FIXED_COVERAGE_ZERO); } + +half from_fixed(uint x) +{ + return cast_float_to_half(float(x) * FIXED_COVERAGE_INVERSE_FACTOR + + (-FIXED_COVERAGE_ZERO * FIXED_COVERAGE_INVERSE_FACTOR)); +} + +// Return the color of the path at index 'pathID' at location '_fragCoord'. +// Also update the PLS clip value if called for. +half4 resolve_path_color(half coverageCount, + uint2 paintData, + uint pathID FRAGMENT_CONTEXT_DECL PLS_CONTEXT_DECL, + OUT(uint) clipData, + bool needsClipData) +{ + clipData = 0u; + half coverage = abs(coverageCount); +#ifdef _EXPORTED_ENABLE_EVEN_ODD + if (_EXPORTED_ENABLE_EVEN_ODD && (paintData.x & PAINT_FLAG_EVEN_ODD) != 0u) + { + coverage = 1. - abs(fract(coverage * .5) * 2. + -1.); + } +#endif // ENABLE_EVEN_ODD + coverage = min(coverage, make_half(1.)); // This also caps stroke coverage, which can be >1. +#ifdef _EXPORTED_ENABLE_CLIPPING + if (_EXPORTED_ENABLE_CLIPPING) + { + uint clipID = paintData.x >> 16u; + if (clipID != 0u || needsClipData) + { + clipData = PLS_LOADUI(clipBuffer); + } + if (clipID != 0u) + { + half clipCoverage = clipID == (clipData >> 16u) ? 
unpackHalf2x16(clipData).x : .0; + coverage = min(coverage, clipCoverage); + } + } +#endif // ENABLE_CLIPPING + half4 color = make_half4(.0); + uint paintType = paintData.x & 0xfu; + switch (paintType) + { + case SOLID_COLOR_PAINT_TYPE: + color = unpackUnorm4x8(paintData.y); +#ifdef _EXPORTED_ENABLE_CLIPPING + if (_EXPORTED_ENABLE_CLIPPING) + { + PLS_PRESERVE_UI(clipBuffer); + } +#endif + break; + case LINEAR_GRADIENT_PAINT_TYPE: + case RADIAL_GRADIENT_PAINT_TYPE: +#ifdef _EXPORTED_ENABLE_BINDLESS_TEXTURES + case IMAGE_PAINT_TYPE: +#endif // ENABLE_BINDLESS_TEXTURES + { + float2x2 M = make_float2x2(STORAGE_BUFFER_LOAD4(_EXPORTED_paintAuxBuffer, pathID * 4u)); + float4 translate = STORAGE_BUFFER_LOAD4(_EXPORTED_paintAuxBuffer, pathID * 4u + 1u); + float2 paintCoord = MUL(M, _fragCoord) + translate.xy; +#ifdef _EXPORTED_ENABLE_BINDLESS_TEXTURES + if (paintType == IMAGE_PAINT_TYPE) + { + color = TEXTURE_SAMPLE_GRAD(sampler2D(floatBitsToUint(translate.zw)), + imageSampler, + paintCoord, + M[0], + M[1]); + float opacity = uintBitsToFloat(paintData.y); + color.w *= opacity; + } + else +#endif // ENABLE_BINDLESS_TEXTURES + { + float t = paintType == LINEAR_GRADIENT_PAINT_TYPE ? /*linear*/ paintCoord.x + : /*radial*/ length(paintCoord); + t = clamp(t, .0, 1.); + float x = t * translate.z + translate.w; + float y = uintBitsToFloat(paintData.y); + color = TEXTURE_SAMPLE_LOD(_EXPORTED_gradTexture, gradSampler, float2(x, y), .0); + } +#ifdef _EXPORTED_ENABLE_CLIPPING + if (_EXPORTED_ENABLE_CLIPPING) + { + PLS_PRESERVE_UI(clipBuffer); + } +#endif + break; + } +#ifdef _EXPORTED_ENABLE_CLIPPING + case CLIP_UPDATE_PAINT_TYPE: + if (_EXPORTED_ENABLE_CLIPPING) + { + clipData = paintData.y | packHalf2x16(make_half2(coverage, .0)); + PLS_STOREUI(clipBuffer, clipData); + } + break; +#endif // ENABLE_CLIPPING + } +#ifdef _EXPORTED_ENABLE_CLIP_RECT + if (_EXPORTED_ENABLE_CLIP_RECT && (paintData.x & PAINT_FLAG_HAS_CLIP_RECT) != 0u) + { + float2x2 M = make_float2x2(STORAGE_BUFFER_LOAD4(_EXPORTED_paintAuxBuffer, pathID * 4u + 2u)); + float4 translate = STORAGE_BUFFER_LOAD4(_EXPORTED_paintAuxBuffer, pathID * 4u + 3u); + float2 clipCoord = MUL(M, _fragCoord) + translate.xy; + // translate.zw contains -1 / fwidth(clipCoord), which we use to calculate antialiasing. + half2 distXY = cast_float2_to_half2(abs(clipCoord) * translate.zw - translate.zw); + half clipRectCoverage = clamp(min(distXY.x, distXY.y) + .5, .0, 1.); + coverage = min(coverage, clipRectCoverage); + } +#endif // ENABLE_CLIP_RECT + color.w *= coverage; + return color; +} + +half4 blend_src_over(half4 srcColorPremul, half4 dstColorPremul) +{ + return srcColorPremul + dstColorPremul * (1. 
- srcColorPremul.w); +} + +#ifndef _EXPORTED_FIXED_FUNCTION_COLOR_BLEND +half4 blend(half4 srcColorUnmul, half4 dstColorPremul, ushort blendMode) +{ +#ifdef _EXPORTED_ENABLE_ADVANCED_BLEND + if (_EXPORTED_ENABLE_ADVANCED_BLEND && blendMode != BLEND_SRC_OVER) + { + return advanced_blend(srcColorUnmul, unmultiply(dstColorPremul), blendMode); + } + else +#endif // ENABLE_ADVANCED_BLEND + { + return blend_src_over(premultiply(srcColorUnmul), dstColorPremul); + } +} + +half4 do_pls_blend(half4 color, uint2 paintData PLS_CONTEXT_DECL) +{ + half4 dstColorPremul = PLS_LOAD4F(colorBuffer); + ushort blendMode = cast_uint_to_ushort((paintData.x >> 4) & 0xfu); + return blend(color, dstColorPremul, blendMode); +} + +void write_pls_blend(half4 color, uint2 paintData PLS_CONTEXT_DECL) +{ + if (color.w != .0) + { + half4 blendedColor = do_pls_blend(color, paintData PLS_CONTEXT_UNPACK); + PLS_STORE4F(colorBuffer, blendedColor); + } + else + { + PLS_PRESERVE_4F(colorBuffer); + } +} +#endif // !FIXED_FUNCTION_COLOR_BLEND + +#ifdef _EXPORTED_FIXED_FUNCTION_COLOR_BLEND +#define ATOMIC_PLS_MAIN PLS_FRAG_COLOR_MAIN +#define ATOMIC_PLS_MAIN_WITH_IMAGE_UNIFORMS PLS_FRAG_COLOR_MAIN_WITH_IMAGE_UNIFORMS +#define EMIT_ATOMIC_PLS EMIT_PLS_AND_FRAG_COLOR +#else // !FIXED_FUNCTION_COLOR_BLEND +#define ATOMIC_PLS_MAIN PLS_MAIN +#define ATOMIC_PLS_MAIN_WITH_IMAGE_UNIFORMS PLS_MAIN_WITH_IMAGE_UNIFORMS +#define EMIT_ATOMIC_PLS EMIT_PLS +#endif + +#ifdef _EXPORTED_DRAW_PATH +ATOMIC_PLS_MAIN(_EXPORTED_drawFragmentMain) +{ + VARYING_UNPACK(v_edgeDistance, half2); + VARYING_UNPACK(v_pathID, ushort); + + half coverage = min(min(v_edgeDistance.x, abs(v_edgeDistance.y)), make_half(1.)); + + // Since v_pathID increases monotonically with every draw, and since it lives in the most + // significant bits of the coverage data, an atomic max() function will serve 3 purposes: + // + // * The invocation that changes the pathID is the single first fragment invocation to + // hit the new path, and the one that should resolve the previous path in the framebuffer. + // * Properly resets coverage to zero when we do cross over into processing a new path. + // * Accumulates coverage for strokes. + // + uint fixedCoverage = to_fixed(coverage); + uint minCoverageData = (make_uint(v_pathID) << 16) | fixedCoverage; + uint lastCoverageData = PLS_ATOMIC_MAX(coverageCountBuffer, minCoverageData); + ushort lastPathID = cast_uint_to_ushort(lastCoverageData >> 16); + if (lastPathID != v_pathID) + { + // We crossed into a new path! Resolve the previous path now that we know its exact + // coverage. + half coverageCount = from_fixed(lastCoverageData & 0xffffu); + uint2 paintData = STORAGE_BUFFER_LOAD2(_EXPORTED_paintBuffer, lastPathID); + uint clipData; + half4 color = resolve_path_color(coverageCount, + paintData, + lastPathID FRAGMENT_CONTEXT_UNPACK PLS_CONTEXT_UNPACK, + clipData, + /*needsClipData=*/false); +#ifdef _EXPORTED_FIXED_FUNCTION_COLOR_BLEND + _fragColor = premultiply(color); +#else + write_pls_blend(color, paintData PLS_CONTEXT_UNPACK); +#endif // FIXED_FUNCTION_COLOR_BLEND + } + else + { + if (v_edgeDistance.y < .0 /*fill?*/) + { + // We're a fill, and we did not cross into the new path this time. Count coverage. + if (lastCoverageData < minCoverageData) + { + // We already crossed into this path. Oops. Undo the effect of the min(). + fixedCoverage += lastCoverageData - minCoverageData; + } + fixedCoverage -= uint(FIXED_COVERAGE_ZERO); // Only apply the zero bias once. 
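+            // In either branch above, the low 16 bits of the buffer end up at
+            // to_fixed(lastCoverage + coverage): the atomic max already stored one
+            // FIXED_COVERAGE_ZERO bias, and the delta added here contributes none, so the bias is
+            // counted exactly once.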
+ PLS_ATOMIC_ADD(coverageCountBuffer, fixedCoverage); + } + // Discard because some PLS implementations require that we assign values to the color & + // clip attachments, but since we aren't raster ordered, we don't have values to assign. + discard; + } + + EMIT_ATOMIC_PLS +} +#endif // DRAW_PATH + +#ifdef _EXPORTED_DRAW_INTERIOR_TRIANGLES +ATOMIC_PLS_MAIN(_EXPORTED_drawFragmentMain) +{ + VARYING_UNPACK(v_windingWeight, half); + VARYING_UNPACK(v_pathID, ushort); + + half coverage = v_windingWeight; + + uint lastCoverageData = PLS_LOADUI_ATOMIC(coverageCountBuffer); + ushort lastPathID = cast_uint_to_ushort(lastCoverageData >> 16); + half lastCoverageCount = from_fixed(lastCoverageData & 0xffffu); + if (lastPathID != v_pathID) + { + // We crossed into a new path! Resolve the previous path now that we know its exact + // coverage. + uint2 paintData = STORAGE_BUFFER_LOAD2(_EXPORTED_paintBuffer, lastPathID); + uint clipData; + half4 color = resolve_path_color(lastCoverageCount, + paintData, + lastPathID FRAGMENT_CONTEXT_UNPACK PLS_CONTEXT_UNPACK, + clipData, + /*needsClipData=*/false); +#ifdef _EXPORTED_FIXED_FUNCTION_COLOR_BLEND + _fragColor = premultiply(color); +#else + write_pls_blend(color, paintData PLS_CONTEXT_UNPACK); +#endif // FIXED_FUNCTION_COLOR_BLEND + } + else + { + coverage += lastCoverageCount; + } + + PLS_STOREUI_ATOMIC(coverageCountBuffer, (make_uint(v_pathID) << 16) | to_fixed(coverage)); + + if (lastPathID == v_pathID) + { + // Discard because some PLS implementations require that we assign values to the color & + // clip attachments, but since we aren't raster ordered, we don't have values to assign. + discard; + } + + EMIT_ATOMIC_PLS +} +#endif // DRAW_INTERIOR_TRIANGLES + +#ifdef _EXPORTED_DRAW_IMAGE +ATOMIC_PLS_MAIN_WITH_IMAGE_UNIFORMS(_EXPORTED_drawFragmentMain) +{ + VARYING_UNPACK(v_texCoord, float2); +#ifdef _EXPORTED_DRAW_IMAGE_RECT + VARYING_UNPACK(v_edgeCoverage, half); +#endif +#ifdef _EXPORTED_ENABLE_CLIP_RECT + VARYING_UNPACK(v_clipRect, float4); +#endif + + // Start by finding the image color. We have to do this immediately instead of allowing it to + // get resolved later like other draws because the @imageTexture binding is liable to change, + // and furthermore in the case of imageMeshes, we can't calculate UV coordinates based on + // fragment position. + half4 imageColor = TEXTURE_SAMPLE(_EXPORTED_imageTexture, imageSampler, v_texCoord); + half meshCoverage = 1.; +#ifdef _EXPORTED_DRAW_IMAGE_RECT + meshCoverage = min(v_edgeCoverage, meshCoverage); +#endif +#ifdef _EXPORTED_ENABLE_CLIP_RECT + if (_EXPORTED_ENABLE_CLIP_RECT) + { + half clipRectCoverage = min_value(cast_float4_to_half4(v_clipRect)); + meshCoverage = clamp(clipRectCoverage, make_half(.0), meshCoverage); + } +#endif + +#ifdef _EXPORTED_DRAW_IMAGE_MESH + // TODO: If we care: Use the interlock if we can, since individual meshes may shimmer if they + // have overlapping triangles. + PLS_INTERLOCK_BEGIN; +#endif + + // Find the previous path color. (This might also update the clip buffer.) + // TODO: skip this step if no clipping AND srcOver AND imageColor is solid. 
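+    // Note that this is the only resolve_path_color() call that passes needsClipData == true: the
+    // image's own clip test below reads clipData even when the path being resolved has no clipID
+    // of its own, so the clip buffer has to be loaded unconditionally here.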
+ uint lastCoverageData = PLS_LOADUI_ATOMIC(coverageCountBuffer); + half coverageCount = from_fixed(lastCoverageData & 0xffffu); + ushort lastPathID = cast_uint_to_ushort(lastCoverageData >> 16); + uint2 lastPaintData = STORAGE_BUFFER_LOAD2(_EXPORTED_paintBuffer, lastPathID); + uint clipData; + half4 lastColor = resolve_path_color(coverageCount, + lastPaintData, + lastPathID FRAGMENT_CONTEXT_UNPACK PLS_CONTEXT_UNPACK, + clipData, + /*needsClipData=*/true); + + // Clip the image after resolving the previous path, since that can affect the clip buffer. +#ifdef _EXPORTED_ENABLE_CLIPPING // TODO! ENABLE_IMAGE_CLIPPING in addition to ENABLE_CLIPPING? + if (_EXPORTED_ENABLE_CLIPPING && imageDrawUniforms.clipID != 0u) + { + uint clipID = clipData >> 16; + half clipCoverage = clipID == imageDrawUniforms.clipID ? unpackHalf2x16(clipData).x : .0; + meshCoverage = min(meshCoverage, clipCoverage); + } +#endif // ENABLE_CLIPPING + imageColor.w *= meshCoverage * cast_float_to_half(imageDrawUniforms.opacity); + +#ifdef _EXPORTED_FIXED_FUNCTION_COLOR_BLEND + // Leverage the property that premultiplied src-over blending is associative and blend the + // imageColor and lastColor before passing them on to the blending pipeline. + _fragColor = blend_src_over(premultiply(imageColor), premultiply(lastColor)); +#else + if (lastColor.w != .0 || imageColor.w != .0) + { + // Blend the previous path and image both in a single operation. + // TODO: Are advanced blend modes associative? srcOver is, so at least there we can blend + // lastColor and imageColor first, and potentially avoid a framebuffer load if it ends up + // opaque. + half4 dstColorPremul = PLS_LOAD4F(colorBuffer); + ushort lastBlendMode = cast_uint_to_ushort((lastPaintData.x >> 4) & 0xfu); + ushort imageBlendMode = cast_uint_to_ushort(imageDrawUniforms.blendMode); + dstColorPremul = blend(lastColor, dstColorPremul, lastBlendMode); + imageColor = blend(imageColor, dstColorPremul, imageBlendMode); + PLS_STORE4F(colorBuffer, imageColor); + } + else + { + PLS_PRESERVE_4F(colorBuffer); + } +#endif // FIXED_FUNCTION_COLOR_BLEND + + // Write out a coverage value of "zero at pathID=0" so a future resolve attempt doesn't affect + // this pixel. + PLS_STOREUI_ATOMIC(coverageCountBuffer, uint(FIXED_COVERAGE_ZERO)); + +#ifdef _EXPORTED_DRAW_IMAGE_MESH + // TODO: If we care: Use the interlock if we can, since individual meshes may shimmer if they + // have overlapping triangles. 
+ PLS_INTERLOCK_END; +#endif + + EMIT_ATOMIC_PLS +} +#endif // DRAW_IMAGE + +#ifdef _EXPORTED_INITIALIZE_PLS + +ATOMIC_PLS_MAIN(_EXPORTED_drawFragmentMain) +{ +#ifdef _EXPORTED_STORE_COLOR_CLEAR + PLS_STORE4F(colorBuffer, unpackUnorm4x8(uniforms.colorClearValue)); +#endif +#ifdef _EXPORTED_SWIZZLE_COLOR_BGRA_TO_RGBA + half4 color = PLS_LOAD4F(colorBuffer); + PLS_STORE4F(colorBuffer, color.zyxw); +#endif + PLS_STOREUI_ATOMIC(coverageCountBuffer, uniforms.coverageClearValue); +#ifdef _EXPORTED_ENABLE_CLIPPING + if (_EXPORTED_ENABLE_CLIPPING) + { + PLS_STOREUI(clipBuffer, 0u); + } +#endif +#ifdef _EXPORTED_FIXED_FUNCTION_COLOR_BLEND + discard; +#endif + EMIT_ATOMIC_PLS +} + +#endif // INITIALIZE_PLS + +#ifdef _EXPORTED_RESOLVE_PLS + +#ifdef _EXPORTED_COALESCED_PLS_RESOLVE_AND_TRANSFER +PLS_FRAG_COLOR_MAIN(_EXPORTED_drawFragmentMain) +#else +ATOMIC_PLS_MAIN(_EXPORTED_drawFragmentMain) +#endif +{ + uint lastCoverageData = PLS_LOADUI_ATOMIC(coverageCountBuffer); + half coverageCount = from_fixed(lastCoverageData & 0xffffu); + ushort lastPathID = cast_uint_to_ushort(lastCoverageData >> 16); + uint2 paintData = STORAGE_BUFFER_LOAD2(_EXPORTED_paintBuffer, lastPathID); + uint clipData; + half4 color = resolve_path_color(coverageCount, + paintData, + lastPathID FRAGMENT_CONTEXT_UNPACK PLS_CONTEXT_UNPACK, + clipData, + false); +#ifdef _EXPORTED_COALESCED_PLS_RESOLVE_AND_TRANSFER + _fragColor = do_pls_blend(color, paintData PLS_CONTEXT_UNPACK); + EMIT_PLS_AND_FRAG_COLOR +#else +#ifdef _EXPORTED_FIXED_FUNCTION_COLOR_BLEND + _fragColor = premultiply(color); +#else + write_pls_blend(color, paintData PLS_CONTEXT_UNPACK); +#endif // FIXED_FUNCTION_COLOR_BLEND + EMIT_ATOMIC_PLS +#endif // COALESCED_PLS_RESOLVE_AND_TRANSFER +} +#endif // RESOLVE_PLS +#endif // FRAGMENT +)==="; +} // namespace glsl +} // namespace gpu +} // namespace rive \ No newline at end of file diff --git a/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/atomic_draw.minified.ush b/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/atomic_draw.minified.ush new file mode 100644 index 00000000..25ffbcb3 --- /dev/null +++ b/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/atomic_draw.minified.ush @@ -0,0 +1,735 @@ +/* + * Copyright 2023 Rive + */ + +#ifdef DRAW_PATH +#ifdef VERTEX +ATTR_BLOCK_BEGIN(Attrs) +ATTR(0, float4, _EXPORTED_a_patchVertexData); // [localVertexID, outset, fillCoverage, vertexType] +ATTR(1, float4, _EXPORTED_a_mirroredVertexData); +ATTR_BLOCK_END +#endif + +VARYING_BLOCK_BEGIN +NO_PERSPECTIVE VARYING(0, half2, v_edgeDistance); +FLAT VARYING(1, ushort, v_pathID); +VARYING_BLOCK_END + +#ifdef VERTEX +VERTEX_MAIN(_EXPORTED_drawVertexMain, Attrs, attrs, _vertexID, _instanceID) +{ + ATTR_UNPACK(_vertexID, attrs, _EXPORTED_a_patchVertexData, float4); + ATTR_UNPACK(_vertexID, attrs, _EXPORTED_a_mirroredVertexData, float4); + + VARYING_INIT(v_edgeDistance, half2); + VARYING_INIT(v_pathID, ushort); + + float4 pos; + float2 vertexPosition; + if (unpack_tessellated_path_vertex(_EXPORTED_a_patchVertexData, + _EXPORTED_a_mirroredVertexData, + _instanceID, + v_pathID, + vertexPosition, + v_edgeDistance VERTEX_CONTEXT_UNPACK)) + { + pos = RENDER_TARGET_COORD_TO_CLIP_COORD(vertexPosition); + } + else + { + pos = float4(uniforms.vertexDiscardValue, + uniforms.vertexDiscardValue, + uniforms.vertexDiscardValue, + uniforms.vertexDiscardValue); + } + + VARYING_PACK(v_edgeDistance); + VARYING_PACK(v_pathID); + EMIT_VERTEX(pos); +} +#endif // VERTEX +#endif // DRAW_PATH 
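+// The fragment sections further below share one packing scheme for the atomic coverage buffer:
+// the high 16 bits hold the current pathID and the low 16 bits hold fixed-point coverage, i.e.
+// (pathID << 16) | to_fixed(coverage). Assuming FIXED_COVERAGE_INVERSE_FACTOR is the reciprocal
+// of FIXED_COVERAGE_FACTOR (as the names in constants.minified.ush suggest),
+// from_fixed(to_fixed(x)) == x up to fixed-point rounding, since the FIXED_COVERAGE_ZERO bias
+// cancels.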
+ +#ifdef DRAW_INTERIOR_TRIANGLES +#ifdef VERTEX +ATTR_BLOCK_BEGIN(Attrs) +ATTR(0, packed_float3, _EXPORTED_a_triangleVertex); +ATTR_BLOCK_END +#endif + +VARYING_BLOCK_BEGIN +OPTIONALLY_FLAT VARYING(0, half, v_windingWeight); +FLAT VARYING(1, ushort, v_pathID); +VARYING_BLOCK_END + +#ifdef VERTEX +VERTEX_MAIN(_EXPORTED_drawVertexMain, Attrs, attrs, _vertexID, _instanceID) +{ + ATTR_UNPACK(_vertexID, attrs, _EXPORTED_a_triangleVertex, float3); + + VARYING_INIT(v_windingWeight, half); + VARYING_INIT(v_pathID, ushort); + + float2 vertexPosition = unpack_interior_triangle_vertex(_EXPORTED_a_triangleVertex, + v_pathID, + v_windingWeight VERTEX_CONTEXT_UNPACK); + float4 pos = RENDER_TARGET_COORD_TO_CLIP_COORD(vertexPosition); + + VARYING_PACK(v_windingWeight); + VARYING_PACK(v_pathID); + EMIT_VERTEX(pos); +} +#endif // VERTEX +#endif // DRAW_INTERIOR_TRIANGLES + +#ifdef DRAW_IMAGE +#ifdef DRAW_IMAGE_RECT +#ifdef VERTEX +ATTR_BLOCK_BEGIN(Attrs) +ATTR(0, float4, _EXPORTED_a_imageRectVertex); +ATTR_BLOCK_END +#endif + +VARYING_BLOCK_BEGIN +NO_PERSPECTIVE VARYING(0, float2, v_texCoord); +NO_PERSPECTIVE VARYING(1, half, v_edgeCoverage); +#ifdef ENABLE_CLIP_RECT +NO_PERSPECTIVE VARYING(2, float4, v_clipRect); +#endif +VARYING_BLOCK_END + +#ifdef VERTEX +VERTEX_TEXTURE_BLOCK_BEGIN +VERTEX_TEXTURE_BLOCK_END + +VERTEX_STORAGE_BUFFER_BLOCK_BEGIN +VERTEX_STORAGE_BUFFER_BLOCK_END + +IMAGE_RECT_VERTEX_MAIN(_EXPORTED_drawVertexMain, Attrs, attrs, _vertexID, _instanceID) +{ + ATTR_UNPACK(_vertexID, attrs, _EXPORTED_a_imageRectVertex, float4); + + VARYING_INIT(v_texCoord, float2); + VARYING_INIT(v_edgeCoverage, half); +#ifdef ENABLE_CLIP_RECT + VARYING_INIT(v_clipRect, float4); +#endif + + bool isOuterVertex = _EXPORTED_a_imageRectVertex.z == .0 || _EXPORTED_a_imageRectVertex.w == .0; + v_edgeCoverage = isOuterVertex ? .0 : 1.; + + float2 vertexPosition = _EXPORTED_a_imageRectVertex.xy; + float2x2 M = make_float2x2(imageDrawUniforms.viewMatrix); + float2x2 MIT = transpose(inverse(M)); + if (!isOuterVertex) + { + // Inset the inner vertices to the point where coverage == 1. + // NOTE: if width/height ever change from 1, these equations need to be updated. + float aaRadiusX = AA_RADIUS * manhattan_width(MIT[1]) / dot(M[1], MIT[1]); + if (aaRadiusX >= .5) + { + vertexPosition.x = .5; + v_edgeCoverage *= cast_float_to_half(.5 / aaRadiusX); + } + else + { + vertexPosition.x += aaRadiusX * _EXPORTED_a_imageRectVertex.z; + } + float aaRadiusY = AA_RADIUS * manhattan_width(MIT[0]) / dot(M[0], MIT[0]); + if (aaRadiusY >= .5) + { + vertexPosition.y = .5; + v_edgeCoverage *= cast_float_to_half(.5 / aaRadiusY); + } + else + { + vertexPosition.y += aaRadiusY * _EXPORTED_a_imageRectVertex.w; + } + } + + v_texCoord = vertexPosition; + vertexPosition = MUL(M, vertexPosition) + imageDrawUniforms.translate; + + if (isOuterVertex) + { + // Outset the outer vertices to the point where coverage == 0. 
+ float2 n = MUL(MIT, _EXPORTED_a_imageRectVertex.zw); + n *= manhattan_width(n) / dot(n, n); + vertexPosition += AA_RADIUS * n; + } + +#ifdef ENABLE_CLIP_RECT + if (ENABLE_CLIP_RECT) + { + v_clipRect = find_clip_rect_coverage_distances( + make_float2x2(imageDrawUniforms.clipRectInverseMatrix), + imageDrawUniforms.clipRectInverseTranslate, + vertexPosition); + } +#endif + + float4 pos = RENDER_TARGET_COORD_TO_CLIP_COORD(vertexPosition); + + VARYING_PACK(v_texCoord); + VARYING_PACK(v_edgeCoverage); +#ifdef ENABLE_CLIP_RECT + VARYING_PACK(v_clipRect); +#endif + EMIT_VERTEX(pos); +} +#endif // VERTEX + +#else // DRAW_IMAGE_RECT -> DRAW_IMAGE_MESH +#ifdef VERTEX +ATTR_BLOCK_BEGIN(PositionAttr) +ATTR(0, float2, _EXPORTED_a_position); +ATTR_BLOCK_END + +ATTR_BLOCK_BEGIN(UVAttr) +ATTR(1, float2, _EXPORTED_a_texCoord); +ATTR_BLOCK_END +#endif + +VARYING_BLOCK_BEGIN +NO_PERSPECTIVE VARYING(0, float2, v_texCoord); +#ifdef ENABLE_CLIP_RECT +NO_PERSPECTIVE VARYING(1, float4, v_clipRect); +#endif +VARYING_BLOCK_END + +#ifdef VERTEX +IMAGE_MESH_VERTEX_MAIN(_EXPORTED_drawVertexMain, PositionAttr, position, UVAttr, uv, _vertexID) +{ + ATTR_UNPACK(_vertexID, position, _EXPORTED_a_position, float2); + ATTR_UNPACK(_vertexID, uv, _EXPORTED_a_texCoord, float2); + + VARYING_INIT(v_texCoord, float2); +#ifdef ENABLE_CLIP_RECT + VARYING_INIT(v_clipRect, float4); +#endif + + float2x2 M = make_float2x2(imageDrawUniforms.viewMatrix); + float2 vertexPosition = MUL(M, _EXPORTED_a_position) + imageDrawUniforms.translate; + v_texCoord = _EXPORTED_a_texCoord; + +#ifdef ENABLE_CLIP_RECT + if (ENABLE_CLIP_RECT) + { + v_clipRect = find_clip_rect_coverage_distances( + make_float2x2(imageDrawUniforms.clipRectInverseMatrix), + imageDrawUniforms.clipRectInverseTranslate, + vertexPosition); + } +#endif + + float4 pos = RENDER_TARGET_COORD_TO_CLIP_COORD(vertexPosition); + + VARYING_PACK(v_texCoord); +#ifdef ENABLE_CLIP_RECT + VARYING_PACK(v_clipRect); +#endif + EMIT_VERTEX(pos); +} +#endif // VERTEX +#endif // DRAW_IMAGE_MESH +#endif // DRAW_IMAGE + +#ifdef DRAW_RENDER_TARGET_UPDATE_BOUNDS +#ifdef VERTEX +ATTR_BLOCK_BEGIN(Attrs) +ATTR_BLOCK_END +#endif // VERTEX + +VARYING_BLOCK_BEGIN +VARYING_BLOCK_END + +#ifdef VERTEX +VERTEX_TEXTURE_BLOCK_BEGIN +VERTEX_TEXTURE_BLOCK_END + +VERTEX_STORAGE_BUFFER_BLOCK_BEGIN +VERTEX_STORAGE_BUFFER_BLOCK_END + +VERTEX_MAIN(_EXPORTED_drawVertexMain, Attrs, attrs, _vertexID, _instanceID) +{ + int2 coord; + coord.x = (_vertexID & 1) == 0 ? uniforms.renderTargetUpdateBounds.x + : uniforms.renderTargetUpdateBounds.z; + coord.y = (_vertexID & 2) == 0 ? uniforms.renderTargetUpdateBounds.y + : uniforms.renderTargetUpdateBounds.w; + float4 pos = RENDER_TARGET_COORD_TO_CLIP_COORD(float2(coord)); + EMIT_VERTEX(pos); +} +#endif // VERTEX +#endif // DRAW_RENDER_TARGET_UPDATE_BOUNDS + +#ifdef ENABLE_BINDLESS_TEXTURES +#define NEEDS_IMAGE_TEXTURE +#endif +#ifdef DRAW_IMAGE +#define NEEDS_IMAGE_TEXTURE +#endif + +#ifdef FRAGMENT +FRAG_TEXTURE_BLOCK_BEGIN +TEXTURE_RGBA8(PER_FLUSH_BINDINGS_SET, GRAD_TEXTURE_IDX, _EXPORTED_gradTexture); +#ifdef NEEDS_IMAGE_TEXTURE +TEXTURE_RGBA8(PER_DRAW_BINDINGS_SET, IMAGE_TEXTURE_IDX, _EXPORTED_imageTexture); +#endif +FRAG_TEXTURE_BLOCK_END + +SAMPLER_LINEAR(GRAD_TEXTURE_IDX, gradSampler) +#ifdef NEEDS_IMAGE_TEXTURE +SAMPLER_MIPMAP(IMAGE_TEXTURE_IDX, imageSampler) +#endif + +PLS_BLOCK_BEGIN +// We only write the framebuffer as a storage texture when there are blend modes. Otherwise, we +// render to it as a normal color attachment. 
+#ifndef FIXED_FUNCTION_COLOR_BLEND +#ifdef COLOR_PLANE_IDX_OVERRIDE +// D3D11 doesn't let us bind the framebuffer UAV to slot 0 when there is a color output. +PLS_DECL4F(COLOR_PLANE_IDX_OVERRIDE, colorBuffer); +#else +PLS_DECL4F(COLOR_PLANE_IDX, colorBuffer); +#endif +#endif +#ifdef ENABLE_CLIPPING +PLS_DECLUI(CLIP_PLANE_IDX, clipBuffer); +#endif +PLS_DECLUI_ATOMIC(COVERAGE_PLANE_IDX, coverageCountBuffer); +PLS_BLOCK_END + +FRAG_STORAGE_BUFFER_BLOCK_BEGIN +STORAGE_BUFFER_U32x2(PAINT_BUFFER_IDX, PaintBuffer, _EXPORTED_paintBuffer); +STORAGE_BUFFER_F32x4(PAINT_AUX_BUFFER_IDX, PaintAuxBuffer, _EXPORTED_paintAuxBuffer); +FRAG_STORAGE_BUFFER_BLOCK_END + +uint to_fixed(float x) { return uint(x * FIXED_COVERAGE_FACTOR + FIXED_COVERAGE_ZERO); } + +half from_fixed(uint x) +{ + return cast_float_to_half(float(x) * FIXED_COVERAGE_INVERSE_FACTOR + + (-FIXED_COVERAGE_ZERO * FIXED_COVERAGE_INVERSE_FACTOR)); +} + +// Return the color of the path at index 'pathID' at location '_fragCoord'. +// Also update the PLS clip value if called for. +half4 resolve_path_color(half coverageCount, + uint2 paintData, + uint pathID FRAGMENT_CONTEXT_DECL PLS_CONTEXT_DECL, + OUT(uint) clipData, + bool needsClipData) +{ + clipData = 0u; + half coverage = abs(coverageCount); +#ifdef ENABLE_EVEN_ODD + if (ENABLE_EVEN_ODD && (paintData.x & PAINT_FLAG_EVEN_ODD) != 0u) + { + coverage = 1. - abs(fract(coverage * .5) * 2. + -1.); + } +#endif // ENABLE_EVEN_ODD + coverage = min(coverage, make_half(1.)); // This also caps stroke coverage, which can be >1. +#ifdef ENABLE_CLIPPING + if (ENABLE_CLIPPING) + { + uint clipID = paintData.x >> 16u; + if (clipID != 0u || needsClipData) + { + clipData = PLS_LOADUI(clipBuffer); + } + if (clipID != 0u) + { + half clipCoverage = clipID == (clipData >> 16u) ? unpackHalf2x16(clipData).x : .0; + coverage = min(coverage, clipCoverage); + } + } +#endif // ENABLE_CLIPPING + half4 color = make_half4(.0); + uint paintType = paintData.x & 0xfu; + switch (paintType) + { + case SOLID_COLOR_PAINT_TYPE: + color = unpackUnorm4x8(paintData.y); +#ifdef ENABLE_CLIPPING + if (ENABLE_CLIPPING) + { + PLS_PRESERVE_UI(clipBuffer); + } +#endif + break; + case LINEAR_GRADIENT_PAINT_TYPE: + case RADIAL_GRADIENT_PAINT_TYPE: +#ifdef ENABLE_BINDLESS_TEXTURES + case IMAGE_PAINT_TYPE: +#endif // ENABLE_BINDLESS_TEXTURES + { + float2x2 M = make_float2x2(STORAGE_BUFFER_LOAD4(_EXPORTED_paintAuxBuffer, pathID * 4u)); + float4 translate = STORAGE_BUFFER_LOAD4(_EXPORTED_paintAuxBuffer, pathID * 4u + 1u); + float2 paintCoord = MUL(M, _fragCoord) + translate.xy; +#ifdef ENABLE_BINDLESS_TEXTURES + if (paintType == IMAGE_PAINT_TYPE) + { + color = TEXTURE_SAMPLE_GRAD(sampler2D(floatBitsToUint(translate.zw)), + imageSampler, + paintCoord, + M[0], + M[1]); + float opacity = uintBitsToFloat(paintData.y); + color.w *= opacity; + } + else +#endif // ENABLE_BINDLESS_TEXTURES + { + float t = paintType == LINEAR_GRADIENT_PAINT_TYPE ? 
/*linear*/ paintCoord.x + : /*radial*/ length(paintCoord); + t = clamp(t, .0, 1.); + float x = t * translate.z + translate.w; + float y = uintBitsToFloat(paintData.y); + color = TEXTURE_SAMPLE_LOD(_EXPORTED_gradTexture, gradSampler, float2(x, y), .0); + } +#ifdef ENABLE_CLIPPING + if (ENABLE_CLIPPING) + { + PLS_PRESERVE_UI(clipBuffer); + } +#endif + break; + } +#ifdef ENABLE_CLIPPING + case CLIP_UPDATE_PAINT_TYPE: + if (ENABLE_CLIPPING) + { + clipData = paintData.y | packHalf2x16(make_half2(coverage, .0)); + PLS_STOREUI(clipBuffer, clipData); + } + break; +#endif // ENABLE_CLIPPING + } +#ifdef ENABLE_CLIP_RECT + if (ENABLE_CLIP_RECT && (paintData.x & PAINT_FLAG_HAS_CLIP_RECT) != 0u) + { + float2x2 M = make_float2x2(STORAGE_BUFFER_LOAD4(_EXPORTED_paintAuxBuffer, pathID * 4u + 2u)); + float4 translate = STORAGE_BUFFER_LOAD4(_EXPORTED_paintAuxBuffer, pathID * 4u + 3u); + float2 clipCoord = MUL(M, _fragCoord) + translate.xy; + // translate.zw contains -1 / fwidth(clipCoord), which we use to calculate antialiasing. + half2 distXY = cast_float2_to_half2(abs(clipCoord) * translate.zw - translate.zw); + half clipRectCoverage = clamp(min(distXY.x, distXY.y) + .5, .0, 1.); + coverage = min(coverage, clipRectCoverage); + } +#endif // ENABLE_CLIP_RECT + color.w *= coverage; + return color; +} + +half4 blend_src_over(half4 srcColorPremul, half4 dstColorPremul) +{ + return srcColorPremul + dstColorPremul * (1. - srcColorPremul.w); +} + +#ifndef FIXED_FUNCTION_COLOR_BLEND +half4 blend(half4 srcColorUnmul, half4 dstColorPremul, ushort blendMode) +{ +#ifdef ENABLE_ADVANCED_BLEND + if (ENABLE_ADVANCED_BLEND && blendMode != BLEND_SRC_OVER) + { + return advanced_blend(srcColorUnmul, unmultiply(dstColorPremul), blendMode); + } + else +#endif // ENABLE_ADVANCED_BLEND + { + return blend_src_over(premultiply(srcColorUnmul), dstColorPremul); + } +} + +half4 do_pls_blend(half4 color, uint2 paintData PLS_CONTEXT_DECL) +{ + half4 dstColorPremul = PLS_LOAD4F(colorBuffer); + ushort blendMode = cast_uint_to_ushort((paintData.x >> 4) & 0xfu); + return blend(color, dstColorPremul, blendMode); +} + +void write_pls_blend(half4 color, uint2 paintData PLS_CONTEXT_DECL) +{ + if (color.w != .0) + { + half4 blendedColor = do_pls_blend(color, paintData PLS_CONTEXT_UNPACK); + PLS_STORE4F(colorBuffer, blendedColor); + } + else + { + PLS_PRESERVE_4F(colorBuffer); + } +} +#endif // !FIXED_FUNCTION_COLOR_BLEND + +#ifdef FIXED_FUNCTION_COLOR_BLEND +#define ATOMIC_PLS_MAIN PLS_FRAG_COLOR_MAIN +#define ATOMIC_PLS_MAIN_WITH_IMAGE_UNIFORMS PLS_FRAG_COLOR_MAIN_WITH_IMAGE_UNIFORMS +#define EMIT_ATOMIC_PLS EMIT_PLS_AND_FRAG_COLOR +#else // !FIXED_FUNCTION_COLOR_BLEND +#define ATOMIC_PLS_MAIN PLS_MAIN +#define ATOMIC_PLS_MAIN_WITH_IMAGE_UNIFORMS PLS_MAIN_WITH_IMAGE_UNIFORMS +#define EMIT_ATOMIC_PLS EMIT_PLS +#endif + +#ifdef DRAW_PATH +ATOMIC_PLS_MAIN(_EXPORTED_drawFragmentMain) +{ + VARYING_UNPACK(v_edgeDistance, half2); + VARYING_UNPACK(v_pathID, ushort); + + half coverage = min(min(v_edgeDistance.x, abs(v_edgeDistance.y)), make_half(1.)); + + // Since v_pathID increases monotonically with every draw, and since it lives in the most + // significant bits of the coverage data, an atomic max() function will serve 3 purposes: + // + // * The invocation that changes the pathID is the single first fragment invocation to + // hit the new path, and the one that should resolve the previous path in the framebuffer. + // * Properly resets coverage to zero when we do cross over into processing a new path. + // * Accumulates coverage for strokes. 
+    //
+    uint fixedCoverage = to_fixed(coverage);
+    uint minCoverageData = (make_uint(v_pathID) << 16) | fixedCoverage;
+    uint lastCoverageData = PLS_ATOMIC_MAX(coverageCountBuffer, minCoverageData);
+    ushort lastPathID = cast_uint_to_ushort(lastCoverageData >> 16);
+    if (lastPathID != v_pathID)
+    {
+        // We crossed into a new path! Resolve the previous path now that we know its exact
+        // coverage.
+        half coverageCount = from_fixed(lastCoverageData & 0xffffu);
+        uint2 paintData = STORAGE_BUFFER_LOAD2(_EXPORTED_paintBuffer, lastPathID);
+        uint clipData;
+        half4 color = resolve_path_color(coverageCount,
+                                         paintData,
+                                         lastPathID FRAGMENT_CONTEXT_UNPACK PLS_CONTEXT_UNPACK,
+                                         clipData,
+                                         /*needsClipData=*/false);
+#ifdef FIXED_FUNCTION_COLOR_BLEND
+        _fragColor = premultiply(color);
+#else
+        write_pls_blend(color, paintData PLS_CONTEXT_UNPACK);
+#endif // FIXED_FUNCTION_COLOR_BLEND
+    }
+    else
+    {
+        if (v_edgeDistance.y < .0 /*fill?*/)
+        {
+            // We're a fill, and we did not cross into the new path this time. Count coverage.
+            if (lastCoverageData < minCoverageData)
+            {
+                // We already crossed into this path. Oops. Undo the effect of the atomic max().
+                fixedCoverage += lastCoverageData - minCoverageData;
+            }
+            fixedCoverage -= uint(FIXED_COVERAGE_ZERO); // Only apply the zero bias once.
+            PLS_ATOMIC_ADD(coverageCountBuffer, fixedCoverage);
+        }
+        // Discard because some PLS implementations require that we assign values to the color &
+        // clip attachments, but since we aren't raster ordered, we don't have values to assign.
+        discard;
+    }
+
+    EMIT_ATOMIC_PLS
+}
+#endif // DRAW_PATH
+
+#ifdef DRAW_INTERIOR_TRIANGLES
+ATOMIC_PLS_MAIN(_EXPORTED_drawFragmentMain)
+{
+    VARYING_UNPACK(v_windingWeight, half);
+    VARYING_UNPACK(v_pathID, ushort);
+
+    half coverage = v_windingWeight;
+
+    uint lastCoverageData = PLS_LOADUI_ATOMIC(coverageCountBuffer);
+    ushort lastPathID = cast_uint_to_ushort(lastCoverageData >> 16);
+    half lastCoverageCount = from_fixed(lastCoverageData & 0xffffu);
+    if (lastPathID != v_pathID)
+    {
+        // We crossed into a new path! Resolve the previous path now that we know its exact
+        // coverage.
+        uint2 paintData = STORAGE_BUFFER_LOAD2(_EXPORTED_paintBuffer, lastPathID);
+        uint clipData;
+        half4 color = resolve_path_color(lastCoverageCount,
+                                         paintData,
+                                         lastPathID FRAGMENT_CONTEXT_UNPACK PLS_CONTEXT_UNPACK,
+                                         clipData,
+                                         /*needsClipData=*/false);
+#ifdef FIXED_FUNCTION_COLOR_BLEND
+        _fragColor = premultiply(color);
+#else
+        write_pls_blend(color, paintData PLS_CONTEXT_UNPACK);
+#endif // FIXED_FUNCTION_COLOR_BLEND
+    }
+    else
+    {
+        coverage += lastCoverageCount;
+    }
+
+    PLS_STOREUI_ATOMIC(coverageCountBuffer, (make_uint(v_pathID) << 16) | to_fixed(coverage));
+
+    if (lastPathID == v_pathID)
+    {
+        // Discard because some PLS implementations require that we assign values to the color &
+        // clip attachments, but since we aren't raster ordered, we don't have values to assign.
+        discard;
+    }
+
+    EMIT_ATOMIC_PLS
+}
+#endif // DRAW_INTERIOR_TRIANGLES
+
+#ifdef DRAW_IMAGE
+ATOMIC_PLS_MAIN_WITH_IMAGE_UNIFORMS(_EXPORTED_drawFragmentMain)
+{
+    VARYING_UNPACK(v_texCoord, float2);
+#ifdef DRAW_IMAGE_RECT
+    VARYING_UNPACK(v_edgeCoverage, half);
+#endif
+#ifdef ENABLE_CLIP_RECT
+    VARYING_UNPACK(v_clipRect, float4);
+#endif
+
+    // Start by finding the image color. 
We have to do this immediately instead of allowing it to + // get resolved later like other draws because the @imageTexture binding is liable to change, + // and furthermore in the case of imageMeshes, we can't calculate UV coordinates based on + // fragment position. + half4 imageColor = TEXTURE_SAMPLE(_EXPORTED_imageTexture, imageSampler, v_texCoord); + half meshCoverage = 1.; +#ifdef DRAW_IMAGE_RECT + meshCoverage = min(v_edgeCoverage, meshCoverage); +#endif +#ifdef ENABLE_CLIP_RECT + if (ENABLE_CLIP_RECT) + { + half clipRectCoverage = min_value(cast_float4_to_half4(v_clipRect)); + meshCoverage = clamp(clipRectCoverage, make_half(.0), meshCoverage); + } +#endif + +#ifdef DRAW_IMAGE_MESH + // TODO: If we care: Use the interlock if we can, since individual meshes may shimmer if they + // have overlapping triangles. + PLS_INTERLOCK_BEGIN; +#endif + + // Find the previous path color. (This might also update the clip buffer.) + // TODO: skip this step if no clipping AND srcOver AND imageColor is solid. + uint lastCoverageData = PLS_LOADUI_ATOMIC(coverageCountBuffer); + half coverageCount = from_fixed(lastCoverageData & 0xffffu); + ushort lastPathID = cast_uint_to_ushort(lastCoverageData >> 16); + uint2 lastPaintData = STORAGE_BUFFER_LOAD2(_EXPORTED_paintBuffer, lastPathID); + uint clipData; + half4 lastColor = resolve_path_color(coverageCount, + lastPaintData, + lastPathID FRAGMENT_CONTEXT_UNPACK PLS_CONTEXT_UNPACK, + clipData, + /*needsClipData=*/true); + + // Clip the image after resolving the previous path, since that can affect the clip buffer. +#ifdef ENABLE_CLIPPING // TODO! ENABLE_IMAGE_CLIPPING in addition to ENABLE_CLIPPING? + if (ENABLE_CLIPPING && imageDrawUniforms.clipID != 0u) + { + uint clipID = clipData >> 16; + half clipCoverage = clipID == imageDrawUniforms.clipID ? unpackHalf2x16(clipData).x : .0; + meshCoverage = min(meshCoverage, clipCoverage); + } +#endif // ENABLE_CLIPPING + imageColor.w *= meshCoverage * cast_float_to_half(imageDrawUniforms.opacity); + +#ifdef FIXED_FUNCTION_COLOR_BLEND + // Leverage the property that premultiplied src-over blending is associative and blend the + // imageColor and lastColor before passing them on to the blending pipeline. + _fragColor = blend_src_over(premultiply(imageColor), premultiply(lastColor)); +#else + if (lastColor.w != .0 || imageColor.w != .0) + { + // Blend the previous path and image both in a single operation. + // TODO: Are advanced blend modes associative? srcOver is, so at least there we can blend + // lastColor and imageColor first, and potentially avoid a framebuffer load if it ends up + // opaque. + half4 dstColorPremul = PLS_LOAD4F(colorBuffer); + ushort lastBlendMode = cast_uint_to_ushort((lastPaintData.x >> 4) & 0xfu); + ushort imageBlendMode = cast_uint_to_ushort(imageDrawUniforms.blendMode); + dstColorPremul = blend(lastColor, dstColorPremul, lastBlendMode); + imageColor = blend(imageColor, dstColorPremul, imageBlendMode); + PLS_STORE4F(colorBuffer, imageColor); + } + else + { + PLS_PRESERVE_4F(colorBuffer); + } +#endif // FIXED_FUNCTION_COLOR_BLEND + + // Write out a coverage value of "zero at pathID=0" so a future resolve attempt doesn't affect + // this pixel. + PLS_STOREUI_ATOMIC(coverageCountBuffer, uint(FIXED_COVERAGE_ZERO)); + +#ifdef DRAW_IMAGE_MESH + // TODO: If we care: Use the interlock if we can, since individual meshes may shimmer if they + // have overlapping triangles. 
+ PLS_INTERLOCK_END; +#endif + + EMIT_ATOMIC_PLS +} +#endif // DRAW_IMAGE + +#ifdef INITIALIZE_PLS + +ATOMIC_PLS_MAIN(_EXPORTED_drawFragmentMain) +{ +#ifdef STORE_COLOR_CLEAR + PLS_STORE4F(colorBuffer, unpackUnorm4x8(uniforms.colorClearValue)); +#endif +#ifdef SWIZZLE_COLOR_BGRA_TO_RGBA + half4 color = PLS_LOAD4F(colorBuffer); + PLS_STORE4F(colorBuffer, color.zyxw); +#endif + PLS_STOREUI_ATOMIC(coverageCountBuffer, uniforms.coverageClearValue); +#ifdef ENABLE_CLIPPING + if (ENABLE_CLIPPING) + { + PLS_STOREUI(clipBuffer, 0u); + } +#endif +#ifdef FIXED_FUNCTION_COLOR_BLEND + discard; +#endif + EMIT_ATOMIC_PLS +} + +#endif // INITIALIZE_PLS + +#ifdef RESOLVE_PLS + +#ifdef COALESCED_PLS_RESOLVE_AND_TRANSFER +PLS_FRAG_COLOR_MAIN(_EXPORTED_drawFragmentMain) +#else +ATOMIC_PLS_MAIN(_EXPORTED_drawFragmentMain) +#endif +{ + uint lastCoverageData = PLS_LOADUI_ATOMIC(coverageCountBuffer); + half coverageCount = from_fixed(lastCoverageData & 0xffffu); + ushort lastPathID = cast_uint_to_ushort(lastCoverageData >> 16); + uint2 paintData = STORAGE_BUFFER_LOAD2(_EXPORTED_paintBuffer, lastPathID); + uint clipData; + half4 color = resolve_path_color(coverageCount, + paintData, + lastPathID FRAGMENT_CONTEXT_UNPACK PLS_CONTEXT_UNPACK, + clipData, + false); +#ifdef COALESCED_PLS_RESOLVE_AND_TRANSFER + _fragColor = do_pls_blend(color, paintData PLS_CONTEXT_UNPACK); + EMIT_PLS_AND_FRAG_COLOR +#else +#ifdef FIXED_FUNCTION_COLOR_BLEND + _fragColor = premultiply(color); +#else + write_pls_blend(color, paintData PLS_CONTEXT_UNPACK); +#endif // FIXED_FUNCTION_COLOR_BLEND + EMIT_ATOMIC_PLS +#endif // COALESCED_PLS_RESOLVE_AND_TRANSFER +} +#endif // RESOLVE_PLS +#endif // FRAGMENT diff --git a/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/blit_texture_as_draw.exports.h b/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/blit_texture_as_draw.exports.h new file mode 100644 index 00000000..2d88d890 --- /dev/null +++ b/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/blit_texture_as_draw.exports.h @@ -0,0 +1,178 @@ +#pragma once + +#define GLSL_CLEAR_CLIP "_EXPORTED_CLEAR_CLIP" +#define GLSL_CLEAR_CLIP_raw _EXPORTED_CLEAR_CLIP +#define GLSL_CLEAR_COLOR "_EXPORTED_CLEAR_COLOR" +#define GLSL_CLEAR_COLOR_raw _EXPORTED_CLEAR_COLOR +#define GLSL_CLEAR_COVERAGE "_EXPORTED_CLEAR_COVERAGE" +#define GLSL_CLEAR_COVERAGE_raw _EXPORTED_CLEAR_COVERAGE +#define GLSL_COALESCED_PLS_RESOLVE_AND_TRANSFER "_EXPORTED_COALESCED_PLS_RESOLVE_AND_TRANSFER" +#define GLSL_COALESCED_PLS_RESOLVE_AND_TRANSFER_raw _EXPORTED_COALESCED_PLS_RESOLVE_AND_TRANSFER +#define GLSL_COLOR_PLANE_IDX_OVERRIDE "_EXPORTED_COLOR_PLANE_IDX_OVERRIDE" +#define GLSL_COLOR_PLANE_IDX_OVERRIDE_raw _EXPORTED_COLOR_PLANE_IDX_OVERRIDE +#define GLSL_DISABLE_SHADER_STORAGE_BUFFERS "_EXPORTED_DISABLE_SHADER_STORAGE_BUFFERS" +#define GLSL_DISABLE_SHADER_STORAGE_BUFFERS_raw _EXPORTED_DISABLE_SHADER_STORAGE_BUFFERS +#define GLSL_DRAW_IMAGE "_EXPORTED_DRAW_IMAGE" +#define GLSL_DRAW_IMAGE_raw _EXPORTED_DRAW_IMAGE +#define GLSL_DRAW_IMAGE_MESH "_EXPORTED_DRAW_IMAGE_MESH" +#define GLSL_DRAW_IMAGE_MESH_raw _EXPORTED_DRAW_IMAGE_MESH +#define GLSL_DRAW_IMAGE_RECT "_EXPORTED_DRAW_IMAGE_RECT" +#define GLSL_DRAW_IMAGE_RECT_raw _EXPORTED_DRAW_IMAGE_RECT +#define GLSL_DRAW_INTERIOR_TRIANGLES "_EXPORTED_DRAW_INTERIOR_TRIANGLES" +#define GLSL_DRAW_INTERIOR_TRIANGLES_raw _EXPORTED_DRAW_INTERIOR_TRIANGLES +#define GLSL_DRAW_PATH "_EXPORTED_DRAW_PATH" +#define GLSL_DRAW_PATH_raw _EXPORTED_DRAW_PATH +#define 
GLSL_DRAW_RENDER_TARGET_UPDATE_BOUNDS "_EXPORTED_DRAW_RENDER_TARGET_UPDATE_BOUNDS" +#define GLSL_DRAW_RENDER_TARGET_UPDATE_BOUNDS_raw _EXPORTED_DRAW_RENDER_TARGET_UPDATE_BOUNDS +#define GLSL_ENABLE_ADVANCED_BLEND "_EXPORTED_ENABLE_ADVANCED_BLEND" +#define GLSL_ENABLE_ADVANCED_BLEND_raw _EXPORTED_ENABLE_ADVANCED_BLEND +#define GLSL_ENABLE_BINDLESS_TEXTURES "_EXPORTED_ENABLE_BINDLESS_TEXTURES" +#define GLSL_ENABLE_BINDLESS_TEXTURES_raw _EXPORTED_ENABLE_BINDLESS_TEXTURES +#define GLSL_ENABLE_CLIPPING "_EXPORTED_ENABLE_CLIPPING" +#define GLSL_ENABLE_CLIPPING_raw _EXPORTED_ENABLE_CLIPPING +#define GLSL_ENABLE_CLIP_RECT "_EXPORTED_ENABLE_CLIP_RECT" +#define GLSL_ENABLE_CLIP_RECT_raw _EXPORTED_ENABLE_CLIP_RECT +#define GLSL_ENABLE_EVEN_ODD "_EXPORTED_ENABLE_EVEN_ODD" +#define GLSL_ENABLE_EVEN_ODD_raw _EXPORTED_ENABLE_EVEN_ODD +#define GLSL_ENABLE_HSL_BLEND_MODES "_EXPORTED_ENABLE_HSL_BLEND_MODES" +#define GLSL_ENABLE_HSL_BLEND_MODES_raw _EXPORTED_ENABLE_HSL_BLEND_MODES +#define GLSL_ENABLE_INSTANCE_INDEX "_EXPORTED_ENABLE_INSTANCE_INDEX" +#define GLSL_ENABLE_INSTANCE_INDEX_raw _EXPORTED_ENABLE_INSTANCE_INDEX +#define GLSL_ENABLE_KHR_BLEND "_EXPORTED_ENABLE_KHR_BLEND" +#define GLSL_ENABLE_KHR_BLEND_raw _EXPORTED_ENABLE_KHR_BLEND +#define GLSL_ENABLE_MIN_16_PRECISION "_EXPORTED_ENABLE_MIN_16_PRECISION" +#define GLSL_ENABLE_MIN_16_PRECISION_raw _EXPORTED_ENABLE_MIN_16_PRECISION +#define GLSL_ENABLE_NESTED_CLIPPING "_EXPORTED_ENABLE_NESTED_CLIPPING" +#define GLSL_ENABLE_NESTED_CLIPPING_raw _EXPORTED_ENABLE_NESTED_CLIPPING +#define GLSL_ENABLE_RASTERIZER_ORDERED_VIEWS "_EXPORTED_ENABLE_RASTERIZER_ORDERED_VIEWS" +#define GLSL_ENABLE_RASTERIZER_ORDERED_VIEWS_raw _EXPORTED_ENABLE_RASTERIZER_ORDERED_VIEWS +#define GLSL_ENABLE_SPIRV_CROSS_BASE_INSTANCE "_EXPORTED_ENABLE_SPIRV_CROSS_BASE_INSTANCE" +#define GLSL_ENABLE_SPIRV_CROSS_BASE_INSTANCE_raw _EXPORTED_ENABLE_SPIRV_CROSS_BASE_INSTANCE +#define GLSL_ENABLE_TYPED_UAV_LOAD_STORE "_EXPORTED_ENABLE_TYPED_UAV_LOAD_STORE" +#define GLSL_ENABLE_TYPED_UAV_LOAD_STORE_raw _EXPORTED_ENABLE_TYPED_UAV_LOAD_STORE +#define GLSL_FIXED_FUNCTION_COLOR_BLEND "_EXPORTED_FIXED_FUNCTION_COLOR_BLEND" +#define GLSL_FIXED_FUNCTION_COLOR_BLEND_raw _EXPORTED_FIXED_FUNCTION_COLOR_BLEND +#define GLSL_FRAGMENT "_EXPORTED_FRAGMENT" +#define GLSL_FRAGMENT_raw _EXPORTED_FRAGMENT +#define GLSL_FlushUniforms "_EXPORTED_FlushUniforms" +#define GLSL_FlushUniforms_raw _EXPORTED_FlushUniforms +#define GLSL_GLSL_VERSION "_EXPORTED_GLSL_VERSION" +#define GLSL_GLSL_VERSION_raw _EXPORTED_GLSL_VERSION +#define GLSL_INITIALIZE_PLS "_EXPORTED_INITIALIZE_PLS" +#define GLSL_INITIALIZE_PLS_raw _EXPORTED_INITIALIZE_PLS +#define GLSL_ImageDrawUniforms "_EXPORTED_ImageDrawUniforms" +#define GLSL_ImageDrawUniforms_raw _EXPORTED_ImageDrawUniforms +#define GLSL_LOAD_COLOR "_EXPORTED_LOAD_COLOR" +#define GLSL_LOAD_COLOR_raw _EXPORTED_LOAD_COLOR +#define GLSL_OPTIONALLY_FLAT "_EXPORTED_OPTIONALLY_FLAT" +#define GLSL_OPTIONALLY_FLAT_raw _EXPORTED_OPTIONALLY_FLAT +#define GLSL_PLS_IMPL_ANGLE "_EXPORTED_PLS_IMPL_ANGLE" +#define GLSL_PLS_IMPL_ANGLE_raw _EXPORTED_PLS_IMPL_ANGLE +#define GLSL_PLS_IMPL_DEVICE_BUFFER "_EXPORTED_PLS_IMPL_DEVICE_BUFFER" +#define GLSL_PLS_IMPL_DEVICE_BUFFER_raw _EXPORTED_PLS_IMPL_DEVICE_BUFFER +#define GLSL_PLS_IMPL_DEVICE_BUFFER_RASTER_ORDERED "_EXPORTED_PLS_IMPL_DEVICE_BUFFER_RASTER_ORDERED" +#define GLSL_PLS_IMPL_DEVICE_BUFFER_RASTER_ORDERED_raw _EXPORTED_PLS_IMPL_DEVICE_BUFFER_RASTER_ORDERED +#define GLSL_PLS_IMPL_EXT_NATIVE "_EXPORTED_PLS_IMPL_EXT_NATIVE" +#define 
GLSL_PLS_IMPL_EXT_NATIVE_raw _EXPORTED_PLS_IMPL_EXT_NATIVE +#define GLSL_PLS_IMPL_FRAMEBUFFER_FETCH "_EXPORTED_PLS_IMPL_FRAMEBUFFER_FETCH" +#define GLSL_PLS_IMPL_FRAMEBUFFER_FETCH_raw _EXPORTED_PLS_IMPL_FRAMEBUFFER_FETCH +#define GLSL_PLS_IMPL_NONE "_EXPORTED_PLS_IMPL_NONE" +#define GLSL_PLS_IMPL_NONE_raw _EXPORTED_PLS_IMPL_NONE +#define GLSL_PLS_IMPL_STORAGE_TEXTURE "_EXPORTED_PLS_IMPL_STORAGE_TEXTURE" +#define GLSL_PLS_IMPL_STORAGE_TEXTURE_raw _EXPORTED_PLS_IMPL_STORAGE_TEXTURE +#define GLSL_PLS_IMPL_SUBPASS_LOAD "_EXPORTED_PLS_IMPL_SUBPASS_LOAD" +#define GLSL_PLS_IMPL_SUBPASS_LOAD_raw _EXPORTED_PLS_IMPL_SUBPASS_LOAD +#define GLSL_RESOLVE_PLS "_EXPORTED_RESOLVE_PLS" +#define GLSL_RESOLVE_PLS_raw _EXPORTED_RESOLVE_PLS +#define GLSL_STORE_COLOR "_EXPORTED_STORE_COLOR" +#define GLSL_STORE_COLOR_raw _EXPORTED_STORE_COLOR +#define GLSL_STORE_COLOR_CLEAR "_EXPORTED_STORE_COLOR_CLEAR" +#define GLSL_STORE_COLOR_CLEAR_raw _EXPORTED_STORE_COLOR_CLEAR +#define GLSL_SWIZZLE_COLOR_BGRA_TO_RGBA "_EXPORTED_SWIZZLE_COLOR_BGRA_TO_RGBA" +#define GLSL_SWIZZLE_COLOR_BGRA_TO_RGBA_raw _EXPORTED_SWIZZLE_COLOR_BGRA_TO_RGBA +#define GLSL_TARGET_VULKAN "_EXPORTED_TARGET_VULKAN" +#define GLSL_TARGET_VULKAN_raw _EXPORTED_TARGET_VULKAN +#define GLSL_USE_GENERATED_UNIFORMS "_EXPORTED_USE_GENERATED_UNIFORMS" +#define GLSL_USE_GENERATED_UNIFORMS_raw _EXPORTED_USE_GENERATED_UNIFORMS +#define GLSL_USING_DEPTH_STENCIL "_EXPORTED_USING_DEPTH_STENCIL" +#define GLSL_USING_DEPTH_STENCIL_raw _EXPORTED_USING_DEPTH_STENCIL +#define GLSL_USING_PLS_STORAGE_TEXTURES "_EXPORTED_USING_PLS_STORAGE_TEXTURES" +#define GLSL_USING_PLS_STORAGE_TEXTURES_raw _EXPORTED_USING_PLS_STORAGE_TEXTURES +#define GLSL_VERTEX "_EXPORTED_VERTEX" +#define GLSL_VERTEX_raw _EXPORTED_VERTEX +#define GLSL_a_args "_EXPORTED_a_args" +#define GLSL_a_args_raw _EXPORTED_a_args +#define GLSL_a_args_a "_EXPORTED_a_args_a" +#define GLSL_a_args_a_raw _EXPORTED_a_args_a +#define GLSL_a_args_b "_EXPORTED_a_args_b" +#define GLSL_a_args_b_raw _EXPORTED_a_args_b +#define GLSL_a_args_c "_EXPORTED_a_args_c" +#define GLSL_a_args_c_raw _EXPORTED_a_args_c +#define GLSL_a_args_d "_EXPORTED_a_args_d" +#define GLSL_a_args_d_raw _EXPORTED_a_args_d +#define GLSL_a_imageRectVertex "_EXPORTED_a_imageRectVertex" +#define GLSL_a_imageRectVertex_raw _EXPORTED_a_imageRectVertex +#define GLSL_a_joinTan_and_ys "_EXPORTED_a_joinTan_and_ys" +#define GLSL_a_joinTan_and_ys_raw _EXPORTED_a_joinTan_and_ys +#define GLSL_a_mirroredVertexData "_EXPORTED_a_mirroredVertexData" +#define GLSL_a_mirroredVertexData_raw _EXPORTED_a_mirroredVertexData +#define GLSL_a_p0p1_ "_EXPORTED_a_p0p1_" +#define GLSL_a_p0p1__raw _EXPORTED_a_p0p1_ +#define GLSL_a_p2p3_ "_EXPORTED_a_p2p3_" +#define GLSL_a_p2p3__raw _EXPORTED_a_p2p3_ +#define GLSL_a_patchVertexData "_EXPORTED_a_patchVertexData" +#define GLSL_a_patchVertexData_raw _EXPORTED_a_patchVertexData +#define GLSL_a_position "_EXPORTED_a_position" +#define GLSL_a_position_raw _EXPORTED_a_position +#define GLSL_a_span "_EXPORTED_a_span" +#define GLSL_a_span_raw _EXPORTED_a_span +#define GLSL_a_span_a "_EXPORTED_a_span_a" +#define GLSL_a_span_a_raw _EXPORTED_a_span_a +#define GLSL_a_span_b "_EXPORTED_a_span_b" +#define GLSL_a_span_b_raw _EXPORTED_a_span_b +#define GLSL_a_span_c "_EXPORTED_a_span_c" +#define GLSL_a_span_c_raw _EXPORTED_a_span_c +#define GLSL_a_span_d "_EXPORTED_a_span_d" +#define GLSL_a_span_d_raw _EXPORTED_a_span_d +#define GLSL_a_texCoord "_EXPORTED_a_texCoord" +#define GLSL_a_texCoord_raw _EXPORTED_a_texCoord +#define GLSL_a_triangleVertex 
"_EXPORTED_a_triangleVertex" +#define GLSL_a_triangleVertex_raw _EXPORTED_a_triangleVertex +#define GLSL_blitFragmentMain "_EXPORTED_blitFragmentMain" +#define GLSL_blitFragmentMain_raw _EXPORTED_blitFragmentMain +#define GLSL_blitTextureSource "_EXPORTED_blitTextureSource" +#define GLSL_blitTextureSource_raw _EXPORTED_blitTextureSource +#define GLSL_blitVertexMain "_EXPORTED_blitVertexMain" +#define GLSL_blitVertexMain_raw _EXPORTED_blitVertexMain +#define GLSL_clearColor "_EXPORTED_clearColor" +#define GLSL_clearColor_raw _EXPORTED_clearColor +#define GLSL_colorRampFragmentMain "_EXPORTED_colorRampFragmentMain" +#define GLSL_colorRampFragmentMain_raw _EXPORTED_colorRampFragmentMain +#define GLSL_colorRampVertexMain "_EXPORTED_colorRampVertexMain" +#define GLSL_colorRampVertexMain_raw _EXPORTED_colorRampVertexMain +#define GLSL_contourBuffer "_EXPORTED_contourBuffer" +#define GLSL_contourBuffer_raw _EXPORTED_contourBuffer +#define GLSL_drawFragmentMain "_EXPORTED_drawFragmentMain" +#define GLSL_drawFragmentMain_raw _EXPORTED_drawFragmentMain +#define GLSL_drawVertexMain "_EXPORTED_drawVertexMain" +#define GLSL_drawVertexMain_raw _EXPORTED_drawVertexMain +#define GLSL_dstColorTexture "_EXPORTED_dstColorTexture" +#define GLSL_dstColorTexture_raw _EXPORTED_dstColorTexture +#define GLSL_gradTexture "_EXPORTED_gradTexture" +#define GLSL_gradTexture_raw _EXPORTED_gradTexture +#define GLSL_imageTexture "_EXPORTED_imageTexture" +#define GLSL_imageTexture_raw _EXPORTED_imageTexture +#define GLSL_paintAuxBuffer "_EXPORTED_paintAuxBuffer" +#define GLSL_paintAuxBuffer_raw _EXPORTED_paintAuxBuffer +#define GLSL_paintBuffer "_EXPORTED_paintBuffer" +#define GLSL_paintBuffer_raw _EXPORTED_paintBuffer +#define GLSL_pathBuffer "_EXPORTED_pathBuffer" +#define GLSL_pathBuffer_raw _EXPORTED_pathBuffer +#define GLSL_stencilVertexMain "_EXPORTED_stencilVertexMain" +#define GLSL_stencilVertexMain_raw _EXPORTED_stencilVertexMain +#define GLSL_tessVertexTexture "_EXPORTED_tessVertexTexture" +#define GLSL_tessVertexTexture_raw _EXPORTED_tessVertexTexture +#define GLSL_tessellateFragmentMain "_EXPORTED_tessellateFragmentMain" +#define GLSL_tessellateFragmentMain_raw _EXPORTED_tessellateFragmentMain +#define GLSL_tessellateVertexMain "_EXPORTED_tessellateVertexMain" +#define GLSL_tessellateVertexMain_raw _EXPORTED_tessellateVertexMain diff --git a/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/blit_texture_as_draw.glsl.hpp b/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/blit_texture_as_draw.glsl.hpp new file mode 100644 index 00000000..e021f3b1 --- /dev/null +++ b/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/blit_texture_as_draw.glsl.hpp @@ -0,0 +1,44 @@ +#pragma once + +#include "blit_texture_as_draw.exports.h" + +namespace rive { +namespace gpu { +namespace glsl { +const char blit_texture_as_draw[] = R"===(/* + * Copyright 2024 Rive + */ + +#ifdef _EXPORTED_VERTEX +VERTEX_TEXTURE_BLOCK_BEGIN +VERTEX_TEXTURE_BLOCK_END + +VERTEX_STORAGE_BUFFER_BLOCK_BEGIN +VERTEX_STORAGE_BUFFER_BLOCK_END + +VERTEX_MAIN(_EXPORTED_blitVertexMain, Attrs, attrs, _vertexID, _instanceID) +{ + // Fill the entire screen. The caller will use a scissor test to control the bounds being drawn. + float2 coord; + coord.x = (_vertexID & 1) == 0 ? -1. : 1.; + coord.y = (_vertexID & 2) == 0 ? -1. 
: 1.; + float4 pos = float4(coord, 0, 1); + EMIT_VERTEX(pos); +} +#endif + +#ifdef _EXPORTED_FRAGMENT +FRAG_TEXTURE_BLOCK_BEGIN +TEXTURE_RGBA8(PER_FLUSH_BINDINGS_SET, 0, _EXPORTED_blitTextureSource); +FRAG_TEXTURE_BLOCK_END + +FRAG_DATA_MAIN(half4, _EXPORTED_blitFragmentMain) +{ + half4 srcColor = TEXEL_FETCH(_EXPORTED_blitTextureSource, int2(floor(_fragCoord.xy))); + EMIT_FRAG_DATA(srcColor); +} +#endif // FRAGMENT +)==="; +} // namespace glsl +} // namespace gpu +} // namespace rive \ No newline at end of file diff --git a/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/blit_texture_as_draw.minified.ush b/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/blit_texture_as_draw.minified.ush new file mode 100644 index 00000000..475a15f9 --- /dev/null +++ b/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/blit_texture_as_draw.minified.ush @@ -0,0 +1,33 @@ +/* + * Copyright 2024 Rive + */ + +#ifdef VERTEX +VERTEX_TEXTURE_BLOCK_BEGIN +VERTEX_TEXTURE_BLOCK_END + +VERTEX_STORAGE_BUFFER_BLOCK_BEGIN +VERTEX_STORAGE_BUFFER_BLOCK_END + +VERTEX_MAIN(_EXPORTED_blitVertexMain, Attrs, attrs, _vertexID, _instanceID) +{ + // Fill the entire screen. The caller will use a scissor test to control the bounds being drawn. + float2 coord; + coord.x = (_vertexID & 1) == 0 ? -1. : 1.; + coord.y = (_vertexID & 2) == 0 ? -1. : 1.; + float4 pos = float4(coord, 0, 1); + EMIT_VERTEX(pos); +} +#endif + +#ifdef FRAGMENT +FRAG_TEXTURE_BLOCK_BEGIN +TEXTURE_RGBA8(PER_FLUSH_BINDINGS_SET, 0, _EXPORTED_blitTextureSource); +FRAG_TEXTURE_BLOCK_END + +FRAG_DATA_MAIN(half4, _EXPORTED_blitFragmentMain) +{ + half4 srcColor = TEXEL_FETCH(_EXPORTED_blitTextureSource, int2(floor(_fragCoord.xy))); + EMIT_FRAG_DATA(srcColor); +} +#endif // FRAGMENT diff --git a/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/color_ramp.exports.h b/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/color_ramp.exports.h new file mode 100644 index 00000000..2d88d890 --- /dev/null +++ b/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/color_ramp.exports.h @@ -0,0 +1,178 @@ +#pragma once + +#define GLSL_CLEAR_CLIP "_EXPORTED_CLEAR_CLIP" +#define GLSL_CLEAR_CLIP_raw _EXPORTED_CLEAR_CLIP +#define GLSL_CLEAR_COLOR "_EXPORTED_CLEAR_COLOR" +#define GLSL_CLEAR_COLOR_raw _EXPORTED_CLEAR_COLOR +#define GLSL_CLEAR_COVERAGE "_EXPORTED_CLEAR_COVERAGE" +#define GLSL_CLEAR_COVERAGE_raw _EXPORTED_CLEAR_COVERAGE +#define GLSL_COALESCED_PLS_RESOLVE_AND_TRANSFER "_EXPORTED_COALESCED_PLS_RESOLVE_AND_TRANSFER" +#define GLSL_COALESCED_PLS_RESOLVE_AND_TRANSFER_raw _EXPORTED_COALESCED_PLS_RESOLVE_AND_TRANSFER +#define GLSL_COLOR_PLANE_IDX_OVERRIDE "_EXPORTED_COLOR_PLANE_IDX_OVERRIDE" +#define GLSL_COLOR_PLANE_IDX_OVERRIDE_raw _EXPORTED_COLOR_PLANE_IDX_OVERRIDE +#define GLSL_DISABLE_SHADER_STORAGE_BUFFERS "_EXPORTED_DISABLE_SHADER_STORAGE_BUFFERS" +#define GLSL_DISABLE_SHADER_STORAGE_BUFFERS_raw _EXPORTED_DISABLE_SHADER_STORAGE_BUFFERS +#define GLSL_DRAW_IMAGE "_EXPORTED_DRAW_IMAGE" +#define GLSL_DRAW_IMAGE_raw _EXPORTED_DRAW_IMAGE +#define GLSL_DRAW_IMAGE_MESH "_EXPORTED_DRAW_IMAGE_MESH" +#define GLSL_DRAW_IMAGE_MESH_raw _EXPORTED_DRAW_IMAGE_MESH +#define GLSL_DRAW_IMAGE_RECT "_EXPORTED_DRAW_IMAGE_RECT" +#define GLSL_DRAW_IMAGE_RECT_raw _EXPORTED_DRAW_IMAGE_RECT +#define GLSL_DRAW_INTERIOR_TRIANGLES "_EXPORTED_DRAW_INTERIOR_TRIANGLES" +#define GLSL_DRAW_INTERIOR_TRIANGLES_raw _EXPORTED_DRAW_INTERIOR_TRIANGLES +#define GLSL_DRAW_PATH 
"_EXPORTED_DRAW_PATH" +#define GLSL_DRAW_PATH_raw _EXPORTED_DRAW_PATH +#define GLSL_DRAW_RENDER_TARGET_UPDATE_BOUNDS "_EXPORTED_DRAW_RENDER_TARGET_UPDATE_BOUNDS" +#define GLSL_DRAW_RENDER_TARGET_UPDATE_BOUNDS_raw _EXPORTED_DRAW_RENDER_TARGET_UPDATE_BOUNDS +#define GLSL_ENABLE_ADVANCED_BLEND "_EXPORTED_ENABLE_ADVANCED_BLEND" +#define GLSL_ENABLE_ADVANCED_BLEND_raw _EXPORTED_ENABLE_ADVANCED_BLEND +#define GLSL_ENABLE_BINDLESS_TEXTURES "_EXPORTED_ENABLE_BINDLESS_TEXTURES" +#define GLSL_ENABLE_BINDLESS_TEXTURES_raw _EXPORTED_ENABLE_BINDLESS_TEXTURES +#define GLSL_ENABLE_CLIPPING "_EXPORTED_ENABLE_CLIPPING" +#define GLSL_ENABLE_CLIPPING_raw _EXPORTED_ENABLE_CLIPPING +#define GLSL_ENABLE_CLIP_RECT "_EXPORTED_ENABLE_CLIP_RECT" +#define GLSL_ENABLE_CLIP_RECT_raw _EXPORTED_ENABLE_CLIP_RECT +#define GLSL_ENABLE_EVEN_ODD "_EXPORTED_ENABLE_EVEN_ODD" +#define GLSL_ENABLE_EVEN_ODD_raw _EXPORTED_ENABLE_EVEN_ODD +#define GLSL_ENABLE_HSL_BLEND_MODES "_EXPORTED_ENABLE_HSL_BLEND_MODES" +#define GLSL_ENABLE_HSL_BLEND_MODES_raw _EXPORTED_ENABLE_HSL_BLEND_MODES +#define GLSL_ENABLE_INSTANCE_INDEX "_EXPORTED_ENABLE_INSTANCE_INDEX" +#define GLSL_ENABLE_INSTANCE_INDEX_raw _EXPORTED_ENABLE_INSTANCE_INDEX +#define GLSL_ENABLE_KHR_BLEND "_EXPORTED_ENABLE_KHR_BLEND" +#define GLSL_ENABLE_KHR_BLEND_raw _EXPORTED_ENABLE_KHR_BLEND +#define GLSL_ENABLE_MIN_16_PRECISION "_EXPORTED_ENABLE_MIN_16_PRECISION" +#define GLSL_ENABLE_MIN_16_PRECISION_raw _EXPORTED_ENABLE_MIN_16_PRECISION +#define GLSL_ENABLE_NESTED_CLIPPING "_EXPORTED_ENABLE_NESTED_CLIPPING" +#define GLSL_ENABLE_NESTED_CLIPPING_raw _EXPORTED_ENABLE_NESTED_CLIPPING +#define GLSL_ENABLE_RASTERIZER_ORDERED_VIEWS "_EXPORTED_ENABLE_RASTERIZER_ORDERED_VIEWS" +#define GLSL_ENABLE_RASTERIZER_ORDERED_VIEWS_raw _EXPORTED_ENABLE_RASTERIZER_ORDERED_VIEWS +#define GLSL_ENABLE_SPIRV_CROSS_BASE_INSTANCE "_EXPORTED_ENABLE_SPIRV_CROSS_BASE_INSTANCE" +#define GLSL_ENABLE_SPIRV_CROSS_BASE_INSTANCE_raw _EXPORTED_ENABLE_SPIRV_CROSS_BASE_INSTANCE +#define GLSL_ENABLE_TYPED_UAV_LOAD_STORE "_EXPORTED_ENABLE_TYPED_UAV_LOAD_STORE" +#define GLSL_ENABLE_TYPED_UAV_LOAD_STORE_raw _EXPORTED_ENABLE_TYPED_UAV_LOAD_STORE +#define GLSL_FIXED_FUNCTION_COLOR_BLEND "_EXPORTED_FIXED_FUNCTION_COLOR_BLEND" +#define GLSL_FIXED_FUNCTION_COLOR_BLEND_raw _EXPORTED_FIXED_FUNCTION_COLOR_BLEND +#define GLSL_FRAGMENT "_EXPORTED_FRAGMENT" +#define GLSL_FRAGMENT_raw _EXPORTED_FRAGMENT +#define GLSL_FlushUniforms "_EXPORTED_FlushUniforms" +#define GLSL_FlushUniforms_raw _EXPORTED_FlushUniforms +#define GLSL_GLSL_VERSION "_EXPORTED_GLSL_VERSION" +#define GLSL_GLSL_VERSION_raw _EXPORTED_GLSL_VERSION +#define GLSL_INITIALIZE_PLS "_EXPORTED_INITIALIZE_PLS" +#define GLSL_INITIALIZE_PLS_raw _EXPORTED_INITIALIZE_PLS +#define GLSL_ImageDrawUniforms "_EXPORTED_ImageDrawUniforms" +#define GLSL_ImageDrawUniforms_raw _EXPORTED_ImageDrawUniforms +#define GLSL_LOAD_COLOR "_EXPORTED_LOAD_COLOR" +#define GLSL_LOAD_COLOR_raw _EXPORTED_LOAD_COLOR +#define GLSL_OPTIONALLY_FLAT "_EXPORTED_OPTIONALLY_FLAT" +#define GLSL_OPTIONALLY_FLAT_raw _EXPORTED_OPTIONALLY_FLAT +#define GLSL_PLS_IMPL_ANGLE "_EXPORTED_PLS_IMPL_ANGLE" +#define GLSL_PLS_IMPL_ANGLE_raw _EXPORTED_PLS_IMPL_ANGLE +#define GLSL_PLS_IMPL_DEVICE_BUFFER "_EXPORTED_PLS_IMPL_DEVICE_BUFFER" +#define GLSL_PLS_IMPL_DEVICE_BUFFER_raw _EXPORTED_PLS_IMPL_DEVICE_BUFFER +#define GLSL_PLS_IMPL_DEVICE_BUFFER_RASTER_ORDERED "_EXPORTED_PLS_IMPL_DEVICE_BUFFER_RASTER_ORDERED" +#define GLSL_PLS_IMPL_DEVICE_BUFFER_RASTER_ORDERED_raw _EXPORTED_PLS_IMPL_DEVICE_BUFFER_RASTER_ORDERED +#define 
GLSL_PLS_IMPL_EXT_NATIVE "_EXPORTED_PLS_IMPL_EXT_NATIVE" +#define GLSL_PLS_IMPL_EXT_NATIVE_raw _EXPORTED_PLS_IMPL_EXT_NATIVE +#define GLSL_PLS_IMPL_FRAMEBUFFER_FETCH "_EXPORTED_PLS_IMPL_FRAMEBUFFER_FETCH" +#define GLSL_PLS_IMPL_FRAMEBUFFER_FETCH_raw _EXPORTED_PLS_IMPL_FRAMEBUFFER_FETCH +#define GLSL_PLS_IMPL_NONE "_EXPORTED_PLS_IMPL_NONE" +#define GLSL_PLS_IMPL_NONE_raw _EXPORTED_PLS_IMPL_NONE +#define GLSL_PLS_IMPL_STORAGE_TEXTURE "_EXPORTED_PLS_IMPL_STORAGE_TEXTURE" +#define GLSL_PLS_IMPL_STORAGE_TEXTURE_raw _EXPORTED_PLS_IMPL_STORAGE_TEXTURE +#define GLSL_PLS_IMPL_SUBPASS_LOAD "_EXPORTED_PLS_IMPL_SUBPASS_LOAD" +#define GLSL_PLS_IMPL_SUBPASS_LOAD_raw _EXPORTED_PLS_IMPL_SUBPASS_LOAD +#define GLSL_RESOLVE_PLS "_EXPORTED_RESOLVE_PLS" +#define GLSL_RESOLVE_PLS_raw _EXPORTED_RESOLVE_PLS +#define GLSL_STORE_COLOR "_EXPORTED_STORE_COLOR" +#define GLSL_STORE_COLOR_raw _EXPORTED_STORE_COLOR +#define GLSL_STORE_COLOR_CLEAR "_EXPORTED_STORE_COLOR_CLEAR" +#define GLSL_STORE_COLOR_CLEAR_raw _EXPORTED_STORE_COLOR_CLEAR +#define GLSL_SWIZZLE_COLOR_BGRA_TO_RGBA "_EXPORTED_SWIZZLE_COLOR_BGRA_TO_RGBA" +#define GLSL_SWIZZLE_COLOR_BGRA_TO_RGBA_raw _EXPORTED_SWIZZLE_COLOR_BGRA_TO_RGBA +#define GLSL_TARGET_VULKAN "_EXPORTED_TARGET_VULKAN" +#define GLSL_TARGET_VULKAN_raw _EXPORTED_TARGET_VULKAN +#define GLSL_USE_GENERATED_UNIFORMS "_EXPORTED_USE_GENERATED_UNIFORMS" +#define GLSL_USE_GENERATED_UNIFORMS_raw _EXPORTED_USE_GENERATED_UNIFORMS +#define GLSL_USING_DEPTH_STENCIL "_EXPORTED_USING_DEPTH_STENCIL" +#define GLSL_USING_DEPTH_STENCIL_raw _EXPORTED_USING_DEPTH_STENCIL +#define GLSL_USING_PLS_STORAGE_TEXTURES "_EXPORTED_USING_PLS_STORAGE_TEXTURES" +#define GLSL_USING_PLS_STORAGE_TEXTURES_raw _EXPORTED_USING_PLS_STORAGE_TEXTURES +#define GLSL_VERTEX "_EXPORTED_VERTEX" +#define GLSL_VERTEX_raw _EXPORTED_VERTEX +#define GLSL_a_args "_EXPORTED_a_args" +#define GLSL_a_args_raw _EXPORTED_a_args +#define GLSL_a_args_a "_EXPORTED_a_args_a" +#define GLSL_a_args_a_raw _EXPORTED_a_args_a +#define GLSL_a_args_b "_EXPORTED_a_args_b" +#define GLSL_a_args_b_raw _EXPORTED_a_args_b +#define GLSL_a_args_c "_EXPORTED_a_args_c" +#define GLSL_a_args_c_raw _EXPORTED_a_args_c +#define GLSL_a_args_d "_EXPORTED_a_args_d" +#define GLSL_a_args_d_raw _EXPORTED_a_args_d +#define GLSL_a_imageRectVertex "_EXPORTED_a_imageRectVertex" +#define GLSL_a_imageRectVertex_raw _EXPORTED_a_imageRectVertex +#define GLSL_a_joinTan_and_ys "_EXPORTED_a_joinTan_and_ys" +#define GLSL_a_joinTan_and_ys_raw _EXPORTED_a_joinTan_and_ys +#define GLSL_a_mirroredVertexData "_EXPORTED_a_mirroredVertexData" +#define GLSL_a_mirroredVertexData_raw _EXPORTED_a_mirroredVertexData +#define GLSL_a_p0p1_ "_EXPORTED_a_p0p1_" +#define GLSL_a_p0p1__raw _EXPORTED_a_p0p1_ +#define GLSL_a_p2p3_ "_EXPORTED_a_p2p3_" +#define GLSL_a_p2p3__raw _EXPORTED_a_p2p3_ +#define GLSL_a_patchVertexData "_EXPORTED_a_patchVertexData" +#define GLSL_a_patchVertexData_raw _EXPORTED_a_patchVertexData +#define GLSL_a_position "_EXPORTED_a_position" +#define GLSL_a_position_raw _EXPORTED_a_position +#define GLSL_a_span "_EXPORTED_a_span" +#define GLSL_a_span_raw _EXPORTED_a_span +#define GLSL_a_span_a "_EXPORTED_a_span_a" +#define GLSL_a_span_a_raw _EXPORTED_a_span_a +#define GLSL_a_span_b "_EXPORTED_a_span_b" +#define GLSL_a_span_b_raw _EXPORTED_a_span_b +#define GLSL_a_span_c "_EXPORTED_a_span_c" +#define GLSL_a_span_c_raw _EXPORTED_a_span_c +#define GLSL_a_span_d "_EXPORTED_a_span_d" +#define GLSL_a_span_d_raw _EXPORTED_a_span_d +#define GLSL_a_texCoord "_EXPORTED_a_texCoord" +#define 
GLSL_a_texCoord_raw _EXPORTED_a_texCoord +#define GLSL_a_triangleVertex "_EXPORTED_a_triangleVertex" +#define GLSL_a_triangleVertex_raw _EXPORTED_a_triangleVertex +#define GLSL_blitFragmentMain "_EXPORTED_blitFragmentMain" +#define GLSL_blitFragmentMain_raw _EXPORTED_blitFragmentMain +#define GLSL_blitTextureSource "_EXPORTED_blitTextureSource" +#define GLSL_blitTextureSource_raw _EXPORTED_blitTextureSource +#define GLSL_blitVertexMain "_EXPORTED_blitVertexMain" +#define GLSL_blitVertexMain_raw _EXPORTED_blitVertexMain +#define GLSL_clearColor "_EXPORTED_clearColor" +#define GLSL_clearColor_raw _EXPORTED_clearColor +#define GLSL_colorRampFragmentMain "_EXPORTED_colorRampFragmentMain" +#define GLSL_colorRampFragmentMain_raw _EXPORTED_colorRampFragmentMain +#define GLSL_colorRampVertexMain "_EXPORTED_colorRampVertexMain" +#define GLSL_colorRampVertexMain_raw _EXPORTED_colorRampVertexMain +#define GLSL_contourBuffer "_EXPORTED_contourBuffer" +#define GLSL_contourBuffer_raw _EXPORTED_contourBuffer +#define GLSL_drawFragmentMain "_EXPORTED_drawFragmentMain" +#define GLSL_drawFragmentMain_raw _EXPORTED_drawFragmentMain +#define GLSL_drawVertexMain "_EXPORTED_drawVertexMain" +#define GLSL_drawVertexMain_raw _EXPORTED_drawVertexMain +#define GLSL_dstColorTexture "_EXPORTED_dstColorTexture" +#define GLSL_dstColorTexture_raw _EXPORTED_dstColorTexture +#define GLSL_gradTexture "_EXPORTED_gradTexture" +#define GLSL_gradTexture_raw _EXPORTED_gradTexture +#define GLSL_imageTexture "_EXPORTED_imageTexture" +#define GLSL_imageTexture_raw _EXPORTED_imageTexture +#define GLSL_paintAuxBuffer "_EXPORTED_paintAuxBuffer" +#define GLSL_paintAuxBuffer_raw _EXPORTED_paintAuxBuffer +#define GLSL_paintBuffer "_EXPORTED_paintBuffer" +#define GLSL_paintBuffer_raw _EXPORTED_paintBuffer +#define GLSL_pathBuffer "_EXPORTED_pathBuffer" +#define GLSL_pathBuffer_raw _EXPORTED_pathBuffer +#define GLSL_stencilVertexMain "_EXPORTED_stencilVertexMain" +#define GLSL_stencilVertexMain_raw _EXPORTED_stencilVertexMain +#define GLSL_tessVertexTexture "_EXPORTED_tessVertexTexture" +#define GLSL_tessVertexTexture_raw _EXPORTED_tessVertexTexture +#define GLSL_tessellateFragmentMain "_EXPORTED_tessellateFragmentMain" +#define GLSL_tessellateFragmentMain_raw _EXPORTED_tessellateFragmentMain +#define GLSL_tessellateVertexMain "_EXPORTED_tessellateVertexMain" +#define GLSL_tessellateVertexMain_raw _EXPORTED_tessellateVertexMain diff --git a/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/color_ramp.glsl.hpp b/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/color_ramp.glsl.hpp new file mode 100644 index 00000000..b30446b9 --- /dev/null +++ b/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/color_ramp.glsl.hpp @@ -0,0 +1,91 @@ +#pragma once + +#include "color_ramp.exports.h" + +namespace rive { +namespace gpu { +namespace glsl { +const char color_ramp[] = R"===(/* + * Copyright 2022 Rive + */ + +// This shader draws horizontal color ramps into a gradient texture, which will later be sampled by +// the renderer for drawing gradients. 
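+// Each instance encodes one span as [spanX, y, color0, color1]: color0/color1 are packed ARGB
+// words, so unpackColorInt(0x80FF0000u) below evaluates to roughly half4(1., .0, .0, .5)
+// (red at ~50% alpha).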
+ +#ifdef _EXPORTED_VERTEX +ATTR_BLOCK_BEGIN(Attrs) +#ifdef SPLIT_UINT4_ATTRIBUTES +ATTR(0, uint, _EXPORTED_a_span_a); +ATTR(1, uint, _EXPORTED_a_span_b); +ATTR(2, uint, _EXPORTED_a_span_c); +ATTR(3, uint, _EXPORTED_a_span_d); +#else +ATTR(0, uint4, _EXPORTED_a_span); // [spanX, y, color0, color1] +#endif +ATTR_BLOCK_END +#endif + +VARYING_BLOCK_BEGIN +NO_PERSPECTIVE VARYING(0, half4, v_rampColor); +VARYING_BLOCK_END + +#ifdef _EXPORTED_VERTEX +VERTEX_TEXTURE_BLOCK_BEGIN +VERTEX_TEXTURE_BLOCK_END + +VERTEX_STORAGE_BUFFER_BLOCK_BEGIN +VERTEX_STORAGE_BUFFER_BLOCK_END + +half4 unpackColorInt(uint color) +{ + return cast_uint4_to_half4((uint4(color, color, color, color) >> uint4(16, 8, 0, 24)) & 0xffu) / + 255.; +} + +VERTEX_MAIN(_EXPORTED_colorRampVertexMain, Attrs, attrs, _vertexID, _instanceID) +{ + +#ifdef SPLIT_UINT4_ATTRIBUTES + ATTR_UNPACK(_instanceID, attrs, _EXPORTED_a_span_a, uint); + ATTR_UNPACK(_instanceID, attrs, _EXPORTED_a_span_b, uint); + ATTR_UNPACK(_instanceID, attrs, _EXPORTED_a_span_c, uint); + ATTR_UNPACK(_instanceID, attrs, _EXPORTED_a_span_d, uint); + uint4 _EXPORTED_a_span = uint4( _EXPORTED_a_span_a, _EXPORTED_a_span_b, _EXPORTED_a_span_c, _EXPORTED_a_span_d); + +#else + ATTR_UNPACK(_instanceID, attrs, _EXPORTED_a_span, uint4); + +#endif + VARYING_INIT(v_rampColor, half4); + + float x = float((_vertexID & 1) == 0 ? _EXPORTED_a_span.x & 0xffffu : _EXPORTED_a_span.x >> 16) / 65536.; + float offsetY = (_vertexID & 2) == 0 ? 1. : .0; + if (uniforms.gradInverseViewportY < .0) + { + // Make sure we always emit clockwise triangles. Swap the top and bottom vertices. + offsetY = 1. - offsetY; + } + v_rampColor = unpackColorInt((_vertexID & 1) == 0 ? _EXPORTED_a_span.z : _EXPORTED_a_span.w); + + float4 pos; + pos.x = x * 2. - 1.; + pos.y = (float(_EXPORTED_a_span.y) + offsetY) * uniforms.gradInverseViewportY - + sign(uniforms.gradInverseViewportY); + pos.zw = float2(0, 1); + + VARYING_PACK(v_rampColor); + EMIT_VERTEX(pos); +} +#endif + +#ifdef _EXPORTED_FRAGMENT +FRAG_DATA_MAIN(half4, _EXPORTED_colorRampFragmentMain) +{ + VARYING_UNPACK(v_rampColor, half4); + EMIT_FRAG_DATA(v_rampColor); +} +#endif +)==="; +} // namespace glsl +} // namespace gpu +} // namespace rive \ No newline at end of file diff --git a/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/color_ramp.minified.ush b/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/color_ramp.minified.ush new file mode 100644 index 00000000..76a9e6f2 --- /dev/null +++ b/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/color_ramp.minified.ush @@ -0,0 +1,80 @@ +/* + * Copyright 2022 Rive + */ + +// This shader draws horizontal color ramps into a gradient texture, which will later be sampled by +// the renderer for drawing gradients. 
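+// (Each span instance expands to a two-triangle strip: _vertexID bit 0 selects the left or right
+// x endpoint, packed as two 16-bit fixed-point fractions in the span's x word, and bit 1 selects
+// the top or bottom of the span's row, swapped when gradInverseViewportY is negative to keep the
+// triangles clockwise.)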
+ +#ifdef VERTEX +ATTR_BLOCK_BEGIN(Attrs) +#ifdef SPLIT_UINT4_ATTRIBUTES +ATTR(0, uint, _EXPORTED_a_span_a); +ATTR(1, uint, _EXPORTED_a_span_b); +ATTR(2, uint, _EXPORTED_a_span_c); +ATTR(3, uint, _EXPORTED_a_span_d); +#else +ATTR(0, uint4, _EXPORTED_a_span); // [spanX, y, color0, color1] +#endif +ATTR_BLOCK_END +#endif + +VARYING_BLOCK_BEGIN +NO_PERSPECTIVE VARYING(0, half4, v_rampColor); +VARYING_BLOCK_END + +#ifdef VERTEX +VERTEX_TEXTURE_BLOCK_BEGIN +VERTEX_TEXTURE_BLOCK_END + +VERTEX_STORAGE_BUFFER_BLOCK_BEGIN +VERTEX_STORAGE_BUFFER_BLOCK_END + +half4 unpackColorInt(uint color) +{ + return cast_uint4_to_half4((uint4(color, color, color, color) >> uint4(16, 8, 0, 24)) & 0xffu) / + 255.; +} + +VERTEX_MAIN(_EXPORTED_colorRampVertexMain, Attrs, attrs, _vertexID, _instanceID) +{ + +#ifdef SPLIT_UINT4_ATTRIBUTES + ATTR_UNPACK(_instanceID, attrs, _EXPORTED_a_span_a, uint); + ATTR_UNPACK(_instanceID, attrs, _EXPORTED_a_span_b, uint); + ATTR_UNPACK(_instanceID, attrs, _EXPORTED_a_span_c, uint); + ATTR_UNPACK(_instanceID, attrs, _EXPORTED_a_span_d, uint); + uint4 _EXPORTED_a_span = uint4( _EXPORTED_a_span_a, _EXPORTED_a_span_b, _EXPORTED_a_span_c, _EXPORTED_a_span_d); + +#else + ATTR_UNPACK(_instanceID, attrs, _EXPORTED_a_span, uint4); + +#endif + VARYING_INIT(v_rampColor, half4); + + float x = float((_vertexID & 1) == 0 ? _EXPORTED_a_span.x & 0xffffu : _EXPORTED_a_span.x >> 16) / 65536.; + float offsetY = (_vertexID & 2) == 0 ? 1. : .0; + if (uniforms.gradInverseViewportY < .0) + { + // Make sure we always emit clockwise triangles. Swap the top and bottom vertices. + offsetY = 1. - offsetY; + } + v_rampColor = unpackColorInt((_vertexID & 1) == 0 ? _EXPORTED_a_span.z : _EXPORTED_a_span.w); + + float4 pos; + pos.x = x * 2. - 1.; + pos.y = (float(_EXPORTED_a_span.y) + offsetY) * uniforms.gradInverseViewportY - + sign(uniforms.gradInverseViewportY); + pos.zw = float2(0, 1); + + VARYING_PACK(v_rampColor); + EMIT_VERTEX(pos); +} +#endif + +#ifdef FRAGMENT +FRAG_DATA_MAIN(half4, _EXPORTED_colorRampFragmentMain) +{ + VARYING_UNPACK(v_rampColor, half4); + EMIT_FRAG_DATA(v_rampColor); +} +#endif diff --git a/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/common.exports.h b/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/common.exports.h new file mode 100644 index 00000000..2d88d890 --- /dev/null +++ b/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/common.exports.h @@ -0,0 +1,178 @@ +#pragma once + +#define GLSL_CLEAR_CLIP "_EXPORTED_CLEAR_CLIP" +#define GLSL_CLEAR_CLIP_raw _EXPORTED_CLEAR_CLIP +#define GLSL_CLEAR_COLOR "_EXPORTED_CLEAR_COLOR" +#define GLSL_CLEAR_COLOR_raw _EXPORTED_CLEAR_COLOR +#define GLSL_CLEAR_COVERAGE "_EXPORTED_CLEAR_COVERAGE" +#define GLSL_CLEAR_COVERAGE_raw _EXPORTED_CLEAR_COVERAGE +#define GLSL_COALESCED_PLS_RESOLVE_AND_TRANSFER "_EXPORTED_COALESCED_PLS_RESOLVE_AND_TRANSFER" +#define GLSL_COALESCED_PLS_RESOLVE_AND_TRANSFER_raw _EXPORTED_COALESCED_PLS_RESOLVE_AND_TRANSFER +#define GLSL_COLOR_PLANE_IDX_OVERRIDE "_EXPORTED_COLOR_PLANE_IDX_OVERRIDE" +#define GLSL_COLOR_PLANE_IDX_OVERRIDE_raw _EXPORTED_COLOR_PLANE_IDX_OVERRIDE +#define GLSL_DISABLE_SHADER_STORAGE_BUFFERS "_EXPORTED_DISABLE_SHADER_STORAGE_BUFFERS" +#define GLSL_DISABLE_SHADER_STORAGE_BUFFERS_raw _EXPORTED_DISABLE_SHADER_STORAGE_BUFFERS +#define GLSL_DRAW_IMAGE "_EXPORTED_DRAW_IMAGE" +#define GLSL_DRAW_IMAGE_raw _EXPORTED_DRAW_IMAGE +#define GLSL_DRAW_IMAGE_MESH "_EXPORTED_DRAW_IMAGE_MESH" +#define GLSL_DRAW_IMAGE_MESH_raw 
_EXPORTED_DRAW_IMAGE_MESH +#define GLSL_DRAW_IMAGE_RECT "_EXPORTED_DRAW_IMAGE_RECT" +#define GLSL_DRAW_IMAGE_RECT_raw _EXPORTED_DRAW_IMAGE_RECT +#define GLSL_DRAW_INTERIOR_TRIANGLES "_EXPORTED_DRAW_INTERIOR_TRIANGLES" +#define GLSL_DRAW_INTERIOR_TRIANGLES_raw _EXPORTED_DRAW_INTERIOR_TRIANGLES +#define GLSL_DRAW_PATH "_EXPORTED_DRAW_PATH" +#define GLSL_DRAW_PATH_raw _EXPORTED_DRAW_PATH +#define GLSL_DRAW_RENDER_TARGET_UPDATE_BOUNDS "_EXPORTED_DRAW_RENDER_TARGET_UPDATE_BOUNDS" +#define GLSL_DRAW_RENDER_TARGET_UPDATE_BOUNDS_raw _EXPORTED_DRAW_RENDER_TARGET_UPDATE_BOUNDS +#define GLSL_ENABLE_ADVANCED_BLEND "_EXPORTED_ENABLE_ADVANCED_BLEND" +#define GLSL_ENABLE_ADVANCED_BLEND_raw _EXPORTED_ENABLE_ADVANCED_BLEND +#define GLSL_ENABLE_BINDLESS_TEXTURES "_EXPORTED_ENABLE_BINDLESS_TEXTURES" +#define GLSL_ENABLE_BINDLESS_TEXTURES_raw _EXPORTED_ENABLE_BINDLESS_TEXTURES +#define GLSL_ENABLE_CLIPPING "_EXPORTED_ENABLE_CLIPPING" +#define GLSL_ENABLE_CLIPPING_raw _EXPORTED_ENABLE_CLIPPING +#define GLSL_ENABLE_CLIP_RECT "_EXPORTED_ENABLE_CLIP_RECT" +#define GLSL_ENABLE_CLIP_RECT_raw _EXPORTED_ENABLE_CLIP_RECT +#define GLSL_ENABLE_EVEN_ODD "_EXPORTED_ENABLE_EVEN_ODD" +#define GLSL_ENABLE_EVEN_ODD_raw _EXPORTED_ENABLE_EVEN_ODD +#define GLSL_ENABLE_HSL_BLEND_MODES "_EXPORTED_ENABLE_HSL_BLEND_MODES" +#define GLSL_ENABLE_HSL_BLEND_MODES_raw _EXPORTED_ENABLE_HSL_BLEND_MODES +#define GLSL_ENABLE_INSTANCE_INDEX "_EXPORTED_ENABLE_INSTANCE_INDEX" +#define GLSL_ENABLE_INSTANCE_INDEX_raw _EXPORTED_ENABLE_INSTANCE_INDEX +#define GLSL_ENABLE_KHR_BLEND "_EXPORTED_ENABLE_KHR_BLEND" +#define GLSL_ENABLE_KHR_BLEND_raw _EXPORTED_ENABLE_KHR_BLEND +#define GLSL_ENABLE_MIN_16_PRECISION "_EXPORTED_ENABLE_MIN_16_PRECISION" +#define GLSL_ENABLE_MIN_16_PRECISION_raw _EXPORTED_ENABLE_MIN_16_PRECISION +#define GLSL_ENABLE_NESTED_CLIPPING "_EXPORTED_ENABLE_NESTED_CLIPPING" +#define GLSL_ENABLE_NESTED_CLIPPING_raw _EXPORTED_ENABLE_NESTED_CLIPPING +#define GLSL_ENABLE_RASTERIZER_ORDERED_VIEWS "_EXPORTED_ENABLE_RASTERIZER_ORDERED_VIEWS" +#define GLSL_ENABLE_RASTERIZER_ORDERED_VIEWS_raw _EXPORTED_ENABLE_RASTERIZER_ORDERED_VIEWS +#define GLSL_ENABLE_SPIRV_CROSS_BASE_INSTANCE "_EXPORTED_ENABLE_SPIRV_CROSS_BASE_INSTANCE" +#define GLSL_ENABLE_SPIRV_CROSS_BASE_INSTANCE_raw _EXPORTED_ENABLE_SPIRV_CROSS_BASE_INSTANCE +#define GLSL_ENABLE_TYPED_UAV_LOAD_STORE "_EXPORTED_ENABLE_TYPED_UAV_LOAD_STORE" +#define GLSL_ENABLE_TYPED_UAV_LOAD_STORE_raw _EXPORTED_ENABLE_TYPED_UAV_LOAD_STORE +#define GLSL_FIXED_FUNCTION_COLOR_BLEND "_EXPORTED_FIXED_FUNCTION_COLOR_BLEND" +#define GLSL_FIXED_FUNCTION_COLOR_BLEND_raw _EXPORTED_FIXED_FUNCTION_COLOR_BLEND +#define GLSL_FRAGMENT "_EXPORTED_FRAGMENT" +#define GLSL_FRAGMENT_raw _EXPORTED_FRAGMENT +#define GLSL_FlushUniforms "_EXPORTED_FlushUniforms" +#define GLSL_FlushUniforms_raw _EXPORTED_FlushUniforms +#define GLSL_GLSL_VERSION "_EXPORTED_GLSL_VERSION" +#define GLSL_GLSL_VERSION_raw _EXPORTED_GLSL_VERSION +#define GLSL_INITIALIZE_PLS "_EXPORTED_INITIALIZE_PLS" +#define GLSL_INITIALIZE_PLS_raw _EXPORTED_INITIALIZE_PLS +#define GLSL_ImageDrawUniforms "_EXPORTED_ImageDrawUniforms" +#define GLSL_ImageDrawUniforms_raw _EXPORTED_ImageDrawUniforms +#define GLSL_LOAD_COLOR "_EXPORTED_LOAD_COLOR" +#define GLSL_LOAD_COLOR_raw _EXPORTED_LOAD_COLOR +#define GLSL_OPTIONALLY_FLAT "_EXPORTED_OPTIONALLY_FLAT" +#define GLSL_OPTIONALLY_FLAT_raw _EXPORTED_OPTIONALLY_FLAT +#define GLSL_PLS_IMPL_ANGLE "_EXPORTED_PLS_IMPL_ANGLE" +#define GLSL_PLS_IMPL_ANGLE_raw _EXPORTED_PLS_IMPL_ANGLE +#define GLSL_PLS_IMPL_DEVICE_BUFFER 
"_EXPORTED_PLS_IMPL_DEVICE_BUFFER" +#define GLSL_PLS_IMPL_DEVICE_BUFFER_raw _EXPORTED_PLS_IMPL_DEVICE_BUFFER +#define GLSL_PLS_IMPL_DEVICE_BUFFER_RASTER_ORDERED "_EXPORTED_PLS_IMPL_DEVICE_BUFFER_RASTER_ORDERED" +#define GLSL_PLS_IMPL_DEVICE_BUFFER_RASTER_ORDERED_raw _EXPORTED_PLS_IMPL_DEVICE_BUFFER_RASTER_ORDERED +#define GLSL_PLS_IMPL_EXT_NATIVE "_EXPORTED_PLS_IMPL_EXT_NATIVE" +#define GLSL_PLS_IMPL_EXT_NATIVE_raw _EXPORTED_PLS_IMPL_EXT_NATIVE +#define GLSL_PLS_IMPL_FRAMEBUFFER_FETCH "_EXPORTED_PLS_IMPL_FRAMEBUFFER_FETCH" +#define GLSL_PLS_IMPL_FRAMEBUFFER_FETCH_raw _EXPORTED_PLS_IMPL_FRAMEBUFFER_FETCH +#define GLSL_PLS_IMPL_NONE "_EXPORTED_PLS_IMPL_NONE" +#define GLSL_PLS_IMPL_NONE_raw _EXPORTED_PLS_IMPL_NONE +#define GLSL_PLS_IMPL_STORAGE_TEXTURE "_EXPORTED_PLS_IMPL_STORAGE_TEXTURE" +#define GLSL_PLS_IMPL_STORAGE_TEXTURE_raw _EXPORTED_PLS_IMPL_STORAGE_TEXTURE +#define GLSL_PLS_IMPL_SUBPASS_LOAD "_EXPORTED_PLS_IMPL_SUBPASS_LOAD" +#define GLSL_PLS_IMPL_SUBPASS_LOAD_raw _EXPORTED_PLS_IMPL_SUBPASS_LOAD +#define GLSL_RESOLVE_PLS "_EXPORTED_RESOLVE_PLS" +#define GLSL_RESOLVE_PLS_raw _EXPORTED_RESOLVE_PLS +#define GLSL_STORE_COLOR "_EXPORTED_STORE_COLOR" +#define GLSL_STORE_COLOR_raw _EXPORTED_STORE_COLOR +#define GLSL_STORE_COLOR_CLEAR "_EXPORTED_STORE_COLOR_CLEAR" +#define GLSL_STORE_COLOR_CLEAR_raw _EXPORTED_STORE_COLOR_CLEAR +#define GLSL_SWIZZLE_COLOR_BGRA_TO_RGBA "_EXPORTED_SWIZZLE_COLOR_BGRA_TO_RGBA" +#define GLSL_SWIZZLE_COLOR_BGRA_TO_RGBA_raw _EXPORTED_SWIZZLE_COLOR_BGRA_TO_RGBA +#define GLSL_TARGET_VULKAN "_EXPORTED_TARGET_VULKAN" +#define GLSL_TARGET_VULKAN_raw _EXPORTED_TARGET_VULKAN +#define GLSL_USE_GENERATED_UNIFORMS "_EXPORTED_USE_GENERATED_UNIFORMS" +#define GLSL_USE_GENERATED_UNIFORMS_raw _EXPORTED_USE_GENERATED_UNIFORMS +#define GLSL_USING_DEPTH_STENCIL "_EXPORTED_USING_DEPTH_STENCIL" +#define GLSL_USING_DEPTH_STENCIL_raw _EXPORTED_USING_DEPTH_STENCIL +#define GLSL_USING_PLS_STORAGE_TEXTURES "_EXPORTED_USING_PLS_STORAGE_TEXTURES" +#define GLSL_USING_PLS_STORAGE_TEXTURES_raw _EXPORTED_USING_PLS_STORAGE_TEXTURES +#define GLSL_VERTEX "_EXPORTED_VERTEX" +#define GLSL_VERTEX_raw _EXPORTED_VERTEX +#define GLSL_a_args "_EXPORTED_a_args" +#define GLSL_a_args_raw _EXPORTED_a_args +#define GLSL_a_args_a "_EXPORTED_a_args_a" +#define GLSL_a_args_a_raw _EXPORTED_a_args_a +#define GLSL_a_args_b "_EXPORTED_a_args_b" +#define GLSL_a_args_b_raw _EXPORTED_a_args_b +#define GLSL_a_args_c "_EXPORTED_a_args_c" +#define GLSL_a_args_c_raw _EXPORTED_a_args_c +#define GLSL_a_args_d "_EXPORTED_a_args_d" +#define GLSL_a_args_d_raw _EXPORTED_a_args_d +#define GLSL_a_imageRectVertex "_EXPORTED_a_imageRectVertex" +#define GLSL_a_imageRectVertex_raw _EXPORTED_a_imageRectVertex +#define GLSL_a_joinTan_and_ys "_EXPORTED_a_joinTan_and_ys" +#define GLSL_a_joinTan_and_ys_raw _EXPORTED_a_joinTan_and_ys +#define GLSL_a_mirroredVertexData "_EXPORTED_a_mirroredVertexData" +#define GLSL_a_mirroredVertexData_raw _EXPORTED_a_mirroredVertexData +#define GLSL_a_p0p1_ "_EXPORTED_a_p0p1_" +#define GLSL_a_p0p1__raw _EXPORTED_a_p0p1_ +#define GLSL_a_p2p3_ "_EXPORTED_a_p2p3_" +#define GLSL_a_p2p3__raw _EXPORTED_a_p2p3_ +#define GLSL_a_patchVertexData "_EXPORTED_a_patchVertexData" +#define GLSL_a_patchVertexData_raw _EXPORTED_a_patchVertexData +#define GLSL_a_position "_EXPORTED_a_position" +#define GLSL_a_position_raw _EXPORTED_a_position +#define GLSL_a_span "_EXPORTED_a_span" +#define GLSL_a_span_raw _EXPORTED_a_span +#define GLSL_a_span_a "_EXPORTED_a_span_a" +#define GLSL_a_span_a_raw _EXPORTED_a_span_a +#define 
GLSL_a_span_b "_EXPORTED_a_span_b" +#define GLSL_a_span_b_raw _EXPORTED_a_span_b +#define GLSL_a_span_c "_EXPORTED_a_span_c" +#define GLSL_a_span_c_raw _EXPORTED_a_span_c +#define GLSL_a_span_d "_EXPORTED_a_span_d" +#define GLSL_a_span_d_raw _EXPORTED_a_span_d +#define GLSL_a_texCoord "_EXPORTED_a_texCoord" +#define GLSL_a_texCoord_raw _EXPORTED_a_texCoord +#define GLSL_a_triangleVertex "_EXPORTED_a_triangleVertex" +#define GLSL_a_triangleVertex_raw _EXPORTED_a_triangleVertex +#define GLSL_blitFragmentMain "_EXPORTED_blitFragmentMain" +#define GLSL_blitFragmentMain_raw _EXPORTED_blitFragmentMain +#define GLSL_blitTextureSource "_EXPORTED_blitTextureSource" +#define GLSL_blitTextureSource_raw _EXPORTED_blitTextureSource +#define GLSL_blitVertexMain "_EXPORTED_blitVertexMain" +#define GLSL_blitVertexMain_raw _EXPORTED_blitVertexMain +#define GLSL_clearColor "_EXPORTED_clearColor" +#define GLSL_clearColor_raw _EXPORTED_clearColor +#define GLSL_colorRampFragmentMain "_EXPORTED_colorRampFragmentMain" +#define GLSL_colorRampFragmentMain_raw _EXPORTED_colorRampFragmentMain +#define GLSL_colorRampVertexMain "_EXPORTED_colorRampVertexMain" +#define GLSL_colorRampVertexMain_raw _EXPORTED_colorRampVertexMain +#define GLSL_contourBuffer "_EXPORTED_contourBuffer" +#define GLSL_contourBuffer_raw _EXPORTED_contourBuffer +#define GLSL_drawFragmentMain "_EXPORTED_drawFragmentMain" +#define GLSL_drawFragmentMain_raw _EXPORTED_drawFragmentMain +#define GLSL_drawVertexMain "_EXPORTED_drawVertexMain" +#define GLSL_drawVertexMain_raw _EXPORTED_drawVertexMain +#define GLSL_dstColorTexture "_EXPORTED_dstColorTexture" +#define GLSL_dstColorTexture_raw _EXPORTED_dstColorTexture +#define GLSL_gradTexture "_EXPORTED_gradTexture" +#define GLSL_gradTexture_raw _EXPORTED_gradTexture +#define GLSL_imageTexture "_EXPORTED_imageTexture" +#define GLSL_imageTexture_raw _EXPORTED_imageTexture +#define GLSL_paintAuxBuffer "_EXPORTED_paintAuxBuffer" +#define GLSL_paintAuxBuffer_raw _EXPORTED_paintAuxBuffer +#define GLSL_paintBuffer "_EXPORTED_paintBuffer" +#define GLSL_paintBuffer_raw _EXPORTED_paintBuffer +#define GLSL_pathBuffer "_EXPORTED_pathBuffer" +#define GLSL_pathBuffer_raw _EXPORTED_pathBuffer +#define GLSL_stencilVertexMain "_EXPORTED_stencilVertexMain" +#define GLSL_stencilVertexMain_raw _EXPORTED_stencilVertexMain +#define GLSL_tessVertexTexture "_EXPORTED_tessVertexTexture" +#define GLSL_tessVertexTexture_raw _EXPORTED_tessVertexTexture +#define GLSL_tessellateFragmentMain "_EXPORTED_tessellateFragmentMain" +#define GLSL_tessellateFragmentMain_raw _EXPORTED_tessellateFragmentMain +#define GLSL_tessellateVertexMain "_EXPORTED_tessellateVertexMain" +#define GLSL_tessellateVertexMain_raw _EXPORTED_tessellateVertexMain diff --git a/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/common.glsl.hpp b/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/common.glsl.hpp new file mode 100644 index 00000000..49f24cbe --- /dev/null +++ b/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/common.glsl.hpp @@ -0,0 +1,254 @@ +#pragma once + +#include "common.exports.h" + +namespace rive { +namespace gpu { +namespace glsl { +const char common[] = R"===(/* + * Copyright 2022 Rive + */ + +// Common functions shared by multiple shaders. 
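+// (Type-conversion shims, half/matrix constructors, and color utilities; for example,
+// premultiply(make_half4(1., .0, .0, .5)) == half4(.5, .0, .0, .5), and unmultiply() is its
+// inverse for nonzero alpha.)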
+ +#ifdef USE_GENERATED_UNIFORMS +#include "/Engine/Generated/GeneratedUniformBuffers.ush" +#endif + +#define PI float(3.141592653589793238) + +#ifndef _EXPORTED_USING_DEPTH_STENCIL +#define AA_RADIUS float(.5) +#else +#define AA_RADIUS float(.0) +#endif + +#ifdef GLSL +// GLSL has different semantics around precision. Normalize type conversions across +// languages with "cast_*_to_*()" methods. +INLINE half cast_float_to_half(float x) { return x; } +INLINE half cast_uint_to_half(uint x) { return float(x); } +INLINE half cast_ushort_to_half(ushort x) { return float(x); } +INLINE half cast_int_to_half(int x) { return float(x); } +INLINE half4 cast_float4_to_half4(float4 xyzw) { return xyzw; } +INLINE half2 cast_float2_to_half2(float2 xy) { return xy; } +INLINE half4 cast_uint4_to_half4(uint4 xyzw) { return vec4(xyzw); } +INLINE ushort cast_half_to_ushort(half x) { return uint(x); } +INLINE ushort cast_uint_to_ushort(uint x) { return x; } +#else +INLINE half cast_float_to_half(float x) { return (half)x; } +INLINE half cast_uint_to_half(uint x) { return (half)x; } +INLINE half cast_ushort_to_half(ushort x) { return (half)x; } +INLINE half cast_int_to_half(int x) { return (half)x; } +INLINE half4 cast_float4_to_half4(float4 xyzw) { return (half4)xyzw; } +INLINE half2 cast_float2_to_half2(float2 xy) { return (half2)xy; } +INLINE half4 cast_uint4_to_half4(uint4 xyzw) { return (half4)xyzw; } +INLINE ushort cast_half_to_ushort(half x) { return (ushort)x; } +INLINE ushort cast_uint_to_ushort(uint x) { return (ushort)x; } +#endif + +INLINE half make_half(half x) { return x; } + +INLINE half2 make_half2(half2 xy) { return xy; } + +INLINE half2 make_half2(half x, half y) +{ + half2 ret; + ret.x = x, ret.y = y; + return ret; +} + +INLINE half3 make_half3(half x, half y, half z) +{ + half3 ret; + ret.x = x, ret.y = y, ret.z = z; + return ret; +} + +INLINE half3 make_half3(half x) +{ + half3 ret; + ret.x = x, ret.y = x, ret.z = x; + return ret; +} + +INLINE half4 make_half4(half x, half y, half z, half w) +{ + half4 ret; + ret.x = x, ret.y = y, ret.z = z, ret.w = w; + return ret; +} + +INLINE half4 make_half4(half3 xyz, half w) +{ + half4 ret; + ret.xyz = xyz; + ret.w = w; + return ret; +} + +INLINE half4 make_half4(half x) +{ + half4 ret; + ret.x = x, ret.y = x, ret.z = x, ret.w = x; + return ret; +} + +INLINE half3x4 make_half3x4(half3 a, half b, half3 c, half d, half3 e, half f) +{ + half3x4 ret; + ret[0] = make_half4(a, b); + ret[1] = make_half4(c, d); + ret[2] = make_half4(e, f); + return ret; +} + +INLINE float2x2 make_float2x2(float4 x) { return float2x2(x.xy, x.zw); } + +INLINE uint make_uint(ushort x) { return x; } + +INLINE uint contour_data_idx(uint contourIDWithFlags) +{ + return (contourIDWithFlags & CONTOUR_ID_MASK) - 1u; +} + +INLINE float2 unchecked_mix(float2 a, float2 b, float t) { return (b - a) * t + a; } + +INLINE half id_bits_to_f16(uint idBits, uint pathIDGranularity) +{ + return idBits == 0u ? 
.0 : unpackHalf2x16((idBits + MAX_DENORM_F16) * pathIDGranularity).x; +} + +INLINE float atan2(float2 v) +{ + float bias = .0; + if (abs(v.x) > abs(v.y)) + { + v = float2(v.y, -v.x); + bias = PI / 2.; + } + return atan(v.y, v.x) + bias; +} + +INLINE half4 premultiply(half4 color) { return make_half4(color.xyz * color.w, color.w); } + +INLINE half4 unmultiply(half4 color) +{ + if (color.w != .0) + color.xyz *= 1.0 / color.w; + return color; +} + +INLINE half min_value(half4 min4) +{ + half2 min2 = min(min4.xy, min4.zw); + half min1 = min(min2.x, min2.y); + return min1; +} + +INLINE float manhattan_width(float2 x) { return abs(x.x) + abs(x.y); } + +#ifdef _EXPORTED_VERTEX + +#ifndef _EXPORTED_USE_GENERATED_UNIFORMS +UNIFORM_BLOCK_BEGIN(FLUSH_UNIFORM_BUFFER_IDX, _EXPORTED_FlushUniforms) +float gradInverseViewportY; +float tessInverseViewportY; +float renderTargetInverseViewportX; +float renderTargetInverseViewportY; +uint renderTargetWidth; +uint renderTargetHeight; +uint colorClearValue; // Only used if clears are implemented as draws. +uint coverageClearValue; // Only used if clears are implemented as draws. +int4 renderTargetUpdateBounds; // drawBounds, or renderTargetBounds if there is a clear. (LTRB.) +uint pathIDGranularity; // Spacing between adjacent path IDs (1 if IEEE compliant). +float vertexDiscardValue; +UNIFORM_BLOCK_END(uniforms) +#endif + +#define RENDER_TARGET_COORD_TO_CLIP_COORD(COORD) \ + float4((COORD).x* uniforms.renderTargetInverseViewportX - 1., \ + (COORD).y * -uniforms.renderTargetInverseViewportY + \ + sign(uniforms.renderTargetInverseViewportY), \ + .0, \ + 1.) + +#ifndef _EXPORTED_USING_DEPTH_STENCIL +// Calculates the Manhattan distance in pixels from the given pixelPosition, to the point at each +// edge of the clipRect where coverage = 0. +// +// clipRectInverseMatrix transforms from pixel coordinates to a space where the clipRect is the +// normalized rectangle: [-1, -1, 1, 1]. +INLINE float4 find_clip_rect_coverage_distances(float2x2 clipRectInverseMatrix, + float2 clipRectInverseTranslate, + float2 pixelPosition) +{ + float2 clipRectAAWidth = abs(clipRectInverseMatrix[0]) + abs(clipRectInverseMatrix[1]); + if (clipRectAAWidth.x != .0 && clipRectAAWidth.y != .0) + { + float2 r = 1. / clipRectAAWidth; + float2 clipRectCoord = MUL(clipRectInverseMatrix, pixelPosition) + clipRectInverseTranslate; + // When the center of a pixel falls exactly on an edge, coverage should be .5. + const float coverageWhenDistanceIsZero = .5; + return float4(clipRectCoord, -clipRectCoord) * r.xyxy + r.xyxy + coverageWhenDistanceIsZero; + } + else + { + // The caller gave us a singular clipRectInverseMatrix. This is a special case where we are + // expected to use tx and ty as uniform coverage. + return clipRectInverseTranslate.xyxy; + } +} + +#else // USING_DEPTH_STENCIL + +INLINE float normalize_z_index(uint zIndex) { return 1. - float(zIndex) * (2. / 32768.); } + +#ifdef _EXPORTED_ENABLE_CLIP_RECT +INLINE void set_clip_rect_plane_distances(float2x2 clipRectInverseMatrix, + float2 clipRectInverseTranslate, + float2 pixelPosition) +{ + if (clipRectInverseMatrix != float2x2(0)) + { + float2 clipRectCoord = + MUL(clipRectInverseMatrix, pixelPosition) + clipRectInverseTranslate.xy; + gl_ClipDistance[0] = clipRectCoord.x + 1.; + gl_ClipDistance[1] = clipRectCoord.y + 1.; + gl_ClipDistance[2] = 1. - clipRectCoord.x; + gl_ClipDistance[3] = 1. - clipRectCoord.y; + } + else + { + // "clipRectInverseMatrix == 0" is a special case: + // "clipRectInverseTranslate.x == 1" => all in. 
+ // "clipRectInverseTranslate.x == 0" => all out. + gl_ClipDistance[0] = gl_ClipDistance[1] = gl_ClipDistance[2] = gl_ClipDistance[3] = + clipRectInverseTranslate.x - .5; + } +} +#endif // ENABLE_CLIP_RECT +#endif // USING_DEPTH_STENCIL +#endif // VERTEX + +#ifdef _EXPORTED_DRAW_IMAGE +#ifndef _EXPORTED_USE_GENERATED_UNIFORMS +UNIFORM_BLOCK_BEGIN(IMAGE_DRAW_UNIFORM_BUFFER_IDX, _EXPORTED_ImageDrawUniforms) +float4 viewMatrix; +float2 translate; +float opacity; +float padding; +// clipRectInverseMatrix transforms from pixel coordinates to a space where the clipRect is the +// normalized rectangle: [-1, -1, 1, 1]. +float4 clipRectInverseMatrix; +float2 clipRectInverseTranslate; +uint clipID; +uint blendMode; +uint zIndex; +UNIFORM_BLOCK_END(imageDrawUniforms) +#endif +#endif +)==="; +} // namespace glsl +} // namespace gpu +} // namespace rive \ No newline at end of file diff --git a/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/common.minified.ush b/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/common.minified.ush new file mode 100644 index 00000000..ebe51006 --- /dev/null +++ b/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/common.minified.ush @@ -0,0 +1,243 @@ +/* + * Copyright 2022 Rive + */ + +// Common functions shared by multiple shaders. + +#ifdef USE_GENERATED_UNIFORMS +#include "/Engine/Generated/GeneratedUniformBuffers.ush" +#endif + +#define PI float(3.141592653589793238) + +#ifndef USING_DEPTH_STENCIL +#define AA_RADIUS float(.5) +#else +#define AA_RADIUS float(.0) +#endif + +#ifdef GLSL +// GLSL has different semantics around precision. Normalize type conversions across +// languages with "cast_*_to_*()" methods. +INLINE half cast_float_to_half(float x) { return x; } +INLINE half cast_uint_to_half(uint x) { return float(x); } +INLINE half cast_ushort_to_half(ushort x) { return float(x); } +INLINE half cast_int_to_half(int x) { return float(x); } +INLINE half4 cast_float4_to_half4(float4 xyzw) { return xyzw; } +INLINE half2 cast_float2_to_half2(float2 xy) { return xy; } +INLINE half4 cast_uint4_to_half4(uint4 xyzw) { return vec4(xyzw); } +INLINE ushort cast_half_to_ushort(half x) { return uint(x); } +INLINE ushort cast_uint_to_ushort(uint x) { return x; } +#else +INLINE half cast_float_to_half(float x) { return (half)x; } +INLINE half cast_uint_to_half(uint x) { return (half)x; } +INLINE half cast_ushort_to_half(ushort x) { return (half)x; } +INLINE half cast_int_to_half(int x) { return (half)x; } +INLINE half4 cast_float4_to_half4(float4 xyzw) { return (half4)xyzw; } +INLINE half2 cast_float2_to_half2(float2 xy) { return (half2)xy; } +INLINE half4 cast_uint4_to_half4(uint4 xyzw) { return (half4)xyzw; } +INLINE ushort cast_half_to_ushort(half x) { return (ushort)x; } +INLINE ushort cast_uint_to_ushort(uint x) { return (ushort)x; } +#endif + +INLINE half make_half(half x) { return x; } + +INLINE half2 make_half2(half2 xy) { return xy; } + +INLINE half2 make_half2(half x, half y) +{ + half2 ret; + ret.x = x, ret.y = y; + return ret; +} + +INLINE half3 make_half3(half x, half y, half z) +{ + half3 ret; + ret.x = x, ret.y = y, ret.z = z; + return ret; +} + +INLINE half3 make_half3(half x) +{ + half3 ret; + ret.x = x, ret.y = x, ret.z = x; + return ret; +} + +INLINE half4 make_half4(half x, half y, half z, half w) +{ + half4 ret; + ret.x = x, ret.y = y, ret.z = z, ret.w = w; + return ret; +} + +INLINE half4 make_half4(half3 xyz, half w) +{ + half4 ret; + ret.xyz = xyz; + ret.w = w; + return ret; +} + 
+INLINE half4 make_half4(half x) +{ + half4 ret; + ret.x = x, ret.y = x, ret.z = x, ret.w = x; + return ret; +} + +INLINE half3x4 make_half3x4(half3 a, half b, half3 c, half d, half3 e, half f) +{ + half3x4 ret; + ret[0] = make_half4(a, b); + ret[1] = make_half4(c, d); + ret[2] = make_half4(e, f); + return ret; +} + +INLINE float2x2 make_float2x2(float4 x) { return float2x2(x.xy, x.zw); } + +INLINE uint make_uint(ushort x) { return x; } + +INLINE uint contour_data_idx(uint contourIDWithFlags) +{ + return (contourIDWithFlags & CONTOUR_ID_MASK) - 1u; +} + +INLINE float2 unchecked_mix(float2 a, float2 b, float t) { return (b - a) * t + a; } + +INLINE half id_bits_to_f16(uint idBits, uint pathIDGranularity) +{ + return idBits == 0u ? .0 : unpackHalf2x16((idBits + MAX_DENORM_F16) * pathIDGranularity).x; +} + +INLINE float atan2(float2 v) +{ + float bias = .0; + if (abs(v.x) > abs(v.y)) + { + v = float2(v.y, -v.x); + bias = PI / 2.; + } + return atan(v.y, v.x) + bias; +} + +INLINE half4 premultiply(half4 color) { return make_half4(color.xyz * color.w, color.w); } + +INLINE half4 unmultiply(half4 color) +{ + if (color.w != .0) + color.xyz *= 1.0 / color.w; + return color; +} + +INLINE half min_value(half4 min4) +{ + half2 min2 = min(min4.xy, min4.zw); + half min1 = min(min2.x, min2.y); + return min1; +} + +INLINE float manhattan_width(float2 x) { return abs(x.x) + abs(x.y); } + +#ifdef VERTEX + +#ifndef USE_GENERATED_UNIFORMS +UNIFORM_BLOCK_BEGIN(FLUSH_UNIFORM_BUFFER_IDX, _EXPORTED_FlushUniforms) +float gradInverseViewportY; +float tessInverseViewportY; +float renderTargetInverseViewportX; +float renderTargetInverseViewportY; +uint renderTargetWidth; +uint renderTargetHeight; +uint colorClearValue; // Only used if clears are implemented as draws. +uint coverageClearValue; // Only used if clears are implemented as draws. +int4 renderTargetUpdateBounds; // drawBounds, or renderTargetBounds if there is a clear. (LTRB.) +uint pathIDGranularity; // Spacing between adjacent path IDs (1 if IEEE compliant). +float vertexDiscardValue; +UNIFORM_BLOCK_END(uniforms) +#endif + +#define RENDER_TARGET_COORD_TO_CLIP_COORD(COORD) \ + float4((COORD).x* uniforms.renderTargetInverseViewportX - 1., \ + (COORD).y * -uniforms.renderTargetInverseViewportY + \ + sign(uniforms.renderTargetInverseViewportY), \ + .0, \ + 1.) + +#ifndef USING_DEPTH_STENCIL +// Calculates the Manhattan distance in pixels from the given pixelPosition, to the point at each +// edge of the clipRect where coverage = 0. +// +// clipRectInverseMatrix transforms from pixel coordinates to a space where the clipRect is the +// normalized rectangle: [-1, -1, 1, 1]. +INLINE float4 find_clip_rect_coverage_distances(float2x2 clipRectInverseMatrix, + float2 clipRectInverseTranslate, + float2 pixelPosition) +{ + float2 clipRectAAWidth = abs(clipRectInverseMatrix[0]) + abs(clipRectInverseMatrix[1]); + if (clipRectAAWidth.x != .0 && clipRectAAWidth.y != .0) + { + float2 r = 1. / clipRectAAWidth; + float2 clipRectCoord = MUL(clipRectInverseMatrix, pixelPosition) + clipRectInverseTranslate; + // When the center of a pixel falls exactly on an edge, coverage should be .5. + const float coverageWhenDistanceIsZero = .5; + return float4(clipRectCoord, -clipRectCoord) * r.xyxy + r.xyxy + coverageWhenDistanceIsZero; + } + else + { + // The caller gave us a singular clipRectInverseMatrix. This is a special case where we are + // expected to use tx and ty as uniform coverage. 
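+        // (Concretely: callers take min_value() of the four returned
+        // distances, so a translate of (1, 1) yields full coverage everywhere
+        // and (0, 0) yields zero coverage -- matching the "all in" / "all out"
+        // convention of the depth/stencil special case below.)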
+ return clipRectInverseTranslate.xyxy; + } +} + +#else // USING_DEPTH_STENCIL + +INLINE float normalize_z_index(uint zIndex) { return 1. - float(zIndex) * (2. / 32768.); } + +#ifdef ENABLE_CLIP_RECT +INLINE void set_clip_rect_plane_distances(float2x2 clipRectInverseMatrix, + float2 clipRectInverseTranslate, + float2 pixelPosition) +{ + if (clipRectInverseMatrix != float2x2(0)) + { + float2 clipRectCoord = + MUL(clipRectInverseMatrix, pixelPosition) + clipRectInverseTranslate.xy; + gl_ClipDistance[0] = clipRectCoord.x + 1.; + gl_ClipDistance[1] = clipRectCoord.y + 1.; + gl_ClipDistance[2] = 1. - clipRectCoord.x; + gl_ClipDistance[3] = 1. - clipRectCoord.y; + } + else + { + // "clipRectInverseMatrix == 0" is a special case: + // "clipRectInverseTranslate.x == 1" => all in. + // "clipRectInverseTranslate.x == 0" => all out. + gl_ClipDistance[0] = gl_ClipDistance[1] = gl_ClipDistance[2] = gl_ClipDistance[3] = + clipRectInverseTranslate.x - .5; + } +} +#endif // ENABLE_CLIP_RECT +#endif // USING_DEPTH_STENCIL +#endif // VERTEX + +#ifdef DRAW_IMAGE +#ifndef USE_GENERATED_UNIFORMS +UNIFORM_BLOCK_BEGIN(IMAGE_DRAW_UNIFORM_BUFFER_IDX, _EXPORTED_ImageDrawUniforms) +float4 viewMatrix; +float2 translate; +float opacity; +float padding; +// clipRectInverseMatrix transforms from pixel coordinates to a space where the clipRect is the +// normalized rectangle: [-1, -1, 1, 1]. +float4 clipRectInverseMatrix; +float2 clipRectInverseTranslate; +uint clipID; +uint blendMode; +uint zIndex; +UNIFORM_BLOCK_END(imageDrawUniforms) +#endif +#endif diff --git a/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/constants.exports.h b/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/constants.exports.h new file mode 100644 index 00000000..2d88d890 --- /dev/null +++ b/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/constants.exports.h @@ -0,0 +1,178 @@ +#pragma once + +#define GLSL_CLEAR_CLIP "_EXPORTED_CLEAR_CLIP" +#define GLSL_CLEAR_CLIP_raw _EXPORTED_CLEAR_CLIP +#define GLSL_CLEAR_COLOR "_EXPORTED_CLEAR_COLOR" +#define GLSL_CLEAR_COLOR_raw _EXPORTED_CLEAR_COLOR +#define GLSL_CLEAR_COVERAGE "_EXPORTED_CLEAR_COVERAGE" +#define GLSL_CLEAR_COVERAGE_raw _EXPORTED_CLEAR_COVERAGE +#define GLSL_COALESCED_PLS_RESOLVE_AND_TRANSFER "_EXPORTED_COALESCED_PLS_RESOLVE_AND_TRANSFER" +#define GLSL_COALESCED_PLS_RESOLVE_AND_TRANSFER_raw _EXPORTED_COALESCED_PLS_RESOLVE_AND_TRANSFER +#define GLSL_COLOR_PLANE_IDX_OVERRIDE "_EXPORTED_COLOR_PLANE_IDX_OVERRIDE" +#define GLSL_COLOR_PLANE_IDX_OVERRIDE_raw _EXPORTED_COLOR_PLANE_IDX_OVERRIDE +#define GLSL_DISABLE_SHADER_STORAGE_BUFFERS "_EXPORTED_DISABLE_SHADER_STORAGE_BUFFERS" +#define GLSL_DISABLE_SHADER_STORAGE_BUFFERS_raw _EXPORTED_DISABLE_SHADER_STORAGE_BUFFERS +#define GLSL_DRAW_IMAGE "_EXPORTED_DRAW_IMAGE" +#define GLSL_DRAW_IMAGE_raw _EXPORTED_DRAW_IMAGE +#define GLSL_DRAW_IMAGE_MESH "_EXPORTED_DRAW_IMAGE_MESH" +#define GLSL_DRAW_IMAGE_MESH_raw _EXPORTED_DRAW_IMAGE_MESH +#define GLSL_DRAW_IMAGE_RECT "_EXPORTED_DRAW_IMAGE_RECT" +#define GLSL_DRAW_IMAGE_RECT_raw _EXPORTED_DRAW_IMAGE_RECT +#define GLSL_DRAW_INTERIOR_TRIANGLES "_EXPORTED_DRAW_INTERIOR_TRIANGLES" +#define GLSL_DRAW_INTERIOR_TRIANGLES_raw _EXPORTED_DRAW_INTERIOR_TRIANGLES +#define GLSL_DRAW_PATH "_EXPORTED_DRAW_PATH" +#define GLSL_DRAW_PATH_raw _EXPORTED_DRAW_PATH +#define GLSL_DRAW_RENDER_TARGET_UPDATE_BOUNDS "_EXPORTED_DRAW_RENDER_TARGET_UPDATE_BOUNDS" +#define GLSL_DRAW_RENDER_TARGET_UPDATE_BOUNDS_raw _EXPORTED_DRAW_RENDER_TARGET_UPDATE_BOUNDS 
+#define GLSL_ENABLE_ADVANCED_BLEND "_EXPORTED_ENABLE_ADVANCED_BLEND" +#define GLSL_ENABLE_ADVANCED_BLEND_raw _EXPORTED_ENABLE_ADVANCED_BLEND +#define GLSL_ENABLE_BINDLESS_TEXTURES "_EXPORTED_ENABLE_BINDLESS_TEXTURES" +#define GLSL_ENABLE_BINDLESS_TEXTURES_raw _EXPORTED_ENABLE_BINDLESS_TEXTURES +#define GLSL_ENABLE_CLIPPING "_EXPORTED_ENABLE_CLIPPING" +#define GLSL_ENABLE_CLIPPING_raw _EXPORTED_ENABLE_CLIPPING +#define GLSL_ENABLE_CLIP_RECT "_EXPORTED_ENABLE_CLIP_RECT" +#define GLSL_ENABLE_CLIP_RECT_raw _EXPORTED_ENABLE_CLIP_RECT +#define GLSL_ENABLE_EVEN_ODD "_EXPORTED_ENABLE_EVEN_ODD" +#define GLSL_ENABLE_EVEN_ODD_raw _EXPORTED_ENABLE_EVEN_ODD +#define GLSL_ENABLE_HSL_BLEND_MODES "_EXPORTED_ENABLE_HSL_BLEND_MODES" +#define GLSL_ENABLE_HSL_BLEND_MODES_raw _EXPORTED_ENABLE_HSL_BLEND_MODES +#define GLSL_ENABLE_INSTANCE_INDEX "_EXPORTED_ENABLE_INSTANCE_INDEX" +#define GLSL_ENABLE_INSTANCE_INDEX_raw _EXPORTED_ENABLE_INSTANCE_INDEX +#define GLSL_ENABLE_KHR_BLEND "_EXPORTED_ENABLE_KHR_BLEND" +#define GLSL_ENABLE_KHR_BLEND_raw _EXPORTED_ENABLE_KHR_BLEND +#define GLSL_ENABLE_MIN_16_PRECISION "_EXPORTED_ENABLE_MIN_16_PRECISION" +#define GLSL_ENABLE_MIN_16_PRECISION_raw _EXPORTED_ENABLE_MIN_16_PRECISION +#define GLSL_ENABLE_NESTED_CLIPPING "_EXPORTED_ENABLE_NESTED_CLIPPING" +#define GLSL_ENABLE_NESTED_CLIPPING_raw _EXPORTED_ENABLE_NESTED_CLIPPING +#define GLSL_ENABLE_RASTERIZER_ORDERED_VIEWS "_EXPORTED_ENABLE_RASTERIZER_ORDERED_VIEWS" +#define GLSL_ENABLE_RASTERIZER_ORDERED_VIEWS_raw _EXPORTED_ENABLE_RASTERIZER_ORDERED_VIEWS +#define GLSL_ENABLE_SPIRV_CROSS_BASE_INSTANCE "_EXPORTED_ENABLE_SPIRV_CROSS_BASE_INSTANCE" +#define GLSL_ENABLE_SPIRV_CROSS_BASE_INSTANCE_raw _EXPORTED_ENABLE_SPIRV_CROSS_BASE_INSTANCE +#define GLSL_ENABLE_TYPED_UAV_LOAD_STORE "_EXPORTED_ENABLE_TYPED_UAV_LOAD_STORE" +#define GLSL_ENABLE_TYPED_UAV_LOAD_STORE_raw _EXPORTED_ENABLE_TYPED_UAV_LOAD_STORE +#define GLSL_FIXED_FUNCTION_COLOR_BLEND "_EXPORTED_FIXED_FUNCTION_COLOR_BLEND" +#define GLSL_FIXED_FUNCTION_COLOR_BLEND_raw _EXPORTED_FIXED_FUNCTION_COLOR_BLEND +#define GLSL_FRAGMENT "_EXPORTED_FRAGMENT" +#define GLSL_FRAGMENT_raw _EXPORTED_FRAGMENT +#define GLSL_FlushUniforms "_EXPORTED_FlushUniforms" +#define GLSL_FlushUniforms_raw _EXPORTED_FlushUniforms +#define GLSL_GLSL_VERSION "_EXPORTED_GLSL_VERSION" +#define GLSL_GLSL_VERSION_raw _EXPORTED_GLSL_VERSION +#define GLSL_INITIALIZE_PLS "_EXPORTED_INITIALIZE_PLS" +#define GLSL_INITIALIZE_PLS_raw _EXPORTED_INITIALIZE_PLS +#define GLSL_ImageDrawUniforms "_EXPORTED_ImageDrawUniforms" +#define GLSL_ImageDrawUniforms_raw _EXPORTED_ImageDrawUniforms +#define GLSL_LOAD_COLOR "_EXPORTED_LOAD_COLOR" +#define GLSL_LOAD_COLOR_raw _EXPORTED_LOAD_COLOR +#define GLSL_OPTIONALLY_FLAT "_EXPORTED_OPTIONALLY_FLAT" +#define GLSL_OPTIONALLY_FLAT_raw _EXPORTED_OPTIONALLY_FLAT +#define GLSL_PLS_IMPL_ANGLE "_EXPORTED_PLS_IMPL_ANGLE" +#define GLSL_PLS_IMPL_ANGLE_raw _EXPORTED_PLS_IMPL_ANGLE +#define GLSL_PLS_IMPL_DEVICE_BUFFER "_EXPORTED_PLS_IMPL_DEVICE_BUFFER" +#define GLSL_PLS_IMPL_DEVICE_BUFFER_raw _EXPORTED_PLS_IMPL_DEVICE_BUFFER +#define GLSL_PLS_IMPL_DEVICE_BUFFER_RASTER_ORDERED "_EXPORTED_PLS_IMPL_DEVICE_BUFFER_RASTER_ORDERED" +#define GLSL_PLS_IMPL_DEVICE_BUFFER_RASTER_ORDERED_raw _EXPORTED_PLS_IMPL_DEVICE_BUFFER_RASTER_ORDERED +#define GLSL_PLS_IMPL_EXT_NATIVE "_EXPORTED_PLS_IMPL_EXT_NATIVE" +#define GLSL_PLS_IMPL_EXT_NATIVE_raw _EXPORTED_PLS_IMPL_EXT_NATIVE +#define GLSL_PLS_IMPL_FRAMEBUFFER_FETCH "_EXPORTED_PLS_IMPL_FRAMEBUFFER_FETCH" +#define GLSL_PLS_IMPL_FRAMEBUFFER_FETCH_raw 
_EXPORTED_PLS_IMPL_FRAMEBUFFER_FETCH +#define GLSL_PLS_IMPL_NONE "_EXPORTED_PLS_IMPL_NONE" +#define GLSL_PLS_IMPL_NONE_raw _EXPORTED_PLS_IMPL_NONE +#define GLSL_PLS_IMPL_STORAGE_TEXTURE "_EXPORTED_PLS_IMPL_STORAGE_TEXTURE" +#define GLSL_PLS_IMPL_STORAGE_TEXTURE_raw _EXPORTED_PLS_IMPL_STORAGE_TEXTURE +#define GLSL_PLS_IMPL_SUBPASS_LOAD "_EXPORTED_PLS_IMPL_SUBPASS_LOAD" +#define GLSL_PLS_IMPL_SUBPASS_LOAD_raw _EXPORTED_PLS_IMPL_SUBPASS_LOAD +#define GLSL_RESOLVE_PLS "_EXPORTED_RESOLVE_PLS" +#define GLSL_RESOLVE_PLS_raw _EXPORTED_RESOLVE_PLS +#define GLSL_STORE_COLOR "_EXPORTED_STORE_COLOR" +#define GLSL_STORE_COLOR_raw _EXPORTED_STORE_COLOR +#define GLSL_STORE_COLOR_CLEAR "_EXPORTED_STORE_COLOR_CLEAR" +#define GLSL_STORE_COLOR_CLEAR_raw _EXPORTED_STORE_COLOR_CLEAR +#define GLSL_SWIZZLE_COLOR_BGRA_TO_RGBA "_EXPORTED_SWIZZLE_COLOR_BGRA_TO_RGBA" +#define GLSL_SWIZZLE_COLOR_BGRA_TO_RGBA_raw _EXPORTED_SWIZZLE_COLOR_BGRA_TO_RGBA +#define GLSL_TARGET_VULKAN "_EXPORTED_TARGET_VULKAN" +#define GLSL_TARGET_VULKAN_raw _EXPORTED_TARGET_VULKAN +#define GLSL_USE_GENERATED_UNIFORMS "_EXPORTED_USE_GENERATED_UNIFORMS" +#define GLSL_USE_GENERATED_UNIFORMS_raw _EXPORTED_USE_GENERATED_UNIFORMS +#define GLSL_USING_DEPTH_STENCIL "_EXPORTED_USING_DEPTH_STENCIL" +#define GLSL_USING_DEPTH_STENCIL_raw _EXPORTED_USING_DEPTH_STENCIL +#define GLSL_USING_PLS_STORAGE_TEXTURES "_EXPORTED_USING_PLS_STORAGE_TEXTURES" +#define GLSL_USING_PLS_STORAGE_TEXTURES_raw _EXPORTED_USING_PLS_STORAGE_TEXTURES +#define GLSL_VERTEX "_EXPORTED_VERTEX" +#define GLSL_VERTEX_raw _EXPORTED_VERTEX +#define GLSL_a_args "_EXPORTED_a_args" +#define GLSL_a_args_raw _EXPORTED_a_args +#define GLSL_a_args_a "_EXPORTED_a_args_a" +#define GLSL_a_args_a_raw _EXPORTED_a_args_a +#define GLSL_a_args_b "_EXPORTED_a_args_b" +#define GLSL_a_args_b_raw _EXPORTED_a_args_b +#define GLSL_a_args_c "_EXPORTED_a_args_c" +#define GLSL_a_args_c_raw _EXPORTED_a_args_c +#define GLSL_a_args_d "_EXPORTED_a_args_d" +#define GLSL_a_args_d_raw _EXPORTED_a_args_d +#define GLSL_a_imageRectVertex "_EXPORTED_a_imageRectVertex" +#define GLSL_a_imageRectVertex_raw _EXPORTED_a_imageRectVertex +#define GLSL_a_joinTan_and_ys "_EXPORTED_a_joinTan_and_ys" +#define GLSL_a_joinTan_and_ys_raw _EXPORTED_a_joinTan_and_ys +#define GLSL_a_mirroredVertexData "_EXPORTED_a_mirroredVertexData" +#define GLSL_a_mirroredVertexData_raw _EXPORTED_a_mirroredVertexData +#define GLSL_a_p0p1_ "_EXPORTED_a_p0p1_" +#define GLSL_a_p0p1__raw _EXPORTED_a_p0p1_ +#define GLSL_a_p2p3_ "_EXPORTED_a_p2p3_" +#define GLSL_a_p2p3__raw _EXPORTED_a_p2p3_ +#define GLSL_a_patchVertexData "_EXPORTED_a_patchVertexData" +#define GLSL_a_patchVertexData_raw _EXPORTED_a_patchVertexData +#define GLSL_a_position "_EXPORTED_a_position" +#define GLSL_a_position_raw _EXPORTED_a_position +#define GLSL_a_span "_EXPORTED_a_span" +#define GLSL_a_span_raw _EXPORTED_a_span +#define GLSL_a_span_a "_EXPORTED_a_span_a" +#define GLSL_a_span_a_raw _EXPORTED_a_span_a +#define GLSL_a_span_b "_EXPORTED_a_span_b" +#define GLSL_a_span_b_raw _EXPORTED_a_span_b +#define GLSL_a_span_c "_EXPORTED_a_span_c" +#define GLSL_a_span_c_raw _EXPORTED_a_span_c +#define GLSL_a_span_d "_EXPORTED_a_span_d" +#define GLSL_a_span_d_raw _EXPORTED_a_span_d +#define GLSL_a_texCoord "_EXPORTED_a_texCoord" +#define GLSL_a_texCoord_raw _EXPORTED_a_texCoord +#define GLSL_a_triangleVertex "_EXPORTED_a_triangleVertex" +#define GLSL_a_triangleVertex_raw _EXPORTED_a_triangleVertex +#define GLSL_blitFragmentMain "_EXPORTED_blitFragmentMain" +#define GLSL_blitFragmentMain_raw 
_EXPORTED_blitFragmentMain +#define GLSL_blitTextureSource "_EXPORTED_blitTextureSource" +#define GLSL_blitTextureSource_raw _EXPORTED_blitTextureSource +#define GLSL_blitVertexMain "_EXPORTED_blitVertexMain" +#define GLSL_blitVertexMain_raw _EXPORTED_blitVertexMain +#define GLSL_clearColor "_EXPORTED_clearColor" +#define GLSL_clearColor_raw _EXPORTED_clearColor +#define GLSL_colorRampFragmentMain "_EXPORTED_colorRampFragmentMain" +#define GLSL_colorRampFragmentMain_raw _EXPORTED_colorRampFragmentMain +#define GLSL_colorRampVertexMain "_EXPORTED_colorRampVertexMain" +#define GLSL_colorRampVertexMain_raw _EXPORTED_colorRampVertexMain +#define GLSL_contourBuffer "_EXPORTED_contourBuffer" +#define GLSL_contourBuffer_raw _EXPORTED_contourBuffer +#define GLSL_drawFragmentMain "_EXPORTED_drawFragmentMain" +#define GLSL_drawFragmentMain_raw _EXPORTED_drawFragmentMain +#define GLSL_drawVertexMain "_EXPORTED_drawVertexMain" +#define GLSL_drawVertexMain_raw _EXPORTED_drawVertexMain +#define GLSL_dstColorTexture "_EXPORTED_dstColorTexture" +#define GLSL_dstColorTexture_raw _EXPORTED_dstColorTexture +#define GLSL_gradTexture "_EXPORTED_gradTexture" +#define GLSL_gradTexture_raw _EXPORTED_gradTexture +#define GLSL_imageTexture "_EXPORTED_imageTexture" +#define GLSL_imageTexture_raw _EXPORTED_imageTexture +#define GLSL_paintAuxBuffer "_EXPORTED_paintAuxBuffer" +#define GLSL_paintAuxBuffer_raw _EXPORTED_paintAuxBuffer +#define GLSL_paintBuffer "_EXPORTED_paintBuffer" +#define GLSL_paintBuffer_raw _EXPORTED_paintBuffer +#define GLSL_pathBuffer "_EXPORTED_pathBuffer" +#define GLSL_pathBuffer_raw _EXPORTED_pathBuffer +#define GLSL_stencilVertexMain "_EXPORTED_stencilVertexMain" +#define GLSL_stencilVertexMain_raw _EXPORTED_stencilVertexMain +#define GLSL_tessVertexTexture "_EXPORTED_tessVertexTexture" +#define GLSL_tessVertexTexture_raw _EXPORTED_tessVertexTexture +#define GLSL_tessellateFragmentMain "_EXPORTED_tessellateFragmentMain" +#define GLSL_tessellateFragmentMain_raw _EXPORTED_tessellateFragmentMain +#define GLSL_tessellateVertexMain "_EXPORTED_tessellateVertexMain" +#define GLSL_tessellateVertexMain_raw _EXPORTED_tessellateVertexMain diff --git a/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/constants.glsl.hpp b/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/constants.glsl.hpp new file mode 100644 index 00000000..c03b3106 --- /dev/null +++ b/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/constants.glsl.hpp @@ -0,0 +1,165 @@ +#pragma once + +#include "constants.exports.h" + +namespace rive { +namespace gpu { +namespace glsl { +const char constants[] = R"===(/* + * Copyright 2022 Rive + */ + +#define TESS_TEXTURE_WIDTH float(2048) +#define TESS_TEXTURE_WIDTH_LOG2 11 + +#define GRAD_TEXTURE_WIDTH float(512) +#define GRAD_TEXTURE_INVERSE_WIDTH float(0.001953125) + +// Width to use for a texture that emulates a storage buffer. +// +// Minimize width since the texture needs to be updated in entire rows from the resource buffer. +// Since these only serve paths and contours, both of those are limited to 16-bit indices, 2048 +// is the min specified texture size in ES3, and no path buffer uses more than 4 texels, we can +// safely use a width of 128. +#define STORAGE_TEXTURE_WIDTH 128 +#define STORAGE_TEXTURE_SHIFT_Y 7 +#define STORAGE_TEXTURE_MASK_X 0x7fu + +// Tells shaders that a cubic should actually be drawn as the single, non-AA triangle: [p0, p1, p3]. 
+// This is used to squeeze in more rare triangles, like "grout" triangles from self intersections on +// interior triangulation, where it wouldn't be worth it to put them in their own dedicated draw +// call. +#define RETROFITTED_TRIANGLE_CONTOUR_FLAG (1u << 31u) + +// Tells the tessellation shader to re-run Wang's formula on the given curve, figure out how many +// segments it actually needs, and make any excess segments degenerate by co-locating their vertices +// at T=0. (Used on the "outerCurve" patches that are drawn with interior triangulations.) +#define CULL_EXCESS_TESSELLATION_SEGMENTS_CONTOUR_FLAG (1u << 30u) + +// Flags for specifying the join type. +#define JOIN_TYPE_MASK (3u << 28u) +#define MITER_CLIP_JOIN_CONTOUR_FLAG (3u << 28u) +#define MITER_REVERT_JOIN_CONTOUR_FLAG (2u << 28u) +#define BEVEL_JOIN_CONTOUR_FLAG (1u << 28u) + +// When a join is being used to emulate a stroke cap, the shader emits additional vertices at T=0 +// and T=1 for round joins, and changes the miter limit to 1 for miter-clip joins. +#define EMULATED_STROKE_CAP_CONTOUR_FLAG (1u << 27u) + +// Internal contour flags. +#define MIRRORED_CONTOUR_CONTOUR_FLAG (1u << 26u) +#define JOIN_TANGENT_0_CONTOUR_FLAG (1u << 25u) +#define JOIN_TANGENT_INNER_CONTOUR_FLAG (1u << 24u) +#define LEFT_JOIN_CONTOUR_FLAG (1u << 23u) +#define RIGHT_JOIN_CONTOUR_FLAG (1u << 22u) +#define CONTOUR_ID_MASK 0xffffu + +// Says which part of the patch a vertex belongs to. +#define STROKE_VERTEX 0 +#define FAN_VERTEX 1 +#define FAN_MIDPOINT_VERTEX 2 + +// Says which part of the patch a vertex belongs to. +#define STROKE_VERTEX 0 +#define FAN_VERTEX 1 +#define FAN_MIDPOINT_VERTEX 2 + +// Mirrors pls::PaintType. +#define SOLID_COLOR_PAINT_TYPE 0u +#define LINEAR_GRADIENT_PAINT_TYPE 1u +#define RADIAL_GRADIENT_PAINT_TYPE 2u +#define IMAGE_PAINT_TYPE 3u +#define CLIP_UPDATE_PAINT_TYPE 4u + +// Paint flags, found in the x-component value of @paintBuffer. +#define PAINT_FLAG_EVEN_ODD 0x100u +#define PAINT_FLAG_HAS_CLIP_RECT 0x200u + +// PLS draw resources are either updated per flush or per draw. They go into set 0 +// or set 1, depending on how often they are updated. +#define PER_FLUSH_BINDINGS_SET 0 +#define PER_DRAW_BINDINGS_SET 1 + +// Index at which we access each resource. +#define TESS_VERTEX_TEXTURE_IDX 0 +#define GRAD_TEXTURE_IDX 1 +#define IMAGE_TEXTURE_IDX 2 +#define PATH_BUFFER_IDX 3 +#define PAINT_BUFFER_IDX 4 +#define PAINT_AUX_BUFFER_IDX 5 +#define CONTOUR_BUFFER_IDX 6 +#define FLUSH_UNIFORM_BUFFER_IDX 7 +#define PATH_BASE_INSTANCE_UNIFORM_BUFFER_IDX 8 +#define IMAGE_DRAW_UNIFORM_BUFFER_IDX 9 +#define DST_COLOR_TEXTURE_IDX 10 +#define DEFAULT_BINDINGS_SET_SIZE 11 + +// Samplers are accessed at the same index as their corresponding texture, so we put them in a +// separate binding set. +#define SAMPLER_BINDINGS_SET 2 + +// PLS textures are accessed at the same index as their PLS planes, so we put them in a separate +// binding set. +#define PLS_TEXTURE_BINDINGS_SET 3 + +#define BINDINGS_SET_COUNT 4 + +// Index of each pixel local storage plane. +#define COLOR_PLANE_IDX 0 +#define CLIP_PLANE_IDX 1 +#define SCRATCH_COLOR_PLANE_IDX 2 +#define COVERAGE_PLANE_IDX 3 + +// acos(1/4), because the miter limit is always 4. +#define MITER_ANGLE_LIMIT float(1.318116071652817965746) + +// Raw bit representation of the largest denormalized fp16 value. We offset all (1-based) path IDs +// by this value in order to avoid denorms, which have been empirically unreliable on Android as ID +// values. 
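+// (Illustrative: with pathIDGranularity == 1, path ID 1 encodes as
+// 1 + 1023 == 1024 == 0x400, the smallest *normalized* fp16 bit pattern,
+// i.e. 2^-14; see id_bits_to_f16() in common.glsl.)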
+#define MAX_DENORM_F16 1023u + +// Blend modes. Mirrors rive::BlendMode, but 0-based and contiguous for tighter packing. +#define BLEND_SRC_OVER 0u +#define BLEND_MODE_SCREEN 1u +#define BLEND_MODE_OVERLAY 2u +#define BLEND_MODE_DARKEN 3u +#define BLEND_MODE_LIGHTEN 4u +#define BLEND_MODE_COLORDODGE 5u +#define BLEND_MODE_COLORBURN 6u +#define BLEND_MODE_HARDLIGHT 7u +#define BLEND_MODE_SOFTLIGHT 8u +#define BLEND_MODE_DIFFERENCE 9u +#define BLEND_MODE_EXCLUSION 10u +#define BLEND_MODE_MULTIPLY 11u +#define BLEND_MODE_HUE 12u +#define BLEND_MODE_SATURATION 13u +#define BLEND_MODE_COLOR 14u +#define BLEND_MODE_LUMINOSITY 15u + +// Fixed-point coverage values for the experimental atomic mode. +// Atomic mode uses 7:9 fixed point, so the winding number breaks if a shape has more than 64 +// levels of self overlap in either winding direction at any point. +#define FIXED_COVERAGE_FACTOR float(512) +#define FIXED_COVERAGE_INVERSE_FACTOR float(0.001953125) +#define FIXED_COVERAGE_ZERO float(1 << 15) +#define FIXED_COVERAGE_ONE (FIXED_COVERAGE_FACTOR + FIXED_COVERAGE_ZERO) + +// Binding points for storage buffers. +#define PAINT_STORAGE_BUFFER_IDX 8 +#define PAINT_MATRIX_STORAGE_BUFFER_IDX 9 +#define PAINT_TRANSLATE_STORAGE_BUFFER_IDX 10 +#define CLIPRECT_MATRIX_STORAGE_BUFFER_IDX 11 +#define CLIPRECT_TRANSLATE_STORAGE_BUFFER_IDX 12 + +// Indices for SPIRV specialization constants (used in lieu of #defines in Vulkan.) +#define CLIPPING_SPECIALIZATION_IDX 0 +#define CLIP_RECT_SPECIALIZATION_IDX 1 +#define ADVANCED_BLEND_SPECIALIZATION_IDX 2 +#define EVEN_ODD_SPECIALIZATION_IDX 3 +#define NESTED_CLIPPING_SPECIALIZATION_IDX 4 +#define HSL_BLEND_MODES_SPECIALIZATION_IDX 5 +#define SPECIALIZATION_COUNT 6 +)==="; +} // namespace glsl +} // namespace gpu +} // namespace rive \ No newline at end of file diff --git a/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/constants.minified.ush b/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/constants.minified.ush new file mode 100644 index 00000000..5a7fc5ef --- /dev/null +++ b/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/constants.minified.ush @@ -0,0 +1,154 @@ +/* + * Copyright 2022 Rive + */ + +#define TESS_TEXTURE_WIDTH float(2048) +#define TESS_TEXTURE_WIDTH_LOG2 11 + +#define GRAD_TEXTURE_WIDTH float(512) +#define GRAD_TEXTURE_INVERSE_WIDTH float(0.001953125) + +// Width to use for a texture that emulates a storage buffer. +// +// Minimize width since the texture needs to be updated in entire rows from the resource buffer. +// Since these only serve paths and contours, both of those are limited to 16-bit indices, 2048 +// is the min specified texture size in ES3, and no path buffer uses more than 4 texels, we can +// safely use a width of 128. +#define STORAGE_TEXTURE_WIDTH 128 +#define STORAGE_TEXTURE_SHIFT_Y 7 +#define STORAGE_TEXTURE_MASK_X 0x7fu + +// Tells shaders that a cubic should actually be drawn as the single, non-AA triangle: [p0, p1, p3]. +// This is used to squeeze in more rare triangles, like "grout" triangles from self intersections on +// interior triangulation, where it wouldn't be worth it to put them in their own dedicated draw +// call. +#define RETROFITTED_TRIANGLE_CONTOUR_FLAG (1u << 31u) + +// Tells the tessellation shader to re-run Wang's formula on the given curve, figure out how many +// segments it actually needs, and make any excess segments degenerate by co-locating their vertices +// at T=0. 
(Used on the "outerCurve" patches that are drawn with interior triangulations.) +#define CULL_EXCESS_TESSELLATION_SEGMENTS_CONTOUR_FLAG (1u << 30u) + +// Flags for specifying the join type. +#define JOIN_TYPE_MASK (3u << 28u) +#define MITER_CLIP_JOIN_CONTOUR_FLAG (3u << 28u) +#define MITER_REVERT_JOIN_CONTOUR_FLAG (2u << 28u) +#define BEVEL_JOIN_CONTOUR_FLAG (1u << 28u) + +// When a join is being used to emulate a stroke cap, the shader emits additional vertices at T=0 +// and T=1 for round joins, and changes the miter limit to 1 for miter-clip joins. +#define EMULATED_STROKE_CAP_CONTOUR_FLAG (1u << 27u) + +// Internal contour flags. +#define MIRRORED_CONTOUR_CONTOUR_FLAG (1u << 26u) +#define JOIN_TANGENT_0_CONTOUR_FLAG (1u << 25u) +#define JOIN_TANGENT_INNER_CONTOUR_FLAG (1u << 24u) +#define LEFT_JOIN_CONTOUR_FLAG (1u << 23u) +#define RIGHT_JOIN_CONTOUR_FLAG (1u << 22u) +#define CONTOUR_ID_MASK 0xffffu + +// Says which part of the patch a vertex belongs to. +#define STROKE_VERTEX 0 +#define FAN_VERTEX 1 +#define FAN_MIDPOINT_VERTEX 2 + +// Says which part of the patch a vertex belongs to. +#define STROKE_VERTEX 0 +#define FAN_VERTEX 1 +#define FAN_MIDPOINT_VERTEX 2 + +// Mirrors pls::PaintType. +#define SOLID_COLOR_PAINT_TYPE 0u +#define LINEAR_GRADIENT_PAINT_TYPE 1u +#define RADIAL_GRADIENT_PAINT_TYPE 2u +#define IMAGE_PAINT_TYPE 3u +#define CLIP_UPDATE_PAINT_TYPE 4u + +// Paint flags, found in the x-component value of @paintBuffer. +#define PAINT_FLAG_EVEN_ODD 0x100u +#define PAINT_FLAG_HAS_CLIP_RECT 0x200u + +// PLS draw resources are either updated per flush or per draw. They go into set 0 +// or set 1, depending on how often they are updated. +#define PER_FLUSH_BINDINGS_SET 0 +#define PER_DRAW_BINDINGS_SET 1 + +// Index at which we access each resource. +#define TESS_VERTEX_TEXTURE_IDX 0 +#define GRAD_TEXTURE_IDX 1 +#define IMAGE_TEXTURE_IDX 2 +#define PATH_BUFFER_IDX 3 +#define PAINT_BUFFER_IDX 4 +#define PAINT_AUX_BUFFER_IDX 5 +#define CONTOUR_BUFFER_IDX 6 +#define FLUSH_UNIFORM_BUFFER_IDX 7 +#define PATH_BASE_INSTANCE_UNIFORM_BUFFER_IDX 8 +#define IMAGE_DRAW_UNIFORM_BUFFER_IDX 9 +#define DST_COLOR_TEXTURE_IDX 10 +#define DEFAULT_BINDINGS_SET_SIZE 11 + +// Samplers are accessed at the same index as their corresponding texture, so we put them in a +// separate binding set. +#define SAMPLER_BINDINGS_SET 2 + +// PLS textures are accessed at the same index as their PLS planes, so we put them in a separate +// binding set. +#define PLS_TEXTURE_BINDINGS_SET 3 + +#define BINDINGS_SET_COUNT 4 + +// Index of each pixel local storage plane. +#define COLOR_PLANE_IDX 0 +#define CLIP_PLANE_IDX 1 +#define SCRATCH_COLOR_PLANE_IDX 2 +#define COVERAGE_PLANE_IDX 3 + +// acos(1/4), because the miter limit is always 4. +#define MITER_ANGLE_LIMIT float(1.318116071652817965746) + +// Raw bit representation of the largest denormalized fp16 value. We offset all (1-based) path IDs +// by this value in order to avoid denorms, which have been empirically unreliable on Android as ID +// values. +#define MAX_DENORM_F16 1023u + +// Blend modes. Mirrors rive::BlendMode, but 0-based and contiguous for tighter packing. 
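+// (Note: the zero value is spelled BLEND_SRC_OVER; the remaining fifteen
+// modes use the BLEND_MODE_ prefix.)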
+#define BLEND_SRC_OVER 0u +#define BLEND_MODE_SCREEN 1u +#define BLEND_MODE_OVERLAY 2u +#define BLEND_MODE_DARKEN 3u +#define BLEND_MODE_LIGHTEN 4u +#define BLEND_MODE_COLORDODGE 5u +#define BLEND_MODE_COLORBURN 6u +#define BLEND_MODE_HARDLIGHT 7u +#define BLEND_MODE_SOFTLIGHT 8u +#define BLEND_MODE_DIFFERENCE 9u +#define BLEND_MODE_EXCLUSION 10u +#define BLEND_MODE_MULTIPLY 11u +#define BLEND_MODE_HUE 12u +#define BLEND_MODE_SATURATION 13u +#define BLEND_MODE_COLOR 14u +#define BLEND_MODE_LUMINOSITY 15u + +// Fixed-point coverage values for the experimental atomic mode. +// Atomic mode uses 7:9 fixed point, so the winding number breaks if a shape has more than 64 +// levels of self overlap in either winding direction at any point. +#define FIXED_COVERAGE_FACTOR float(512) +#define FIXED_COVERAGE_INVERSE_FACTOR float(0.001953125) +#define FIXED_COVERAGE_ZERO float(1 << 15) +#define FIXED_COVERAGE_ONE (FIXED_COVERAGE_FACTOR + FIXED_COVERAGE_ZERO) + +// Binding points for storage buffers. +#define PAINT_STORAGE_BUFFER_IDX 8 +#define PAINT_MATRIX_STORAGE_BUFFER_IDX 9 +#define PAINT_TRANSLATE_STORAGE_BUFFER_IDX 10 +#define CLIPRECT_MATRIX_STORAGE_BUFFER_IDX 11 +#define CLIPRECT_TRANSLATE_STORAGE_BUFFER_IDX 12 + +// Indices for SPIRV specialization constants (used in lieu of #defines in Vulkan.) +#define CLIPPING_SPECIALIZATION_IDX 0 +#define CLIP_RECT_SPECIALIZATION_IDX 1 +#define ADVANCED_BLEND_SPECIALIZATION_IDX 2 +#define EVEN_ODD_SPECIALIZATION_IDX 3 +#define NESTED_CLIPPING_SPECIALIZATION_IDX 4 +#define HSL_BLEND_MODES_SPECIALIZATION_IDX 5 +#define SPECIALIZATION_COUNT 6 diff --git a/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/draw_image_mesh.exports.h b/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/draw_image_mesh.exports.h new file mode 100644 index 00000000..2d88d890 --- /dev/null +++ b/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/draw_image_mesh.exports.h @@ -0,0 +1,178 @@ +#pragma once + +#define GLSL_CLEAR_CLIP "_EXPORTED_CLEAR_CLIP" +#define GLSL_CLEAR_CLIP_raw _EXPORTED_CLEAR_CLIP +#define GLSL_CLEAR_COLOR "_EXPORTED_CLEAR_COLOR" +#define GLSL_CLEAR_COLOR_raw _EXPORTED_CLEAR_COLOR +#define GLSL_CLEAR_COVERAGE "_EXPORTED_CLEAR_COVERAGE" +#define GLSL_CLEAR_COVERAGE_raw _EXPORTED_CLEAR_COVERAGE +#define GLSL_COALESCED_PLS_RESOLVE_AND_TRANSFER "_EXPORTED_COALESCED_PLS_RESOLVE_AND_TRANSFER" +#define GLSL_COALESCED_PLS_RESOLVE_AND_TRANSFER_raw _EXPORTED_COALESCED_PLS_RESOLVE_AND_TRANSFER +#define GLSL_COLOR_PLANE_IDX_OVERRIDE "_EXPORTED_COLOR_PLANE_IDX_OVERRIDE" +#define GLSL_COLOR_PLANE_IDX_OVERRIDE_raw _EXPORTED_COLOR_PLANE_IDX_OVERRIDE +#define GLSL_DISABLE_SHADER_STORAGE_BUFFERS "_EXPORTED_DISABLE_SHADER_STORAGE_BUFFERS" +#define GLSL_DISABLE_SHADER_STORAGE_BUFFERS_raw _EXPORTED_DISABLE_SHADER_STORAGE_BUFFERS +#define GLSL_DRAW_IMAGE "_EXPORTED_DRAW_IMAGE" +#define GLSL_DRAW_IMAGE_raw _EXPORTED_DRAW_IMAGE +#define GLSL_DRAW_IMAGE_MESH "_EXPORTED_DRAW_IMAGE_MESH" +#define GLSL_DRAW_IMAGE_MESH_raw _EXPORTED_DRAW_IMAGE_MESH +#define GLSL_DRAW_IMAGE_RECT "_EXPORTED_DRAW_IMAGE_RECT" +#define GLSL_DRAW_IMAGE_RECT_raw _EXPORTED_DRAW_IMAGE_RECT +#define GLSL_DRAW_INTERIOR_TRIANGLES "_EXPORTED_DRAW_INTERIOR_TRIANGLES" +#define GLSL_DRAW_INTERIOR_TRIANGLES_raw _EXPORTED_DRAW_INTERIOR_TRIANGLES +#define GLSL_DRAW_PATH "_EXPORTED_DRAW_PATH" +#define GLSL_DRAW_PATH_raw _EXPORTED_DRAW_PATH +#define GLSL_DRAW_RENDER_TARGET_UPDATE_BOUNDS "_EXPORTED_DRAW_RENDER_TARGET_UPDATE_BOUNDS" +#define 
GLSL_DRAW_RENDER_TARGET_UPDATE_BOUNDS_raw _EXPORTED_DRAW_RENDER_TARGET_UPDATE_BOUNDS +#define GLSL_ENABLE_ADVANCED_BLEND "_EXPORTED_ENABLE_ADVANCED_BLEND" +#define GLSL_ENABLE_ADVANCED_BLEND_raw _EXPORTED_ENABLE_ADVANCED_BLEND +#define GLSL_ENABLE_BINDLESS_TEXTURES "_EXPORTED_ENABLE_BINDLESS_TEXTURES" +#define GLSL_ENABLE_BINDLESS_TEXTURES_raw _EXPORTED_ENABLE_BINDLESS_TEXTURES +#define GLSL_ENABLE_CLIPPING "_EXPORTED_ENABLE_CLIPPING" +#define GLSL_ENABLE_CLIPPING_raw _EXPORTED_ENABLE_CLIPPING +#define GLSL_ENABLE_CLIP_RECT "_EXPORTED_ENABLE_CLIP_RECT" +#define GLSL_ENABLE_CLIP_RECT_raw _EXPORTED_ENABLE_CLIP_RECT +#define GLSL_ENABLE_EVEN_ODD "_EXPORTED_ENABLE_EVEN_ODD" +#define GLSL_ENABLE_EVEN_ODD_raw _EXPORTED_ENABLE_EVEN_ODD +#define GLSL_ENABLE_HSL_BLEND_MODES "_EXPORTED_ENABLE_HSL_BLEND_MODES" +#define GLSL_ENABLE_HSL_BLEND_MODES_raw _EXPORTED_ENABLE_HSL_BLEND_MODES +#define GLSL_ENABLE_INSTANCE_INDEX "_EXPORTED_ENABLE_INSTANCE_INDEX" +#define GLSL_ENABLE_INSTANCE_INDEX_raw _EXPORTED_ENABLE_INSTANCE_INDEX +#define GLSL_ENABLE_KHR_BLEND "_EXPORTED_ENABLE_KHR_BLEND" +#define GLSL_ENABLE_KHR_BLEND_raw _EXPORTED_ENABLE_KHR_BLEND +#define GLSL_ENABLE_MIN_16_PRECISION "_EXPORTED_ENABLE_MIN_16_PRECISION" +#define GLSL_ENABLE_MIN_16_PRECISION_raw _EXPORTED_ENABLE_MIN_16_PRECISION +#define GLSL_ENABLE_NESTED_CLIPPING "_EXPORTED_ENABLE_NESTED_CLIPPING" +#define GLSL_ENABLE_NESTED_CLIPPING_raw _EXPORTED_ENABLE_NESTED_CLIPPING +#define GLSL_ENABLE_RASTERIZER_ORDERED_VIEWS "_EXPORTED_ENABLE_RASTERIZER_ORDERED_VIEWS" +#define GLSL_ENABLE_RASTERIZER_ORDERED_VIEWS_raw _EXPORTED_ENABLE_RASTERIZER_ORDERED_VIEWS +#define GLSL_ENABLE_SPIRV_CROSS_BASE_INSTANCE "_EXPORTED_ENABLE_SPIRV_CROSS_BASE_INSTANCE" +#define GLSL_ENABLE_SPIRV_CROSS_BASE_INSTANCE_raw _EXPORTED_ENABLE_SPIRV_CROSS_BASE_INSTANCE +#define GLSL_ENABLE_TYPED_UAV_LOAD_STORE "_EXPORTED_ENABLE_TYPED_UAV_LOAD_STORE" +#define GLSL_ENABLE_TYPED_UAV_LOAD_STORE_raw _EXPORTED_ENABLE_TYPED_UAV_LOAD_STORE +#define GLSL_FIXED_FUNCTION_COLOR_BLEND "_EXPORTED_FIXED_FUNCTION_COLOR_BLEND" +#define GLSL_FIXED_FUNCTION_COLOR_BLEND_raw _EXPORTED_FIXED_FUNCTION_COLOR_BLEND +#define GLSL_FRAGMENT "_EXPORTED_FRAGMENT" +#define GLSL_FRAGMENT_raw _EXPORTED_FRAGMENT +#define GLSL_FlushUniforms "_EXPORTED_FlushUniforms" +#define GLSL_FlushUniforms_raw _EXPORTED_FlushUniforms +#define GLSL_GLSL_VERSION "_EXPORTED_GLSL_VERSION" +#define GLSL_GLSL_VERSION_raw _EXPORTED_GLSL_VERSION +#define GLSL_INITIALIZE_PLS "_EXPORTED_INITIALIZE_PLS" +#define GLSL_INITIALIZE_PLS_raw _EXPORTED_INITIALIZE_PLS +#define GLSL_ImageDrawUniforms "_EXPORTED_ImageDrawUniforms" +#define GLSL_ImageDrawUniforms_raw _EXPORTED_ImageDrawUniforms +#define GLSL_LOAD_COLOR "_EXPORTED_LOAD_COLOR" +#define GLSL_LOAD_COLOR_raw _EXPORTED_LOAD_COLOR +#define GLSL_OPTIONALLY_FLAT "_EXPORTED_OPTIONALLY_FLAT" +#define GLSL_OPTIONALLY_FLAT_raw _EXPORTED_OPTIONALLY_FLAT +#define GLSL_PLS_IMPL_ANGLE "_EXPORTED_PLS_IMPL_ANGLE" +#define GLSL_PLS_IMPL_ANGLE_raw _EXPORTED_PLS_IMPL_ANGLE +#define GLSL_PLS_IMPL_DEVICE_BUFFER "_EXPORTED_PLS_IMPL_DEVICE_BUFFER" +#define GLSL_PLS_IMPL_DEVICE_BUFFER_raw _EXPORTED_PLS_IMPL_DEVICE_BUFFER +#define GLSL_PLS_IMPL_DEVICE_BUFFER_RASTER_ORDERED "_EXPORTED_PLS_IMPL_DEVICE_BUFFER_RASTER_ORDERED" +#define GLSL_PLS_IMPL_DEVICE_BUFFER_RASTER_ORDERED_raw _EXPORTED_PLS_IMPL_DEVICE_BUFFER_RASTER_ORDERED +#define GLSL_PLS_IMPL_EXT_NATIVE "_EXPORTED_PLS_IMPL_EXT_NATIVE" +#define GLSL_PLS_IMPL_EXT_NATIVE_raw _EXPORTED_PLS_IMPL_EXT_NATIVE +#define GLSL_PLS_IMPL_FRAMEBUFFER_FETCH 
"_EXPORTED_PLS_IMPL_FRAMEBUFFER_FETCH" +#define GLSL_PLS_IMPL_FRAMEBUFFER_FETCH_raw _EXPORTED_PLS_IMPL_FRAMEBUFFER_FETCH +#define GLSL_PLS_IMPL_NONE "_EXPORTED_PLS_IMPL_NONE" +#define GLSL_PLS_IMPL_NONE_raw _EXPORTED_PLS_IMPL_NONE +#define GLSL_PLS_IMPL_STORAGE_TEXTURE "_EXPORTED_PLS_IMPL_STORAGE_TEXTURE" +#define GLSL_PLS_IMPL_STORAGE_TEXTURE_raw _EXPORTED_PLS_IMPL_STORAGE_TEXTURE +#define GLSL_PLS_IMPL_SUBPASS_LOAD "_EXPORTED_PLS_IMPL_SUBPASS_LOAD" +#define GLSL_PLS_IMPL_SUBPASS_LOAD_raw _EXPORTED_PLS_IMPL_SUBPASS_LOAD +#define GLSL_RESOLVE_PLS "_EXPORTED_RESOLVE_PLS" +#define GLSL_RESOLVE_PLS_raw _EXPORTED_RESOLVE_PLS +#define GLSL_STORE_COLOR "_EXPORTED_STORE_COLOR" +#define GLSL_STORE_COLOR_raw _EXPORTED_STORE_COLOR +#define GLSL_STORE_COLOR_CLEAR "_EXPORTED_STORE_COLOR_CLEAR" +#define GLSL_STORE_COLOR_CLEAR_raw _EXPORTED_STORE_COLOR_CLEAR +#define GLSL_SWIZZLE_COLOR_BGRA_TO_RGBA "_EXPORTED_SWIZZLE_COLOR_BGRA_TO_RGBA" +#define GLSL_SWIZZLE_COLOR_BGRA_TO_RGBA_raw _EXPORTED_SWIZZLE_COLOR_BGRA_TO_RGBA +#define GLSL_TARGET_VULKAN "_EXPORTED_TARGET_VULKAN" +#define GLSL_TARGET_VULKAN_raw _EXPORTED_TARGET_VULKAN +#define GLSL_USE_GENERATED_UNIFORMS "_EXPORTED_USE_GENERATED_UNIFORMS" +#define GLSL_USE_GENERATED_UNIFORMS_raw _EXPORTED_USE_GENERATED_UNIFORMS +#define GLSL_USING_DEPTH_STENCIL "_EXPORTED_USING_DEPTH_STENCIL" +#define GLSL_USING_DEPTH_STENCIL_raw _EXPORTED_USING_DEPTH_STENCIL +#define GLSL_USING_PLS_STORAGE_TEXTURES "_EXPORTED_USING_PLS_STORAGE_TEXTURES" +#define GLSL_USING_PLS_STORAGE_TEXTURES_raw _EXPORTED_USING_PLS_STORAGE_TEXTURES +#define GLSL_VERTEX "_EXPORTED_VERTEX" +#define GLSL_VERTEX_raw _EXPORTED_VERTEX +#define GLSL_a_args "_EXPORTED_a_args" +#define GLSL_a_args_raw _EXPORTED_a_args +#define GLSL_a_args_a "_EXPORTED_a_args_a" +#define GLSL_a_args_a_raw _EXPORTED_a_args_a +#define GLSL_a_args_b "_EXPORTED_a_args_b" +#define GLSL_a_args_b_raw _EXPORTED_a_args_b +#define GLSL_a_args_c "_EXPORTED_a_args_c" +#define GLSL_a_args_c_raw _EXPORTED_a_args_c +#define GLSL_a_args_d "_EXPORTED_a_args_d" +#define GLSL_a_args_d_raw _EXPORTED_a_args_d +#define GLSL_a_imageRectVertex "_EXPORTED_a_imageRectVertex" +#define GLSL_a_imageRectVertex_raw _EXPORTED_a_imageRectVertex +#define GLSL_a_joinTan_and_ys "_EXPORTED_a_joinTan_and_ys" +#define GLSL_a_joinTan_and_ys_raw _EXPORTED_a_joinTan_and_ys +#define GLSL_a_mirroredVertexData "_EXPORTED_a_mirroredVertexData" +#define GLSL_a_mirroredVertexData_raw _EXPORTED_a_mirroredVertexData +#define GLSL_a_p0p1_ "_EXPORTED_a_p0p1_" +#define GLSL_a_p0p1__raw _EXPORTED_a_p0p1_ +#define GLSL_a_p2p3_ "_EXPORTED_a_p2p3_" +#define GLSL_a_p2p3__raw _EXPORTED_a_p2p3_ +#define GLSL_a_patchVertexData "_EXPORTED_a_patchVertexData" +#define GLSL_a_patchVertexData_raw _EXPORTED_a_patchVertexData +#define GLSL_a_position "_EXPORTED_a_position" +#define GLSL_a_position_raw _EXPORTED_a_position +#define GLSL_a_span "_EXPORTED_a_span" +#define GLSL_a_span_raw _EXPORTED_a_span +#define GLSL_a_span_a "_EXPORTED_a_span_a" +#define GLSL_a_span_a_raw _EXPORTED_a_span_a +#define GLSL_a_span_b "_EXPORTED_a_span_b" +#define GLSL_a_span_b_raw _EXPORTED_a_span_b +#define GLSL_a_span_c "_EXPORTED_a_span_c" +#define GLSL_a_span_c_raw _EXPORTED_a_span_c +#define GLSL_a_span_d "_EXPORTED_a_span_d" +#define GLSL_a_span_d_raw _EXPORTED_a_span_d +#define GLSL_a_texCoord "_EXPORTED_a_texCoord" +#define GLSL_a_texCoord_raw _EXPORTED_a_texCoord +#define GLSL_a_triangleVertex "_EXPORTED_a_triangleVertex" +#define GLSL_a_triangleVertex_raw _EXPORTED_a_triangleVertex +#define 
GLSL_blitFragmentMain "_EXPORTED_blitFragmentMain" +#define GLSL_blitFragmentMain_raw _EXPORTED_blitFragmentMain +#define GLSL_blitTextureSource "_EXPORTED_blitTextureSource" +#define GLSL_blitTextureSource_raw _EXPORTED_blitTextureSource +#define GLSL_blitVertexMain "_EXPORTED_blitVertexMain" +#define GLSL_blitVertexMain_raw _EXPORTED_blitVertexMain +#define GLSL_clearColor "_EXPORTED_clearColor" +#define GLSL_clearColor_raw _EXPORTED_clearColor +#define GLSL_colorRampFragmentMain "_EXPORTED_colorRampFragmentMain" +#define GLSL_colorRampFragmentMain_raw _EXPORTED_colorRampFragmentMain +#define GLSL_colorRampVertexMain "_EXPORTED_colorRampVertexMain" +#define GLSL_colorRampVertexMain_raw _EXPORTED_colorRampVertexMain +#define GLSL_contourBuffer "_EXPORTED_contourBuffer" +#define GLSL_contourBuffer_raw _EXPORTED_contourBuffer +#define GLSL_drawFragmentMain "_EXPORTED_drawFragmentMain" +#define GLSL_drawFragmentMain_raw _EXPORTED_drawFragmentMain +#define GLSL_drawVertexMain "_EXPORTED_drawVertexMain" +#define GLSL_drawVertexMain_raw _EXPORTED_drawVertexMain +#define GLSL_dstColorTexture "_EXPORTED_dstColorTexture" +#define GLSL_dstColorTexture_raw _EXPORTED_dstColorTexture +#define GLSL_gradTexture "_EXPORTED_gradTexture" +#define GLSL_gradTexture_raw _EXPORTED_gradTexture +#define GLSL_imageTexture "_EXPORTED_imageTexture" +#define GLSL_imageTexture_raw _EXPORTED_imageTexture +#define GLSL_paintAuxBuffer "_EXPORTED_paintAuxBuffer" +#define GLSL_paintAuxBuffer_raw _EXPORTED_paintAuxBuffer +#define GLSL_paintBuffer "_EXPORTED_paintBuffer" +#define GLSL_paintBuffer_raw _EXPORTED_paintBuffer +#define GLSL_pathBuffer "_EXPORTED_pathBuffer" +#define GLSL_pathBuffer_raw _EXPORTED_pathBuffer +#define GLSL_stencilVertexMain "_EXPORTED_stencilVertexMain" +#define GLSL_stencilVertexMain_raw _EXPORTED_stencilVertexMain +#define GLSL_tessVertexTexture "_EXPORTED_tessVertexTexture" +#define GLSL_tessVertexTexture_raw _EXPORTED_tessVertexTexture +#define GLSL_tessellateFragmentMain "_EXPORTED_tessellateFragmentMain" +#define GLSL_tessellateFragmentMain_raw _EXPORTED_tessellateFragmentMain +#define GLSL_tessellateVertexMain "_EXPORTED_tessellateVertexMain" +#define GLSL_tessellateVertexMain_raw _EXPORTED_tessellateVertexMain diff --git a/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/draw_image_mesh.glsl.hpp b/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/draw_image_mesh.glsl.hpp new file mode 100644 index 00000000..a0947115 --- /dev/null +++ b/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/draw_image_mesh.glsl.hpp @@ -0,0 +1,204 @@ +#pragma once + +#include "draw_image_mesh.exports.h" + +namespace rive { +namespace gpu { +namespace glsl { +const char draw_image_mesh[] = R"===(/* + * Copyright 2023 Rive + */ + +#ifdef _EXPORTED_VERTEX +ATTR_BLOCK_BEGIN(PositionAttr) +ATTR(0, float2, _EXPORTED_a_position); +ATTR_BLOCK_END + +ATTR_BLOCK_BEGIN(UVAttr) +ATTR(1, float2, _EXPORTED_a_texCoord); +ATTR_BLOCK_END +#endif + +VARYING_BLOCK_BEGIN +NO_PERSPECTIVE VARYING(0, float2, v_texCoord); +#ifdef _EXPORTED_ENABLE_CLIPPING +_EXPORTED_OPTIONALLY_FLAT VARYING(1, half, v_clipID); +#endif +#ifdef _EXPORTED_ENABLE_CLIP_RECT +NO_PERSPECTIVE VARYING(2, float4, v_clipRect); +#endif +VARYING_BLOCK_END + +#ifdef _EXPORTED_VERTEX +VERTEX_TEXTURE_BLOCK_BEGIN +VERTEX_TEXTURE_BLOCK_END + +IMAGE_MESH_VERTEX_MAIN(_EXPORTED_drawVertexMain, PositionAttr, position, UVAttr, uv, _vertexID) +{ + ATTR_UNPACK(_vertexID, position, _EXPORTED_a_position, 
float2); + ATTR_UNPACK(_vertexID, uv, _EXPORTED_a_texCoord, float2); + + VARYING_INIT(v_texCoord, float2); +#ifdef _EXPORTED_ENABLE_CLIPPING + VARYING_INIT(v_clipID, half); +#endif +#ifdef _EXPORTED_ENABLE_CLIP_RECT + VARYING_INIT(v_clipRect, float4); +#endif + + float2 vertexPosition = + MUL(make_float2x2(imageDrawUniforms.viewMatrix), _EXPORTED_a_position) + imageDrawUniforms.translate; + v_texCoord = _EXPORTED_a_texCoord; +#ifdef _EXPORTED_ENABLE_CLIPPING + if (_EXPORTED_ENABLE_CLIPPING) + { + v_clipID = id_bits_to_f16(imageDrawUniforms.clipID, uniforms.pathIDGranularity); + } +#endif +#ifdef _EXPORTED_ENABLE_CLIP_RECT + if (_EXPORTED_ENABLE_CLIP_RECT) + { +#ifndef _EXPORTED_USING_DEPTH_STENCIL + v_clipRect = find_clip_rect_coverage_distances( + make_float2x2(imageDrawUniforms.clipRectInverseMatrix), + imageDrawUniforms.clipRectInverseTranslate, + vertexPosition); +#else // USING_DEPTH_STENCIL + set_clip_rect_plane_distances(make_float2x2(imageDrawUniforms.clipRectInverseMatrix), + imageDrawUniforms.clipRectInverseTranslate, + vertexPosition); +#endif // USING_DEPTH_STENCIL + } +#endif // ENABLE_CLIP_RECT + float4 pos = RENDER_TARGET_COORD_TO_CLIP_COORD(vertexPosition); +#ifdef _EXPORTED_USING_DEPTH_STENCIL + pos.z = normalize_z_index(imageDrawUniforms.zIndex); +#endif + + VARYING_PACK(v_texCoord); +#ifdef _EXPORTED_ENABLE_CLIPPING + VARYING_PACK(v_clipID); +#endif +#ifdef _EXPORTED_ENABLE_CLIP_RECT + VARYING_PACK(v_clipRect); +#endif + EMIT_VERTEX(pos); +} +#endif + +#ifdef _EXPORTED_FRAGMENT +FRAG_TEXTURE_BLOCK_BEGIN +TEXTURE_RGBA8(PER_DRAW_BINDINGS_SET, IMAGE_TEXTURE_IDX, _EXPORTED_imageTexture); +#ifdef _EXPORTED_USING_DEPTH_STENCIL +#ifdef _EXPORTED_ENABLE_ADVANCED_BLEND +TEXTURE_RGBA8(PER_FLUSH_BINDINGS_SET, DST_COLOR_TEXTURE_IDX, _EXPORTED_dstColorTexture); +#endif +#endif +FRAG_TEXTURE_BLOCK_END + +SAMPLER_MIPMAP(IMAGE_TEXTURE_IDX, imageSampler) + +FRAG_STORAGE_BUFFER_BLOCK_BEGIN +FRAG_STORAGE_BUFFER_BLOCK_END + +#ifndef _EXPORTED_USING_DEPTH_STENCIL + +PLS_BLOCK_BEGIN +PLS_DECL4F(COLOR_PLANE_IDX, colorBuffer); +#if defined(_EXPORTED_ENABLE_CLIPPING) || defined(_EXPORTED_PLS_IMPL_ANGLE) +PLS_DECLUI(CLIP_PLANE_IDX, clipBuffer); +#endif +PLS_DECL4F(SCRATCH_COLOR_PLANE_IDX, scratchColorBuffer); +PLS_DECLUI(COVERAGE_PLANE_IDX, coverageCountBuffer); +PLS_BLOCK_END + +PLS_MAIN_WITH_IMAGE_UNIFORMS(_EXPORTED_drawFragmentMain) +{ + VARYING_UNPACK(v_texCoord, float2); +#ifdef _EXPORTED_ENABLE_CLIPPING + VARYING_UNPACK(v_clipID, half); +#endif +#ifdef _EXPORTED_ENABLE_CLIP_RECT + VARYING_UNPACK(v_clipRect, float4); +#endif + + half4 color = TEXTURE_SAMPLE(_EXPORTED_imageTexture, imageSampler, v_texCoord); + half coverage = 1.; + +#ifdef _EXPORTED_ENABLE_CLIP_RECT + if (_EXPORTED_ENABLE_CLIP_RECT) + { + half clipRectCoverage = min_value(cast_float4_to_half4(v_clipRect)); + coverage = clamp(clipRectCoverage, make_half(.0), coverage); + } +#endif + + PLS_INTERLOCK_BEGIN; + +#ifdef _EXPORTED_ENABLE_CLIPPING + if (_EXPORTED_ENABLE_CLIPPING && v_clipID != .0) + { + half2 clipData = unpackHalf2x16(PLS_LOADUI(clipBuffer)); + half clipContentID = clipData.y; + half clipCoverage = clipContentID == v_clipID ? clipData.x : make_half(.0); + coverage = min(coverage, clipCoverage); + } +#endif + + // Blend with the framebuffer color. 
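+    // (The non-advanced path below is premultiplied-alpha src-over,
+    // out = src + dst * (1 - src.a), with src alpha first scaled by the
+    // image draw's opacity and the coverage computed above.)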
+ color.w *= imageDrawUniforms.opacity * coverage; + half4 dstColor = PLS_LOAD4F(colorBuffer); +#ifdef _EXPORTED_ENABLE_ADVANCED_BLEND + if (_EXPORTED_ENABLE_ADVANCED_BLEND && imageDrawUniforms.blendMode != BLEND_SRC_OVER) + { + color = advanced_blend(color, + unmultiply(dstColor), + cast_uint_to_ushort(imageDrawUniforms.blendMode)); + } + else +#endif + { + color.xyz *= color.w; + color = color + dstColor * (1. - color.w); + } + + PLS_STORE4F(colorBuffer, color); +#ifdef _EXPORTED_ENABLE_CLIPPING + PLS_PRESERVE_UI(clipBuffer); +#endif + + PLS_INTERLOCK_END; + + EMIT_PLS; +} + +#else // USING_DEPTH_STENCIL + +FRAG_DATA_MAIN(half4, _EXPORTED_drawFragmentMain) +{ + VARYING_UNPACK(v_texCoord, float2); + + half4 color = TEXTURE_SAMPLE(_EXPORTED_imageTexture, imageSampler, v_texCoord); + color.w *= imageDrawUniforms.opacity; + +#ifdef _EXPORTED_ENABLE_ADVANCED_BLEND + if (_EXPORTED_ENABLE_ADVANCED_BLEND) + { + half4 dstColor = TEXEL_FETCH(_EXPORTED_dstColorTexture, int2(floor(_fragCoord.xy))); + color = advanced_blend(color, unmultiply(dstColor), imageDrawUniforms.blendMode); + } + else +#endif // !ENABLE_ADVANCED_BLEND + { + color = premultiply(color); + } + + EMIT_FRAG_DATA(color); +} + +#endif // USING_DEPTH_STENCIL +#endif // FRAGMENT +)==="; +} // namespace glsl +} // namespace gpu +} // namespace rive \ No newline at end of file diff --git a/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/draw_image_mesh.minified.ush b/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/draw_image_mesh.minified.ush new file mode 100644 index 00000000..0cc8d8ab --- /dev/null +++ b/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/draw_image_mesh.minified.ush @@ -0,0 +1,193 @@ +/* + * Copyright 2023 Rive + */ + +#ifdef VERTEX +ATTR_BLOCK_BEGIN(PositionAttr) +ATTR(0, float2, _EXPORTED_a_position); +ATTR_BLOCK_END + +ATTR_BLOCK_BEGIN(UVAttr) +ATTR(1, float2, _EXPORTED_a_texCoord); +ATTR_BLOCK_END +#endif + +VARYING_BLOCK_BEGIN +NO_PERSPECTIVE VARYING(0, float2, v_texCoord); +#ifdef ENABLE_CLIPPING +OPTIONALLY_FLAT VARYING(1, half, v_clipID); +#endif +#ifdef ENABLE_CLIP_RECT +NO_PERSPECTIVE VARYING(2, float4, v_clipRect); +#endif +VARYING_BLOCK_END + +#ifdef VERTEX +VERTEX_TEXTURE_BLOCK_BEGIN +VERTEX_TEXTURE_BLOCK_END + +IMAGE_MESH_VERTEX_MAIN(_EXPORTED_drawVertexMain, PositionAttr, position, UVAttr, uv, _vertexID) +{ + ATTR_UNPACK(_vertexID, position, _EXPORTED_a_position, float2); + ATTR_UNPACK(_vertexID, uv, _EXPORTED_a_texCoord, float2); + + VARYING_INIT(v_texCoord, float2); +#ifdef ENABLE_CLIPPING + VARYING_INIT(v_clipID, half); +#endif +#ifdef ENABLE_CLIP_RECT + VARYING_INIT(v_clipRect, float4); +#endif + + float2 vertexPosition = + MUL(make_float2x2(imageDrawUniforms.viewMatrix), _EXPORTED_a_position) + imageDrawUniforms.translate; + v_texCoord = _EXPORTED_a_texCoord; +#ifdef ENABLE_CLIPPING + if (ENABLE_CLIPPING) + { + v_clipID = id_bits_to_f16(imageDrawUniforms.clipID, uniforms.pathIDGranularity); + } +#endif +#ifdef ENABLE_CLIP_RECT + if (ENABLE_CLIP_RECT) + { +#ifndef USING_DEPTH_STENCIL + v_clipRect = find_clip_rect_coverage_distances( + make_float2x2(imageDrawUniforms.clipRectInverseMatrix), + imageDrawUniforms.clipRectInverseTranslate, + vertexPosition); +#else // USING_DEPTH_STENCIL + set_clip_rect_plane_distances(make_float2x2(imageDrawUniforms.clipRectInverseMatrix), + imageDrawUniforms.clipRectInverseTranslate, + vertexPosition); +#endif // USING_DEPTH_STENCIL + } +#endif // ENABLE_CLIP_RECT + float4 pos = 
RENDER_TARGET_COORD_TO_CLIP_COORD(vertexPosition); +#ifdef USING_DEPTH_STENCIL + pos.z = normalize_z_index(imageDrawUniforms.zIndex); +#endif + + VARYING_PACK(v_texCoord); +#ifdef ENABLE_CLIPPING + VARYING_PACK(v_clipID); +#endif +#ifdef ENABLE_CLIP_RECT + VARYING_PACK(v_clipRect); +#endif + EMIT_VERTEX(pos); +} +#endif + +#ifdef FRAGMENT +FRAG_TEXTURE_BLOCK_BEGIN +TEXTURE_RGBA8(PER_DRAW_BINDINGS_SET, IMAGE_TEXTURE_IDX, _EXPORTED_imageTexture); +#ifdef USING_DEPTH_STENCIL +#ifdef ENABLE_ADVANCED_BLEND +TEXTURE_RGBA8(PER_FLUSH_BINDINGS_SET, DST_COLOR_TEXTURE_IDX, _EXPORTED_dstColorTexture); +#endif +#endif +FRAG_TEXTURE_BLOCK_END + +SAMPLER_MIPMAP(IMAGE_TEXTURE_IDX, imageSampler) + +FRAG_STORAGE_BUFFER_BLOCK_BEGIN +FRAG_STORAGE_BUFFER_BLOCK_END + +#ifndef USING_DEPTH_STENCIL + +PLS_BLOCK_BEGIN +PLS_DECL4F(COLOR_PLANE_IDX, colorBuffer); +#if defined(ENABLE_CLIPPING) || defined(PLS_IMPL_ANGLE) +PLS_DECLUI(CLIP_PLANE_IDX, clipBuffer); +#endif +PLS_DECL4F(SCRATCH_COLOR_PLANE_IDX, scratchColorBuffer); +PLS_DECLUI(COVERAGE_PLANE_IDX, coverageCountBuffer); +PLS_BLOCK_END + +PLS_MAIN_WITH_IMAGE_UNIFORMS(_EXPORTED_drawFragmentMain) +{ + VARYING_UNPACK(v_texCoord, float2); +#ifdef ENABLE_CLIPPING + VARYING_UNPACK(v_clipID, half); +#endif +#ifdef ENABLE_CLIP_RECT + VARYING_UNPACK(v_clipRect, float4); +#endif + + half4 color = TEXTURE_SAMPLE(_EXPORTED_imageTexture, imageSampler, v_texCoord); + half coverage = 1.; + +#ifdef ENABLE_CLIP_RECT + if (ENABLE_CLIP_RECT) + { + half clipRectCoverage = min_value(cast_float4_to_half4(v_clipRect)); + coverage = clamp(clipRectCoverage, make_half(.0), coverage); + } +#endif + + PLS_INTERLOCK_BEGIN; + +#ifdef ENABLE_CLIPPING + if (ENABLE_CLIPPING && v_clipID != .0) + { + half2 clipData = unpackHalf2x16(PLS_LOADUI(clipBuffer)); + half clipContentID = clipData.y; + half clipCoverage = clipContentID == v_clipID ? clipData.x : make_half(.0); + coverage = min(coverage, clipCoverage); + } +#endif + + // Blend with the framebuffer color. + color.w *= imageDrawUniforms.opacity * coverage; + half4 dstColor = PLS_LOAD4F(colorBuffer); +#ifdef ENABLE_ADVANCED_BLEND + if (ENABLE_ADVANCED_BLEND && imageDrawUniforms.blendMode != BLEND_SRC_OVER) + { + color = advanced_blend(color, + unmultiply(dstColor), + cast_uint_to_ushort(imageDrawUniforms.blendMode)); + } + else +#endif + { + color.xyz *= color.w; + color = color + dstColor * (1. 
- color.w); + } + + PLS_STORE4F(colorBuffer, color); +#ifdef ENABLE_CLIPPING + PLS_PRESERVE_UI(clipBuffer); +#endif + + PLS_INTERLOCK_END; + + EMIT_PLS; +} + +#else // USING_DEPTH_STENCIL + +FRAG_DATA_MAIN(half4, _EXPORTED_drawFragmentMain) +{ + VARYING_UNPACK(v_texCoord, float2); + + half4 color = TEXTURE_SAMPLE(_EXPORTED_imageTexture, imageSampler, v_texCoord); + color.w *= imageDrawUniforms.opacity; + +#ifdef ENABLE_ADVANCED_BLEND + if (ENABLE_ADVANCED_BLEND) + { + half4 dstColor = TEXEL_FETCH(_EXPORTED_dstColorTexture, int2(floor(_fragCoord.xy))); + color = advanced_blend(color, unmultiply(dstColor), imageDrawUniforms.blendMode); + } + else +#endif // !ENABLE_ADVANCED_BLEND + { + color = premultiply(color); + } + + EMIT_FRAG_DATA(color); +} + +#endif // USING_DEPTH_STENCIL +#endif // FRAGMENT diff --git a/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/draw_path.exports.h b/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/draw_path.exports.h new file mode 100644 index 00000000..2d88d890 --- /dev/null +++ b/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/draw_path.exports.h @@ -0,0 +1,178 @@ +#pragma once + +#define GLSL_CLEAR_CLIP "_EXPORTED_CLEAR_CLIP" +#define GLSL_CLEAR_CLIP_raw _EXPORTED_CLEAR_CLIP +#define GLSL_CLEAR_COLOR "_EXPORTED_CLEAR_COLOR" +#define GLSL_CLEAR_COLOR_raw _EXPORTED_CLEAR_COLOR +#define GLSL_CLEAR_COVERAGE "_EXPORTED_CLEAR_COVERAGE" +#define GLSL_CLEAR_COVERAGE_raw _EXPORTED_CLEAR_COVERAGE +#define GLSL_COALESCED_PLS_RESOLVE_AND_TRANSFER "_EXPORTED_COALESCED_PLS_RESOLVE_AND_TRANSFER" +#define GLSL_COALESCED_PLS_RESOLVE_AND_TRANSFER_raw _EXPORTED_COALESCED_PLS_RESOLVE_AND_TRANSFER +#define GLSL_COLOR_PLANE_IDX_OVERRIDE "_EXPORTED_COLOR_PLANE_IDX_OVERRIDE" +#define GLSL_COLOR_PLANE_IDX_OVERRIDE_raw _EXPORTED_COLOR_PLANE_IDX_OVERRIDE +#define GLSL_DISABLE_SHADER_STORAGE_BUFFERS "_EXPORTED_DISABLE_SHADER_STORAGE_BUFFERS" +#define GLSL_DISABLE_SHADER_STORAGE_BUFFERS_raw _EXPORTED_DISABLE_SHADER_STORAGE_BUFFERS +#define GLSL_DRAW_IMAGE "_EXPORTED_DRAW_IMAGE" +#define GLSL_DRAW_IMAGE_raw _EXPORTED_DRAW_IMAGE +#define GLSL_DRAW_IMAGE_MESH "_EXPORTED_DRAW_IMAGE_MESH" +#define GLSL_DRAW_IMAGE_MESH_raw _EXPORTED_DRAW_IMAGE_MESH +#define GLSL_DRAW_IMAGE_RECT "_EXPORTED_DRAW_IMAGE_RECT" +#define GLSL_DRAW_IMAGE_RECT_raw _EXPORTED_DRAW_IMAGE_RECT +#define GLSL_DRAW_INTERIOR_TRIANGLES "_EXPORTED_DRAW_INTERIOR_TRIANGLES" +#define GLSL_DRAW_INTERIOR_TRIANGLES_raw _EXPORTED_DRAW_INTERIOR_TRIANGLES +#define GLSL_DRAW_PATH "_EXPORTED_DRAW_PATH" +#define GLSL_DRAW_PATH_raw _EXPORTED_DRAW_PATH +#define GLSL_DRAW_RENDER_TARGET_UPDATE_BOUNDS "_EXPORTED_DRAW_RENDER_TARGET_UPDATE_BOUNDS" +#define GLSL_DRAW_RENDER_TARGET_UPDATE_BOUNDS_raw _EXPORTED_DRAW_RENDER_TARGET_UPDATE_BOUNDS +#define GLSL_ENABLE_ADVANCED_BLEND "_EXPORTED_ENABLE_ADVANCED_BLEND" +#define GLSL_ENABLE_ADVANCED_BLEND_raw _EXPORTED_ENABLE_ADVANCED_BLEND +#define GLSL_ENABLE_BINDLESS_TEXTURES "_EXPORTED_ENABLE_BINDLESS_TEXTURES" +#define GLSL_ENABLE_BINDLESS_TEXTURES_raw _EXPORTED_ENABLE_BINDLESS_TEXTURES +#define GLSL_ENABLE_CLIPPING "_EXPORTED_ENABLE_CLIPPING" +#define GLSL_ENABLE_CLIPPING_raw _EXPORTED_ENABLE_CLIPPING +#define GLSL_ENABLE_CLIP_RECT "_EXPORTED_ENABLE_CLIP_RECT" +#define GLSL_ENABLE_CLIP_RECT_raw _EXPORTED_ENABLE_CLIP_RECT +#define GLSL_ENABLE_EVEN_ODD "_EXPORTED_ENABLE_EVEN_ODD" +#define GLSL_ENABLE_EVEN_ODD_raw _EXPORTED_ENABLE_EVEN_ODD +#define GLSL_ENABLE_HSL_BLEND_MODES "_EXPORTED_ENABLE_HSL_BLEND_MODES" +#define 
GLSL_ENABLE_HSL_BLEND_MODES_raw _EXPORTED_ENABLE_HSL_BLEND_MODES +#define GLSL_ENABLE_INSTANCE_INDEX "_EXPORTED_ENABLE_INSTANCE_INDEX" +#define GLSL_ENABLE_INSTANCE_INDEX_raw _EXPORTED_ENABLE_INSTANCE_INDEX +#define GLSL_ENABLE_KHR_BLEND "_EXPORTED_ENABLE_KHR_BLEND" +#define GLSL_ENABLE_KHR_BLEND_raw _EXPORTED_ENABLE_KHR_BLEND +#define GLSL_ENABLE_MIN_16_PRECISION "_EXPORTED_ENABLE_MIN_16_PRECISION" +#define GLSL_ENABLE_MIN_16_PRECISION_raw _EXPORTED_ENABLE_MIN_16_PRECISION +#define GLSL_ENABLE_NESTED_CLIPPING "_EXPORTED_ENABLE_NESTED_CLIPPING" +#define GLSL_ENABLE_NESTED_CLIPPING_raw _EXPORTED_ENABLE_NESTED_CLIPPING +#define GLSL_ENABLE_RASTERIZER_ORDERED_VIEWS "_EXPORTED_ENABLE_RASTERIZER_ORDERED_VIEWS" +#define GLSL_ENABLE_RASTERIZER_ORDERED_VIEWS_raw _EXPORTED_ENABLE_RASTERIZER_ORDERED_VIEWS +#define GLSL_ENABLE_SPIRV_CROSS_BASE_INSTANCE "_EXPORTED_ENABLE_SPIRV_CROSS_BASE_INSTANCE" +#define GLSL_ENABLE_SPIRV_CROSS_BASE_INSTANCE_raw _EXPORTED_ENABLE_SPIRV_CROSS_BASE_INSTANCE +#define GLSL_ENABLE_TYPED_UAV_LOAD_STORE "_EXPORTED_ENABLE_TYPED_UAV_LOAD_STORE" +#define GLSL_ENABLE_TYPED_UAV_LOAD_STORE_raw _EXPORTED_ENABLE_TYPED_UAV_LOAD_STORE +#define GLSL_FIXED_FUNCTION_COLOR_BLEND "_EXPORTED_FIXED_FUNCTION_COLOR_BLEND" +#define GLSL_FIXED_FUNCTION_COLOR_BLEND_raw _EXPORTED_FIXED_FUNCTION_COLOR_BLEND +#define GLSL_FRAGMENT "_EXPORTED_FRAGMENT" +#define GLSL_FRAGMENT_raw _EXPORTED_FRAGMENT +#define GLSL_FlushUniforms "_EXPORTED_FlushUniforms" +#define GLSL_FlushUniforms_raw _EXPORTED_FlushUniforms +#define GLSL_GLSL_VERSION "_EXPORTED_GLSL_VERSION" +#define GLSL_GLSL_VERSION_raw _EXPORTED_GLSL_VERSION +#define GLSL_INITIALIZE_PLS "_EXPORTED_INITIALIZE_PLS" +#define GLSL_INITIALIZE_PLS_raw _EXPORTED_INITIALIZE_PLS +#define GLSL_ImageDrawUniforms "_EXPORTED_ImageDrawUniforms" +#define GLSL_ImageDrawUniforms_raw _EXPORTED_ImageDrawUniforms +#define GLSL_LOAD_COLOR "_EXPORTED_LOAD_COLOR" +#define GLSL_LOAD_COLOR_raw _EXPORTED_LOAD_COLOR +#define GLSL_OPTIONALLY_FLAT "_EXPORTED_OPTIONALLY_FLAT" +#define GLSL_OPTIONALLY_FLAT_raw _EXPORTED_OPTIONALLY_FLAT +#define GLSL_PLS_IMPL_ANGLE "_EXPORTED_PLS_IMPL_ANGLE" +#define GLSL_PLS_IMPL_ANGLE_raw _EXPORTED_PLS_IMPL_ANGLE +#define GLSL_PLS_IMPL_DEVICE_BUFFER "_EXPORTED_PLS_IMPL_DEVICE_BUFFER" +#define GLSL_PLS_IMPL_DEVICE_BUFFER_raw _EXPORTED_PLS_IMPL_DEVICE_BUFFER +#define GLSL_PLS_IMPL_DEVICE_BUFFER_RASTER_ORDERED "_EXPORTED_PLS_IMPL_DEVICE_BUFFER_RASTER_ORDERED" +#define GLSL_PLS_IMPL_DEVICE_BUFFER_RASTER_ORDERED_raw _EXPORTED_PLS_IMPL_DEVICE_BUFFER_RASTER_ORDERED +#define GLSL_PLS_IMPL_EXT_NATIVE "_EXPORTED_PLS_IMPL_EXT_NATIVE" +#define GLSL_PLS_IMPL_EXT_NATIVE_raw _EXPORTED_PLS_IMPL_EXT_NATIVE +#define GLSL_PLS_IMPL_FRAMEBUFFER_FETCH "_EXPORTED_PLS_IMPL_FRAMEBUFFER_FETCH" +#define GLSL_PLS_IMPL_FRAMEBUFFER_FETCH_raw _EXPORTED_PLS_IMPL_FRAMEBUFFER_FETCH +#define GLSL_PLS_IMPL_NONE "_EXPORTED_PLS_IMPL_NONE" +#define GLSL_PLS_IMPL_NONE_raw _EXPORTED_PLS_IMPL_NONE +#define GLSL_PLS_IMPL_STORAGE_TEXTURE "_EXPORTED_PLS_IMPL_STORAGE_TEXTURE" +#define GLSL_PLS_IMPL_STORAGE_TEXTURE_raw _EXPORTED_PLS_IMPL_STORAGE_TEXTURE +#define GLSL_PLS_IMPL_SUBPASS_LOAD "_EXPORTED_PLS_IMPL_SUBPASS_LOAD" +#define GLSL_PLS_IMPL_SUBPASS_LOAD_raw _EXPORTED_PLS_IMPL_SUBPASS_LOAD +#define GLSL_RESOLVE_PLS "_EXPORTED_RESOLVE_PLS" +#define GLSL_RESOLVE_PLS_raw _EXPORTED_RESOLVE_PLS +#define GLSL_STORE_COLOR "_EXPORTED_STORE_COLOR" +#define GLSL_STORE_COLOR_raw _EXPORTED_STORE_COLOR +#define GLSL_STORE_COLOR_CLEAR "_EXPORTED_STORE_COLOR_CLEAR" +#define GLSL_STORE_COLOR_CLEAR_raw 
_EXPORTED_STORE_COLOR_CLEAR +#define GLSL_SWIZZLE_COLOR_BGRA_TO_RGBA "_EXPORTED_SWIZZLE_COLOR_BGRA_TO_RGBA" +#define GLSL_SWIZZLE_COLOR_BGRA_TO_RGBA_raw _EXPORTED_SWIZZLE_COLOR_BGRA_TO_RGBA +#define GLSL_TARGET_VULKAN "_EXPORTED_TARGET_VULKAN" +#define GLSL_TARGET_VULKAN_raw _EXPORTED_TARGET_VULKAN +#define GLSL_USE_GENERATED_UNIFORMS "_EXPORTED_USE_GENERATED_UNIFORMS" +#define GLSL_USE_GENERATED_UNIFORMS_raw _EXPORTED_USE_GENERATED_UNIFORMS +#define GLSL_USING_DEPTH_STENCIL "_EXPORTED_USING_DEPTH_STENCIL" +#define GLSL_USING_DEPTH_STENCIL_raw _EXPORTED_USING_DEPTH_STENCIL +#define GLSL_USING_PLS_STORAGE_TEXTURES "_EXPORTED_USING_PLS_STORAGE_TEXTURES" +#define GLSL_USING_PLS_STORAGE_TEXTURES_raw _EXPORTED_USING_PLS_STORAGE_TEXTURES +#define GLSL_VERTEX "_EXPORTED_VERTEX" +#define GLSL_VERTEX_raw _EXPORTED_VERTEX +#define GLSL_a_args "_EXPORTED_a_args" +#define GLSL_a_args_raw _EXPORTED_a_args +#define GLSL_a_args_a "_EXPORTED_a_args_a" +#define GLSL_a_args_a_raw _EXPORTED_a_args_a +#define GLSL_a_args_b "_EXPORTED_a_args_b" +#define GLSL_a_args_b_raw _EXPORTED_a_args_b +#define GLSL_a_args_c "_EXPORTED_a_args_c" +#define GLSL_a_args_c_raw _EXPORTED_a_args_c +#define GLSL_a_args_d "_EXPORTED_a_args_d" +#define GLSL_a_args_d_raw _EXPORTED_a_args_d +#define GLSL_a_imageRectVertex "_EXPORTED_a_imageRectVertex" +#define GLSL_a_imageRectVertex_raw _EXPORTED_a_imageRectVertex +#define GLSL_a_joinTan_and_ys "_EXPORTED_a_joinTan_and_ys" +#define GLSL_a_joinTan_and_ys_raw _EXPORTED_a_joinTan_and_ys +#define GLSL_a_mirroredVertexData "_EXPORTED_a_mirroredVertexData" +#define GLSL_a_mirroredVertexData_raw _EXPORTED_a_mirroredVertexData +#define GLSL_a_p0p1_ "_EXPORTED_a_p0p1_" +#define GLSL_a_p0p1__raw _EXPORTED_a_p0p1_ +#define GLSL_a_p2p3_ "_EXPORTED_a_p2p3_" +#define GLSL_a_p2p3__raw _EXPORTED_a_p2p3_ +#define GLSL_a_patchVertexData "_EXPORTED_a_patchVertexData" +#define GLSL_a_patchVertexData_raw _EXPORTED_a_patchVertexData +#define GLSL_a_position "_EXPORTED_a_position" +#define GLSL_a_position_raw _EXPORTED_a_position +#define GLSL_a_span "_EXPORTED_a_span" +#define GLSL_a_span_raw _EXPORTED_a_span +#define GLSL_a_span_a "_EXPORTED_a_span_a" +#define GLSL_a_span_a_raw _EXPORTED_a_span_a +#define GLSL_a_span_b "_EXPORTED_a_span_b" +#define GLSL_a_span_b_raw _EXPORTED_a_span_b +#define GLSL_a_span_c "_EXPORTED_a_span_c" +#define GLSL_a_span_c_raw _EXPORTED_a_span_c +#define GLSL_a_span_d "_EXPORTED_a_span_d" +#define GLSL_a_span_d_raw _EXPORTED_a_span_d +#define GLSL_a_texCoord "_EXPORTED_a_texCoord" +#define GLSL_a_texCoord_raw _EXPORTED_a_texCoord +#define GLSL_a_triangleVertex "_EXPORTED_a_triangleVertex" +#define GLSL_a_triangleVertex_raw _EXPORTED_a_triangleVertex +#define GLSL_blitFragmentMain "_EXPORTED_blitFragmentMain" +#define GLSL_blitFragmentMain_raw _EXPORTED_blitFragmentMain +#define GLSL_blitTextureSource "_EXPORTED_blitTextureSource" +#define GLSL_blitTextureSource_raw _EXPORTED_blitTextureSource +#define GLSL_blitVertexMain "_EXPORTED_blitVertexMain" +#define GLSL_blitVertexMain_raw _EXPORTED_blitVertexMain +#define GLSL_clearColor "_EXPORTED_clearColor" +#define GLSL_clearColor_raw _EXPORTED_clearColor +#define GLSL_colorRampFragmentMain "_EXPORTED_colorRampFragmentMain" +#define GLSL_colorRampFragmentMain_raw _EXPORTED_colorRampFragmentMain +#define GLSL_colorRampVertexMain "_EXPORTED_colorRampVertexMain" +#define GLSL_colorRampVertexMain_raw _EXPORTED_colorRampVertexMain +#define GLSL_contourBuffer "_EXPORTED_contourBuffer" +#define GLSL_contourBuffer_raw 
_EXPORTED_contourBuffer +#define GLSL_drawFragmentMain "_EXPORTED_drawFragmentMain" +#define GLSL_drawFragmentMain_raw _EXPORTED_drawFragmentMain +#define GLSL_drawVertexMain "_EXPORTED_drawVertexMain" +#define GLSL_drawVertexMain_raw _EXPORTED_drawVertexMain +#define GLSL_dstColorTexture "_EXPORTED_dstColorTexture" +#define GLSL_dstColorTexture_raw _EXPORTED_dstColorTexture +#define GLSL_gradTexture "_EXPORTED_gradTexture" +#define GLSL_gradTexture_raw _EXPORTED_gradTexture +#define GLSL_imageTexture "_EXPORTED_imageTexture" +#define GLSL_imageTexture_raw _EXPORTED_imageTexture +#define GLSL_paintAuxBuffer "_EXPORTED_paintAuxBuffer" +#define GLSL_paintAuxBuffer_raw _EXPORTED_paintAuxBuffer +#define GLSL_paintBuffer "_EXPORTED_paintBuffer" +#define GLSL_paintBuffer_raw _EXPORTED_paintBuffer +#define GLSL_pathBuffer "_EXPORTED_pathBuffer" +#define GLSL_pathBuffer_raw _EXPORTED_pathBuffer +#define GLSL_stencilVertexMain "_EXPORTED_stencilVertexMain" +#define GLSL_stencilVertexMain_raw _EXPORTED_stencilVertexMain +#define GLSL_tessVertexTexture "_EXPORTED_tessVertexTexture" +#define GLSL_tessVertexTexture_raw _EXPORTED_tessVertexTexture +#define GLSL_tessellateFragmentMain "_EXPORTED_tessellateFragmentMain" +#define GLSL_tessellateFragmentMain_raw _EXPORTED_tessellateFragmentMain +#define GLSL_tessellateVertexMain "_EXPORTED_tessellateVertexMain" +#define GLSL_tessellateVertexMain_raw _EXPORTED_tessellateVertexMain diff --git a/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/draw_path.glsl.hpp b/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/draw_path.glsl.hpp new file mode 100644 index 00000000..6a13e110 --- /dev/null +++ b/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/draw_path.glsl.hpp @@ -0,0 +1,540 @@ +#pragma once + +#include "draw_path.exports.h" + +namespace rive { +namespace gpu { +namespace glsl { +const char draw_path[] = R"===(/* + * Copyright 2022 Rive + */ + +#ifdef _EXPORTED_VERTEX +ATTR_BLOCK_BEGIN(Attrs) +#ifdef _EXPORTED_DRAW_INTERIOR_TRIANGLES +ATTR(0, packed_float3, _EXPORTED_a_triangleVertex); +#else +ATTR(0, float4, _EXPORTED_a_patchVertexData); // [localVertexID, outset, fillCoverage, vertexType] +ATTR(1, float4, _EXPORTED_a_mirroredVertexData); +#endif +ATTR_BLOCK_END +#endif + +VARYING_BLOCK_BEGIN +NO_PERSPECTIVE VARYING(0, float4, v_paint); +#ifndef _EXPORTED_USING_DEPTH_STENCIL +#ifdef _EXPORTED_DRAW_INTERIOR_TRIANGLES +_EXPORTED_OPTIONALLY_FLAT VARYING(1, half, v_windingWeight); +#else +NO_PERSPECTIVE VARYING(2, half2, v_edgeDistance); +#endif +_EXPORTED_OPTIONALLY_FLAT VARYING(3, half, v_pathID); +#ifdef _EXPORTED_ENABLE_CLIPPING +_EXPORTED_OPTIONALLY_FLAT VARYING(4, half, v_clipID); +#endif +#ifdef _EXPORTED_ENABLE_CLIP_RECT +NO_PERSPECTIVE VARYING(5, float4, v_clipRect); +#endif +#endif // !USING_DEPTH_STENCIL +#ifdef _EXPORTED_ENABLE_ADVANCED_BLEND +_EXPORTED_OPTIONALLY_FLAT VARYING(6, half, v_blendMode); +#endif +VARYING_BLOCK_END + +#ifdef _EXPORTED_VERTEX +VERTEX_MAIN(_EXPORTED_drawVertexMain, Attrs, attrs, _vertexID, _instanceID) +{ +#ifdef _EXPORTED_DRAW_INTERIOR_TRIANGLES + ATTR_UNPACK(_vertexID, attrs, _EXPORTED_a_triangleVertex, float3); +#else + ATTR_UNPACK(_vertexID, attrs, _EXPORTED_a_patchVertexData, float4); + ATTR_UNPACK(_vertexID, attrs, _EXPORTED_a_mirroredVertexData, float4); +#endif + + VARYING_INIT(v_paint, float4); +#ifndef USING_DEPTH_STENCIL +#ifdef _EXPORTED_DRAW_INTERIOR_TRIANGLES + VARYING_INIT(v_windingWeight, half); +#else + VARYING_INIT(v_edgeDistance, 
half2); +#endif + VARYING_INIT(v_pathID, half); +#ifdef _EXPORTED_ENABLE_CLIPPING + VARYING_INIT(v_clipID, half); +#endif +#ifdef _EXPORTED_ENABLE_CLIP_RECT + VARYING_INIT(v_clipRect, float4); +#endif +#endif // !USING_DEPTH_STENCIL +#ifdef _EXPORTED_ENABLE_ADVANCED_BLEND + VARYING_INIT(v_blendMode, half); +#endif + + bool shouldDiscardVertex = false; + ushort pathID; + float2 vertexPosition; +#ifdef _EXPORTED_USING_DEPTH_STENCIL + ushort pathZIndex; +#endif + +#ifdef _EXPORTED_DRAW_INTERIOR_TRIANGLES + vertexPosition = unpack_interior_triangle_vertex(_EXPORTED_a_triangleVertex, + pathID, + v_windingWeight VERTEX_CONTEXT_UNPACK); +#else + shouldDiscardVertex = !unpack_tessellated_path_vertex(_EXPORTED_a_patchVertexData, + _EXPORTED_a_mirroredVertexData, + _instanceID, + pathID, + vertexPosition +#ifndef _EXPORTED_USING_DEPTH_STENCIL + , + v_edgeDistance +#else + , + pathZIndex +#endif + VERTEX_CONTEXT_UNPACK); +#endif // !DRAW_INTERIOR_TRIANGLES + + uint2 paintData = STORAGE_BUFFER_LOAD2(_EXPORTED_paintBuffer, pathID); + +#ifndef _EXPORTED_USING_DEPTH_STENCIL + // Encode the integral pathID as a "half" that we know the hardware will see as a unique value + // in the fragment shader. + v_pathID = id_bits_to_f16(pathID, uniforms.pathIDGranularity); + + // Indicate even-odd fill rule by making pathID negative. + if ((paintData.x & PAINT_FLAG_EVEN_ODD) != 0u) + v_pathID = -v_pathID; +#endif // !USING_DEPTH_STENCIL + + uint paintType = paintData.x & 0xfu; +#ifdef _EXPORTED_ENABLE_CLIPPING + if (_EXPORTED_ENABLE_CLIPPING) + { + uint clipIDBits = (paintType == CLIP_UPDATE_PAINT_TYPE ? paintData.y : paintData.x) >> 16; + v_clipID = id_bits_to_f16(clipIDBits, uniforms.pathIDGranularity); + // Negative clipID means to update the clip buffer instead of the color buffer. + if (paintType == CLIP_UPDATE_PAINT_TYPE) + v_clipID = -v_clipID; + } +#endif +#ifdef _EXPORTED_ENABLE_ADVANCED_BLEND + if (_EXPORTED_ENABLE_ADVANCED_BLEND) + { + v_blendMode = float((paintData.x >> 4) & 0xfu); + } +#endif + + // Paint matrices operate on the fragment shader's "_fragCoord", which is bottom-up in GL. + float2 fragCoord = vertexPosition; +#ifdef FRAG_COORD_BOTTOM_UP + fragCoord.y = float(uniforms.renderTargetHeight) - fragCoord.y; +#endif + +#ifdef _EXPORTED_ENABLE_CLIP_RECT + if (_EXPORTED_ENABLE_CLIP_RECT) + { + // clipRectInverseMatrix transforms from pixel coordinates to a space where the clipRect is + // the normalized rectangle: [-1, -1, 1, 1]. + float2x2 clipRectInverseMatrix = + make_float2x2(STORAGE_BUFFER_LOAD4(_EXPORTED_paintAuxBuffer, pathID * 4u + 2u)); + float4 clipRectInverseTranslate = STORAGE_BUFFER_LOAD4(_EXPORTED_paintAuxBuffer, pathID * 4u + 3u); +#ifndef _EXPORTED_USING_DEPTH_STENCIL + v_clipRect = find_clip_rect_coverage_distances(clipRectInverseMatrix, + clipRectInverseTranslate.xy, + fragCoord); +#else // USING_DEPTH_STENCIL + set_clip_rect_plane_distances(clipRectInverseMatrix, + clipRectInverseTranslate.xy, + fragCoord); +#endif // USING_DEPTH_STENCIL + } +#endif // ENABLE_CLIP_RECT + + // Unpack the paint once we have a position. 
+ if (paintType == SOLID_COLOR_PAINT_TYPE) + { + half4 color = unpackUnorm4x8(paintData.y); + v_paint = float4(color); + } +#ifdef _EXPORTED_ENABLE_CLIPPING + else if (_EXPORTED_ENABLE_CLIPPING && paintType == CLIP_UPDATE_PAINT_TYPE) + { + half outerClipID = id_bits_to_f16(paintData.x >> 16, uniforms.pathIDGranularity); + v_paint = float4(outerClipID, 0, 0, 0); + } +#endif + else + { + float2x2 paintMatrix = make_float2x2(STORAGE_BUFFER_LOAD4(_EXPORTED_paintAuxBuffer, pathID * 4u)); + float4 paintTranslate = STORAGE_BUFFER_LOAD4(_EXPORTED_paintAuxBuffer, pathID * 4u + 1u); + float2 paintCoord = MUL(paintMatrix, fragCoord) + paintTranslate.xy; + if (paintType == LINEAR_GRADIENT_PAINT_TYPE || paintType == RADIAL_GRADIENT_PAINT_TYPE) + { + // v_paint.a contains "-row" of the gradient ramp at texel center, in normalized space. + v_paint.w = -uintBitsToFloat(paintData.y); + // abs(v_paint.b) contains either: + // - 2 if the gradient ramp spans an entire row. + // - x0 of the gradient ramp in normalized space, if it's a simple 2-texel ramp. + if (paintTranslate.z > .9) // paintTranslate.z is either ~1 or ~1/GRAD_TEXTURE_WIDTH. + { + // Complex ramps span an entire row. Set it to 2 to convey this. + v_paint.z = 2.; + } + else + { + // This is a simple ramp. + v_paint.z = paintTranslate.w; + } + if (paintType == LINEAR_GRADIENT_PAINT_TYPE) + { + // The paint is a linear gradient. + v_paint.y = .0; + v_paint.x = paintCoord.x; + } + else + { + // The paint is a radial gradient. Mark v_paint.b negative to indicate this to the + // fragment shader. (v_paint.b can't be zero because the gradient ramp is aligned on + // pixel centers, so negating it will always produce a negative number.) + v_paint.z = -v_paint.z; + v_paint.xy = paintCoord.xy; + } + } + else // IMAGE_PAINT_TYPE + { + // v_paint.a <= -1. signals that the paint is an image. + // v_paint.b is the image opacity. + // v_paint.rg is the normalized image texture coordinate (built into the paintMatrix). 
+ float opacity = uintBitsToFloat(paintData.y); + v_paint = float4(paintCoord.x, paintCoord.y, opacity, -2.); + } + } + + float4 pos; + if (!shouldDiscardVertex) + { + pos = RENDER_TARGET_COORD_TO_CLIP_COORD(vertexPosition); +#ifdef _EXPORTED_USING_DEPTH_STENCIL + pos.z = normalize_z_index(pathZIndex); +#endif + } + else + { + pos = float4(uniforms.vertexDiscardValue, + uniforms.vertexDiscardValue, + uniforms.vertexDiscardValue, + uniforms.vertexDiscardValue); + } + + VARYING_PACK(v_paint); +#ifndef _EXPORTED_USING_DEPTH_STENCIL +#ifdef _EXPORTED_DRAW_INTERIOR_TRIANGLES + VARYING_PACK(v_windingWeight); +#else + VARYING_PACK(v_edgeDistance); +#endif + VARYING_PACK(v_pathID); +#ifdef _EXPORTED_ENABLE_CLIPPING + VARYING_PACK(v_clipID); +#endif +#ifdef _EXPORTED_ENABLE_CLIP_RECT + VARYING_PACK(v_clipRect); +#endif +#endif // !USING_DEPTH_STENCIL +#ifdef _EXPORTED_ENABLE_ADVANCED_BLEND + VARYING_PACK(v_blendMode); +#endif + EMIT_VERTEX(pos); +} +#endif + +#ifdef _EXPORTED_FRAGMENT +FRAG_TEXTURE_BLOCK_BEGIN +TEXTURE_RGBA8(PER_FLUSH_BINDINGS_SET, GRAD_TEXTURE_IDX, _EXPORTED_gradTexture); +TEXTURE_RGBA8(PER_DRAW_BINDINGS_SET, IMAGE_TEXTURE_IDX, _EXPORTED_imageTexture); +#ifdef _EXPORTED_USING_DEPTH_STENCIL +#ifdef _EXPORTED_ENABLE_ADVANCED_BLEND +TEXTURE_RGBA8(PER_FLUSH_BINDINGS_SET, DST_COLOR_TEXTURE_IDX, _EXPORTED_dstColorTexture); +#endif +#endif +FRAG_TEXTURE_BLOCK_END + +SAMPLER_LINEAR(GRAD_TEXTURE_IDX, gradSampler) +SAMPLER_MIPMAP(IMAGE_TEXTURE_IDX, imageSampler) + +FRAG_STORAGE_BUFFER_BLOCK_BEGIN +FRAG_STORAGE_BUFFER_BLOCK_END + +INLINE half4 find_paint_color(float4 paint +#ifdef _EXPORTED_TARGET_VULKAN + , + float2 imagePaintDDX, + float2 imagePaintDDY +#endif + FRAGMENT_CONTEXT_DECL) +{ + if (paint.w >= .0) // Is the paint a solid color? + { + return cast_float4_to_half4(paint); + } + else if (paint.w > -1.) // Is the paint a gradient (linear or radial)? + { + float t = paint.z > .0 ? /*linear*/ paint.x : /*radial*/ length(paint.xy); + t = clamp(t, .0, 1.); + float span = abs(paint.z); + float x = span > 1. ? /*entire row*/ (1. - 1. / GRAD_TEXTURE_WIDTH) * t + + (.5 / GRAD_TEXTURE_WIDTH) + : /*two texels*/ (1. / GRAD_TEXTURE_WIDTH) * t + span; + float row = -paint.w; + // Our gradient texture is not mipmapped. Issue a texture-sample that explicitly does not + // find derivatives for LOD computation (by specifying derivatives directly). + return TEXTURE_SAMPLE_LOD(_EXPORTED_gradTexture, gradSampler, float2(x, row), .0); + } + else // The paint is an image. + { + half4 color; +#ifdef _EXPORTED_TARGET_VULKAN + // Vulkan validators require explicit derivatives when sampling a texture in + // "non-uniform" control flow. See above. + color = TEXTURE_SAMPLE_GRAD(_EXPORTED_imageTexture, + imageSampler, + paint.xy, + imagePaintDDX, + imagePaintDDY); +#else + color = TEXTURE_SAMPLE(_EXPORTED_imageTexture, imageSampler, paint.xy); +#endif + color.w *= paint.z; // paint.b holds the opacity of the image.
+ return color; + } +} + +#ifndef _EXPORTED_USING_DEPTH_STENCIL + +PLS_BLOCK_BEGIN +PLS_DECL4F(COLOR_PLANE_IDX, colorBuffer); +#if defined(_EXPORTED_ENABLE_CLIPPING) || defined(_EXPORTED_PLS_IMPL_ANGLE) +PLS_DECLUI(CLIP_PLANE_IDX, clipBuffer); +#endif +PLS_DECL4F(SCRATCH_COLOR_PLANE_IDX, scratchColorBuffer); +PLS_DECLUI(COVERAGE_PLANE_IDX, coverageCountBuffer); +PLS_BLOCK_END + +PLS_MAIN(_EXPORTED_drawFragmentMain) +{ + VARYING_UNPACK(v_paint, float4); +#ifdef _EXPORTED_DRAW_INTERIOR_TRIANGLES + VARYING_UNPACK(v_windingWeight, half); +#else + VARYING_UNPACK(v_edgeDistance, half2); +#endif + VARYING_UNPACK(v_pathID, half); +#ifdef _EXPORTED_ENABLE_CLIPPING + VARYING_UNPACK(v_clipID, half); +#endif +#ifdef _EXPORTED_ENABLE_CLIP_RECT + VARYING_UNPACK(v_clipRect, float4); +#endif +#ifdef _EXPORTED_ENABLE_ADVANCED_BLEND + VARYING_UNPACK(v_blendMode, half); +#endif + +#ifdef _EXPORTED_TARGET_VULKAN + // Strict validators require derivatives (i.e., for a mipmapped texture sample) to be computed + // within uniform control flow. + // Our control flow for texture sampling is uniform for an entire triangle, so we're fine, but + // the validators don't know this. + // If this might be a problem (e.g., for WebGPU), just find the potential image paint + // derivatives here. + float2 imagePaintDDX = dFdx(v_paint.xy); + float2 imagePaintDDY = dFdy(v_paint.xy); +#endif + +#ifndef _EXPORTED_DRAW_INTERIOR_TRIANGLES + // Interior triangles don't overlap, so don't need raster ordering. + PLS_INTERLOCK_BEGIN; +#endif + + half2 coverageData = unpackHalf2x16(PLS_LOADUI(coverageCountBuffer)); + half coverageBufferID = coverageData.y; + half coverageCount = coverageBufferID == v_pathID ? coverageData.x : make_half(.0); + +#ifdef _EXPORTED_DRAW_INTERIOR_TRIANGLES + coverageCount += v_windingWeight; +#else + if (v_edgeDistance.y >= .0) // Stroke. + coverageCount = max(min(v_edgeDistance.x, v_edgeDistance.y), coverageCount); + else // Fill. (Back-face culling ensures v_edgeDistance.x is appropriately signed.) + coverageCount += v_edgeDistance.x; + + // Save the updated coverage. + PLS_STOREUI(coverageCountBuffer, packHalf2x16(make_half2(coverageCount, v_pathID))); +#endif + + // Convert coverageCount to coverage. + half coverage = abs(coverageCount); +#ifdef _EXPORTED_ENABLE_EVEN_ODD + if (_EXPORTED_ENABLE_EVEN_ODD && v_pathID < .0 /*even-odd*/) + { + coverage = 1. - make_half(abs(fract(coverage * .5) * 2. + -1.)); + } +#endif + coverage = min(coverage, make_half(1.)); // This also caps stroke coverage, which can be >1. + +#ifdef _EXPORTED_ENABLE_CLIPPING + if (_EXPORTED_ENABLE_CLIPPING && v_clipID < .0) // Update the clip buffer. + { + half clipID = -v_clipID; +#ifdef _EXPORTED_ENABLE_NESTED_CLIPPING + if (_EXPORTED_ENABLE_NESTED_CLIPPING) + { + half outerClipID = v_paint.x; + if (outerClipID != .0) + { + // This is a nested clip. Intersect coverage with the enclosing clip (outerClipID). + half2 clipData = unpackHalf2x16(PLS_LOADUI(clipBuffer)); + half clipContentID = clipData.y; + half outerClipCoverage; + if (clipContentID != clipID) + { + // First hit: either clipBuffer contains outerClipCoverage, or this pixel is not + // inside the outer clip and outerClipCoverage is zero. + outerClipCoverage = clipContentID == outerClipID ? clipData.x : .0; +#ifndef _EXPORTED_DRAW_INTERIOR_TRIANGLES + // Stash outerClipCoverage before overwriting clipBuffer, in case we hit this + // pixel again and need it. (Not necessary when drawing interior triangles + // because they always go last and don't overlap.) 
+ PLS_STORE4F(scratchColorBuffer, make_half4(outerClipCoverage, .0, .0, .0)); +#endif + } + else + { + // Subsequent hit: outerClipCoverage is stashed in scratchColorBuffer. + outerClipCoverage = PLS_LOAD4F(scratchColorBuffer).x; +#ifndef _EXPORTED_DRAW_INTERIOR_TRIANGLES + // Since interior triangles are always last, there's no need to preserve this + // value. + PLS_PRESERVE_4F(scratchColorBuffer); +#endif + } + coverage = min(coverage, outerClipCoverage); + } + } +#endif // @ENABLE_NESTED_CLIPPING + PLS_STOREUI(clipBuffer, packHalf2x16(make_half2(coverage, clipID))); + PLS_PRESERVE_4F(colorBuffer); + } + else // Render to the main framebuffer. +#endif // @ENABLE_CLIPPING + { +#ifdef _EXPORTED_ENABLE_CLIPPING + if (_EXPORTED_ENABLE_CLIPPING) + { + // Apply the clip. + if (v_clipID != .0) + { + // Clip IDs are not necessarily drawn in monotonically increasing order, so always + // check exact equality of the clipID. + half2 clipData = unpackHalf2x16(PLS_LOADUI(clipBuffer)); + half clipContentID = clipData.y; + coverage = (clipContentID == v_clipID) ? min(clipData.x, coverage) : make_half(.0); + } + PLS_PRESERVE_UI(clipBuffer); + } +#endif +#ifdef _EXPORTED_ENABLE_CLIP_RECT + if (_EXPORTED_ENABLE_CLIP_RECT) + { + half clipRectCoverage = min_value(cast_float4_to_half4(v_clipRect)); + coverage = clamp(clipRectCoverage, make_half(.0), coverage); + } +#endif // ENABLE_CLIP_RECT + + half4 color = find_paint_color(v_paint +#ifdef _EXPORTED_TARGET_VULKAN + , + imagePaintDDX, + imagePaintDDY +#endif + FRAGMENT_CONTEXT_UNPACK); + color.w *= coverage; + + half4 dstColor; + if (coverageBufferID != v_pathID) + { + // This is the first fragment from pathID to touch this pixel. + dstColor = PLS_LOAD4F(colorBuffer); +#ifndef _EXPORTED_DRAW_INTERIOR_TRIANGLES + // We don't need to store coverage when drawing interior triangles because they always + // go last and don't overlap, so every fragment is the final one in the path. + PLS_STORE4F(scratchColorBuffer, dstColor); +#endif + } + else + { + dstColor = PLS_LOAD4F(scratchColorBuffer); +#ifndef _EXPORTED_DRAW_INTERIOR_TRIANGLES + // Since interior triangles are always last, there's no need to preserve this value. + PLS_PRESERVE_4F(scratchColorBuffer); +#endif + } + + // Blend with the framebuffer color. +#ifdef _EXPORTED_ENABLE_ADVANCED_BLEND + if (_EXPORTED_ENABLE_ADVANCED_BLEND && v_blendMode != cast_uint_to_half(BLEND_SRC_OVER)) + { + color = advanced_blend(color, unmultiply(dstColor), cast_half_to_ushort(v_blendMode)); + } + else +#endif + { + color.xyz *= color.w; + color = color + dstColor * (1. - color.w); + } + + PLS_STORE4F(colorBuffer, color); + } + +#ifndef _EXPORTED_DRAW_INTERIOR_TRIANGLES + // Interior triangles don't overlap, so don't need raster ordering. 
+ PLS_INTERLOCK_END; +#endif + + EMIT_PLS; +} + +#else // USING_DEPTH_STENCIL + +FRAG_DATA_MAIN(half4, _EXPORTED_drawFragmentMain) +{ + VARYING_UNPACK(v_paint, float4); +#ifdef _EXPORTED_ENABLE_ADVANCED_BLEND + VARYING_UNPACK(v_blendMode, half); +#endif + + half4 color = find_paint_color(v_paint); + +#ifdef _EXPORTED_ENABLE_ADVANCED_BLEND + if (_EXPORTED_ENABLE_ADVANCED_BLEND) + { + half4 dstColor = TEXEL_FETCH(_EXPORTED_dstColorTexture, int2(floor(_fragCoord.xy))); + color = advanced_blend(color, unmultiply(dstColor), cast_half_to_ushort(v_blendMode)); + } + else +#endif // !ENABLE_ADVANCED_BLEND + { + color = premultiply(color); + } + EMIT_FRAG_DATA(color); +} + +#endif // !USING_DEPTH_STENCIL + +#endif // FRAGMENT +)==="; +} // namespace glsl +} // namespace gpu +} // namespace rive \ No newline at end of file diff --git a/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/draw_path.minified.ush b/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/draw_path.minified.ush new file mode 100644 index 00000000..1ba9e40d --- /dev/null +++ b/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/draw_path.minified.ush @@ -0,0 +1,529 @@ +/* + * Copyright 2022 Rive + */ + +#ifdef VERTEX +ATTR_BLOCK_BEGIN(Attrs) +#ifdef DRAW_INTERIOR_TRIANGLES +ATTR(0, packed_float3, _EXPORTED_a_triangleVertex); +#else +ATTR(0, float4, _EXPORTED_a_patchVertexData); // [localVertexID, outset, fillCoverage, vertexType] +ATTR(1, float4, _EXPORTED_a_mirroredVertexData); +#endif +ATTR_BLOCK_END +#endif + +VARYING_BLOCK_BEGIN +NO_PERSPECTIVE VARYING(0, float4, v_paint); +#ifndef USING_DEPTH_STENCIL +#ifdef DRAW_INTERIOR_TRIANGLES +OPTIONALLY_FLAT VARYING(1, half, v_windingWeight); +#else +NO_PERSPECTIVE VARYING(2, half2, v_edgeDistance); +#endif +OPTIONALLY_FLAT VARYING(3, half, v_pathID); +#ifdef ENABLE_CLIPPING +OPTIONALLY_FLAT VARYING(4, half, v_clipID); +#endif +#ifdef ENABLE_CLIP_RECT +NO_PERSPECTIVE VARYING(5, float4, v_clipRect); +#endif +#endif // !USING_DEPTH_STENCIL +#ifdef ENABLE_ADVANCED_BLEND +OPTIONALLY_FLAT VARYING(6, half, v_blendMode); +#endif +VARYING_BLOCK_END + +#ifdef VERTEX +VERTEX_MAIN(_EXPORTED_drawVertexMain, Attrs, attrs, _vertexID, _instanceID) +{ +#ifdef DRAW_INTERIOR_TRIANGLES + ATTR_UNPACK(_vertexID, attrs, _EXPORTED_a_triangleVertex, float3); +#else + ATTR_UNPACK(_vertexID, attrs, _EXPORTED_a_patchVertexData, float4); + ATTR_UNPACK(_vertexID, attrs, _EXPORTED_a_mirroredVertexData, float4); +#endif + + VARYING_INIT(v_paint, float4); +#ifndef USING_DEPTH_STENCIL +#ifdef DRAW_INTERIOR_TRIANGLES + VARYING_INIT(v_windingWeight, half); +#else + VARYING_INIT(v_edgeDistance, half2); +#endif + VARYING_INIT(v_pathID, half); +#ifdef ENABLE_CLIPPING + VARYING_INIT(v_clipID, half); +#endif +#ifdef ENABLE_CLIP_RECT + VARYING_INIT(v_clipRect, float4); +#endif +#endif // !USING_DEPTH_STENCIL +#ifdef ENABLE_ADVANCED_BLEND + VARYING_INIT(v_blendMode, half); +#endif + + bool shouldDiscardVertex = false; + ushort pathID; + float2 vertexPosition; +#ifdef USING_DEPTH_STENCIL + ushort pathZIndex; +#endif + +#ifdef DRAW_INTERIOR_TRIANGLES + vertexPosition = unpack_interior_triangle_vertex(_EXPORTED_a_triangleVertex, + pathID, + v_windingWeight VERTEX_CONTEXT_UNPACK); +#else + shouldDiscardVertex = !unpack_tessellated_path_vertex(_EXPORTED_a_patchVertexData, + _EXPORTED_a_mirroredVertexData, + _instanceID, + pathID, + vertexPosition +#ifndef USING_DEPTH_STENCIL + , + v_edgeDistance +#else + , + pathZIndex +#endif + VERTEX_CONTEXT_UNPACK); +#endif // 
!DRAW_INTERIOR_TRIANGLES + + uint2 paintData = STORAGE_BUFFER_LOAD2(_EXPORTED_paintBuffer, pathID); + +#ifndef USING_DEPTH_STENCIL + // Encode the integral pathID as a "half" that we know the hardware will see as a unique value + // in the fragment shader. + v_pathID = id_bits_to_f16(pathID, uniforms.pathIDGranularity); + + // Indicate even-odd fill rule by making pathID negative. + if ((paintData.x & PAINT_FLAG_EVEN_ODD) != 0u) + v_pathID = -v_pathID; +#endif // !USING_DEPTH_STENCIL + + uint paintType = paintData.x & 0xfu; +#ifdef ENABLE_CLIPPING + if (ENABLE_CLIPPING) + { + uint clipIDBits = (paintType == CLIP_UPDATE_PAINT_TYPE ? paintData.y : paintData.x) >> 16; + v_clipID = id_bits_to_f16(clipIDBits, uniforms.pathIDGranularity); + // Negative clipID means to update the clip buffer instead of the color buffer. + if (paintType == CLIP_UPDATE_PAINT_TYPE) + v_clipID = -v_clipID; + } +#endif +#ifdef ENABLE_ADVANCED_BLEND + if (ENABLE_ADVANCED_BLEND) + { + v_blendMode = float((paintData.x >> 4) & 0xfu); + } +#endif + + // Paint matrices operate on the fragment shader's "_fragCoord", which is bottom-up in GL. + float2 fragCoord = vertexPosition; +#ifdef FRAG_COORD_BOTTOM_UP + fragCoord.y = float(uniforms.renderTargetHeight) - fragCoord.y; +#endif + +#ifdef ENABLE_CLIP_RECT + if (ENABLE_CLIP_RECT) + { + // clipRectInverseMatrix transforms from pixel coordinates to a space where the clipRect is + // the normalized rectangle: [-1, -1, 1, 1]. + float2x2 clipRectInverseMatrix = + make_float2x2(STORAGE_BUFFER_LOAD4(_EXPORTED_paintAuxBuffer, pathID * 4u + 2u)); + float4 clipRectInverseTranslate = STORAGE_BUFFER_LOAD4(_EXPORTED_paintAuxBuffer, pathID * 4u + 3u); +#ifndef USING_DEPTH_STENCIL + v_clipRect = find_clip_rect_coverage_distances(clipRectInverseMatrix, + clipRectInverseTranslate.xy, + fragCoord); +#else // USING_DEPTH_STENCIL + set_clip_rect_plane_distances(clipRectInverseMatrix, + clipRectInverseTranslate.xy, + fragCoord); +#endif // USING_DEPTH_STENCIL + } +#endif // ENABLE_CLIP_RECT + + // Unpack the paint once we have a position. + if (paintType == SOLID_COLOR_PAINT_TYPE) + { + half4 color = unpackUnorm4x8(paintData.y); + v_paint = float4(color); + } +#ifdef ENABLE_CLIPPING + else if (ENABLE_CLIPPING && paintType == CLIP_UPDATE_PAINT_TYPE) + { + half outerClipID = id_bits_to_f16(paintData.x >> 16, uniforms.pathIDGranularity); + v_paint = float4(outerClipID, 0, 0, 0); + } +#endif + else + { + float2x2 paintMatrix = make_float2x2(STORAGE_BUFFER_LOAD4(_EXPORTED_paintAuxBuffer, pathID * 4u)); + float4 paintTranslate = STORAGE_BUFFER_LOAD4(_EXPORTED_paintAuxBuffer, pathID * 4u + 1u); + float2 paintCoord = MUL(paintMatrix, fragCoord) + paintTranslate.xy; + if (paintType == LINEAR_GRADIENT_PAINT_TYPE || paintType == RADIAL_GRADIENT_PAINT_TYPE) + { + // v_paint.a contains "-row" of the gradient ramp at texel center, in normalized space. + v_paint.w = -uintBitsToFloat(paintData.y); + // abs(v_paint.b) contains either: + // - 2 if the gradient ramp spans an entire row. + // - x0 of the gradient ramp in normalized space, if it's a simple 2-texel ramp. + if (paintTranslate.z > .9) // paintTranslate.z is either ~1 or ~1/GRAD_TEXTURE_WIDTH. + { + // Complex ramps span an entire row. Set it to 2 to convey this. + v_paint.z = 2.; + } + else + { + // This is a simple ramp. + v_paint.z = paintTranslate.w; + } + if (paintType == LINEAR_GRADIENT_PAINT_TYPE) + { + // The paint is a linear gradient. + v_paint.y = .0; + v_paint.x = paintCoord.x; + } + else + { + // The paint is a radial gradient. 
Mark v_paint.b negative to indicate this to the + // fragment shader. (v_paint.b can't be zero because the gradient ramp is aligned on + // pixel centers, so negating it will always produce a negative number.) + v_paint.z = -v_paint.z; + v_paint.xy = paintCoord.xy; + } + } + else // IMAGE_PAINT_TYPE + { + // v_paint.a <= -1. signals that the paint is an image. + // v_paint.b is the image opacity. + // v_paint.rg is the normalized image texture coordinate (built into the paintMatrix). + float opacity = uintBitsToFloat(paintData.y); + v_paint = float4(paintCoord.x, paintCoord.y, opacity, -2.); + } + } + + float4 pos; + if (!shouldDiscardVertex) + { + pos = RENDER_TARGET_COORD_TO_CLIP_COORD(vertexPosition); +#ifdef USING_DEPTH_STENCIL + pos.z = normalize_z_index(pathZIndex); +#endif + } + else + { + pos = float4(uniforms.vertexDiscardValue, + uniforms.vertexDiscardValue, + uniforms.vertexDiscardValue, + uniforms.vertexDiscardValue); + } + + VARYING_PACK(v_paint); +#ifndef USING_DEPTH_STENCIL +#ifdef DRAW_INTERIOR_TRIANGLES + VARYING_PACK(v_windingWeight); +#else + VARYING_PACK(v_edgeDistance); +#endif + VARYING_PACK(v_pathID); +#ifdef ENABLE_CLIPPING + VARYING_PACK(v_clipID); +#endif +#ifdef ENABLE_CLIP_RECT + VARYING_PACK(v_clipRect); +#endif +#endif // !USING_DEPTH_STENCIL +#ifdef ENABLE_ADVANCED_BLEND + VARYING_PACK(v_blendMode); +#endif + EMIT_VERTEX(pos); +} +#endif + +#ifdef FRAGMENT +FRAG_TEXTURE_BLOCK_BEGIN +TEXTURE_RGBA8(PER_FLUSH_BINDINGS_SET, GRAD_TEXTURE_IDX, _EXPORTED_gradTexture); +TEXTURE_RGBA8(PER_DRAW_BINDINGS_SET, IMAGE_TEXTURE_IDX, _EXPORTED_imageTexture); +#ifdef USING_DEPTH_STENCIL +#ifdef ENABLE_ADVANCED_BLEND +TEXTURE_RGBA8(PER_FLUSH_BINDINGS_SET, DST_COLOR_TEXTURE_IDX, _EXPORTED_dstColorTexture); +#endif +#endif +FRAG_TEXTURE_BLOCK_END + +SAMPLER_LINEAR(GRAD_TEXTURE_IDX, gradSampler) +SAMPLER_MIPMAP(IMAGE_TEXTURE_IDX, imageSampler) + +FRAG_STORAGE_BUFFER_BLOCK_BEGIN +FRAG_STORAGE_BUFFER_BLOCK_END + +INLINE half4 find_paint_color(float4 paint +#ifdef TARGET_VULKAN + , + float2 imagePaintDDX, + float2 imagePaintDDY +#endif + FRAGMENT_CONTEXT_DECL) +{ + if (paint.w >= .0) // Is the paint a solid color? + { + return cast_float4_to_half4(paint); + } + else if (paint.w > -1.) // Is the paint a gradient (linear or radial)? + { + float t = paint.z > .0 ? /*linear*/ paint.x : /*radial*/ length(paint.xy); + t = clamp(t, .0, 1.); + float span = abs(paint.z); + float x = span > 1. ? /*entire row*/ (1. - 1. / GRAD_TEXTURE_WIDTH) * t + + (.5 / GRAD_TEXTURE_WIDTH) + : /*two texels*/ (1. / GRAD_TEXTURE_WIDTH) * t + span; + float row = -paint.w; + // Our gradient texture is not mipmapped. Issue a texture-sample that explicitly does not + // find derivatives for LOD computation (by specifying derivatives directly). + return TEXTURE_SAMPLE_LOD(_EXPORTED_gradTexture, gradSampler, float2(x, row), .0); + } + else // The paint is an image. + { + half4 color; +#ifdef TARGET_VULKAN + // Vulkan validators require explicit derivatives when sampling a texture in + // "non-uniform" control flow. See above. + color = TEXTURE_SAMPLE_GRAD(_EXPORTED_imageTexture, + imageSampler, + paint.xy, + imagePaintDDX, + imagePaintDDY); +#else + color = TEXTURE_SAMPLE(_EXPORTED_imageTexture, imageSampler, paint.xy); +#endif + color.w *= paint.z; // paint.b holds the opacity of the image.
+ return color; + } +} + +#ifndef USING_DEPTH_STENCIL + +PLS_BLOCK_BEGIN +PLS_DECL4F(COLOR_PLANE_IDX, colorBuffer); +#if defined(ENABLE_CLIPPING) || defined(PLS_IMPL_ANGLE) +PLS_DECLUI(CLIP_PLANE_IDX, clipBuffer); +#endif +PLS_DECL4F(SCRATCH_COLOR_PLANE_IDX, scratchColorBuffer); +PLS_DECLUI(COVERAGE_PLANE_IDX, coverageCountBuffer); +PLS_BLOCK_END + +PLS_MAIN(_EXPORTED_drawFragmentMain) +{ + VARYING_UNPACK(v_paint, float4); +#ifdef DRAW_INTERIOR_TRIANGLES + VARYING_UNPACK(v_windingWeight, half); +#else + VARYING_UNPACK(v_edgeDistance, half2); +#endif + VARYING_UNPACK(v_pathID, half); +#ifdef ENABLE_CLIPPING + VARYING_UNPACK(v_clipID, half); +#endif +#ifdef ENABLE_CLIP_RECT + VARYING_UNPACK(v_clipRect, float4); +#endif +#ifdef ENABLE_ADVANCED_BLEND + VARYING_UNPACK(v_blendMode, half); +#endif + +#ifdef TARGET_VULKAN + // Strict validators require derivatives (i.e., for a mipmapped texture sample) to be computed + // within uniform control flow. + // Our control flow for texture sampling is uniform for an entire triangle, so we're fine, but + // the validators don't know this. + // If this might be a problem (e.g., for WebGPU), just find the potential image paint + // derivatives here. + float2 imagePaintDDX = dFdx(v_paint.xy); + float2 imagePaintDDY = dFdy(v_paint.xy); +#endif + +#ifndef DRAW_INTERIOR_TRIANGLES + // Interior triangles don't overlap, so don't need raster ordering. + PLS_INTERLOCK_BEGIN; +#endif + + half2 coverageData = unpackHalf2x16(PLS_LOADUI(coverageCountBuffer)); + half coverageBufferID = coverageData.y; + half coverageCount = coverageBufferID == v_pathID ? coverageData.x : make_half(.0); + +#ifdef DRAW_INTERIOR_TRIANGLES + coverageCount += v_windingWeight; +#else + if (v_edgeDistance.y >= .0) // Stroke. + coverageCount = max(min(v_edgeDistance.x, v_edgeDistance.y), coverageCount); + else // Fill. (Back-face culling ensures v_edgeDistance.x is appropriately signed.) + coverageCount += v_edgeDistance.x; + + // Save the updated coverage. + PLS_STOREUI(coverageCountBuffer, packHalf2x16(make_half2(coverageCount, v_pathID))); +#endif + + // Convert coverageCount to coverage. + half coverage = abs(coverageCount); +#ifdef ENABLE_EVEN_ODD + if (ENABLE_EVEN_ODD && v_pathID < .0 /*even-odd*/) + { + coverage = 1. - make_half(abs(fract(coverage * .5) * 2. + -1.)); + } +#endif + coverage = min(coverage, make_half(1.)); // This also caps stroke coverage, which can be >1. + +#ifdef ENABLE_CLIPPING + if (ENABLE_CLIPPING && v_clipID < .0) // Update the clip buffer. + { + half clipID = -v_clipID; +#ifdef ENABLE_NESTED_CLIPPING + if (ENABLE_NESTED_CLIPPING) + { + half outerClipID = v_paint.x; + if (outerClipID != .0) + { + // This is a nested clip. Intersect coverage with the enclosing clip (outerClipID). + half2 clipData = unpackHalf2x16(PLS_LOADUI(clipBuffer)); + half clipContentID = clipData.y; + half outerClipCoverage; + if (clipContentID != clipID) + { + // First hit: either clipBuffer contains outerClipCoverage, or this pixel is not + // inside the outer clip and outerClipCoverage is zero. + outerClipCoverage = clipContentID == outerClipID ? clipData.x : .0; +#ifndef DRAW_INTERIOR_TRIANGLES + // Stash outerClipCoverage before overwriting clipBuffer, in case we hit this + // pixel again and need it. (Not necessary when drawing interior triangles + // because they always go last and don't overlap.) + PLS_STORE4F(scratchColorBuffer, make_half4(outerClipCoverage, .0, .0, .0)); +#endif + } + else + { + // Subsequent hit: outerClipCoverage is stashed in scratchColorBuffer. 
+ outerClipCoverage = PLS_LOAD4F(scratchColorBuffer).x; +#ifndef DRAW_INTERIOR_TRIANGLES + // Since interior triangles are always last, there's no need to preserve this + // value. + PLS_PRESERVE_4F(scratchColorBuffer); +#endif + } + coverage = min(coverage, outerClipCoverage); + } + } +#endif // @ENABLE_NESTED_CLIPPING + PLS_STOREUI(clipBuffer, packHalf2x16(make_half2(coverage, clipID))); + PLS_PRESERVE_4F(colorBuffer); + } + else // Render to the main framebuffer. +#endif // @ENABLE_CLIPPING + { +#ifdef ENABLE_CLIPPING + if (ENABLE_CLIPPING) + { + // Apply the clip. + if (v_clipID != .0) + { + // Clip IDs are not necessarily drawn in monotonically increasing order, so always + // check exact equality of the clipID. + half2 clipData = unpackHalf2x16(PLS_LOADUI(clipBuffer)); + half clipContentID = clipData.y; + coverage = (clipContentID == v_clipID) ? min(clipData.x, coverage) : make_half(.0); + } + PLS_PRESERVE_UI(clipBuffer); + } +#endif +#ifdef ENABLE_CLIP_RECT + if (ENABLE_CLIP_RECT) + { + half clipRectCoverage = min_value(cast_float4_to_half4(v_clipRect)); + coverage = clamp(clipRectCoverage, make_half(.0), coverage); + } +#endif // ENABLE_CLIP_RECT + + half4 color = find_paint_color(v_paint +#ifdef TARGET_VULKAN + , + imagePaintDDX, + imagePaintDDY +#endif + FRAGMENT_CONTEXT_UNPACK); + color.w *= coverage; + + half4 dstColor; + if (coverageBufferID != v_pathID) + { + // This is the first fragment from pathID to touch this pixel. + dstColor = PLS_LOAD4F(colorBuffer); +#ifndef DRAW_INTERIOR_TRIANGLES + // We don't need to store coverage when drawing interior triangles because they always + // go last and don't overlap, so every fragment is the final one in the path. + PLS_STORE4F(scratchColorBuffer, dstColor); +#endif + } + else + { + dstColor = PLS_LOAD4F(scratchColorBuffer); +#ifndef DRAW_INTERIOR_TRIANGLES + // Since interior triangles are always last, there's no need to preserve this value. + PLS_PRESERVE_4F(scratchColorBuffer); +#endif + } + + // Blend with the framebuffer color. +#ifdef ENABLE_ADVANCED_BLEND + if (ENABLE_ADVANCED_BLEND && v_blendMode != cast_uint_to_half(BLEND_SRC_OVER)) + { + color = advanced_blend(color, unmultiply(dstColor), cast_half_to_ushort(v_blendMode)); + } + else +#endif + { + color.xyz *= color.w; + color = color + dstColor * (1. - color.w); + } + + PLS_STORE4F(colorBuffer, color); + } + +#ifndef DRAW_INTERIOR_TRIANGLES + // Interior triangles don't overlap, so don't need raster ordering. 
+ PLS_INTERLOCK_END; +#endif + + EMIT_PLS; +} + +#else // USING_DEPTH_STENCIL + +FRAG_DATA_MAIN(half4, _EXPORTED_drawFragmentMain) +{ + VARYING_UNPACK(v_paint, float4); +#ifdef ENABLE_ADVANCED_BLEND + VARYING_UNPACK(v_blendMode, half); +#endif + + half4 color = find_paint_color(v_paint); + +#ifdef ENABLE_ADVANCED_BLEND + if (ENABLE_ADVANCED_BLEND) + { + half4 dstColor = TEXEL_FETCH(_EXPORTED_dstColorTexture, int2(floor(_fragCoord.xy))); + color = advanced_blend(color, unmultiply(dstColor), cast_half_to_ushort(v_blendMode)); + } + else +#endif // !ENABLE_ADVANCED_BLEND + { + color = premultiply(color); + } + EMIT_FRAG_DATA(color); +} + +#endif // !USING_DEPTH_STENCIL + +#endif // FRAGMENT diff --git a/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/draw_path_common.exports.h b/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/draw_path_common.exports.h new file mode 100644 index 00000000..2d88d890 --- /dev/null +++ b/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/draw_path_common.exports.h @@ -0,0 +1,178 @@ +#pragma once + +#define GLSL_CLEAR_CLIP "_EXPORTED_CLEAR_CLIP" +#define GLSL_CLEAR_CLIP_raw _EXPORTED_CLEAR_CLIP +#define GLSL_CLEAR_COLOR "_EXPORTED_CLEAR_COLOR" +#define GLSL_CLEAR_COLOR_raw _EXPORTED_CLEAR_COLOR +#define GLSL_CLEAR_COVERAGE "_EXPORTED_CLEAR_COVERAGE" +#define GLSL_CLEAR_COVERAGE_raw _EXPORTED_CLEAR_COVERAGE +#define GLSL_COALESCED_PLS_RESOLVE_AND_TRANSFER "_EXPORTED_COALESCED_PLS_RESOLVE_AND_TRANSFER" +#define GLSL_COALESCED_PLS_RESOLVE_AND_TRANSFER_raw _EXPORTED_COALESCED_PLS_RESOLVE_AND_TRANSFER +#define GLSL_COLOR_PLANE_IDX_OVERRIDE "_EXPORTED_COLOR_PLANE_IDX_OVERRIDE" +#define GLSL_COLOR_PLANE_IDX_OVERRIDE_raw _EXPORTED_COLOR_PLANE_IDX_OVERRIDE +#define GLSL_DISABLE_SHADER_STORAGE_BUFFERS "_EXPORTED_DISABLE_SHADER_STORAGE_BUFFERS" +#define GLSL_DISABLE_SHADER_STORAGE_BUFFERS_raw _EXPORTED_DISABLE_SHADER_STORAGE_BUFFERS +#define GLSL_DRAW_IMAGE "_EXPORTED_DRAW_IMAGE" +#define GLSL_DRAW_IMAGE_raw _EXPORTED_DRAW_IMAGE +#define GLSL_DRAW_IMAGE_MESH "_EXPORTED_DRAW_IMAGE_MESH" +#define GLSL_DRAW_IMAGE_MESH_raw _EXPORTED_DRAW_IMAGE_MESH +#define GLSL_DRAW_IMAGE_RECT "_EXPORTED_DRAW_IMAGE_RECT" +#define GLSL_DRAW_IMAGE_RECT_raw _EXPORTED_DRAW_IMAGE_RECT +#define GLSL_DRAW_INTERIOR_TRIANGLES "_EXPORTED_DRAW_INTERIOR_TRIANGLES" +#define GLSL_DRAW_INTERIOR_TRIANGLES_raw _EXPORTED_DRAW_INTERIOR_TRIANGLES +#define GLSL_DRAW_PATH "_EXPORTED_DRAW_PATH" +#define GLSL_DRAW_PATH_raw _EXPORTED_DRAW_PATH +#define GLSL_DRAW_RENDER_TARGET_UPDATE_BOUNDS "_EXPORTED_DRAW_RENDER_TARGET_UPDATE_BOUNDS" +#define GLSL_DRAW_RENDER_TARGET_UPDATE_BOUNDS_raw _EXPORTED_DRAW_RENDER_TARGET_UPDATE_BOUNDS +#define GLSL_ENABLE_ADVANCED_BLEND "_EXPORTED_ENABLE_ADVANCED_BLEND" +#define GLSL_ENABLE_ADVANCED_BLEND_raw _EXPORTED_ENABLE_ADVANCED_BLEND +#define GLSL_ENABLE_BINDLESS_TEXTURES "_EXPORTED_ENABLE_BINDLESS_TEXTURES" +#define GLSL_ENABLE_BINDLESS_TEXTURES_raw _EXPORTED_ENABLE_BINDLESS_TEXTURES +#define GLSL_ENABLE_CLIPPING "_EXPORTED_ENABLE_CLIPPING" +#define GLSL_ENABLE_CLIPPING_raw _EXPORTED_ENABLE_CLIPPING +#define GLSL_ENABLE_CLIP_RECT "_EXPORTED_ENABLE_CLIP_RECT" +#define GLSL_ENABLE_CLIP_RECT_raw _EXPORTED_ENABLE_CLIP_RECT +#define GLSL_ENABLE_EVEN_ODD "_EXPORTED_ENABLE_EVEN_ODD" +#define GLSL_ENABLE_EVEN_ODD_raw _EXPORTED_ENABLE_EVEN_ODD +#define GLSL_ENABLE_HSL_BLEND_MODES "_EXPORTED_ENABLE_HSL_BLEND_MODES" +#define GLSL_ENABLE_HSL_BLEND_MODES_raw _EXPORTED_ENABLE_HSL_BLEND_MODES +#define GLSL_ENABLE_INSTANCE_INDEX 
"_EXPORTED_ENABLE_INSTANCE_INDEX" +#define GLSL_ENABLE_INSTANCE_INDEX_raw _EXPORTED_ENABLE_INSTANCE_INDEX +#define GLSL_ENABLE_KHR_BLEND "_EXPORTED_ENABLE_KHR_BLEND" +#define GLSL_ENABLE_KHR_BLEND_raw _EXPORTED_ENABLE_KHR_BLEND +#define GLSL_ENABLE_MIN_16_PRECISION "_EXPORTED_ENABLE_MIN_16_PRECISION" +#define GLSL_ENABLE_MIN_16_PRECISION_raw _EXPORTED_ENABLE_MIN_16_PRECISION +#define GLSL_ENABLE_NESTED_CLIPPING "_EXPORTED_ENABLE_NESTED_CLIPPING" +#define GLSL_ENABLE_NESTED_CLIPPING_raw _EXPORTED_ENABLE_NESTED_CLIPPING +#define GLSL_ENABLE_RASTERIZER_ORDERED_VIEWS "_EXPORTED_ENABLE_RASTERIZER_ORDERED_VIEWS" +#define GLSL_ENABLE_RASTERIZER_ORDERED_VIEWS_raw _EXPORTED_ENABLE_RASTERIZER_ORDERED_VIEWS +#define GLSL_ENABLE_SPIRV_CROSS_BASE_INSTANCE "_EXPORTED_ENABLE_SPIRV_CROSS_BASE_INSTANCE" +#define GLSL_ENABLE_SPIRV_CROSS_BASE_INSTANCE_raw _EXPORTED_ENABLE_SPIRV_CROSS_BASE_INSTANCE +#define GLSL_ENABLE_TYPED_UAV_LOAD_STORE "_EXPORTED_ENABLE_TYPED_UAV_LOAD_STORE" +#define GLSL_ENABLE_TYPED_UAV_LOAD_STORE_raw _EXPORTED_ENABLE_TYPED_UAV_LOAD_STORE +#define GLSL_FIXED_FUNCTION_COLOR_BLEND "_EXPORTED_FIXED_FUNCTION_COLOR_BLEND" +#define GLSL_FIXED_FUNCTION_COLOR_BLEND_raw _EXPORTED_FIXED_FUNCTION_COLOR_BLEND +#define GLSL_FRAGMENT "_EXPORTED_FRAGMENT" +#define GLSL_FRAGMENT_raw _EXPORTED_FRAGMENT +#define GLSL_FlushUniforms "_EXPORTED_FlushUniforms" +#define GLSL_FlushUniforms_raw _EXPORTED_FlushUniforms +#define GLSL_GLSL_VERSION "_EXPORTED_GLSL_VERSION" +#define GLSL_GLSL_VERSION_raw _EXPORTED_GLSL_VERSION +#define GLSL_INITIALIZE_PLS "_EXPORTED_INITIALIZE_PLS" +#define GLSL_INITIALIZE_PLS_raw _EXPORTED_INITIALIZE_PLS +#define GLSL_ImageDrawUniforms "_EXPORTED_ImageDrawUniforms" +#define GLSL_ImageDrawUniforms_raw _EXPORTED_ImageDrawUniforms +#define GLSL_LOAD_COLOR "_EXPORTED_LOAD_COLOR" +#define GLSL_LOAD_COLOR_raw _EXPORTED_LOAD_COLOR +#define GLSL_OPTIONALLY_FLAT "_EXPORTED_OPTIONALLY_FLAT" +#define GLSL_OPTIONALLY_FLAT_raw _EXPORTED_OPTIONALLY_FLAT +#define GLSL_PLS_IMPL_ANGLE "_EXPORTED_PLS_IMPL_ANGLE" +#define GLSL_PLS_IMPL_ANGLE_raw _EXPORTED_PLS_IMPL_ANGLE +#define GLSL_PLS_IMPL_DEVICE_BUFFER "_EXPORTED_PLS_IMPL_DEVICE_BUFFER" +#define GLSL_PLS_IMPL_DEVICE_BUFFER_raw _EXPORTED_PLS_IMPL_DEVICE_BUFFER +#define GLSL_PLS_IMPL_DEVICE_BUFFER_RASTER_ORDERED "_EXPORTED_PLS_IMPL_DEVICE_BUFFER_RASTER_ORDERED" +#define GLSL_PLS_IMPL_DEVICE_BUFFER_RASTER_ORDERED_raw _EXPORTED_PLS_IMPL_DEVICE_BUFFER_RASTER_ORDERED +#define GLSL_PLS_IMPL_EXT_NATIVE "_EXPORTED_PLS_IMPL_EXT_NATIVE" +#define GLSL_PLS_IMPL_EXT_NATIVE_raw _EXPORTED_PLS_IMPL_EXT_NATIVE +#define GLSL_PLS_IMPL_FRAMEBUFFER_FETCH "_EXPORTED_PLS_IMPL_FRAMEBUFFER_FETCH" +#define GLSL_PLS_IMPL_FRAMEBUFFER_FETCH_raw _EXPORTED_PLS_IMPL_FRAMEBUFFER_FETCH +#define GLSL_PLS_IMPL_NONE "_EXPORTED_PLS_IMPL_NONE" +#define GLSL_PLS_IMPL_NONE_raw _EXPORTED_PLS_IMPL_NONE +#define GLSL_PLS_IMPL_STORAGE_TEXTURE "_EXPORTED_PLS_IMPL_STORAGE_TEXTURE" +#define GLSL_PLS_IMPL_STORAGE_TEXTURE_raw _EXPORTED_PLS_IMPL_STORAGE_TEXTURE +#define GLSL_PLS_IMPL_SUBPASS_LOAD "_EXPORTED_PLS_IMPL_SUBPASS_LOAD" +#define GLSL_PLS_IMPL_SUBPASS_LOAD_raw _EXPORTED_PLS_IMPL_SUBPASS_LOAD +#define GLSL_RESOLVE_PLS "_EXPORTED_RESOLVE_PLS" +#define GLSL_RESOLVE_PLS_raw _EXPORTED_RESOLVE_PLS +#define GLSL_STORE_COLOR "_EXPORTED_STORE_COLOR" +#define GLSL_STORE_COLOR_raw _EXPORTED_STORE_COLOR +#define GLSL_STORE_COLOR_CLEAR "_EXPORTED_STORE_COLOR_CLEAR" +#define GLSL_STORE_COLOR_CLEAR_raw _EXPORTED_STORE_COLOR_CLEAR +#define GLSL_SWIZZLE_COLOR_BGRA_TO_RGBA 
"_EXPORTED_SWIZZLE_COLOR_BGRA_TO_RGBA" +#define GLSL_SWIZZLE_COLOR_BGRA_TO_RGBA_raw _EXPORTED_SWIZZLE_COLOR_BGRA_TO_RGBA +#define GLSL_TARGET_VULKAN "_EXPORTED_TARGET_VULKAN" +#define GLSL_TARGET_VULKAN_raw _EXPORTED_TARGET_VULKAN +#define GLSL_USE_GENERATED_UNIFORMS "_EXPORTED_USE_GENERATED_UNIFORMS" +#define GLSL_USE_GENERATED_UNIFORMS_raw _EXPORTED_USE_GENERATED_UNIFORMS +#define GLSL_USING_DEPTH_STENCIL "_EXPORTED_USING_DEPTH_STENCIL" +#define GLSL_USING_DEPTH_STENCIL_raw _EXPORTED_USING_DEPTH_STENCIL +#define GLSL_USING_PLS_STORAGE_TEXTURES "_EXPORTED_USING_PLS_STORAGE_TEXTURES" +#define GLSL_USING_PLS_STORAGE_TEXTURES_raw _EXPORTED_USING_PLS_STORAGE_TEXTURES +#define GLSL_VERTEX "_EXPORTED_VERTEX" +#define GLSL_VERTEX_raw _EXPORTED_VERTEX +#define GLSL_a_args "_EXPORTED_a_args" +#define GLSL_a_args_raw _EXPORTED_a_args +#define GLSL_a_args_a "_EXPORTED_a_args_a" +#define GLSL_a_args_a_raw _EXPORTED_a_args_a +#define GLSL_a_args_b "_EXPORTED_a_args_b" +#define GLSL_a_args_b_raw _EXPORTED_a_args_b +#define GLSL_a_args_c "_EXPORTED_a_args_c" +#define GLSL_a_args_c_raw _EXPORTED_a_args_c +#define GLSL_a_args_d "_EXPORTED_a_args_d" +#define GLSL_a_args_d_raw _EXPORTED_a_args_d +#define GLSL_a_imageRectVertex "_EXPORTED_a_imageRectVertex" +#define GLSL_a_imageRectVertex_raw _EXPORTED_a_imageRectVertex +#define GLSL_a_joinTan_and_ys "_EXPORTED_a_joinTan_and_ys" +#define GLSL_a_joinTan_and_ys_raw _EXPORTED_a_joinTan_and_ys +#define GLSL_a_mirroredVertexData "_EXPORTED_a_mirroredVertexData" +#define GLSL_a_mirroredVertexData_raw _EXPORTED_a_mirroredVertexData +#define GLSL_a_p0p1_ "_EXPORTED_a_p0p1_" +#define GLSL_a_p0p1__raw _EXPORTED_a_p0p1_ +#define GLSL_a_p2p3_ "_EXPORTED_a_p2p3_" +#define GLSL_a_p2p3__raw _EXPORTED_a_p2p3_ +#define GLSL_a_patchVertexData "_EXPORTED_a_patchVertexData" +#define GLSL_a_patchVertexData_raw _EXPORTED_a_patchVertexData +#define GLSL_a_position "_EXPORTED_a_position" +#define GLSL_a_position_raw _EXPORTED_a_position +#define GLSL_a_span "_EXPORTED_a_span" +#define GLSL_a_span_raw _EXPORTED_a_span +#define GLSL_a_span_a "_EXPORTED_a_span_a" +#define GLSL_a_span_a_raw _EXPORTED_a_span_a +#define GLSL_a_span_b "_EXPORTED_a_span_b" +#define GLSL_a_span_b_raw _EXPORTED_a_span_b +#define GLSL_a_span_c "_EXPORTED_a_span_c" +#define GLSL_a_span_c_raw _EXPORTED_a_span_c +#define GLSL_a_span_d "_EXPORTED_a_span_d" +#define GLSL_a_span_d_raw _EXPORTED_a_span_d +#define GLSL_a_texCoord "_EXPORTED_a_texCoord" +#define GLSL_a_texCoord_raw _EXPORTED_a_texCoord +#define GLSL_a_triangleVertex "_EXPORTED_a_triangleVertex" +#define GLSL_a_triangleVertex_raw _EXPORTED_a_triangleVertex +#define GLSL_blitFragmentMain "_EXPORTED_blitFragmentMain" +#define GLSL_blitFragmentMain_raw _EXPORTED_blitFragmentMain +#define GLSL_blitTextureSource "_EXPORTED_blitTextureSource" +#define GLSL_blitTextureSource_raw _EXPORTED_blitTextureSource +#define GLSL_blitVertexMain "_EXPORTED_blitVertexMain" +#define GLSL_blitVertexMain_raw _EXPORTED_blitVertexMain +#define GLSL_clearColor "_EXPORTED_clearColor" +#define GLSL_clearColor_raw _EXPORTED_clearColor +#define GLSL_colorRampFragmentMain "_EXPORTED_colorRampFragmentMain" +#define GLSL_colorRampFragmentMain_raw _EXPORTED_colorRampFragmentMain +#define GLSL_colorRampVertexMain "_EXPORTED_colorRampVertexMain" +#define GLSL_colorRampVertexMain_raw _EXPORTED_colorRampVertexMain +#define GLSL_contourBuffer "_EXPORTED_contourBuffer" +#define GLSL_contourBuffer_raw _EXPORTED_contourBuffer +#define GLSL_drawFragmentMain "_EXPORTED_drawFragmentMain" 
+#define GLSL_drawFragmentMain_raw _EXPORTED_drawFragmentMain +#define GLSL_drawVertexMain "_EXPORTED_drawVertexMain" +#define GLSL_drawVertexMain_raw _EXPORTED_drawVertexMain +#define GLSL_dstColorTexture "_EXPORTED_dstColorTexture" +#define GLSL_dstColorTexture_raw _EXPORTED_dstColorTexture +#define GLSL_gradTexture "_EXPORTED_gradTexture" +#define GLSL_gradTexture_raw _EXPORTED_gradTexture +#define GLSL_imageTexture "_EXPORTED_imageTexture" +#define GLSL_imageTexture_raw _EXPORTED_imageTexture +#define GLSL_paintAuxBuffer "_EXPORTED_paintAuxBuffer" +#define GLSL_paintAuxBuffer_raw _EXPORTED_paintAuxBuffer +#define GLSL_paintBuffer "_EXPORTED_paintBuffer" +#define GLSL_paintBuffer_raw _EXPORTED_paintBuffer +#define GLSL_pathBuffer "_EXPORTED_pathBuffer" +#define GLSL_pathBuffer_raw _EXPORTED_pathBuffer +#define GLSL_stencilVertexMain "_EXPORTED_stencilVertexMain" +#define GLSL_stencilVertexMain_raw _EXPORTED_stencilVertexMain +#define GLSL_tessVertexTexture "_EXPORTED_tessVertexTexture" +#define GLSL_tessVertexTexture_raw _EXPORTED_tessVertexTexture +#define GLSL_tessellateFragmentMain "_EXPORTED_tessellateFragmentMain" +#define GLSL_tessellateFragmentMain_raw _EXPORTED_tessellateFragmentMain +#define GLSL_tessellateVertexMain "_EXPORTED_tessellateVertexMain" +#define GLSL_tessellateVertexMain_raw _EXPORTED_tessellateVertexMain diff --git a/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/draw_path_common.glsl.hpp b/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/draw_path_common.glsl.hpp new file mode 100644 index 00000000..8d4f0c8b --- /dev/null +++ b/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/draw_path_common.glsl.hpp @@ -0,0 +1,312 @@ +#pragma once + +#include "draw_path_common.exports.h" + +namespace rive { +namespace gpu { +namespace glsl { +const char draw_path_common[] = R"===(/* + * Copyright 2023 Rive + */ + +// Common functions shared by draw shaders. + +#ifdef _EXPORTED_VERTEX + +VERTEX_TEXTURE_BLOCK_BEGIN +TEXTURE_RGBA32UI(PER_FLUSH_BINDINGS_SET, TESS_VERTEX_TEXTURE_IDX, _EXPORTED_tessVertexTexture); +VERTEX_TEXTURE_BLOCK_END + +VERTEX_STORAGE_BUFFER_BLOCK_BEGIN +STORAGE_BUFFER_U32x4(PATH_BUFFER_IDX, PathBuffer, _EXPORTED_pathBuffer); +STORAGE_BUFFER_U32x2(PAINT_BUFFER_IDX, PaintBuffer, _EXPORTED_paintBuffer); +STORAGE_BUFFER_F32x4(PAINT_AUX_BUFFER_IDX, PaintAuxBuffer, _EXPORTED_paintAuxBuffer); +STORAGE_BUFFER_U32x4(CONTOUR_BUFFER_IDX, ContourBuffer, _EXPORTED_contourBuffer); +VERTEX_STORAGE_BUFFER_BLOCK_END + +#ifdef _EXPORTED_DRAW_PATH +INLINE int2 tess_texel_coord(int texelIndex) +{ + return int2(texelIndex & ((1 << TESS_TEXTURE_WIDTH_LOG2) - 1), + texelIndex >> TESS_TEXTURE_WIDTH_LOG2); +} + +INLINE float manhattan_pixel_width(float2x2 M, float2 normalized) +{ + + float2 v = MUL(M, normalized); + return (abs(v.x) + abs(v.y)) * (1. / dot(v, v)); +} + +INLINE bool unpack_tessellated_path_vertex(float4 patchVertexData, + float4 mirroredVertexData, + int _instanceID, + OUT(ushort) o_pathID, + OUT(float2) o_vertexPosition +#ifndef _EXPORTED_USING_DEPTH_STENCIL + , + OUT(half2) o_edgeDistance +#else + , + OUT(ushort) o_pathZIndex +#endif + VERTEX_CONTEXT_DECL) +{ + // Unpack patchVertexData. 
+ int localVertexID = int(patchVertexData.x); + float outset = patchVertexData.y; + float fillCoverage = patchVertexData.z; + int patchSegmentSpan = floatBitsToInt(patchVertexData.w) >> 2; + int vertexType = floatBitsToInt(patchVertexData.w) & 3; + + // Fetch a vertex that definitely belongs to the contour we're drawing. + int vertexIDOnContour = min(localVertexID, patchSegmentSpan - 1); + int tessVertexIdx = _instanceID * patchSegmentSpan + vertexIDOnContour; + uint4 tessVertexData = TEXEL_FETCH(_EXPORTED_tessVertexTexture, tess_texel_coord(tessVertexIdx)); + uint contourIDWithFlags = tessVertexData.w; + + // Fetch and unpack the contour referenced by the tessellation vertex. + uint4 contourData = STORAGE_BUFFER_LOAD4(_EXPORTED_contourBuffer, contour_data_idx(contourIDWithFlags)); + float2 midpoint = uintBitsToFloat(contourData.xy); + o_pathID = cast_uint_to_ushort(contourData.z & 0xffffu); + uint vertexIndex0 = contourData.w; + + // Fetch and unpack the path. + float2x2 M = make_float2x2(uintBitsToFloat(STORAGE_BUFFER_LOAD4(_EXPORTED_pathBuffer, o_pathID * 2u))); + uint4 pathData = STORAGE_BUFFER_LOAD4(_EXPORTED_pathBuffer, o_pathID * 2u + 1u); + float2 translate = uintBitsToFloat(pathData.xy); + + float strokeRadius = uintBitsToFloat(pathData.z); +#ifdef _EXPORTED_USING_DEPTH_STENCIL + o_pathZIndex = cast_uint_to_ushort(pathData.w); +#endif + + // Fix the tessellation vertex if we fetched the wrong one in order to guarantee we got the + // correct contour ID and flags, or if we belong to a mirrored contour and this vertex has an + // alternate position when mirrored. + uint mirroredContourFlag = contourIDWithFlags & MIRRORED_CONTOUR_CONTOUR_FLAG; + if (mirroredContourFlag != 0u) + { + localVertexID = int(mirroredVertexData.x); + outset = mirroredVertexData.y; + fillCoverage = mirroredVertexData.z; + } + if (localVertexID != vertexIDOnContour) + { + // This can peek one vertex before or after the contour, but the tessellator guarantees + // there is always at least one padding vertex at the beginning and end of the data. + tessVertexIdx += localVertexID - vertexIDOnContour; + uint4 replacementTessVertexData = + TEXEL_FETCH(_EXPORTED_tessVertexTexture, tess_texel_coord(tessVertexIdx)); + if ((replacementTessVertexData.w & 0xffffu) != (contourIDWithFlags & 0xffffu)) + { + // We crossed over into a new contour. Either wrap to the first vertex in the contour or + // leave it clamped at the final vertex of the contour. + bool isClosed = strokeRadius == .0 || // filled + midpoint.x != .0; // explicitly closed stroke + if (isClosed) + { + tessVertexData = + TEXEL_FETCH(_EXPORTED_tessVertexTexture, tess_texel_coord(int(vertexIndex0))); + } + } + else + { + tessVertexData = replacementTessVertexData; + } + // MIRRORED_CONTOUR_CONTOUR_FLAG is not preserved at vertexIndex0. Preserve it here. By not + // preserving this flag, the normal and mirrored contour can both share the same contour + // record. + contourIDWithFlags = tessVertexData.w | mirroredContourFlag; + } + + // Finish unpacking tessVertexData. + float theta = uintBitsToFloat(tessVertexData.z); + float2 norm = float2(sin(theta), -cos(theta)); + float2 origin = uintBitsToFloat(tessVertexData.xy); + float2 postTransformVertexOffset; + + if (strokeRadius != .0) // Is this a stroke? + { + // Ensure strokes always emit clockwise triangles. + outset *= sign(determinant(M)); + + // Joins only emanate from the outer side of the stroke.
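+ // (That is, the clamps below collapse one side of the patch: a left join keeps only outset <= 0 + // and a right join only outset >= 0, pinning the discarded side onto the stroke's center line.)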
+ if ((contourIDWithFlags & LEFT_JOIN_CONTOUR_FLAG) != 0u) + outset = min(outset, .0); + if ((contourIDWithFlags & RIGHT_JOIN_CONTOUR_FLAG) != 0u) + outset = max(outset, .0); + + float aaRadius = manhattan_pixel_width(M, norm) * AA_RADIUS; + half globalCoverage = 1.; + if (aaRadius > strokeRadius) + { + // The stroke is narrower than the AA ramp. Instead of emitting subpixel geometry, make + // the stroke as wide as the AA ramp and apply a global coverage multiplier. + globalCoverage = cast_float_to_half(strokeRadius) / cast_float_to_half(aaRadius); + strokeRadius = aaRadius; + } + + // Extend the vertex by half the width of the AA ramp. + float2 vertexOffset = MUL(norm, strokeRadius + aaRadius); // Bloat stroke width for AA. + +#ifndef _EXPORTED_USING_DEPTH_STENCIL + // Calculate the AA distance to both the outset and inset edges of the stroke. The fragment + // shader will use whichever is lesser. + float x = outset * (strokeRadius + aaRadius); + o_edgeDistance = + cast_float2_to_half2((1. / (aaRadius * 2.)) * (float2(x, -x) + strokeRadius) + .5); +#endif + + uint joinType = contourIDWithFlags & JOIN_TYPE_MASK; + if (joinType != 0u) + { + // This vertex belongs to a miter or bevel join. Begin by finding the bisector, which is + // the same as the miter line. The first two vertices in the join peek forward to figure + // out the bisector, and the final two peek backward. + int peekDir = 2; + if ((contourIDWithFlags & JOIN_TANGENT_0_CONTOUR_FLAG) == 0u) + peekDir = -peekDir; + if ((contourIDWithFlags & MIRRORED_CONTOUR_CONTOUR_FLAG) != 0u) + peekDir = -peekDir; + int2 otherJoinTexelCoord = tess_texel_coord(tessVertexIdx + peekDir); + uint4 otherJoinData = TEXEL_FETCH(_EXPORTED_tessVertexTexture, otherJoinTexelCoord); + float otherJoinTheta = uintBitsToFloat(otherJoinData.z); + float joinAngle = abs(otherJoinTheta - theta); + if (joinAngle > PI) + joinAngle = 2. * PI - joinAngle; + bool isTan0 = (contourIDWithFlags & JOIN_TANGENT_0_CONTOUR_FLAG) != 0u; + bool isLeftJoin = (contourIDWithFlags & LEFT_JOIN_CONTOUR_FLAG) != 0u; + float bisectTheta = joinAngle * (isTan0 == isLeftJoin ? -.5 : .5) + theta; + float2 bisector = float2(sin(bisectTheta), -cos(bisectTheta)); + float bisectPixelWidth = manhattan_pixel_width(M, bisector); + + // Generalize everything to a "miter-clip", which is proposed in the SVG-2 draft. Bevel + // joins are converted to miter-clip joins with a miter limit of 1/2 pixel. They + // technically bleed out 1/2 pixel when drawn this way, but they seem to look fine and + // there is not an obvious solution to antialias them without an ink bleed. + float miterRatio = cos(joinAngle * .5); + float clipRadius; + if ((joinType == MITER_CLIP_JOIN_CONTOUR_FLAG) || + (joinType == MITER_REVERT_JOIN_CONTOUR_FLAG && miterRatio >= .25)) + { + // Miter! (Or square cap.) + // We currently use hard coded miter limits: + // * 1 for square caps being emulated as miter-clip joins. + // * 4, which is the SVG default, for all other miter joins. + float miterInverseLimit = + (contourIDWithFlags & EMULATED_STROKE_CAP_CONTOUR_FLAG) != 0u ? 1. : .25; + clipRadius = strokeRadius * (1. / max(miterRatio, miterInverseLimit)); + } + else + { + // Bevel! (Or butt cap.) + clipRadius = strokeRadius * miterRatio + /* 1/2px bleed! */ bisectPixelWidth * .5; + } + float clipAARadius = clipRadius + bisectPixelWidth * AA_RADIUS; + if ((contourIDWithFlags & JOIN_TANGENT_INNER_CONTOUR_FLAG) != 0u) + { + // Reposition the inner join vertices at the miter-clip positions. 
Leave the outer + // join vertices as duplicates on the surrounding curve endpoints. We emit duplicate + // vertex positions because we need a hard stop on the clip distance (see below). + // + // Use aaRadius here because we're tracking AA on the mitered edge, NOT the outer + // clip edge. + float strokeAARadius = strokeRadius + aaRadius; + // clipAARadius must be 1/16 of an AA ramp (~1/16 pixel) longer than the miter + // length before we start clipping, to ensure we are solving for a numerically + // stable intersection. + float slop = aaRadius * .125; + if (strokeAARadius <= clipAARadius * miterRatio + slop) + { + // The miter point is before the clip line. Extend out to the miter point. + float miterAARadius = strokeAARadius * (1. / miterRatio); + vertexOffset = bisector * miterAARadius; + } + else + { + // The clip line is before the miter point. Find where the clip line and the + // mitered edge intersect. + float2 bisectAAOffset = bisector * clipAARadius; + float2 k = float2(dot(vertexOffset, vertexOffset), + dot(bisectAAOffset, bisectAAOffset)); + vertexOffset = MUL(k, inverse(float2x2(vertexOffset, bisectAAOffset))); + } + } + // The clip distance tells us how to antialias the outer clipped edge. Since joins only + // emanate from the outset side of the stroke, we can repurpose the inset distance as + // the clip distance. + float2 pt = abs(outset) * vertexOffset; + float clipDistance = + (clipAARadius - dot(pt, bisector)) / (bisectPixelWidth * (AA_RADIUS * 2.)); +#ifndef _EXPORTED_USING_DEPTH_STENCIL + if ((contourIDWithFlags & LEFT_JOIN_CONTOUR_FLAG) != 0u) + o_edgeDistance.y = cast_float_to_half(clipDistance); + else + o_edgeDistance.x = cast_float_to_half(clipDistance); +#endif + } + +#ifndef _EXPORTED_USING_DEPTH_STENCIL + o_edgeDistance *= globalCoverage; + + // Bias o_edgeDistance.y slightly upwards in order to guarantee o_edgeDistance.y is >= 0 at + // every pixel. "o_edgeDistance.y < 0" is used to differentiate between strokes and fills. + o_edgeDistance.y = max(o_edgeDistance.y, make_half(1e-4)); +#endif + + postTransformVertexOffset = MUL(M, outset * vertexOffset); + + // Throw away the fan triangles since we're a stroke. + if (vertexType != STROKE_VERTEX) + return false; + } + else // This is a fill. + { + // Place the fan point. + if (vertexType == FAN_MIDPOINT_VERTEX) + origin = midpoint; + + // Offset the vertex for Manhattan AA. + postTransformVertexOffset = sign(MUL(outset * norm, inverse(M))) * AA_RADIUS; + + if ((contourIDWithFlags & MIRRORED_CONTOUR_CONTOUR_FLAG) != 0u) + fillCoverage = -fillCoverage; + +#ifndef _EXPORTED_USING_DEPTH_STENCIL + // "o_edgeDistance.y < 0" indicates to the fragment shader that this is a fill. + o_edgeDistance = make_half2(fillCoverage, -1.); +#endif + + // If we're actually just drawing a triangle, throw away the entire patch except a single + // fan triangle.
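+ // (RETROFITTED_TRIANGLE appears to mark contours simple enough to be drawn as plain triangles; + // when it is set, every patch vertex except the FAN_VERTEX corners bails out below.)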
+ if ((contourIDWithFlags & RETROFITTED_TRIANGLE_CONTOUR_FLAG) != 0u && + vertexType != FAN_VERTEX) + return false; + } + + o_vertexPosition = MUL(M, origin) + postTransformVertexOffset + translate; + return true; +} +#endif // @DRAW_PATH + +#ifdef _EXPORTED_DRAW_INTERIOR_TRIANGLES +INLINE float2 unpack_interior_triangle_vertex(float3 triangleVertex, + OUT(ushort) o_pathID, + OUT(half) o_windingWeight VERTEX_CONTEXT_DECL) +{ + o_pathID = cast_uint_to_ushort(floatBitsToUint(triangleVertex.z) & 0xffffu); + float2x2 M = make_float2x2(uintBitsToFloat(STORAGE_BUFFER_LOAD4(_EXPORTED_pathBuffer, o_pathID * 2u))); + uint4 pathData = STORAGE_BUFFER_LOAD4(_EXPORTED_pathBuffer, o_pathID * 2u + 1u); + float2 translate = uintBitsToFloat(pathData.xy); + o_windingWeight = + cast_int_to_half(floatBitsToInt(triangleVertex.z) >> 16) * sign(determinant(M)); + return MUL(M, triangleVertex.xy) + translate; +} +#endif // @DRAW_INTERIOR_TRIANGLES + +#endif // @VERTEX +)==="; +} // namespace glsl +} // namespace gpu +} // namespace rive \ No newline at end of file diff --git a/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/draw_path_common.minified.ush b/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/draw_path_common.minified.ush new file mode 100644 index 00000000..52991645 --- /dev/null +++ b/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/draw_path_common.minified.ush @@ -0,0 +1,301 @@ +/* + * Copyright 2023 Rive + */ + +// Common functions shared by draw shaders. + +#ifdef VERTEX + +VERTEX_TEXTURE_BLOCK_BEGIN +TEXTURE_RGBA32UI(PER_FLUSH_BINDINGS_SET, TESS_VERTEX_TEXTURE_IDX, _EXPORTED_tessVertexTexture); +VERTEX_TEXTURE_BLOCK_END + +VERTEX_STORAGE_BUFFER_BLOCK_BEGIN +STORAGE_BUFFER_U32x4(PATH_BUFFER_IDX, PathBuffer, _EXPORTED_pathBuffer); +STORAGE_BUFFER_U32x2(PAINT_BUFFER_IDX, PaintBuffer, _EXPORTED_paintBuffer); +STORAGE_BUFFER_F32x4(PAINT_AUX_BUFFER_IDX, PaintAuxBuffer, _EXPORTED_paintAuxBuffer); +STORAGE_BUFFER_U32x4(CONTOUR_BUFFER_IDX, ContourBuffer, _EXPORTED_contourBuffer); +VERTEX_STORAGE_BUFFER_BLOCK_END + +#ifdef DRAW_PATH +INLINE int2 tess_texel_coord(int texelIndex) +{ + return int2(texelIndex & ((1 << TESS_TEXTURE_WIDTH_LOG2) - 1), + texelIndex >> TESS_TEXTURE_WIDTH_LOG2); +} + +INLINE float manhattan_pixel_width(float2x2 M, float2 normalized) +{ + + float2 v = MUL(M, normalized); + return (abs(v.x) + abs(v.y)) * (1. / dot(v, v)); +} + +INLINE bool unpack_tessellated_path_vertex(float4 patchVertexData, + float4 mirroredVertexData, + int _instanceID, + OUT(ushort) o_pathID, + OUT(float2) o_vertexPosition +#ifndef USING_DEPTH_STENCIL + , + OUT(half2) o_edgeDistance +#else + , + OUT(ushort) o_pathZIndex +#endif + VERTEX_CONTEXT_DECL) +{ + // Unpack patchVertexData. + int localVertexID = int(patchVertexData.x); + float outset = patchVertexData.y; + float fillCoverage = patchVertexData.z; + int patchSegmentSpan = floatBitsToInt(patchVertexData.w) >> 2; + int vertexType = floatBitsToInt(patchVertexData.w) & 3; + + // Fetch a vertex that definitely belongs to the contour we're drawing. + int vertexIDOnContour = min(localVertexID, patchSegmentSpan - 1); + int tessVertexIdx = _instanceID * patchSegmentSpan + vertexIDOnContour; + uint4 tessVertexData = TEXEL_FETCH(_EXPORTED_tessVertexTexture, tess_texel_coord(tessVertexIdx)); + uint contourIDWithFlags = tessVertexData.w; + + // Fetch and unpack the contour referenced by the tessellation vertex. 
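+ // (Record layout, per the unpacking below: contourData.xy holds the contour midpoint's float + // bits, the low 16 bits of .z the owning path ID, and .w the contour's first tessellation + // vertex index.)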
+ uint4 contourData = STORAGE_BUFFER_LOAD4(_EXPORTED_contourBuffer, contour_data_idx(contourIDWithFlags)); + float2 midpoint = uintBitsToFloat(contourData.xy); + o_pathID = cast_uint_to_ushort(contourData.z & 0xffffu); + uint vertexIndex0 = contourData.w; + + // Fetch and unpack the path. + float2x2 M = make_float2x2(uintBitsToFloat(STORAGE_BUFFER_LOAD4(_EXPORTED_pathBuffer, o_pathID * 2u))); + uint4 pathData = STORAGE_BUFFER_LOAD4(_EXPORTED_pathBuffer, o_pathID * 2u + 1u); + float2 translate = uintBitsToFloat(pathData.xy); + + float strokeRadius = uintBitsToFloat(pathData.z); +#ifdef USING_DEPTH_STENCIL + o_pathZIndex = cast_uint_to_ushort(pathData.w); +#endif + + // Fix the tessellation vertex if we fetched the wrong one in order to guarantee we got the + // correct contour ID and flags, or if we belong to a mirrored contour and this vertex has an + // alternate position when mirrored. + uint mirroredContourFlag = contourIDWithFlags & MIRRORED_CONTOUR_CONTOUR_FLAG; + if (mirroredContourFlag != 0u) + { + localVertexID = int(mirroredVertexData.x); + outset = mirroredVertexData.y; + fillCoverage = mirroredVertexData.z; + } + if (localVertexID != vertexIDOnContour) + { + // This can peek one vertex before or after the contour, but the tessellator guarantees + // there is always at least one padding vertex at the beginning and end of the data. + tessVertexIdx += localVertexID - vertexIDOnContour; + uint4 replacementTessVertexData = + TEXEL_FETCH(_EXPORTED_tessVertexTexture, tess_texel_coord(tessVertexIdx)); + if ((replacementTessVertexData.w & 0xffffu) != (contourIDWithFlags & 0xffffu)) + { + // We crossed over into a new contour. Either wrap to the first vertex in the contour or + // leave it clamped at the final vertex of the contour. + bool isClosed = strokeRadius == .0 || // filled + midpoint.x != .0; // explicitly closed stroke + if (isClosed) + { + tessVertexData = + TEXEL_FETCH(_EXPORTED_tessVertexTexture, tess_texel_coord(int(vertexIndex0))); + } + } + else + { + tessVertexData = replacementTessVertexData; + } + // MIRRORED_CONTOUR_CONTOUR_FLAG is not preserved at vertexIndex0. Preserve it here. By not + // preserving this flag, the normal and mirrored contour can both share the same contour + // record. + contourIDWithFlags = tessVertexData.w | mirroredContourFlag; + } + + // Finish unpacking tessVertexData. + float theta = uintBitsToFloat(tessVertexData.z); + float2 norm = float2(sin(theta), -cos(theta)); + float2 origin = uintBitsToFloat(tessVertexData.xy); + float2 postTransformVertexOffset; + + if (strokeRadius != .0) // Is this a stroke? + { + // Ensure strokes always emit clockwise triangles. + outset *= sign(determinant(M)); + + // Joins only emanate from the outer side of the stroke. + if ((contourIDWithFlags & LEFT_JOIN_CONTOUR_FLAG) != 0u) + outset = min(outset, .0); + if ((contourIDWithFlags & RIGHT_JOIN_CONTOUR_FLAG) != 0u) + outset = max(outset, .0); + + float aaRadius = manhattan_pixel_width(M, norm) * AA_RADIUS; + half globalCoverage = 1.; + if (aaRadius > strokeRadius) + { + // The stroke is narrower than the AA ramp. Instead of emitting subpixel geometry, make + // the stroke as wide as the AA ramp and apply a global coverage multiplier. + globalCoverage = cast_float_to_half(strokeRadius) / cast_float_to_half(aaRadius); + strokeRadius = aaRadius; + } + + // Extend the vertex by half the width of the AA ramp. + float2 vertexOffset = MUL(norm, strokeRadius + aaRadius); // Bloat stroke width for AA.
+ +#ifndef USING_DEPTH_STENCIL + // Calculate the AA distance to both the outset and inset edges of the stroke. The fragment + // shader will use whichever is lesser. + float x = outset * (strokeRadius + aaRadius); + o_edgeDistance = + cast_float2_to_half2((1. / (aaRadius * 2.)) * (float2(x, -x) + strokeRadius) + .5); +#endif + + uint joinType = contourIDWithFlags & JOIN_TYPE_MASK; + if (joinType != 0u) + { + // This vertex belongs to a miter or bevel join. Begin by finding the bisector, which is + // the same as the miter line. The first two vertices in the join peek forward to figure + // out the bisector, and the final two peek backward. + int peekDir = 2; + if ((contourIDWithFlags & JOIN_TANGENT_0_CONTOUR_FLAG) == 0u) + peekDir = -peekDir; + if ((contourIDWithFlags & MIRRORED_CONTOUR_CONTOUR_FLAG) != 0u) + peekDir = -peekDir; + int2 otherJoinTexelCoord = tess_texel_coord(tessVertexIdx + peekDir); + uint4 otherJoinData = TEXEL_FETCH(_EXPORTED_tessVertexTexture, otherJoinTexelCoord); + float otherJoinTheta = uintBitsToFloat(otherJoinData.z); + float joinAngle = abs(otherJoinTheta - theta); + if (joinAngle > PI) + joinAngle = 2. * PI - joinAngle; + bool isTan0 = (contourIDWithFlags & JOIN_TANGENT_0_CONTOUR_FLAG) != 0u; + bool isLeftJoin = (contourIDWithFlags & LEFT_JOIN_CONTOUR_FLAG) != 0u; + float bisectTheta = joinAngle * (isTan0 == isLeftJoin ? -.5 : .5) + theta; + float2 bisector = float2(sin(bisectTheta), -cos(bisectTheta)); + float bisectPixelWidth = manhattan_pixel_width(M, bisector); + + // Generalize everything to a "miter-clip", which is proposed in the SVG-2 draft. Bevel + // joins are converted to miter-clip joins with a miter limit of 1/2 pixel. They + // technically bleed out 1/2 pixel when drawn this way, but they seem to look fine and + // there is not an obvious solution to antialias them without an ink bleed. + float miterRatio = cos(joinAngle * .5); + float clipRadius; + if ((joinType == MITER_CLIP_JOIN_CONTOUR_FLAG) || + (joinType == MITER_REVERT_JOIN_CONTOUR_FLAG && miterRatio >= .25)) + { + // Miter! (Or square cap.) + // We currently use hard coded miter limits: + // * 1 for square caps being emulated as miter-clip joins. + // * 4, which is the SVG default, for all other miter joins. + float miterInverseLimit = + (contourIDWithFlags & EMULATED_STROKE_CAP_CONTOUR_FLAG) != 0u ? 1. : .25; + clipRadius = strokeRadius * (1. / max(miterRatio, miterInverseLimit)); + } + else + { + // Bevel! (Or butt cap.) + clipRadius = strokeRadius * miterRatio + /* 1/2px bleed! */ bisectPixelWidth * .5; + } + float clipAARadius = clipRadius + bisectPixelWidth * AA_RADIUS; + if ((contourIDWithFlags & JOIN_TANGENT_INNER_CONTOUR_FLAG) != 0u) + { + // Reposition the inner join vertices at the miter-clip positions. Leave the outer + // join vertices as duplicates on the surrounding curve endpoints. We emit duplicate + // vertex positions because we need a hard stop on the clip distance (see below). + // + // Use aaRadius here because we're tracking AA on the mitered edge, NOT the outer + // clip edge. + float strokeAARadius = strokeRadius + aaRadius; + // clipAARadius must be 1/16 of an AA ramp (~1/16 pixel) longer than the miter + // length before we start clipping, to ensure we are solving for a numerically + // stable intersection. + float slop = aaRadius * .125; + if (strokeAARadius <= clipAARadius * miterRatio + slop) + { + // The miter point is before the clip line. Extend out to the miter point. + float miterAARadius = strokeAARadius * (1.
/ miterRatio); + vertexOffset = bisector * miterAARadius; + } + else + { + // The clip line is before the miter point. Find where the clip line and the + // mitered edge intersect. + float2 bisectAAOffset = bisector * clipAARadius; + float2 k = float2(dot(vertexOffset, vertexOffset), + dot(bisectAAOffset, bisectAAOffset)); + vertexOffset = MUL(k, inverse(float2x2(vertexOffset, bisectAAOffset))); + } + } + // The clip distance tells us how to antialias the outer clipped edge. Since joins only + // emanate from the outset side of the stroke, we can repurpose the inset distance as + // the clip distance. + float2 pt = abs(outset) * vertexOffset; + float clipDistance = + (clipAARadius - dot(pt, bisector)) / (bisectPixelWidth * (AA_RADIUS * 2.)); +#ifndef USING_DEPTH_STENCIL + if ((contourIDWithFlags & LEFT_JOIN_CONTOUR_FLAG) != 0u) + o_edgeDistance.y = cast_float_to_half(clipDistance); + else + o_edgeDistance.x = cast_float_to_half(clipDistance); +#endif + } + +#ifndef USING_DEPTH_STENCIL + o_edgeDistance *= globalCoverage; + + // Bias o_edgeDistance.y slightly upwards in order to guarantee o_edgeDistance.y is >= 0 at + // every pixel. "o_edgeDistance.y < 0" is used to differentiate between strokes and fills. + o_edgeDistance.y = max(o_edgeDistance.y, make_half(1e-4)); +#endif + + postTransformVertexOffset = MUL(M, outset * vertexOffset); + + // Throw away the fan triangles since we're a stroke. + if (vertexType != STROKE_VERTEX) + return false; + } + else // This is a fill. + { + // Place the fan point. + if (vertexType == FAN_MIDPOINT_VERTEX) + origin = midpoint; + + // Offset the vertex for Manhattan AA. + postTransformVertexOffset = sign(MUL(outset * norm, inverse(M))) * AA_RADIUS; + + if ((contourIDWithFlags & MIRRORED_CONTOUR_CONTOUR_FLAG) != 0u) + fillCoverage = -fillCoverage; + +#ifndef USING_DEPTH_STENCIL + // "o_edgeDistance.y < 0" indicates to the fragment shader that this is a fill. + o_edgeDistance = make_half2(fillCoverage, -1.); +#endif + + // If we're actually just drawing a triangle, throw away the entire patch except a single + // fan triangle. 
+ if ((contourIDWithFlags & RETROFITTED_TRIANGLE_CONTOUR_FLAG) != 0u && + vertexType != FAN_VERTEX) + return false; + } + + o_vertexPosition = MUL(M, origin) + postTransformVertexOffset + translate; + return true; +} +#endif // @DRAW_PATH + +#ifdef DRAW_INTERIOR_TRIANGLES +INLINE float2 unpack_interior_triangle_vertex(float3 triangleVertex, + OUT(ushort) o_pathID, + OUT(half) o_windingWeight VERTEX_CONTEXT_DECL) +{ + o_pathID = cast_uint_to_ushort(floatBitsToUint(triangleVertex.z) & 0xffffu); + float2x2 M = make_float2x2(uintBitsToFloat(STORAGE_BUFFER_LOAD4(_EXPORTED_pathBuffer, o_pathID * 2u))); + uint4 pathData = STORAGE_BUFFER_LOAD4(_EXPORTED_pathBuffer, o_pathID * 2u + 1u); + float2 translate = uintBitsToFloat(pathData.xy); + o_windingWeight = + cast_int_to_half(floatBitsToInt(triangleVertex.z) >> 16) * sign(determinant(M)); + return MUL(M, triangleVertex.xy) + translate; +} +#endif // @DRAW_INTERIOR_TRIANGLES + +#endif // @VERTEX diff --git a/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/glsl.exports.h b/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/glsl.exports.h new file mode 100644 index 00000000..2d88d890 --- /dev/null +++ b/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/glsl.exports.h @@ -0,0 +1,178 @@ +#pragma once + +#define GLSL_CLEAR_CLIP "_EXPORTED_CLEAR_CLIP" +#define GLSL_CLEAR_CLIP_raw _EXPORTED_CLEAR_CLIP +#define GLSL_CLEAR_COLOR "_EXPORTED_CLEAR_COLOR" +#define GLSL_CLEAR_COLOR_raw _EXPORTED_CLEAR_COLOR +#define GLSL_CLEAR_COVERAGE "_EXPORTED_CLEAR_COVERAGE" +#define GLSL_CLEAR_COVERAGE_raw _EXPORTED_CLEAR_COVERAGE +#define GLSL_COALESCED_PLS_RESOLVE_AND_TRANSFER "_EXPORTED_COALESCED_PLS_RESOLVE_AND_TRANSFER" +#define GLSL_COALESCED_PLS_RESOLVE_AND_TRANSFER_raw _EXPORTED_COALESCED_PLS_RESOLVE_AND_TRANSFER +#define GLSL_COLOR_PLANE_IDX_OVERRIDE "_EXPORTED_COLOR_PLANE_IDX_OVERRIDE" +#define GLSL_COLOR_PLANE_IDX_OVERRIDE_raw _EXPORTED_COLOR_PLANE_IDX_OVERRIDE +#define GLSL_DISABLE_SHADER_STORAGE_BUFFERS "_EXPORTED_DISABLE_SHADER_STORAGE_BUFFERS" +#define GLSL_DISABLE_SHADER_STORAGE_BUFFERS_raw _EXPORTED_DISABLE_SHADER_STORAGE_BUFFERS +#define GLSL_DRAW_IMAGE "_EXPORTED_DRAW_IMAGE" +#define GLSL_DRAW_IMAGE_raw _EXPORTED_DRAW_IMAGE +#define GLSL_DRAW_IMAGE_MESH "_EXPORTED_DRAW_IMAGE_MESH" +#define GLSL_DRAW_IMAGE_MESH_raw _EXPORTED_DRAW_IMAGE_MESH +#define GLSL_DRAW_IMAGE_RECT "_EXPORTED_DRAW_IMAGE_RECT" +#define GLSL_DRAW_IMAGE_RECT_raw _EXPORTED_DRAW_IMAGE_RECT +#define GLSL_DRAW_INTERIOR_TRIANGLES "_EXPORTED_DRAW_INTERIOR_TRIANGLES" +#define GLSL_DRAW_INTERIOR_TRIANGLES_raw _EXPORTED_DRAW_INTERIOR_TRIANGLES +#define GLSL_DRAW_PATH "_EXPORTED_DRAW_PATH" +#define GLSL_DRAW_PATH_raw _EXPORTED_DRAW_PATH +#define GLSL_DRAW_RENDER_TARGET_UPDATE_BOUNDS "_EXPORTED_DRAW_RENDER_TARGET_UPDATE_BOUNDS" +#define GLSL_DRAW_RENDER_TARGET_UPDATE_BOUNDS_raw _EXPORTED_DRAW_RENDER_TARGET_UPDATE_BOUNDS +#define GLSL_ENABLE_ADVANCED_BLEND "_EXPORTED_ENABLE_ADVANCED_BLEND" +#define GLSL_ENABLE_ADVANCED_BLEND_raw _EXPORTED_ENABLE_ADVANCED_BLEND +#define GLSL_ENABLE_BINDLESS_TEXTURES "_EXPORTED_ENABLE_BINDLESS_TEXTURES" +#define GLSL_ENABLE_BINDLESS_TEXTURES_raw _EXPORTED_ENABLE_BINDLESS_TEXTURES +#define GLSL_ENABLE_CLIPPING "_EXPORTED_ENABLE_CLIPPING" +#define GLSL_ENABLE_CLIPPING_raw _EXPORTED_ENABLE_CLIPPING +#define GLSL_ENABLE_CLIP_RECT "_EXPORTED_ENABLE_CLIP_RECT" +#define GLSL_ENABLE_CLIP_RECT_raw _EXPORTED_ENABLE_CLIP_RECT +#define GLSL_ENABLE_EVEN_ODD "_EXPORTED_ENABLE_EVEN_ODD" +#define 
GLSL_ENABLE_EVEN_ODD_raw _EXPORTED_ENABLE_EVEN_ODD +#define GLSL_ENABLE_HSL_BLEND_MODES "_EXPORTED_ENABLE_HSL_BLEND_MODES" +#define GLSL_ENABLE_HSL_BLEND_MODES_raw _EXPORTED_ENABLE_HSL_BLEND_MODES +#define GLSL_ENABLE_INSTANCE_INDEX "_EXPORTED_ENABLE_INSTANCE_INDEX" +#define GLSL_ENABLE_INSTANCE_INDEX_raw _EXPORTED_ENABLE_INSTANCE_INDEX +#define GLSL_ENABLE_KHR_BLEND "_EXPORTED_ENABLE_KHR_BLEND" +#define GLSL_ENABLE_KHR_BLEND_raw _EXPORTED_ENABLE_KHR_BLEND +#define GLSL_ENABLE_MIN_16_PRECISION "_EXPORTED_ENABLE_MIN_16_PRECISION" +#define GLSL_ENABLE_MIN_16_PRECISION_raw _EXPORTED_ENABLE_MIN_16_PRECISION +#define GLSL_ENABLE_NESTED_CLIPPING "_EXPORTED_ENABLE_NESTED_CLIPPING" +#define GLSL_ENABLE_NESTED_CLIPPING_raw _EXPORTED_ENABLE_NESTED_CLIPPING +#define GLSL_ENABLE_RASTERIZER_ORDERED_VIEWS "_EXPORTED_ENABLE_RASTERIZER_ORDERED_VIEWS" +#define GLSL_ENABLE_RASTERIZER_ORDERED_VIEWS_raw _EXPORTED_ENABLE_RASTERIZER_ORDERED_VIEWS +#define GLSL_ENABLE_SPIRV_CROSS_BASE_INSTANCE "_EXPORTED_ENABLE_SPIRV_CROSS_BASE_INSTANCE" +#define GLSL_ENABLE_SPIRV_CROSS_BASE_INSTANCE_raw _EXPORTED_ENABLE_SPIRV_CROSS_BASE_INSTANCE +#define GLSL_ENABLE_TYPED_UAV_LOAD_STORE "_EXPORTED_ENABLE_TYPED_UAV_LOAD_STORE" +#define GLSL_ENABLE_TYPED_UAV_LOAD_STORE_raw _EXPORTED_ENABLE_TYPED_UAV_LOAD_STORE +#define GLSL_FIXED_FUNCTION_COLOR_BLEND "_EXPORTED_FIXED_FUNCTION_COLOR_BLEND" +#define GLSL_FIXED_FUNCTION_COLOR_BLEND_raw _EXPORTED_FIXED_FUNCTION_COLOR_BLEND +#define GLSL_FRAGMENT "_EXPORTED_FRAGMENT" +#define GLSL_FRAGMENT_raw _EXPORTED_FRAGMENT +#define GLSL_FlushUniforms "_EXPORTED_FlushUniforms" +#define GLSL_FlushUniforms_raw _EXPORTED_FlushUniforms +#define GLSL_GLSL_VERSION "_EXPORTED_GLSL_VERSION" +#define GLSL_GLSL_VERSION_raw _EXPORTED_GLSL_VERSION +#define GLSL_INITIALIZE_PLS "_EXPORTED_INITIALIZE_PLS" +#define GLSL_INITIALIZE_PLS_raw _EXPORTED_INITIALIZE_PLS +#define GLSL_ImageDrawUniforms "_EXPORTED_ImageDrawUniforms" +#define GLSL_ImageDrawUniforms_raw _EXPORTED_ImageDrawUniforms +#define GLSL_LOAD_COLOR "_EXPORTED_LOAD_COLOR" +#define GLSL_LOAD_COLOR_raw _EXPORTED_LOAD_COLOR +#define GLSL_OPTIONALLY_FLAT "_EXPORTED_OPTIONALLY_FLAT" +#define GLSL_OPTIONALLY_FLAT_raw _EXPORTED_OPTIONALLY_FLAT +#define GLSL_PLS_IMPL_ANGLE "_EXPORTED_PLS_IMPL_ANGLE" +#define GLSL_PLS_IMPL_ANGLE_raw _EXPORTED_PLS_IMPL_ANGLE +#define GLSL_PLS_IMPL_DEVICE_BUFFER "_EXPORTED_PLS_IMPL_DEVICE_BUFFER" +#define GLSL_PLS_IMPL_DEVICE_BUFFER_raw _EXPORTED_PLS_IMPL_DEVICE_BUFFER +#define GLSL_PLS_IMPL_DEVICE_BUFFER_RASTER_ORDERED "_EXPORTED_PLS_IMPL_DEVICE_BUFFER_RASTER_ORDERED" +#define GLSL_PLS_IMPL_DEVICE_BUFFER_RASTER_ORDERED_raw _EXPORTED_PLS_IMPL_DEVICE_BUFFER_RASTER_ORDERED +#define GLSL_PLS_IMPL_EXT_NATIVE "_EXPORTED_PLS_IMPL_EXT_NATIVE" +#define GLSL_PLS_IMPL_EXT_NATIVE_raw _EXPORTED_PLS_IMPL_EXT_NATIVE +#define GLSL_PLS_IMPL_FRAMEBUFFER_FETCH "_EXPORTED_PLS_IMPL_FRAMEBUFFER_FETCH" +#define GLSL_PLS_IMPL_FRAMEBUFFER_FETCH_raw _EXPORTED_PLS_IMPL_FRAMEBUFFER_FETCH +#define GLSL_PLS_IMPL_NONE "_EXPORTED_PLS_IMPL_NONE" +#define GLSL_PLS_IMPL_NONE_raw _EXPORTED_PLS_IMPL_NONE +#define GLSL_PLS_IMPL_STORAGE_TEXTURE "_EXPORTED_PLS_IMPL_STORAGE_TEXTURE" +#define GLSL_PLS_IMPL_STORAGE_TEXTURE_raw _EXPORTED_PLS_IMPL_STORAGE_TEXTURE +#define GLSL_PLS_IMPL_SUBPASS_LOAD "_EXPORTED_PLS_IMPL_SUBPASS_LOAD" +#define GLSL_PLS_IMPL_SUBPASS_LOAD_raw _EXPORTED_PLS_IMPL_SUBPASS_LOAD +#define GLSL_RESOLVE_PLS "_EXPORTED_RESOLVE_PLS" +#define GLSL_RESOLVE_PLS_raw _EXPORTED_RESOLVE_PLS +#define GLSL_STORE_COLOR "_EXPORTED_STORE_COLOR" +#define 
GLSL_STORE_COLOR_raw _EXPORTED_STORE_COLOR +#define GLSL_STORE_COLOR_CLEAR "_EXPORTED_STORE_COLOR_CLEAR" +#define GLSL_STORE_COLOR_CLEAR_raw _EXPORTED_STORE_COLOR_CLEAR +#define GLSL_SWIZZLE_COLOR_BGRA_TO_RGBA "_EXPORTED_SWIZZLE_COLOR_BGRA_TO_RGBA" +#define GLSL_SWIZZLE_COLOR_BGRA_TO_RGBA_raw _EXPORTED_SWIZZLE_COLOR_BGRA_TO_RGBA +#define GLSL_TARGET_VULKAN "_EXPORTED_TARGET_VULKAN" +#define GLSL_TARGET_VULKAN_raw _EXPORTED_TARGET_VULKAN +#define GLSL_USE_GENERATED_UNIFORMS "_EXPORTED_USE_GENERATED_UNIFORMS" +#define GLSL_USE_GENERATED_UNIFORMS_raw _EXPORTED_USE_GENERATED_UNIFORMS +#define GLSL_USING_DEPTH_STENCIL "_EXPORTED_USING_DEPTH_STENCIL" +#define GLSL_USING_DEPTH_STENCIL_raw _EXPORTED_USING_DEPTH_STENCIL +#define GLSL_USING_PLS_STORAGE_TEXTURES "_EXPORTED_USING_PLS_STORAGE_TEXTURES" +#define GLSL_USING_PLS_STORAGE_TEXTURES_raw _EXPORTED_USING_PLS_STORAGE_TEXTURES +#define GLSL_VERTEX "_EXPORTED_VERTEX" +#define GLSL_VERTEX_raw _EXPORTED_VERTEX +#define GLSL_a_args "_EXPORTED_a_args" +#define GLSL_a_args_raw _EXPORTED_a_args +#define GLSL_a_args_a "_EXPORTED_a_args_a" +#define GLSL_a_args_a_raw _EXPORTED_a_args_a +#define GLSL_a_args_b "_EXPORTED_a_args_b" +#define GLSL_a_args_b_raw _EXPORTED_a_args_b +#define GLSL_a_args_c "_EXPORTED_a_args_c" +#define GLSL_a_args_c_raw _EXPORTED_a_args_c +#define GLSL_a_args_d "_EXPORTED_a_args_d" +#define GLSL_a_args_d_raw _EXPORTED_a_args_d +#define GLSL_a_imageRectVertex "_EXPORTED_a_imageRectVertex" +#define GLSL_a_imageRectVertex_raw _EXPORTED_a_imageRectVertex +#define GLSL_a_joinTan_and_ys "_EXPORTED_a_joinTan_and_ys" +#define GLSL_a_joinTan_and_ys_raw _EXPORTED_a_joinTan_and_ys +#define GLSL_a_mirroredVertexData "_EXPORTED_a_mirroredVertexData" +#define GLSL_a_mirroredVertexData_raw _EXPORTED_a_mirroredVertexData +#define GLSL_a_p0p1_ "_EXPORTED_a_p0p1_" +#define GLSL_a_p0p1__raw _EXPORTED_a_p0p1_ +#define GLSL_a_p2p3_ "_EXPORTED_a_p2p3_" +#define GLSL_a_p2p3__raw _EXPORTED_a_p2p3_ +#define GLSL_a_patchVertexData "_EXPORTED_a_patchVertexData" +#define GLSL_a_patchVertexData_raw _EXPORTED_a_patchVertexData +#define GLSL_a_position "_EXPORTED_a_position" +#define GLSL_a_position_raw _EXPORTED_a_position +#define GLSL_a_span "_EXPORTED_a_span" +#define GLSL_a_span_raw _EXPORTED_a_span +#define GLSL_a_span_a "_EXPORTED_a_span_a" +#define GLSL_a_span_a_raw _EXPORTED_a_span_a +#define GLSL_a_span_b "_EXPORTED_a_span_b" +#define GLSL_a_span_b_raw _EXPORTED_a_span_b +#define GLSL_a_span_c "_EXPORTED_a_span_c" +#define GLSL_a_span_c_raw _EXPORTED_a_span_c +#define GLSL_a_span_d "_EXPORTED_a_span_d" +#define GLSL_a_span_d_raw _EXPORTED_a_span_d +#define GLSL_a_texCoord "_EXPORTED_a_texCoord" +#define GLSL_a_texCoord_raw _EXPORTED_a_texCoord +#define GLSL_a_triangleVertex "_EXPORTED_a_triangleVertex" +#define GLSL_a_triangleVertex_raw _EXPORTED_a_triangleVertex +#define GLSL_blitFragmentMain "_EXPORTED_blitFragmentMain" +#define GLSL_blitFragmentMain_raw _EXPORTED_blitFragmentMain +#define GLSL_blitTextureSource "_EXPORTED_blitTextureSource" +#define GLSL_blitTextureSource_raw _EXPORTED_blitTextureSource +#define GLSL_blitVertexMain "_EXPORTED_blitVertexMain" +#define GLSL_blitVertexMain_raw _EXPORTED_blitVertexMain +#define GLSL_clearColor "_EXPORTED_clearColor" +#define GLSL_clearColor_raw _EXPORTED_clearColor +#define GLSL_colorRampFragmentMain "_EXPORTED_colorRampFragmentMain" +#define GLSL_colorRampFragmentMain_raw _EXPORTED_colorRampFragmentMain +#define GLSL_colorRampVertexMain "_EXPORTED_colorRampVertexMain" +#define 
GLSL_colorRampVertexMain_raw _EXPORTED_colorRampVertexMain +#define GLSL_contourBuffer "_EXPORTED_contourBuffer" +#define GLSL_contourBuffer_raw _EXPORTED_contourBuffer +#define GLSL_drawFragmentMain "_EXPORTED_drawFragmentMain" +#define GLSL_drawFragmentMain_raw _EXPORTED_drawFragmentMain +#define GLSL_drawVertexMain "_EXPORTED_drawVertexMain" +#define GLSL_drawVertexMain_raw _EXPORTED_drawVertexMain +#define GLSL_dstColorTexture "_EXPORTED_dstColorTexture" +#define GLSL_dstColorTexture_raw _EXPORTED_dstColorTexture +#define GLSL_gradTexture "_EXPORTED_gradTexture" +#define GLSL_gradTexture_raw _EXPORTED_gradTexture +#define GLSL_imageTexture "_EXPORTED_imageTexture" +#define GLSL_imageTexture_raw _EXPORTED_imageTexture +#define GLSL_paintAuxBuffer "_EXPORTED_paintAuxBuffer" +#define GLSL_paintAuxBuffer_raw _EXPORTED_paintAuxBuffer +#define GLSL_paintBuffer "_EXPORTED_paintBuffer" +#define GLSL_paintBuffer_raw _EXPORTED_paintBuffer +#define GLSL_pathBuffer "_EXPORTED_pathBuffer" +#define GLSL_pathBuffer_raw _EXPORTED_pathBuffer +#define GLSL_stencilVertexMain "_EXPORTED_stencilVertexMain" +#define GLSL_stencilVertexMain_raw _EXPORTED_stencilVertexMain +#define GLSL_tessVertexTexture "_EXPORTED_tessVertexTexture" +#define GLSL_tessVertexTexture_raw _EXPORTED_tessVertexTexture +#define GLSL_tessellateFragmentMain "_EXPORTED_tessellateFragmentMain" +#define GLSL_tessellateFragmentMain_raw _EXPORTED_tessellateFragmentMain +#define GLSL_tessellateVertexMain "_EXPORTED_tessellateVertexMain" +#define GLSL_tessellateVertexMain_raw _EXPORTED_tessellateVertexMain diff --git a/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/glsl.glsl.hpp b/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/glsl.glsl.hpp new file mode 100644 index 00000000..075f6660 --- /dev/null +++ b/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/glsl.glsl.hpp @@ -0,0 +1,508 @@ +#pragma once + +#include "glsl.exports.h" + +namespace rive { +namespace gpu { +namespace glsl { +const char glsl[] = R"===(/* + * Copyright 2023 Rive + */ + +// This header provides GLSL-specific #defines and declarations that enable our shaders to be +// compiled on MSL and GLSL both. + +#define GLSL + +#ifndef _EXPORTED_GLSL_VERSION +// In "#version 320 es", Qualcomm incorrectly substitutes __VERSION__ to 300. @GLSL_VERSION is a +// workaround for this. 
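+// (A host that already knows which version string it compiled, e.g. "#version 320 es", could +// predefine this macro to 320 and sidestep the broken __VERSION__ substitution; the #ifndef +// fallback below only applies when it does not.)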
+#define _EXPORTED_GLSL_VERSION __VERSION__ +#endif + +#define float2 vec2 +#define float3 vec3 +#define packed_float3 vec3 +#define float4 vec4 + +#define half mediump float +#define half2 mediump vec2 +#define half3 mediump vec3 +#define half4 mediump vec4 +#define half3x4 mediump mat3x4 + +#define int2 ivec2 +#define int3 ivec3 +#define int4 ivec4 + +#define short mediump int +#define short2 mediump ivec2 +#define short3 mediump ivec3 +#define short4 mediump ivec4 + +#define uint2 uvec2 +#define uint3 uvec3 +#define uint4 uvec4 + +#define ushort mediump uint +#define ushort2 mediump uvec2 +#define ushort3 mediump uvec3 +#define ushort4 mediump uvec4 + +#define float2x2 mat2 + +#define INLINE +#define OUT(ARG_TYPE) out ARG_TYPE + +#ifdef GL_ANGLE_base_vertex_base_instance_shader_builtin +#extension GL_ANGLE_base_vertex_base_instance_shader_builtin : require +#endif + +#ifdef _EXPORTED_ENABLE_BINDLESS_TEXTURES +#extension GL_ARB_bindless_texture : require +#endif + +#ifdef _EXPORTED_ENABLE_KHR_BLEND +#extension GL_KHR_blend_equation_advanced : require +#endif + +#if defined(_EXPORTED_USING_DEPTH_STENCIL) && defined(_EXPORTED_ENABLE_CLIP_RECT) && defined(GL_ES) +#ifdef GL_EXT_clip_cull_distance +#extension GL_EXT_clip_cull_distance : require +#elif defined(GL_ANGLE_clip_cull_distance) +#extension GL_ANGLE_clip_cull_distance : require +#endif +#endif // USING_DEPTH_STENCIL && ENABLE_CLIP_RECT + +#if _EXPORTED_GLSL_VERSION >= 310 +#define UNIFORM_BLOCK_BEGIN(IDX, NAME) \ + layout(binding = IDX, std140) uniform NAME \ + { +#else +#define UNIFORM_BLOCK_BEGIN(IDX, NAME) \ + layout(std140) uniform NAME \ + { +#endif +// clang-format barrier... Otherwise it tries to merge this #define into the above macro... +#define UNIFORM_BLOCK_END(NAME) \ + } \ + NAME; + +#define ATTR_BLOCK_BEGIN(NAME) +#define ATTR(IDX, TYPE, NAME) layout(location = IDX) in TYPE NAME +#define ATTR_BLOCK_END +#define ATTR_LOAD(A, B, C, D) +#define ATTR_UNPACK(ID, attrs, NAME, TYPE) + +#ifdef _EXPORTED_VERTEX +#if _EXPORTED_GLSL_VERSION >= 310 +#define VARYING(IDX, TYPE, NAME) layout(location = IDX) out TYPE NAME +#else +#define VARYING(IDX, TYPE, NAME) out TYPE NAME +#endif +#else +#if _EXPORTED_GLSL_VERSION >= 310 +#define VARYING(IDX, TYPE, NAME) layout(location = IDX) in TYPE NAME +#else +#define VARYING(IDX, TYPE, NAME) in TYPE NAME +#endif +#endif +#define FLAT flat +#define VARYING_BLOCK_BEGIN +#define VARYING_BLOCK_END + +// clang-format off +#ifdef _EXPORTED_TARGET_VULKAN + // Since Vulkan is compiled offline and not all platforms support noperspective, don't use it. 
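+ // (noperspective requests screen-space linear interpolation; skipping it presumably costs + // nothing here, since perspective correction is an identity for the w == 1 clip positions + // these 2D draws emit.)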
+#define NO_PERSPECTIVE +#else +#ifdef GL_NV_shader_noperspective_interpolation +#extension GL_NV_shader_noperspective_interpolation : require +#define NO_PERSPECTIVE noperspective +#else +#define NO_PERSPECTIVE +#endif +#endif +// clang-format on + +#ifdef _EXPORTED_VERTEX +#define VERTEX_TEXTURE_BLOCK_BEGIN +#define VERTEX_TEXTURE_BLOCK_END +#endif + +#ifdef _EXPORTED_FRAGMENT +#define FRAG_TEXTURE_BLOCK_BEGIN +#define FRAG_TEXTURE_BLOCK_END +#endif + +#ifdef _EXPORTED_TARGET_VULKAN +#define TEXTURE_RGBA32UI(SET, IDX, NAME) \ + layout(set = SET, binding = IDX) uniform highp utexture2D NAME +#define TEXTURE_RGBA32F(SET, IDX, NAME) \ + layout(set = SET, binding = IDX) uniform highp texture2D NAME +#define TEXTURE_RGBA8(SET, IDX, NAME) \ + layout(set = SET, binding = IDX) uniform mediump texture2D NAME +#elif _EXPORTED_GLSL_VERSION >= 310 +#define TEXTURE_RGBA32UI(SET, IDX, NAME) layout(binding = IDX) uniform highp usampler2D NAME +#define TEXTURE_RGBA32F(SET, IDX, NAME) layout(binding = IDX) uniform highp sampler2D NAME +#define TEXTURE_RGBA8(SET, IDX, NAME) layout(binding = IDX) uniform mediump sampler2D NAME +#else +#define TEXTURE_RGBA32UI(SET, IDX, NAME) uniform highp usampler2D NAME +#define TEXTURE_RGBA32F(SET, IDX, NAME) uniform highp sampler2D NAME +#define TEXTURE_RGBA8(SET, IDX, NAME) uniform mediump sampler2D NAME +#endif +#define TEXTURE_RG32UI(SET, IDX, NAME) TEXTURE_RGBA32UI(SET, IDX, NAME) + +#ifdef _EXPORTED_TARGET_VULKAN +#define SAMPLER_LINEAR(TEXTURE_IDX, NAME) \ + layout(set = SAMPLER_BINDINGS_SET, binding = TEXTURE_IDX) uniform mediump sampler NAME; +#define SAMPLER_MIPMAP(TEXTURE_IDX, NAME) \ + layout(set = SAMPLER_BINDINGS_SET, binding = TEXTURE_IDX) uniform mediump sampler NAME; +#define TEXTURE_SAMPLE(NAME, SAMPLER_NAME, COORD) texture(sampler2D(NAME, SAMPLER_NAME), COORD) +#define TEXTURE_SAMPLE_LOD(NAME, SAMPLER_NAME, COORD, LOD) \ + textureLod(sampler2D(NAME, SAMPLER_NAME), COORD, LOD) +#define TEXTURE_SAMPLE_GRAD(NAME, SAMPLER_NAME, COORD, DDX, DDY) \ + textureGrad(sampler2D(NAME, SAMPLER_NAME), COORD, DDX, DDY) +#else +// SAMPLER_LINEAR and SAMPLER_MIPMAP are no-ops because in GL, sampling parameters are API-level +// state tied to the texture. 
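+// (For reference, the host would set the equivalent state once on the texture object itself, +// e.g. glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR) for SAMPLER_LINEAR, or +// GL_LINEAR_MIPMAP_LINEAR for SAMPLER_MIPMAP; illustrative only, not part of this header.)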
+#define SAMPLER_LINEAR(TEXTURE_IDX, NAME) +#define SAMPLER_MIPMAP(TEXTURE_IDX, NAME) +#define TEXTURE_SAMPLE(NAME, SAMPLER_NAME, COORD) texture(NAME, COORD) +#define TEXTURE_SAMPLE_LOD(NAME, SAMPLER_NAME, COORD, LOD) textureLod(NAME, COORD, LOD) +#define TEXTURE_SAMPLE_GRAD(NAME, SAMPLER_NAME, COORD, DDX, DDY) textureGrad(NAME, COORD, DDX, DDY) +#endif + +#define TEXEL_FETCH(NAME, COORD) texelFetch(NAME, COORD, 0) + +#define VERTEX_STORAGE_BUFFER_BLOCK_BEGIN +#define VERTEX_STORAGE_BUFFER_BLOCK_END + +#define FRAG_STORAGE_BUFFER_BLOCK_BEGIN +#define FRAG_STORAGE_BUFFER_BLOCK_END + +#ifdef _EXPORTED_DISABLE_SHADER_STORAGE_BUFFERS + +#define STORAGE_BUFFER_U32x2(IDX, GLSL_STRUCT_NAME, NAME) \ + TEXTURE_RGBA32UI(PER_FLUSH_BINDINGS_SET, IDX, NAME) +#define STORAGE_BUFFER_U32x4(IDX, GLSL_STRUCT_NAME, NAME) \ + TEXTURE_RG32UI(PER_FLUSH_BINDINGS_SET, IDX, NAME) +#define STORAGE_BUFFER_F32x4(IDX, GLSL_STRUCT_NAME, NAME) \ + TEXTURE_RGBA32F(PER_FLUSH_BINDINGS_SET, IDX, NAME) +#define STORAGE_BUFFER_LOAD4(NAME, I) \ + TEXEL_FETCH(NAME, int2((I)&STORAGE_TEXTURE_MASK_X, (I) >> STORAGE_TEXTURE_SHIFT_Y)) +#define STORAGE_BUFFER_LOAD2(NAME, I) \ + TEXEL_FETCH(NAME, int2((I)&STORAGE_TEXTURE_MASK_X, (I) >> STORAGE_TEXTURE_SHIFT_Y)).xy + +#else + +#ifdef GL_ARB_shader_storage_buffer_object +#extension GL_ARB_shader_storage_buffer_object : require +#endif +#define STORAGE_BUFFER_U32x2(IDX, GLSL_STRUCT_NAME, NAME) \ + layout(std430, binding = IDX) readonly buffer GLSL_STRUCT_NAME { uint2 _values[]; } \ + NAME +#define STORAGE_BUFFER_U32x4(IDX, GLSL_STRUCT_NAME, NAME) \ + layout(std430, binding = IDX) readonly buffer GLSL_STRUCT_NAME { uint4 _values[]; } \ + NAME +#define STORAGE_BUFFER_F32x4(IDX, GLSL_STRUCT_NAME, NAME) \ + layout(std430, binding = IDX) readonly buffer GLSL_STRUCT_NAME { float4 _values[]; } \ + NAME +#define STORAGE_BUFFER_LOAD4(NAME, I) NAME._values[I] +#define STORAGE_BUFFER_LOAD2(NAME, I) NAME._values[I] + +#endif // DISABLE_SHADER_STORAGE_BUFFERS + +// Define macros for implementing pixel local storage based on available extensions. +#ifdef _EXPORTED_PLS_IMPL_ANGLE + +#extension GL_ANGLE_shader_pixel_local_storage : require + +#define PLS_BLOCK_BEGIN +#define PLS_DECL4F(IDX, NAME) layout(binding = IDX, rgba8) uniform lowp pixelLocalANGLE NAME +#define PLS_DECLUI(IDX, NAME) layout(binding = IDX, r32ui) uniform highp upixelLocalANGLE NAME +#define PLS_BLOCK_END + +#define PLS_LOAD4F(PLANE) pixelLocalLoadANGLE(PLANE) +#define PLS_LOADUI(PLANE) pixelLocalLoadANGLE(PLANE).x +#define PLS_STORE4F(PLANE, VALUE) pixelLocalStoreANGLE(PLANE, VALUE) +#define PLS_STOREUI(PLANE, VALUE) pixelLocalStoreANGLE(PLANE, uvec4(VALUE)) + +#define PLS_PRESERVE_4F(PLANE) +#define PLS_PRESERVE_UI(PLANE) + +#define PLS_INTERLOCK_BEGIN +#define PLS_INTERLOCK_END + +#endif // PLS_IMPL_ANGLE + +#ifdef _EXPORTED_PLS_IMPL_EXT_NATIVE + +#extension GL_EXT_shader_pixel_local_storage : enable + +// We need one of the framebuffer fetch extensions for the shader that loads the framebuffer. 
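+// (GL_ARM_shader_framebuffer_fetch exposes the destination color as gl_LastFragColorARM, while +// GL_EXT_shader_framebuffer_fetch lets color outputs be declared inout and read back; either +// one satisfies the load here.)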
+#extension GL_ARM_shader_framebuffer_fetch : enable +#extension GL_EXT_shader_framebuffer_fetch : enable + +#define PLS_BLOCK_BEGIN \ + __pixel_localEXT PLS \ + { +#define PLS_DECL4F(IDX, NAME) layout(rgba8) lowp vec4 NAME +#define PLS_DECLUI(IDX, NAME) layout(r32ui) highp uint NAME +#define PLS_BLOCK_END \ + } \ + ; + +#define PLS_LOAD4F(PLANE) PLANE +#define PLS_LOADUI(PLANE) PLANE +#define PLS_STORE4F(PLANE, VALUE) PLANE = (VALUE) +#define PLS_STOREUI(PLANE, VALUE) PLANE = (VALUE) + +#define PLS_PRESERVE_4F(PLANE) +#define PLS_PRESERVE_UI(PLANE) + +#define PLS_INTERLOCK_BEGIN +#define PLS_INTERLOCK_END + +#endif + +#ifdef _EXPORTED_PLS_IMPL_FRAMEBUFFER_FETCH + +#extension GL_EXT_shader_framebuffer_fetch : require + +#define PLS_BLOCK_BEGIN +#define PLS_DECL4F(IDX, NAME) layout(location = IDX) inout lowp vec4 NAME +#define PLS_DECLUI(IDX, NAME) layout(location = IDX) inout highp uvec4 NAME +#define PLS_BLOCK_END + +#define PLS_LOAD4F(PLANE) PLANE +#define PLS_LOADUI(PLANE) PLANE.x +#define PLS_STORE4F(PLANE, VALUE) PLANE = (VALUE) +#define PLS_STOREUI(PLANE, VALUE) PLANE.x = (VALUE) + +// When using multiple color attachments, we have to write a value to every color attachment, every +// shader invocation, or else the contents become undefined. +#define PLS_PRESERVE_4F(PLANE) PLS_STORE4F(PLANE, PLS_LOAD4F(PLANE)) +#define PLS_PRESERVE_UI(PLANE) PLS_STOREUI(PLANE, PLS_LOADUI(PLANE)) + +#define PLS_INTERLOCK_BEGIN +#define PLS_INTERLOCK_END + +#endif // PLS_IMPL_FRAMEBUFFER_FETCH + +#ifdef _EXPORTED_PLS_IMPL_STORAGE_TEXTURE + +#ifdef GL_ARB_shader_image_load_store +#extension GL_ARB_shader_image_load_store : require +#endif +#if defined(GL_ARB_fragment_shader_interlock) +#extension GL_ARB_fragment_shader_interlock : require +#define PLS_INTERLOCK_BEGIN beginInvocationInterlockARB() +#define PLS_INTERLOCK_END endInvocationInterlockARB() +#elif defined(GL_INTEL_fragment_shader_ordering) +#extension GL_INTEL_fragment_shader_ordering : require +#define PLS_INTERLOCK_BEGIN beginFragmentShaderOrderingINTEL() +#define PLS_INTERLOCK_END +#else +#define PLS_INTERLOCK_BEGIN +#define PLS_INTERLOCK_END +#endif + +#define PLS_BLOCK_BEGIN +#ifdef _EXPORTED_TARGET_VULKAN +#define PLS_DECL4F(IDX, NAME) \ + layout(set = PLS_TEXTURE_BINDINGS_SET, binding = IDX, rgba8) uniform lowp coherent image2D NAME +#define PLS_DECLUI(IDX, NAME) \ + layout(set = PLS_TEXTURE_BINDINGS_SET, binding = IDX, r32ui) \ + uniform highp coherent uimage2D NAME +#else +#define PLS_DECL4F(IDX, NAME) layout(binding = IDX, rgba8) uniform lowp coherent image2D NAME +#define PLS_DECLUI(IDX, NAME) layout(binding = IDX, r32ui) uniform highp coherent uimage2D NAME +#endif +#define PLS_BLOCK_END + +#define PLS_LOAD4F(PLANE) imageLoad(PLANE, _plsCoord) +#define PLS_LOADUI(PLANE) imageLoad(PLANE, _plsCoord).x +#define PLS_STORE4F(PLANE, VALUE) imageStore(PLANE, _plsCoord, VALUE) +#define PLS_STOREUI(PLANE, VALUE) imageStore(PLANE, _plsCoord, uvec4(VALUE)) + +#define PLS_PRESERVE_4F(PLANE) +#define PLS_PRESERVE_UI(PLANE) + +#ifndef _EXPORTED_USING_PLS_STORAGE_TEXTURES +#define _EXPORTED_USING_PLS_STORAGE_TEXTURES + +#endif // USING_PLS_STORAGE_TEXTURES + +#endif // PLS_IMPL_STORAGE_TEXTURE + +#ifdef _EXPORTED_PLS_IMPL_SUBPASS_LOAD + +#define PLS_BLOCK_BEGIN +#define PLS_DECL4F(IDX, NAME) \ + layout(input_attachment_index = IDX, binding = IDX, set = PLS_TEXTURE_BINDINGS_SET) \ + uniform lowp subpassInput _in_##NAME; \ + layout(location = IDX) out lowp vec4 NAME +#define PLS_DECLUI(IDX, NAME) \ + layout(input_attachment_index = IDX, binding = 
IDX, set = PLS_TEXTURE_BINDINGS_SET) \ + uniform highp usubpassInput _in_##NAME; \ + layout(location = IDX) out highp uvec4 NAME +#define PLS_BLOCK_END + +#define PLS_LOAD4F(PLANE) subpassLoad(_in_##PLANE) +#define PLS_LOADUI(PLANE) subpassLoad(_in_##PLANE).x +#define PLS_STORE4F(PLANE, VALUE) PLANE = (VALUE) +#define PLS_STOREUI(PLANE, VALUE) PLANE.x = (VALUE) + +#define PLS_PRESERVE_4F(PLANE) PLS_STORE4F(PLANE, subpassLoad(_in_##PLANE)) +#define PLS_PRESERVE_UI(PLANE) PLS_STOREUI(PLANE, subpassLoad(_in_##PLANE).x) + +#define PLS_INTERLOCK_BEGIN +#define PLS_INTERLOCK_END + +#endif + +#ifdef _EXPORTED_PLS_IMPL_NONE + +#define PLS_BLOCK_BEGIN +#define PLS_DECL4F(IDX, NAME) layout(location = IDX) out lowp vec4 NAME +#define PLS_DECLUI(IDX, NAME) layout(location = IDX) out highp uvec4 NAME +#define PLS_BLOCK_END + +#define PLS_LOAD4F(PLANE) vec4(0) +#define PLS_LOADUI(PLANE) 0u +#define PLS_STORE4F(PLANE, VALUE) PLANE = (VALUE) +#define PLS_STOREUI(PLANE, VALUE) PLANE.x = (VALUE) + +#define PLS_PRESERVE_4F(PLANE) PLANE = vec4(1, 0, 1, 1) +#define PLS_PRESERVE_UI(PLANE) PLANE.x = 0u + +#define PLS_INTERLOCK_BEGIN +#define PLS_INTERLOCK_END + +#endif + +#ifdef _EXPORTED_TARGET_VULKAN +#define gl_VertexID gl_VertexIndex +#endif + +// clang-format off +#ifdef _EXPORTED_ENABLE_INSTANCE_INDEX +#ifdef _EXPORTED_TARGET_VULKAN +#define INSTANCE_INDEX gl_InstanceIndex +#else +#ifdef _EXPORTED_ENABLE_SPIRV_CROSS_BASE_INSTANCE + // This uniform is specifically named "SPIRV_Cross_BaseInstance" for compatibility with + // SPIRV-Cross systems that search for it by name. + uniform int SPIRV_Cross_BaseInstance; +#define INSTANCE_INDEX (gl_InstanceID + SPIRV_Cross_BaseInstance) +#else +#define INSTANCE_INDEX (gl_InstanceID + gl_BaseInstance) +#endif +#endif +#else +#define INSTANCE_INDEX 0 +#endif +// clang-format on + +#define VERTEX_CONTEXT_DECL +#define VERTEX_CONTEXT_UNPACK + +#define VERTEX_MAIN(NAME, Attrs, attrs, _vertexID, _instanceID) \ + void main() \ + { \ + int _vertexID = gl_VertexID; \ + int _instanceID = INSTANCE_INDEX; + +#define IMAGE_RECT_VERTEX_MAIN VERTEX_MAIN + +#define IMAGE_MESH_VERTEX_MAIN(NAME, PositionAttr, position, UVAttr, uv, _vertexID) \ + VERTEX_MAIN(NAME, PositionAttr, position, _vertexID, _instanceID) + +#define VARYING_INIT(NAME, TYPE) +#define VARYING_PACK(NAME) +#define VARYING_UNPACK(NAME, TYPE) + +#define EMIT_VERTEX(_pos) \ + gl_Position = _pos; \ + } + +#define FRAG_DATA_MAIN(DATA_TYPE, NAME) \ + layout(location = 0) out DATA_TYPE _fd; \ + void main() + +#define EMIT_FRAG_DATA(VALUE) _fd = VALUE + +#define _fragCoord gl_FragCoord.xy + +#define FRAGMENT_CONTEXT_DECL +#define FRAGMENT_CONTEXT_UNPACK + +#ifdef _EXPORTED_USING_PLS_STORAGE_TEXTURES + +#ifdef _EXPORTED_TARGET_VULKAN +#define PLS_DECLUI_ATOMIC(IDX, NAME) \ + layout(set = PLS_TEXTURE_BINDINGS_SET, binding = IDX, r32ui) \ + uniform highp coherent uimage2D NAME +#else +#define PLS_DECLUI_ATOMIC(IDX, NAME) \ + layout(binding = IDX, r32ui) uniform highp coherent uimage2D NAME +#endif +#define PLS_LOADUI_ATOMIC(PLANE) imageLoad(PLANE, _plsCoord).x +#define PLS_STOREUI_ATOMIC(PLANE, VALUE) imageStore(PLANE, _plsCoord, uvec4(VALUE)) +#define PLS_ATOMIC_MAX(PLANE, X) imageAtomicMax(PLANE, _plsCoord, X) +#define PLS_ATOMIC_ADD(PLANE, X) imageAtomicAdd(PLANE, _plsCoord, X) + +#define PLS_CONTEXT_DECL , int2 _plsCoord +#define PLS_CONTEXT_UNPACK , _plsCoord + +#define PLS_MAIN(NAME) \ + void main() \ + { \ + int2 _plsCoord = ivec2(floor(_fragCoord)); + +#define EMIT_PLS } + +#else // !USING_PLS_STORAGE_TEXTURES + +#define 
PLS_CONTEXT_DECL +#define PLS_CONTEXT_UNPACK + +#define PLS_MAIN(NAME) void main() +#define EMIT_PLS + +#endif // !USING_PLS_STORAGE_TEXTURES + +#define PLS_MAIN_WITH_IMAGE_UNIFORMS(NAME) PLS_MAIN(NAME) + +#define PLS_FRAG_COLOR_MAIN(NAME) \ + layout(location = 0) out half4 _fragColor; \ + PLS_MAIN(NAME) + +#define PLS_FRAG_COLOR_MAIN_WITH_IMAGE_UNIFORMS(NAME) \ + layout(location = 0) out half4 _fragColor; \ + PLS_MAIN(NAME) + +#define EMIT_PLS_AND_FRAG_COLOR EMIT_PLS + +#define MUL(A, B) ((A) * (B)) + +#ifndef _EXPORTED_TARGET_VULKAN +#define FRAG_COORD_BOTTOM_UP +#endif + +precision highp float; +precision highp int; + +#if _EXPORTED_GLSL_VERSION < 310 +// Polyfill ES 3.1+ methods. +INLINE half4 unpackUnorm4x8(uint u) +{ + uint4 vals = uint4(u & 0xffu, (u >> 8) & 0xffu, (u >> 16) & 0xffu, u >> 24); + return float4(vals) * (1. / 255.); +} +#endif +)==="; +} // namespace glsl +} // namespace gpu +} // namespace rive \ No newline at end of file diff --git a/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/glsl.minified.ush b/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/glsl.minified.ush new file mode 100644 index 00000000..eef9360b --- /dev/null +++ b/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/glsl.minified.ush @@ -0,0 +1,497 @@ +/* + * Copyright 2023 Rive + */ + +// This header provides GLSL-specific #defines and declarations that enable our shaders to be +// compiled on MSL and GLSL both. + +#define GLSL + +#ifndef GLSL_VERSION +// In "#version 320 es", Qualcomm incorrectly substitutes __VERSION__ to 300. @GLSL_VERSION is a +// workaround for this. +#define GLSL_VERSION __VERSION__ +#endif + +#define float2 vec2 +#define float3 vec3 +#define packed_float3 vec3 +#define float4 vec4 + +#define half mediump float +#define half2 mediump vec2 +#define half3 mediump vec3 +#define half4 mediump vec4 +#define half3x4 mediump mat3x4 + +#define int2 ivec2 +#define int3 ivec3 +#define int4 ivec4 + +#define short mediump int +#define short2 mediump ivec2 +#define short3 mediump ivec3 +#define short4 mediump ivec4 + +#define uint2 uvec2 +#define uint3 uvec3 +#define uint4 uvec4 + +#define ushort mediump uint +#define ushort2 mediump uvec2 +#define ushort3 mediump uvec3 +#define ushort4 mediump uvec4 + +#define float2x2 mat2 + +#define INLINE +#define OUT(ARG_TYPE) out ARG_TYPE + +#ifdef GL_ANGLE_base_vertex_base_instance_shader_builtin +#extension GL_ANGLE_base_vertex_base_instance_shader_builtin : require +#endif + +#ifdef ENABLE_BINDLESS_TEXTURES +#extension GL_ARB_bindless_texture : require +#endif + +#ifdef ENABLE_KHR_BLEND +#extension GL_KHR_blend_equation_advanced : require +#endif + +#if defined(USING_DEPTH_STENCIL) && defined(ENABLE_CLIP_RECT) && defined(GL_ES) +#ifdef GL_EXT_clip_cull_distance +#extension GL_EXT_clip_cull_distance : require +#elif defined(GL_ANGLE_clip_cull_distance) +#extension GL_ANGLE_clip_cull_distance : require +#endif +#endif // USING_DEPTH_STENCIL && ENABLE_CLIP_RECT + +#if GLSL_VERSION >= 310 +#define UNIFORM_BLOCK_BEGIN(IDX, NAME) \ + layout(binding = IDX, std140) uniform NAME \ + { +#else +#define UNIFORM_BLOCK_BEGIN(IDX, NAME) \ + layout(std140) uniform NAME \ + { +#endif +// clang-format barrier... Otherwise it tries to merge this #define into the above macro... 
+#define UNIFORM_BLOCK_END(NAME) \ + } \ + NAME; + +#define ATTR_BLOCK_BEGIN(NAME) +#define ATTR(IDX, TYPE, NAME) layout(location = IDX) in TYPE NAME +#define ATTR_BLOCK_END +#define ATTR_LOAD(A, B, C, D) +#define ATTR_UNPACK(ID, attrs, NAME, TYPE) + +#ifdef VERTEX +#if GLSL_VERSION >= 310 +#define VARYING(IDX, TYPE, NAME) layout(location = IDX) out TYPE NAME +#else +#define VARYING(IDX, TYPE, NAME) out TYPE NAME +#endif +#else +#if GLSL_VERSION >= 310 +#define VARYING(IDX, TYPE, NAME) layout(location = IDX) in TYPE NAME +#else +#define VARYING(IDX, TYPE, NAME) in TYPE NAME +#endif +#endif +#define FLAT flat +#define VARYING_BLOCK_BEGIN +#define VARYING_BLOCK_END + +// clang-format off +#ifdef TARGET_VULKAN + // Since Vulkan is compiled offline and not all platforms support noperspective, don't use it. +#define NO_PERSPECTIVE +#else +#ifdef GL_NV_shader_noperspective_interpolation +#extension GL_NV_shader_noperspective_interpolation : require +#define NO_PERSPECTIVE noperspective +#else +#define NO_PERSPECTIVE +#endif +#endif +// clang-format on + +#ifdef VERTEX +#define VERTEX_TEXTURE_BLOCK_BEGIN +#define VERTEX_TEXTURE_BLOCK_END +#endif + +#ifdef FRAGMENT +#define FRAG_TEXTURE_BLOCK_BEGIN +#define FRAG_TEXTURE_BLOCK_END +#endif + +#ifdef TARGET_VULKAN +#define TEXTURE_RGBA32UI(SET, IDX, NAME) \ + layout(set = SET, binding = IDX) uniform highp utexture2D NAME +#define TEXTURE_RGBA32F(SET, IDX, NAME) \ + layout(set = SET, binding = IDX) uniform highp texture2D NAME +#define TEXTURE_RGBA8(SET, IDX, NAME) \ + layout(set = SET, binding = IDX) uniform mediump texture2D NAME +#elif GLSL_VERSION >= 310 +#define TEXTURE_RGBA32UI(SET, IDX, NAME) layout(binding = IDX) uniform highp usampler2D NAME +#define TEXTURE_RGBA32F(SET, IDX, NAME) layout(binding = IDX) uniform highp sampler2D NAME +#define TEXTURE_RGBA8(SET, IDX, NAME) layout(binding = IDX) uniform mediump sampler2D NAME +#else +#define TEXTURE_RGBA32UI(SET, IDX, NAME) uniform highp usampler2D NAME +#define TEXTURE_RGBA32F(SET, IDX, NAME) uniform highp sampler2D NAME +#define TEXTURE_RGBA8(SET, IDX, NAME) uniform mediump sampler2D NAME +#endif +#define TEXTURE_RG32UI(SET, IDX, NAME) TEXTURE_RGBA32UI(SET, IDX, NAME) + +#ifdef TARGET_VULKAN +#define SAMPLER_LINEAR(TEXTURE_IDX, NAME) \ + layout(set = SAMPLER_BINDINGS_SET, binding = TEXTURE_IDX) uniform mediump sampler NAME; +#define SAMPLER_MIPMAP(TEXTURE_IDX, NAME) \ + layout(set = SAMPLER_BINDINGS_SET, binding = TEXTURE_IDX) uniform mediump sampler NAME; +#define TEXTURE_SAMPLE(NAME, SAMPLER_NAME, COORD) texture(sampler2D(NAME, SAMPLER_NAME), COORD) +#define TEXTURE_SAMPLE_LOD(NAME, SAMPLER_NAME, COORD, LOD) \ + textureLod(sampler2D(NAME, SAMPLER_NAME), COORD, LOD) +#define TEXTURE_SAMPLE_GRAD(NAME, SAMPLER_NAME, COORD, DDX, DDY) \ + textureGrad(sampler2D(NAME, SAMPLER_NAME), COORD, DDX, DDY) +#else +// SAMPLER_LINEAR and SAMPLER_MIPMAP are no-ops because in GL, sampling parameters are API-level +// state tied to the texture. 
+#define SAMPLER_LINEAR(TEXTURE_IDX, NAME)
+#define SAMPLER_MIPMAP(TEXTURE_IDX, NAME)
+#define TEXTURE_SAMPLE(NAME, SAMPLER_NAME, COORD) texture(NAME, COORD)
+#define TEXTURE_SAMPLE_LOD(NAME, SAMPLER_NAME, COORD, LOD) textureLod(NAME, COORD, LOD)
+#define TEXTURE_SAMPLE_GRAD(NAME, SAMPLER_NAME, COORD, DDX, DDY) textureGrad(NAME, COORD, DDX, DDY)
+#endif
+
+#define TEXEL_FETCH(NAME, COORD) texelFetch(NAME, COORD, 0)
+
+#define VERTEX_STORAGE_BUFFER_BLOCK_BEGIN
+#define VERTEX_STORAGE_BUFFER_BLOCK_END
+
+#define FRAG_STORAGE_BUFFER_BLOCK_BEGIN
+#define FRAG_STORAGE_BUFFER_BLOCK_END
+
+#ifdef DISABLE_SHADER_STORAGE_BUFFERS
+
+#define STORAGE_BUFFER_U32x2(IDX, GLSL_STRUCT_NAME, NAME) \
+    TEXTURE_RG32UI(PER_FLUSH_BINDINGS_SET, IDX, NAME)
+#define STORAGE_BUFFER_U32x4(IDX, GLSL_STRUCT_NAME, NAME) \
+    TEXTURE_RGBA32UI(PER_FLUSH_BINDINGS_SET, IDX, NAME)
+#define STORAGE_BUFFER_F32x4(IDX, GLSL_STRUCT_NAME, NAME) \
+    TEXTURE_RGBA32F(PER_FLUSH_BINDINGS_SET, IDX, NAME)
+#define STORAGE_BUFFER_LOAD4(NAME, I) \
+    TEXEL_FETCH(NAME, int2((I) & STORAGE_TEXTURE_MASK_X, (I) >> STORAGE_TEXTURE_SHIFT_Y))
+#define STORAGE_BUFFER_LOAD2(NAME, I) \
+    TEXEL_FETCH(NAME, int2((I) & STORAGE_TEXTURE_MASK_X, (I) >> STORAGE_TEXTURE_SHIFT_Y)).xy
+
+#else
+
+#ifdef GL_ARB_shader_storage_buffer_object
+#extension GL_ARB_shader_storage_buffer_object : require
+#endif
+#define STORAGE_BUFFER_U32x2(IDX, GLSL_STRUCT_NAME, NAME) \
+    layout(std430, binding = IDX) readonly buffer GLSL_STRUCT_NAME { uint2 _values[]; } \
+    NAME
+#define STORAGE_BUFFER_U32x4(IDX, GLSL_STRUCT_NAME, NAME) \
+    layout(std430, binding = IDX) readonly buffer GLSL_STRUCT_NAME { uint4 _values[]; } \
+    NAME
+#define STORAGE_BUFFER_F32x4(IDX, GLSL_STRUCT_NAME, NAME) \
+    layout(std430, binding = IDX) readonly buffer GLSL_STRUCT_NAME { float4 _values[]; } \
+    NAME
+#define STORAGE_BUFFER_LOAD4(NAME, I) NAME._values[I]
+#define STORAGE_BUFFER_LOAD2(NAME, I) NAME._values[I]
+
+#endif // DISABLE_SHADER_STORAGE_BUFFERS
+
+// Define macros for implementing pixel local storage based on available extensions.
+#ifdef PLS_IMPL_ANGLE
+
+#extension GL_ANGLE_shader_pixel_local_storage : require
+
+#define PLS_BLOCK_BEGIN
+#define PLS_DECL4F(IDX, NAME) layout(binding = IDX, rgba8) uniform lowp pixelLocalANGLE NAME
+#define PLS_DECLUI(IDX, NAME) layout(binding = IDX, r32ui) uniform highp upixelLocalANGLE NAME
+#define PLS_BLOCK_END
+
+#define PLS_LOAD4F(PLANE) pixelLocalLoadANGLE(PLANE)
+#define PLS_LOADUI(PLANE) pixelLocalLoadANGLE(PLANE).x
+#define PLS_STORE4F(PLANE, VALUE) pixelLocalStoreANGLE(PLANE, VALUE)
+#define PLS_STOREUI(PLANE, VALUE) pixelLocalStoreANGLE(PLANE, uvec4(VALUE))
+
+#define PLS_PRESERVE_4F(PLANE)
+#define PLS_PRESERVE_UI(PLANE)
+
+#define PLS_INTERLOCK_BEGIN
+#define PLS_INTERLOCK_END
+
+#endif // PLS_IMPL_ANGLE
+
+#ifdef PLS_IMPL_EXT_NATIVE
+
+#extension GL_EXT_shader_pixel_local_storage : enable
+
+// We need one of the framebuffer fetch extensions for the shader that loads the framebuffer.
+#extension GL_ARM_shader_framebuffer_fetch : enable
+#extension GL_EXT_shader_framebuffer_fetch : enable
+
+#define PLS_BLOCK_BEGIN \
+    __pixel_localEXT PLS \
+    {
+#define PLS_DECL4F(IDX, NAME) layout(rgba8) lowp vec4 NAME
+#define PLS_DECLUI(IDX, NAME) layout(r32ui) highp uint NAME
+#define PLS_BLOCK_END \
+    } \
+    ;
+
+#define PLS_LOAD4F(PLANE) PLANE
+#define PLS_LOADUI(PLANE) PLANE
+#define PLS_STORE4F(PLANE, VALUE) PLANE = (VALUE)
+#define PLS_STOREUI(PLANE, VALUE) PLANE = (VALUE)
+
+#define PLS_PRESERVE_4F(PLANE)
+#define PLS_PRESERVE_UI(PLANE)
+
+#define PLS_INTERLOCK_BEGIN
+#define PLS_INTERLOCK_END
+
+#endif // PLS_IMPL_EXT_NATIVE
+
+#ifdef PLS_IMPL_FRAMEBUFFER_FETCH
+
+#extension GL_EXT_shader_framebuffer_fetch : require
+
+#define PLS_BLOCK_BEGIN
+#define PLS_DECL4F(IDX, NAME) layout(location = IDX) inout lowp vec4 NAME
+#define PLS_DECLUI(IDX, NAME) layout(location = IDX) inout highp uvec4 NAME
+#define PLS_BLOCK_END
+
+#define PLS_LOAD4F(PLANE) PLANE
+#define PLS_LOADUI(PLANE) PLANE.x
+#define PLS_STORE4F(PLANE, VALUE) PLANE = (VALUE)
+#define PLS_STOREUI(PLANE, VALUE) PLANE.x = (VALUE)
+
+// When using multiple color attachments, we have to write a value to every color attachment, every
+// shader invocation, or else the contents become undefined.
+#define PLS_PRESERVE_4F(PLANE) PLS_STORE4F(PLANE, PLS_LOAD4F(PLANE))
+#define PLS_PRESERVE_UI(PLANE) PLS_STOREUI(PLANE, PLS_LOADUI(PLANE))
+
+#define PLS_INTERLOCK_BEGIN
+#define PLS_INTERLOCK_END
+
+#endif // PLS_IMPL_FRAMEBUFFER_FETCH
+
+#ifdef PLS_IMPL_STORAGE_TEXTURE
+
+#ifdef GL_ARB_shader_image_load_store
+#extension GL_ARB_shader_image_load_store : require
+#endif
+#if defined(GL_ARB_fragment_shader_interlock)
+#extension GL_ARB_fragment_shader_interlock : require
+#define PLS_INTERLOCK_BEGIN beginInvocationInterlockARB()
+#define PLS_INTERLOCK_END endInvocationInterlockARB()
+#elif defined(GL_INTEL_fragment_shader_ordering)
+#extension GL_INTEL_fragment_shader_ordering : require
+#define PLS_INTERLOCK_BEGIN beginFragmentShaderOrderingINTEL()
+#define PLS_INTERLOCK_END
+#else
+#define PLS_INTERLOCK_BEGIN
+#define PLS_INTERLOCK_END
+#endif
+
+#define PLS_BLOCK_BEGIN
+#ifdef TARGET_VULKAN
+#define PLS_DECL4F(IDX, NAME) \
+    layout(set = PLS_TEXTURE_BINDINGS_SET, binding = IDX, rgba8) uniform lowp coherent image2D NAME
+#define PLS_DECLUI(IDX, NAME) \
+    layout(set = PLS_TEXTURE_BINDINGS_SET, binding = IDX, r32ui) \
+    uniform highp coherent uimage2D NAME
+#else
+#define PLS_DECL4F(IDX, NAME) layout(binding = IDX, rgba8) uniform lowp coherent image2D NAME
+#define PLS_DECLUI(IDX, NAME) layout(binding = IDX, r32ui) uniform highp coherent uimage2D NAME
+#endif
+#define PLS_BLOCK_END
+
+#define PLS_LOAD4F(PLANE) imageLoad(PLANE, _plsCoord)
+#define PLS_LOADUI(PLANE) imageLoad(PLANE, _plsCoord).x
+#define PLS_STORE4F(PLANE, VALUE) imageStore(PLANE, _plsCoord, VALUE)
+#define PLS_STOREUI(PLANE, VALUE) imageStore(PLANE, _plsCoord, uvec4(VALUE))
+
+#define PLS_PRESERVE_4F(PLANE)
+#define PLS_PRESERVE_UI(PLANE)
+
+#ifndef USING_PLS_STORAGE_TEXTURES
+#define USING_PLS_STORAGE_TEXTURES
+
+#endif // USING_PLS_STORAGE_TEXTURES
+
+#endif // PLS_IMPL_STORAGE_TEXTURE
+
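A note on the DISABLE_SHADER_STORAGE_BUFFERS fallback near the top of this header: when SSBOs are unavailable, each storage buffer is bound as an integer texture and a flat element index I is split into a texel coordinate with a mask and a shift, which only works because the backing texture's width is a power of two. Below is a minimal C++ sketch of that addressing; the 128-texel width is an illustrative assumption, and the real values come from the STORAGE_TEXTURE_MASK_X and STORAGE_TEXTURE_SHIFT_Y constants in the generated headers.

#include <cassert>
#include <cstdint>

// Assumed power-of-two texture width (placeholder, not the plugin's real value).
constexpr uint32_t kWidth = 128;
constexpr uint32_t kMaskX = kWidth - 1; // low bits select the column
constexpr uint32_t kShiftY = 7;         // log2(kWidth); high bits select the row

struct Texel
{
    uint32_t x, y;
};

// Mirrors TEXEL_FETCH(NAME, int2((I) & STORAGE_TEXTURE_MASK_X, (I) >> STORAGE_TEXTURE_SHIFT_Y)).
constexpr Texel texelForIndex(uint32_t i) { return Texel{i & kMaskX, i >> kShiftY}; }

int main()
{
    // Because kWidth is a power of two, i & kMaskX == i % kWidth and
    // i >> kShiftY == i / kWidth: element 130 lands at column 2 of row 1.
    Texel t = texelForIndex(130);
    assert(t.x == 2 && t.y == 1);
    return 0;
}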
+#ifdef PLS_IMPL_SUBPASS_LOAD
+
+#define PLS_BLOCK_BEGIN
+#define PLS_DECL4F(IDX, NAME) \
+    layout(input_attachment_index = IDX, binding = IDX, set = PLS_TEXTURE_BINDINGS_SET) \
+    uniform lowp subpassInput _in_##NAME; \
+    layout(location = IDX) out lowp vec4 NAME
+#define PLS_DECLUI(IDX, NAME) \
+    layout(input_attachment_index = IDX, binding = IDX, set = PLS_TEXTURE_BINDINGS_SET) \
+    uniform highp usubpassInput _in_##NAME; \
+    layout(location = IDX) out highp uvec4 NAME
+#define PLS_BLOCK_END
+
+#define PLS_LOAD4F(PLANE) subpassLoad(_in_##PLANE)
+#define PLS_LOADUI(PLANE) subpassLoad(_in_##PLANE).x
+#define PLS_STORE4F(PLANE, VALUE) PLANE = (VALUE)
+#define PLS_STOREUI(PLANE, VALUE) PLANE.x = (VALUE)
+
+#define PLS_PRESERVE_4F(PLANE) PLS_STORE4F(PLANE, subpassLoad(_in_##PLANE))
+#define PLS_PRESERVE_UI(PLANE) PLS_STOREUI(PLANE, subpassLoad(_in_##PLANE).x)
+
+#define PLS_INTERLOCK_BEGIN
+#define PLS_INTERLOCK_END
+
+#endif // PLS_IMPL_SUBPASS_LOAD
+
+#ifdef PLS_IMPL_NONE
+
+#define PLS_BLOCK_BEGIN
+#define PLS_DECL4F(IDX, NAME) layout(location = IDX) out lowp vec4 NAME
+#define PLS_DECLUI(IDX, NAME) layout(location = IDX) out highp uvec4 NAME
+#define PLS_BLOCK_END
+
+#define PLS_LOAD4F(PLANE) vec4(0)
+#define PLS_LOADUI(PLANE) 0u
+#define PLS_STORE4F(PLANE, VALUE) PLANE = (VALUE)
+#define PLS_STOREUI(PLANE, VALUE) PLANE.x = (VALUE)
+
+#define PLS_PRESERVE_4F(PLANE) PLANE = vec4(1, 0, 1, 1)
+#define PLS_PRESERVE_UI(PLANE) PLANE.x = 0u
+
+#define PLS_INTERLOCK_BEGIN
+#define PLS_INTERLOCK_END
+
+#endif // PLS_IMPL_NONE
+
+#ifdef TARGET_VULKAN
+#define gl_VertexID gl_VertexIndex
+#endif
+
+// clang-format off
+#ifdef ENABLE_INSTANCE_INDEX
+#ifdef TARGET_VULKAN
+#define INSTANCE_INDEX gl_InstanceIndex
+#else
+#ifdef ENABLE_SPIRV_CROSS_BASE_INSTANCE
+    // This uniform is specifically named "SPIRV_Cross_BaseInstance" for compatibility with
+    // SPIRV-Cross systems that search for it by name.
+    uniform int SPIRV_Cross_BaseInstance;
+#define INSTANCE_INDEX (gl_InstanceID + SPIRV_Cross_BaseInstance)
+#else
+#define INSTANCE_INDEX (gl_InstanceID + gl_BaseInstance)
+#endif
+#endif
+#else
+#define INSTANCE_INDEX 0
+#endif
+// clang-format on
+
+#define VERTEX_CONTEXT_DECL
+#define VERTEX_CONTEXT_UNPACK
+
+#define VERTEX_MAIN(NAME, Attrs, attrs, _vertexID, _instanceID) \
+    void main() \
+    { \
+        int _vertexID = gl_VertexID; \
+        int _instanceID = INSTANCE_INDEX;
+
+#define IMAGE_RECT_VERTEX_MAIN VERTEX_MAIN
+
+#define IMAGE_MESH_VERTEX_MAIN(NAME, PositionAttr, position, UVAttr, uv, _vertexID) \
+    VERTEX_MAIN(NAME, PositionAttr, position, _vertexID, _instanceID)
+
+#define VARYING_INIT(NAME, TYPE)
+#define VARYING_PACK(NAME)
+#define VARYING_UNPACK(NAME, TYPE)
+
+#define EMIT_VERTEX(_pos) \
+    gl_Position = _pos; \
+    }
+
+#define FRAG_DATA_MAIN(DATA_TYPE, NAME) \
+    layout(location = 0) out DATA_TYPE _fd; \
+    void main()
+
+#define EMIT_FRAG_DATA(VALUE) _fd = VALUE
+
+#define _fragCoord gl_FragCoord.xy
+
+#define FRAGMENT_CONTEXT_DECL
+#define FRAGMENT_CONTEXT_UNPACK
+
+#ifdef USING_PLS_STORAGE_TEXTURES
+
+#ifdef TARGET_VULKAN
+#define PLS_DECLUI_ATOMIC(IDX, NAME) \
+    layout(set = PLS_TEXTURE_BINDINGS_SET, binding = IDX, r32ui) \
+    uniform highp coherent uimage2D NAME
+#else
+#define PLS_DECLUI_ATOMIC(IDX, NAME) \
+    layout(binding = IDX, r32ui) uniform highp coherent uimage2D NAME
+#endif
+#define PLS_LOADUI_ATOMIC(PLANE) imageLoad(PLANE, _plsCoord).x
+#define PLS_STOREUI_ATOMIC(PLANE, VALUE) imageStore(PLANE, _plsCoord, uvec4(VALUE))
+#define PLS_ATOMIC_MAX(PLANE, X) imageAtomicMax(PLANE, _plsCoord, X)
+#define PLS_ATOMIC_ADD(PLANE, X) imageAtomicAdd(PLANE, _plsCoord, X)
+
+#define PLS_CONTEXT_DECL , int2 _plsCoord
+#define PLS_CONTEXT_UNPACK , _plsCoord
+
+#define PLS_MAIN(NAME) \
+    void main() \
+    { \
+        int2 _plsCoord = ivec2(floor(_fragCoord));
+
+#define EMIT_PLS }
+
+#else // !USING_PLS_STORAGE_TEXTURES
+
+#define PLS_CONTEXT_DECL
+#define PLS_CONTEXT_UNPACK
+
+#define PLS_MAIN(NAME) void main()
+#define EMIT_PLS
+
+#endif // 
!USING_PLS_STORAGE_TEXTURES + +#define PLS_MAIN_WITH_IMAGE_UNIFORMS(NAME) PLS_MAIN(NAME) + +#define PLS_FRAG_COLOR_MAIN(NAME) \ + layout(location = 0) out half4 _fragColor; \ + PLS_MAIN(NAME) + +#define PLS_FRAG_COLOR_MAIN_WITH_IMAGE_UNIFORMS(NAME) \ + layout(location = 0) out half4 _fragColor; \ + PLS_MAIN(NAME) + +#define EMIT_PLS_AND_FRAG_COLOR EMIT_PLS + +#define MUL(A, B) ((A) * (B)) + +#ifndef TARGET_VULKAN +#define FRAG_COORD_BOTTOM_UP +#endif + +precision highp float; +precision highp int; + +#if GLSL_VERSION < 310 +// Polyfill ES 3.1+ methods. +INLINE half4 unpackUnorm4x8(uint u) +{ + uint4 vals = uint4(u & 0xffu, (u >> 8) & 0xffu, (u >> 16) & 0xffu, u >> 24); + return float4(vals) * (1. / 255.); +} +#endif diff --git a/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/glsl.stamp b/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/glsl.stamp new file mode 100644 index 00000000..e69de29b diff --git a/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/hlsl.exports.h b/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/hlsl.exports.h new file mode 100644 index 00000000..2d88d890 --- /dev/null +++ b/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/hlsl.exports.h @@ -0,0 +1,178 @@ +#pragma once + +#define GLSL_CLEAR_CLIP "_EXPORTED_CLEAR_CLIP" +#define GLSL_CLEAR_CLIP_raw _EXPORTED_CLEAR_CLIP +#define GLSL_CLEAR_COLOR "_EXPORTED_CLEAR_COLOR" +#define GLSL_CLEAR_COLOR_raw _EXPORTED_CLEAR_COLOR +#define GLSL_CLEAR_COVERAGE "_EXPORTED_CLEAR_COVERAGE" +#define GLSL_CLEAR_COVERAGE_raw _EXPORTED_CLEAR_COVERAGE +#define GLSL_COALESCED_PLS_RESOLVE_AND_TRANSFER "_EXPORTED_COALESCED_PLS_RESOLVE_AND_TRANSFER" +#define GLSL_COALESCED_PLS_RESOLVE_AND_TRANSFER_raw _EXPORTED_COALESCED_PLS_RESOLVE_AND_TRANSFER +#define GLSL_COLOR_PLANE_IDX_OVERRIDE "_EXPORTED_COLOR_PLANE_IDX_OVERRIDE" +#define GLSL_COLOR_PLANE_IDX_OVERRIDE_raw _EXPORTED_COLOR_PLANE_IDX_OVERRIDE +#define GLSL_DISABLE_SHADER_STORAGE_BUFFERS "_EXPORTED_DISABLE_SHADER_STORAGE_BUFFERS" +#define GLSL_DISABLE_SHADER_STORAGE_BUFFERS_raw _EXPORTED_DISABLE_SHADER_STORAGE_BUFFERS +#define GLSL_DRAW_IMAGE "_EXPORTED_DRAW_IMAGE" +#define GLSL_DRAW_IMAGE_raw _EXPORTED_DRAW_IMAGE +#define GLSL_DRAW_IMAGE_MESH "_EXPORTED_DRAW_IMAGE_MESH" +#define GLSL_DRAW_IMAGE_MESH_raw _EXPORTED_DRAW_IMAGE_MESH +#define GLSL_DRAW_IMAGE_RECT "_EXPORTED_DRAW_IMAGE_RECT" +#define GLSL_DRAW_IMAGE_RECT_raw _EXPORTED_DRAW_IMAGE_RECT +#define GLSL_DRAW_INTERIOR_TRIANGLES "_EXPORTED_DRAW_INTERIOR_TRIANGLES" +#define GLSL_DRAW_INTERIOR_TRIANGLES_raw _EXPORTED_DRAW_INTERIOR_TRIANGLES +#define GLSL_DRAW_PATH "_EXPORTED_DRAW_PATH" +#define GLSL_DRAW_PATH_raw _EXPORTED_DRAW_PATH +#define GLSL_DRAW_RENDER_TARGET_UPDATE_BOUNDS "_EXPORTED_DRAW_RENDER_TARGET_UPDATE_BOUNDS" +#define GLSL_DRAW_RENDER_TARGET_UPDATE_BOUNDS_raw _EXPORTED_DRAW_RENDER_TARGET_UPDATE_BOUNDS +#define GLSL_ENABLE_ADVANCED_BLEND "_EXPORTED_ENABLE_ADVANCED_BLEND" +#define GLSL_ENABLE_ADVANCED_BLEND_raw _EXPORTED_ENABLE_ADVANCED_BLEND +#define GLSL_ENABLE_BINDLESS_TEXTURES "_EXPORTED_ENABLE_BINDLESS_TEXTURES" +#define GLSL_ENABLE_BINDLESS_TEXTURES_raw _EXPORTED_ENABLE_BINDLESS_TEXTURES +#define GLSL_ENABLE_CLIPPING "_EXPORTED_ENABLE_CLIPPING" +#define GLSL_ENABLE_CLIPPING_raw _EXPORTED_ENABLE_CLIPPING +#define GLSL_ENABLE_CLIP_RECT "_EXPORTED_ENABLE_CLIP_RECT" +#define GLSL_ENABLE_CLIP_RECT_raw _EXPORTED_ENABLE_CLIP_RECT +#define GLSL_ENABLE_EVEN_ODD "_EXPORTED_ENABLE_EVEN_ODD" +#define 
GLSL_ENABLE_EVEN_ODD_raw _EXPORTED_ENABLE_EVEN_ODD +#define GLSL_ENABLE_HSL_BLEND_MODES "_EXPORTED_ENABLE_HSL_BLEND_MODES" +#define GLSL_ENABLE_HSL_BLEND_MODES_raw _EXPORTED_ENABLE_HSL_BLEND_MODES +#define GLSL_ENABLE_INSTANCE_INDEX "_EXPORTED_ENABLE_INSTANCE_INDEX" +#define GLSL_ENABLE_INSTANCE_INDEX_raw _EXPORTED_ENABLE_INSTANCE_INDEX +#define GLSL_ENABLE_KHR_BLEND "_EXPORTED_ENABLE_KHR_BLEND" +#define GLSL_ENABLE_KHR_BLEND_raw _EXPORTED_ENABLE_KHR_BLEND +#define GLSL_ENABLE_MIN_16_PRECISION "_EXPORTED_ENABLE_MIN_16_PRECISION" +#define GLSL_ENABLE_MIN_16_PRECISION_raw _EXPORTED_ENABLE_MIN_16_PRECISION +#define GLSL_ENABLE_NESTED_CLIPPING "_EXPORTED_ENABLE_NESTED_CLIPPING" +#define GLSL_ENABLE_NESTED_CLIPPING_raw _EXPORTED_ENABLE_NESTED_CLIPPING +#define GLSL_ENABLE_RASTERIZER_ORDERED_VIEWS "_EXPORTED_ENABLE_RASTERIZER_ORDERED_VIEWS" +#define GLSL_ENABLE_RASTERIZER_ORDERED_VIEWS_raw _EXPORTED_ENABLE_RASTERIZER_ORDERED_VIEWS +#define GLSL_ENABLE_SPIRV_CROSS_BASE_INSTANCE "_EXPORTED_ENABLE_SPIRV_CROSS_BASE_INSTANCE" +#define GLSL_ENABLE_SPIRV_CROSS_BASE_INSTANCE_raw _EXPORTED_ENABLE_SPIRV_CROSS_BASE_INSTANCE +#define GLSL_ENABLE_TYPED_UAV_LOAD_STORE "_EXPORTED_ENABLE_TYPED_UAV_LOAD_STORE" +#define GLSL_ENABLE_TYPED_UAV_LOAD_STORE_raw _EXPORTED_ENABLE_TYPED_UAV_LOAD_STORE +#define GLSL_FIXED_FUNCTION_COLOR_BLEND "_EXPORTED_FIXED_FUNCTION_COLOR_BLEND" +#define GLSL_FIXED_FUNCTION_COLOR_BLEND_raw _EXPORTED_FIXED_FUNCTION_COLOR_BLEND +#define GLSL_FRAGMENT "_EXPORTED_FRAGMENT" +#define GLSL_FRAGMENT_raw _EXPORTED_FRAGMENT +#define GLSL_FlushUniforms "_EXPORTED_FlushUniforms" +#define GLSL_FlushUniforms_raw _EXPORTED_FlushUniforms +#define GLSL_GLSL_VERSION "_EXPORTED_GLSL_VERSION" +#define GLSL_GLSL_VERSION_raw _EXPORTED_GLSL_VERSION +#define GLSL_INITIALIZE_PLS "_EXPORTED_INITIALIZE_PLS" +#define GLSL_INITIALIZE_PLS_raw _EXPORTED_INITIALIZE_PLS +#define GLSL_ImageDrawUniforms "_EXPORTED_ImageDrawUniforms" +#define GLSL_ImageDrawUniforms_raw _EXPORTED_ImageDrawUniforms +#define GLSL_LOAD_COLOR "_EXPORTED_LOAD_COLOR" +#define GLSL_LOAD_COLOR_raw _EXPORTED_LOAD_COLOR +#define GLSL_OPTIONALLY_FLAT "_EXPORTED_OPTIONALLY_FLAT" +#define GLSL_OPTIONALLY_FLAT_raw _EXPORTED_OPTIONALLY_FLAT +#define GLSL_PLS_IMPL_ANGLE "_EXPORTED_PLS_IMPL_ANGLE" +#define GLSL_PLS_IMPL_ANGLE_raw _EXPORTED_PLS_IMPL_ANGLE +#define GLSL_PLS_IMPL_DEVICE_BUFFER "_EXPORTED_PLS_IMPL_DEVICE_BUFFER" +#define GLSL_PLS_IMPL_DEVICE_BUFFER_raw _EXPORTED_PLS_IMPL_DEVICE_BUFFER +#define GLSL_PLS_IMPL_DEVICE_BUFFER_RASTER_ORDERED "_EXPORTED_PLS_IMPL_DEVICE_BUFFER_RASTER_ORDERED" +#define GLSL_PLS_IMPL_DEVICE_BUFFER_RASTER_ORDERED_raw _EXPORTED_PLS_IMPL_DEVICE_BUFFER_RASTER_ORDERED +#define GLSL_PLS_IMPL_EXT_NATIVE "_EXPORTED_PLS_IMPL_EXT_NATIVE" +#define GLSL_PLS_IMPL_EXT_NATIVE_raw _EXPORTED_PLS_IMPL_EXT_NATIVE +#define GLSL_PLS_IMPL_FRAMEBUFFER_FETCH "_EXPORTED_PLS_IMPL_FRAMEBUFFER_FETCH" +#define GLSL_PLS_IMPL_FRAMEBUFFER_FETCH_raw _EXPORTED_PLS_IMPL_FRAMEBUFFER_FETCH +#define GLSL_PLS_IMPL_NONE "_EXPORTED_PLS_IMPL_NONE" +#define GLSL_PLS_IMPL_NONE_raw _EXPORTED_PLS_IMPL_NONE +#define GLSL_PLS_IMPL_STORAGE_TEXTURE "_EXPORTED_PLS_IMPL_STORAGE_TEXTURE" +#define GLSL_PLS_IMPL_STORAGE_TEXTURE_raw _EXPORTED_PLS_IMPL_STORAGE_TEXTURE +#define GLSL_PLS_IMPL_SUBPASS_LOAD "_EXPORTED_PLS_IMPL_SUBPASS_LOAD" +#define GLSL_PLS_IMPL_SUBPASS_LOAD_raw _EXPORTED_PLS_IMPL_SUBPASS_LOAD +#define GLSL_RESOLVE_PLS "_EXPORTED_RESOLVE_PLS" +#define GLSL_RESOLVE_PLS_raw _EXPORTED_RESOLVE_PLS +#define GLSL_STORE_COLOR "_EXPORTED_STORE_COLOR" +#define 
GLSL_STORE_COLOR_raw _EXPORTED_STORE_COLOR +#define GLSL_STORE_COLOR_CLEAR "_EXPORTED_STORE_COLOR_CLEAR" +#define GLSL_STORE_COLOR_CLEAR_raw _EXPORTED_STORE_COLOR_CLEAR +#define GLSL_SWIZZLE_COLOR_BGRA_TO_RGBA "_EXPORTED_SWIZZLE_COLOR_BGRA_TO_RGBA" +#define GLSL_SWIZZLE_COLOR_BGRA_TO_RGBA_raw _EXPORTED_SWIZZLE_COLOR_BGRA_TO_RGBA +#define GLSL_TARGET_VULKAN "_EXPORTED_TARGET_VULKAN" +#define GLSL_TARGET_VULKAN_raw _EXPORTED_TARGET_VULKAN +#define GLSL_USE_GENERATED_UNIFORMS "_EXPORTED_USE_GENERATED_UNIFORMS" +#define GLSL_USE_GENERATED_UNIFORMS_raw _EXPORTED_USE_GENERATED_UNIFORMS +#define GLSL_USING_DEPTH_STENCIL "_EXPORTED_USING_DEPTH_STENCIL" +#define GLSL_USING_DEPTH_STENCIL_raw _EXPORTED_USING_DEPTH_STENCIL +#define GLSL_USING_PLS_STORAGE_TEXTURES "_EXPORTED_USING_PLS_STORAGE_TEXTURES" +#define GLSL_USING_PLS_STORAGE_TEXTURES_raw _EXPORTED_USING_PLS_STORAGE_TEXTURES +#define GLSL_VERTEX "_EXPORTED_VERTEX" +#define GLSL_VERTEX_raw _EXPORTED_VERTEX +#define GLSL_a_args "_EXPORTED_a_args" +#define GLSL_a_args_raw _EXPORTED_a_args +#define GLSL_a_args_a "_EXPORTED_a_args_a" +#define GLSL_a_args_a_raw _EXPORTED_a_args_a +#define GLSL_a_args_b "_EXPORTED_a_args_b" +#define GLSL_a_args_b_raw _EXPORTED_a_args_b +#define GLSL_a_args_c "_EXPORTED_a_args_c" +#define GLSL_a_args_c_raw _EXPORTED_a_args_c +#define GLSL_a_args_d "_EXPORTED_a_args_d" +#define GLSL_a_args_d_raw _EXPORTED_a_args_d +#define GLSL_a_imageRectVertex "_EXPORTED_a_imageRectVertex" +#define GLSL_a_imageRectVertex_raw _EXPORTED_a_imageRectVertex +#define GLSL_a_joinTan_and_ys "_EXPORTED_a_joinTan_and_ys" +#define GLSL_a_joinTan_and_ys_raw _EXPORTED_a_joinTan_and_ys +#define GLSL_a_mirroredVertexData "_EXPORTED_a_mirroredVertexData" +#define GLSL_a_mirroredVertexData_raw _EXPORTED_a_mirroredVertexData +#define GLSL_a_p0p1_ "_EXPORTED_a_p0p1_" +#define GLSL_a_p0p1__raw _EXPORTED_a_p0p1_ +#define GLSL_a_p2p3_ "_EXPORTED_a_p2p3_" +#define GLSL_a_p2p3__raw _EXPORTED_a_p2p3_ +#define GLSL_a_patchVertexData "_EXPORTED_a_patchVertexData" +#define GLSL_a_patchVertexData_raw _EXPORTED_a_patchVertexData +#define GLSL_a_position "_EXPORTED_a_position" +#define GLSL_a_position_raw _EXPORTED_a_position +#define GLSL_a_span "_EXPORTED_a_span" +#define GLSL_a_span_raw _EXPORTED_a_span +#define GLSL_a_span_a "_EXPORTED_a_span_a" +#define GLSL_a_span_a_raw _EXPORTED_a_span_a +#define GLSL_a_span_b "_EXPORTED_a_span_b" +#define GLSL_a_span_b_raw _EXPORTED_a_span_b +#define GLSL_a_span_c "_EXPORTED_a_span_c" +#define GLSL_a_span_c_raw _EXPORTED_a_span_c +#define GLSL_a_span_d "_EXPORTED_a_span_d" +#define GLSL_a_span_d_raw _EXPORTED_a_span_d +#define GLSL_a_texCoord "_EXPORTED_a_texCoord" +#define GLSL_a_texCoord_raw _EXPORTED_a_texCoord +#define GLSL_a_triangleVertex "_EXPORTED_a_triangleVertex" +#define GLSL_a_triangleVertex_raw _EXPORTED_a_triangleVertex +#define GLSL_blitFragmentMain "_EXPORTED_blitFragmentMain" +#define GLSL_blitFragmentMain_raw _EXPORTED_blitFragmentMain +#define GLSL_blitTextureSource "_EXPORTED_blitTextureSource" +#define GLSL_blitTextureSource_raw _EXPORTED_blitTextureSource +#define GLSL_blitVertexMain "_EXPORTED_blitVertexMain" +#define GLSL_blitVertexMain_raw _EXPORTED_blitVertexMain +#define GLSL_clearColor "_EXPORTED_clearColor" +#define GLSL_clearColor_raw _EXPORTED_clearColor +#define GLSL_colorRampFragmentMain "_EXPORTED_colorRampFragmentMain" +#define GLSL_colorRampFragmentMain_raw _EXPORTED_colorRampFragmentMain +#define GLSL_colorRampVertexMain "_EXPORTED_colorRampVertexMain" +#define 
GLSL_colorRampVertexMain_raw _EXPORTED_colorRampVertexMain +#define GLSL_contourBuffer "_EXPORTED_contourBuffer" +#define GLSL_contourBuffer_raw _EXPORTED_contourBuffer +#define GLSL_drawFragmentMain "_EXPORTED_drawFragmentMain" +#define GLSL_drawFragmentMain_raw _EXPORTED_drawFragmentMain +#define GLSL_drawVertexMain "_EXPORTED_drawVertexMain" +#define GLSL_drawVertexMain_raw _EXPORTED_drawVertexMain +#define GLSL_dstColorTexture "_EXPORTED_dstColorTexture" +#define GLSL_dstColorTexture_raw _EXPORTED_dstColorTexture +#define GLSL_gradTexture "_EXPORTED_gradTexture" +#define GLSL_gradTexture_raw _EXPORTED_gradTexture +#define GLSL_imageTexture "_EXPORTED_imageTexture" +#define GLSL_imageTexture_raw _EXPORTED_imageTexture +#define GLSL_paintAuxBuffer "_EXPORTED_paintAuxBuffer" +#define GLSL_paintAuxBuffer_raw _EXPORTED_paintAuxBuffer +#define GLSL_paintBuffer "_EXPORTED_paintBuffer" +#define GLSL_paintBuffer_raw _EXPORTED_paintBuffer +#define GLSL_pathBuffer "_EXPORTED_pathBuffer" +#define GLSL_pathBuffer_raw _EXPORTED_pathBuffer +#define GLSL_stencilVertexMain "_EXPORTED_stencilVertexMain" +#define GLSL_stencilVertexMain_raw _EXPORTED_stencilVertexMain +#define GLSL_tessVertexTexture "_EXPORTED_tessVertexTexture" +#define GLSL_tessVertexTexture_raw _EXPORTED_tessVertexTexture +#define GLSL_tessellateFragmentMain "_EXPORTED_tessellateFragmentMain" +#define GLSL_tessellateFragmentMain_raw _EXPORTED_tessellateFragmentMain +#define GLSL_tessellateVertexMain "_EXPORTED_tessellateVertexMain" +#define GLSL_tessellateVertexMain_raw _EXPORTED_tessellateVertexMain diff --git a/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/hlsl.glsl.hpp b/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/hlsl.glsl.hpp new file mode 100644 index 00000000..deea2321 --- /dev/null +++ b/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/hlsl.glsl.hpp @@ -0,0 +1,396 @@ +#pragma once + +#include "hlsl.exports.h" + +namespace rive { +namespace gpu { +namespace glsl { +const char hlsl[] = R"===(/* + * Copyright 2023 Rive + */ + +// This header provides GLSL-specific #defines and declarations that enable our shaders to be +// compiled on MSL and GLSL both. + +// HLSL warns that it will unroll the loops through r,g,b values in advanced_blend.glsl, but +// unrolling these loops is exactly what we want. +#pragma warning(disable : 3550) + +// Don't warn about uninitialized variables. If we leave one uninitialized it's because we know what +// we're doing and don't want to pay the cost of initializing it. +#pragma warning(disable : 4000) + +// #define native hlsl types if their names are being rewritten. 
+#define _ARE_TOKEN_NAMES_PRESERVED
+#ifndef _ARE_TOKEN_NAMES_PRESERVED
+#define half half
+#define half2 half2
+#define half3 half3
+#define half4 half4
+#define short short
+#define short2 short2
+#define short3 short3
+#define short4 short4
+#define ushort ushort
+#define ushort2 ushort2
+#define ushort3 ushort3
+#define ushort4 ushort4
+#define float2 float2
+#define float3 float3
+#define float4 float4
+#define bool2 bool2
+#define bool3 bool3
+#define bool4 bool4
+#define uint2 uint2
+#define uint3 uint3
+#define uint4 uint4
+#define int2 int2
+#define int3 int3
+#define int4 int4
+#define float4x2 float4x2
+#define ushort ushort
+#define float2x2 float2x2
+#define half3x4 half3x4
+#endif
+
+typedef float3 packed_float3;
+
+#ifdef _EXPORTED_ENABLE_MIN_16_PRECISION
+
+typedef min16int short;
+
+typedef min16uint ushort;
+
+#else
+
+typedef int short;
+
+typedef uint ushort;
+
+#endif
+
+#define INLINE inline
+#define OUT(ARG_TYPE) out ARG_TYPE
+
+#define ATTR_BLOCK_BEGIN(NAME) \
+    struct NAME \
+    {
+#define ATTR(IDX, TYPE, NAME) TYPE NAME : NAME
+#define ATTR_BLOCK_END \
+    } \
+    ;
+#define ATTR_LOAD(T, A, N, I)
+#define ATTR_UNPACK(ID, attrs, NAME, TYPE) TYPE NAME = attrs.NAME
+
+#define UNIFORM_BUFFER_REGISTER(IDX) register(b##IDX)
+
+#define UNIFORM_BLOCK_BEGIN(IDX, NAME) \
+    cbuffer NAME : UNIFORM_BUFFER_REGISTER(IDX) \
+    { \
+        struct \
+        {
+
+#define UNIFORM_BLOCK_END(NAME) \
+    } \
+    NAME; \
+    }
+
+#define VARYING_BLOCK_BEGIN \
+    struct Varyings \
+    {
+
+#define NO_PERSPECTIVE noperspective
+#define _EXPORTED_OPTIONALLY_FLAT nointerpolation
+#define FLAT nointerpolation
+#define VARYING(IDX, TYPE, NAME) TYPE NAME : TEXCOORD##IDX
+
+#define VARYING_BLOCK_END \
+    float4 _pos : SV_Position; \
+    } \
+    ;
+
+#define VARYING_INIT(NAME, TYPE) TYPE NAME
+#define VARYING_PACK(NAME) _varyings.NAME = NAME
+#define VARYING_UNPACK(NAME, TYPE) TYPE NAME = _varyings.NAME
+
+#ifdef _EXPORTED_VERTEX
+#define VERTEX_TEXTURE_BLOCK_BEGIN
+#define VERTEX_TEXTURE_BLOCK_END
+#endif
+
+#ifdef _EXPORTED_FRAGMENT
+#define FRAG_TEXTURE_BLOCK_BEGIN
+#define FRAG_TEXTURE_BLOCK_END
+#endif
+
+#define TEXTURE_RGBA32UI(SET, IDX, NAME) uniform Texture2D<uint4> NAME : register(t##IDX)
+#define TEXTURE_RGBA32F(SET, IDX, NAME) uniform Texture2D<float4> NAME : register(t##IDX)
+#define TEXTURE_RGBA8(SET, IDX, NAME) uniform Texture2D<unorm float4> NAME : register(t##IDX)
+
+// SAMPLER_LINEAR and SAMPLER_MIPMAP are the same because in d3d11, sampler parameters are defined
+// at the API level.
+#define SAMPLER(TEXTURE_IDX, NAME) SamplerState NAME : register(s##TEXTURE_IDX);
+#define SAMPLER_LINEAR SAMPLER
+#define SAMPLER_MIPMAP SAMPLER
+
+#define TEXEL_FETCH(NAME, COORD) NAME[COORD]
+#define TEXTURE_SAMPLE(NAME, SAMPLER_NAME, COORD) NAME.Sample(SAMPLER_NAME, COORD)
+#define TEXTURE_SAMPLE_LOD(NAME, SAMPLER_NAME, COORD, LOD) \
+    NAME.SampleLevel(SAMPLER_NAME, COORD, LOD)
+#define TEXTURE_SAMPLE_GRAD(NAME, SAMPLER_NAME, COORD, DDX, DDY) \
+    NAME.SampleGrad(SAMPLER_NAME, COORD, DDX, DDY)
+
+#define PLS_INTERLOCK_BEGIN
+#define PLS_INTERLOCK_END
+
+#ifdef _EXPORTED_ENABLE_RASTERIZER_ORDERED_VIEWS
+#define PLS_TEX2D RasterizerOrderedTexture2D
+#else
+#define PLS_TEX2D RWTexture2D
+#endif
+
+#define PLS_BLOCK_BEGIN
+#ifdef _EXPORTED_ENABLE_TYPED_UAV_LOAD_STORE
+#define PLS_DECL4F(IDX, NAME) uniform PLS_TEX2D<unorm half4> NAME : register(u##IDX)
+#else
+#define PLS_DECL4F(IDX, NAME) uniform PLS_TEX2D<uint> NAME : register(u##IDX)
+#endif
+#define PLS_DECLUI(IDX, NAME) uniform PLS_TEX2D<uint> NAME : register(u##IDX)
+#define PLS_DECLUI_ATOMIC PLS_DECLUI
+#define PLS_LOADUI_ATOMIC PLS_LOADUI
+#define PLS_STOREUI_ATOMIC PLS_STOREUI
+#define PLS_BLOCK_END
+
+#ifdef _EXPORTED_ENABLE_TYPED_UAV_LOAD_STORE
+#define PLS_LOAD4F(PLANE) PLANE[_plsCoord]
+#else
+#define PLS_LOAD4F(PLANE) unpackUnorm4x8(PLANE[_plsCoord])
+#endif
+#define PLS_LOADUI(PLANE) PLANE[_plsCoord]
+#ifdef _EXPORTED_ENABLE_TYPED_UAV_LOAD_STORE
+#define PLS_STORE4F(PLANE, VALUE) PLANE[_plsCoord] = (VALUE)
+#else
+#define PLS_STORE4F(PLANE, VALUE) PLANE[_plsCoord] = packUnorm4x8(VALUE)
+#endif
+#define PLS_STOREUI(PLANE, VALUE) PLANE[_plsCoord] = (VALUE)
+
+INLINE uint pls_atomic_max(PLS_TEX2D<uint> plane, int2 _plsCoord, uint x)
+{
+    uint originalValue;
+    InterlockedMax(plane[_plsCoord], x, originalValue);
+    return originalValue;
+}
+
+#define PLS_ATOMIC_MAX(PLANE, X) pls_atomic_max(PLANE, _plsCoord, X)
+
+INLINE uint pls_atomic_add(PLS_TEX2D<uint> plane, int2 _plsCoord, uint x)
+{
+    uint originalValue;
+    InterlockedAdd(plane[_plsCoord], x, originalValue);
+    return originalValue;
+}
+
+#define PLS_ATOMIC_ADD(PLANE, X) pls_atomic_add(PLANE, _plsCoord, X)
+
+#define PLS_PRESERVE_4F(PLANE)
+#define PLS_PRESERVE_UI(PLANE)
+
+#define VERTEX_CONTEXT_DECL
+#define VERTEX_CONTEXT_UNPACK
+
+#define VERTEX_MAIN(NAME, Attrs, attrs, _vertexID, _instanceID) \
+    cbuffer DrawUniforms : UNIFORM_BUFFER_REGISTER(PATH_BASE_INSTANCE_UNIFORM_BUFFER_IDX) \
+    { \
+        uint baseInstance; \
+        uint NAME##_pad0; \
+        uint NAME##_pad1; \
+        uint NAME##_pad2; \
+    } \
+    Varyings NAME(Attrs attrs, uint _vertexID \
+                  : SV_VertexID, uint _instanceIDWithoutBase \
+                  : SV_InstanceID) \
+    { \
+        uint _instanceID = _instanceIDWithoutBase + baseInstance; \
+        Varyings _varyings;
+
+#define IMAGE_RECT_VERTEX_MAIN(NAME, Attrs, attrs, _vertexID, _instanceID) \
+    Varyings NAME(Attrs attrs, uint _vertexID : SV_VertexID) \
+    { \
+        Varyings _varyings; \
+        float4 _pos;
+
+#define IMAGE_MESH_VERTEX_MAIN(NAME, PositionAttr, position, UVAttr, uv, _vertexID) \
+    Varyings NAME(PositionAttr position, UVAttr uv, uint _vertexID : SV_VertexID) \
+    { \
+        Varyings _varyings; \
+        float4 _pos;
+
+#define EMIT_VERTEX(POSITION) \
+    _varyings._pos = POSITION; \
+    } \
+    return _varyings;
+
+#define FRAG_DATA_MAIN(DATA_TYPE, NAME) \
+    DATA_TYPE NAME(Varyings _varyings) : SV_Target \
+    {
+
+#define EMIT_FRAG_DATA(VALUE) \
+    return VALUE; \
+    }
+
+#define FRAGMENT_CONTEXT_DECL , float2 _fragCoord
+#define FRAGMENT_CONTEXT_UNPACK , _fragCoord
+
+#define PLS_CONTEXT_DECL , int2 _plsCoord
+#define PLS_CONTEXT_UNPACK , _plsCoord
+
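The pls_atomic_max and pls_atomic_add wrappers above exist because HLSL's InterlockedMax and InterlockedAdd hand back the pre-operation value through an out parameter, while the PLS_ATOMIC_MAX / PLS_ATOMIC_ADD call sites expect GLSL's imageAtomic convention of getting the original value as the return value. Below is a minimal C++ sketch of that same contract, written as a compare-and-swap loop because std::atomic has no fetch_max; all names here are illustrative, not part of the plugin.

#include <atomic>
#include <cassert>
#include <cstdint>

// Stand-in for one texel of an r32ui PLS plane. Like InterlockedMax, this
// combines atomically and returns the value stored *before* the operation.
static uint32_t fetch_max(std::atomic<uint32_t>& texel, uint32_t x)
{
    uint32_t prev = texel.load(std::memory_order_relaxed);
    // Retry until we install max(prev, x) or observe a value already >= x.
    while (prev < x && !texel.compare_exchange_weak(prev, x, std::memory_order_relaxed))
    {
    }
    return prev; // the original value, matching InterlockedMax's out parameter
}

int main()
{
    std::atomic<uint32_t> coverage{7};
    assert(fetch_max(coverage, 9) == 7 && coverage.load() == 9); // 9 wins, 7 reported
    assert(fetch_max(coverage, 3) == 9 && coverage.load() == 9); // losing write is a no-op
    return 0;
}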
+#define PLS_MAIN(NAME) [earlydepthstencil] void NAME(Varyings _varyings) { \
+    float2 _fragCoord = _varyings._pos.xy; \
+    int2 _plsCoord = int2(floor(_fragCoord));
+
+#define PLS_MAIN_WITH_IMAGE_UNIFORMS(NAME) PLS_MAIN(NAME)
+
+#define EMIT_PLS }
+
+#define PLS_FRAG_COLOR_MAIN(NAME) \
+    [earlydepthstencil] half4 NAME(Varyings _varyings) : SV_Target \
+    { \
+        float2 _fragCoord = _varyings._pos.xy; \
+        int2 _plsCoord = int2(floor(_fragCoord)); \
+        half4 _fragColor;
+
+#define PLS_FRAG_COLOR_MAIN_WITH_IMAGE_UNIFORMS(NAME) PLS_FRAG_COLOR_MAIN(NAME)
+
+#define EMIT_PLS_AND_FRAG_COLOR \
+    } \
+    return _fragColor;
+
+#define uintBitsToFloat asfloat
+#define intBitsToFloat asfloat
+#define floatBitsToInt asint
+#define floatBitsToUint asuint
+#define inversesqrt rsqrt
+#define notEqual(A, B) ((A) != (B))
+#define lessThanEqual(A, B) ((A) <= (B))
+#define greaterThanEqual(A, B) ((A) >= (B))
+
+// HLSL matrices are stored in row-major order, and therefore transposed from their counterparts
+// in GLSL and Metal. We can work around this entirely by reversing the arguments to mul().
+#define MUL(A, B) mul(B, A)
+
+#define VERTEX_STORAGE_BUFFER_BLOCK_BEGIN
+#define VERTEX_STORAGE_BUFFER_BLOCK_END
+
+#define FRAG_STORAGE_BUFFER_BLOCK_BEGIN
+#define FRAG_STORAGE_BUFFER_BLOCK_END
+
+#define STORAGE_BUFFER_U32x2(IDX, GLSL_STRUCT_NAME, NAME) \
+    StructuredBuffer<uint2> NAME : register(t##IDX)
+#define STORAGE_BUFFER_U32x4(IDX, GLSL_STRUCT_NAME, NAME) \
+    StructuredBuffer<uint4> NAME : register(t##IDX)
+#define STORAGE_BUFFER_F32x4(IDX, GLSL_STRUCT_NAME, NAME) \
+    StructuredBuffer<float4> NAME : register(t##IDX)
+
+#define STORAGE_BUFFER_LOAD4(NAME, I) NAME[I]
+#define STORAGE_BUFFER_LOAD2(NAME, I) NAME[I]
+
+INLINE half2 unpackHalf2x16(uint u)
+{
+    uint y = (u >> 16);
+    uint x = u & 0xffffu;
+    return half2(f16tof32(x), f16tof32(y));
+}
+
+INLINE uint packHalf2x16(float2 v)
+{
+    uint x = f32tof16(v.x);
+    uint y = f32tof16(v.y);
+    return (y << 16) | x;
+}
+
+INLINE half4 unpackUnorm4x8(uint u)
+{
+    uint4 vals = uint4(u & 0xffu, (u >> 8) & 0xffu, (u >> 16) & 0xffu, u >> 24);
+    return half4(vals) * (1. / 255.);
+}
+
+INLINE uint packUnorm4x8(half4 color)
+{
+    uint4 vals = (uint4(color * 255.) & 0xff) << uint4(0, 8, 16, 24);
+    vals.xy |= vals.zw;
+    vals.x |= vals.y;
+    return vals.x;
+}
+
+INLINE float atan(float y, float x) { return atan2(y, x); }
+
+INLINE float2x2 inverse(float2x2 m)
+{
+    float2x2 adjoint = float2x2(m[1][1], -m[0][1], -m[1][0], m[0][0]);
+    return adjoint * (1.
/ determinant(m)); +} + +// Redirects for intrinsics that have different names in HLSL + +INLINE float mix(float x, float y, float s) { return lerp(x, y, s); } +INLINE float2 mix(float2 x, float2 y, float2 s) { return lerp(x, y, s); } +INLINE float3 mix(float3 x, float3 y, float3 s) { return lerp(x, y, s); } +INLINE float4 mix(float4 x, float4 y, float4 s) { return lerp(x, y, s); } + +INLINE half mix(half x, half y, half s) { return x + s * (y - x); } +INLINE half2 mix(half2 x, half2 y, half2 s) { return x + s * (y - x); } +INLINE half3 mix(half3 x, half3 y, half3 s) { return x + s * (y - x); } +INLINE half4 mix(half4 x, half4 y, half4 s) { return x + s * (y - x); } + +INLINE float fract(float x) { return frac(x); } +INLINE float2 fract(float2 x) { return frac(x); } +INLINE float3 fract(float3 x) { return frac(x); } +INLINE float4 fract(float4 x) { return frac(x); } + +INLINE half fract(half x) { return frac(x); } +INLINE half2 fract(half2 x) { return half2(frac(x)); } +INLINE half3 fract(half3 x) { return half3(frac(x)); } +INLINE half4 fract(half4 x) { return half4(frac(x)); } + +// Reimplement intrinsics for half types. +// This shadows the intrinsic function for floats, so we also have to declare that overload. + +INLINE half rive_sign(half x) { return sign(x); } +INLINE half2 rive_sign(half2 x) { return half2(sign(x)); } +INLINE half3 rive_sign(half3 x) { return half3(sign(x)); } +INLINE half4 rive_sign(half4 x) { return half4(sign(x)); } + +INLINE float rive_sign(float x) { return sign(x); } +INLINE float2 rive_sign(float2 x) { return sign(x); } +INLINE float3 rive_sign(float3 x) { return sign(x); } +INLINE float4 rive_sign(float4 x) { return sign(x); } + +#define sign rive_sign + +INLINE half rive_abs(half x) { return abs(x); } +INLINE half2 rive_abs(half2 x) { return half2(abs(x)); } +INLINE half3 rive_abs(half3 x) { return half3(abs(x)); } +INLINE half4 rive_abs(half4 x) { return half4(abs(x)); } + +INLINE float rive_abs(float x) { return abs(x); } +INLINE float2 rive_abs(float2 x) { return abs(x); } +INLINE float3 rive_abs(float3 x) { return abs(x); } +INLINE float4 rive_abs(float4 x) { return abs(x); } + +#define abs rive_abs + +INLINE half rive_sqrt(half x) { return sqrt(x); } +INLINE half2 rive_sqrt(half2 x) { return half2(sqrt(x)); } +INLINE half3 rive_sqrt(half3 x) { return half3(sqrt(x)); } +INLINE half4 rive_sqrt(half4 x) { return half4(sqrt(x)); } + +INLINE float rive_sqrt(float x) { return sqrt(x); } +INLINE float2 rive_sqrt(float2 x) { return sqrt(x); } +INLINE float3 rive_sqrt(float3 x) { return sqrt(x); } +INLINE float4 rive_sqrt(float4 x) { return sqrt(x); } + +#define sqrt rive_sqrt +)==="; +} // namespace glsl +} // namespace gpu +} // namespace rive \ No newline at end of file diff --git a/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/hlsl.minified.ush b/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/hlsl.minified.ush new file mode 100644 index 00000000..5d1a97cb --- /dev/null +++ b/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/hlsl.minified.ush @@ -0,0 +1,385 @@ +/* + * Copyright 2023 Rive + */ + +// This header provides GLSL-specific #defines and declarations that enable our shaders to be +// compiled on MSL and GLSL both. + +// HLSL warns that it will unroll the loops through r,g,b values in advanced_blend.glsl, but +// unrolling these loops is exactly what we want. +#pragma warning(disable : 3550) + +// Don't warn about uninitialized variables. 
If we leave one uninitialized it's because we know what
+// we're doing and don't want to pay the cost of initializing it.
+#pragma warning(disable : 4000)
+
+// #define native hlsl types if their names are being rewritten.
+#define _ARE_TOKEN_NAMES_PRESERVED
+#ifndef _ARE_TOKEN_NAMES_PRESERVED
+#define half half
+#define half2 half2
+#define half3 half3
+#define half4 half4
+#define short short
+#define short2 short2
+#define short3 short3
+#define short4 short4
+#define ushort ushort
+#define ushort2 ushort2
+#define ushort3 ushort3
+#define ushort4 ushort4
+#define float2 float2
+#define float3 float3
+#define float4 float4
+#define bool2 bool2
+#define bool3 bool3
+#define bool4 bool4
+#define uint2 uint2
+#define uint3 uint3
+#define uint4 uint4
+#define int2 int2
+#define int3 int3
+#define int4 int4
+#define float4x2 float4x2
+#define ushort ushort
+#define float2x2 float2x2
+#define half3x4 half3x4
+#endif
+
+typedef float3 packed_float3;
+
+#ifdef ENABLE_MIN_16_PRECISION
+
+typedef min16int short;
+
+typedef min16uint ushort;
+
+#else
+
+typedef int short;
+
+typedef uint ushort;
+
+#endif
+
+#define INLINE inline
+#define OUT(ARG_TYPE) out ARG_TYPE
+
+#define ATTR_BLOCK_BEGIN(NAME) \
+    struct NAME \
+    {
+#define ATTR(IDX, TYPE, NAME) TYPE NAME : NAME
+#define ATTR_BLOCK_END \
+    } \
+    ;
+#define ATTR_LOAD(T, A, N, I)
+#define ATTR_UNPACK(ID, attrs, NAME, TYPE) TYPE NAME = attrs.NAME
+
+#define UNIFORM_BUFFER_REGISTER(IDX) register(b##IDX)
+
+#define UNIFORM_BLOCK_BEGIN(IDX, NAME) \
+    cbuffer NAME : UNIFORM_BUFFER_REGISTER(IDX) \
+    { \
+        struct \
+        {
+
+#define UNIFORM_BLOCK_END(NAME) \
+    } \
+    NAME; \
+    }
+
+#define VARYING_BLOCK_BEGIN \
+    struct Varyings \
+    {
+
+#define NO_PERSPECTIVE noperspective
+#define OPTIONALLY_FLAT nointerpolation
+#define FLAT nointerpolation
+#define VARYING(IDX, TYPE, NAME) TYPE NAME : TEXCOORD##IDX
+
+#define VARYING_BLOCK_END \
+    float4 _pos : SV_Position; \
+    } \
+    ;
+
+#define VARYING_INIT(NAME, TYPE) TYPE NAME
+#define VARYING_PACK(NAME) _varyings.NAME = NAME
+#define VARYING_UNPACK(NAME, TYPE) TYPE NAME = _varyings.NAME
+
+#ifdef VERTEX
+#define VERTEX_TEXTURE_BLOCK_BEGIN
+#define VERTEX_TEXTURE_BLOCK_END
+#endif
+
+#ifdef FRAGMENT
+#define FRAG_TEXTURE_BLOCK_BEGIN
+#define FRAG_TEXTURE_BLOCK_END
+#endif
+
+#define TEXTURE_RGBA32UI(SET, IDX, NAME) uniform Texture2D<uint4> NAME : register(t##IDX)
+#define TEXTURE_RGBA32F(SET, IDX, NAME) uniform Texture2D<float4> NAME : register(t##IDX)
+#define TEXTURE_RGBA8(SET, IDX, NAME) uniform Texture2D<unorm float4> NAME : register(t##IDX)
+
+// SAMPLER_LINEAR and SAMPLER_MIPMAP are the same because in d3d11, sampler parameters are defined
+// at the API level.
+#define SAMPLER(TEXTURE_IDX, NAME) SamplerState NAME : register(s##TEXTURE_IDX);
+#define SAMPLER_LINEAR SAMPLER
+#define SAMPLER_MIPMAP SAMPLER
+
+#define TEXEL_FETCH(NAME, COORD) NAME[COORD]
+#define TEXTURE_SAMPLE(NAME, SAMPLER_NAME, COORD) NAME.Sample(SAMPLER_NAME, COORD)
+#define TEXTURE_SAMPLE_LOD(NAME, SAMPLER_NAME, COORD, LOD) \
+    NAME.SampleLevel(SAMPLER_NAME, COORD, LOD)
+#define TEXTURE_SAMPLE_GRAD(NAME, SAMPLER_NAME, COORD, DDX, DDY) \
+    NAME.SampleGrad(SAMPLER_NAME, COORD, DDX, DDY)
+
+#define PLS_INTERLOCK_BEGIN
+#define PLS_INTERLOCK_END
+
+#ifdef ENABLE_RASTERIZER_ORDERED_VIEWS
+#define PLS_TEX2D RasterizerOrderedTexture2D
+#else
+#define PLS_TEX2D RWTexture2D
+#endif
+
+#define PLS_BLOCK_BEGIN
+#ifdef ENABLE_TYPED_UAV_LOAD_STORE
+#define PLS_DECL4F(IDX, NAME) uniform PLS_TEX2D<unorm half4> NAME : register(u##IDX)
+#else
+#define PLS_DECL4F(IDX, NAME) uniform PLS_TEX2D<uint> NAME : register(u##IDX)
+#endif
+#define PLS_DECLUI(IDX, NAME) uniform PLS_TEX2D<uint> NAME : register(u##IDX)
+#define PLS_DECLUI_ATOMIC PLS_DECLUI
+#define PLS_LOADUI_ATOMIC PLS_LOADUI
+#define PLS_STOREUI_ATOMIC PLS_STOREUI
+#define PLS_BLOCK_END
+
+#ifdef ENABLE_TYPED_UAV_LOAD_STORE
+#define PLS_LOAD4F(PLANE) PLANE[_plsCoord]
+#else
+#define PLS_LOAD4F(PLANE) unpackUnorm4x8(PLANE[_plsCoord])
+#endif
+#define PLS_LOADUI(PLANE) PLANE[_plsCoord]
+#ifdef ENABLE_TYPED_UAV_LOAD_STORE
+#define PLS_STORE4F(PLANE, VALUE) PLANE[_plsCoord] = (VALUE)
+#else
+#define PLS_STORE4F(PLANE, VALUE) PLANE[_plsCoord] = packUnorm4x8(VALUE)
+#endif
+#define PLS_STOREUI(PLANE, VALUE) PLANE[_plsCoord] = (VALUE)
+
+INLINE uint pls_atomic_max(PLS_TEX2D<uint> plane, int2 _plsCoord, uint x)
+{
+    uint originalValue;
+    InterlockedMax(plane[_plsCoord], x, originalValue);
+    return originalValue;
+}
+
+#define PLS_ATOMIC_MAX(PLANE, X) pls_atomic_max(PLANE, _plsCoord, X)
+
+INLINE uint pls_atomic_add(PLS_TEX2D<uint> plane, int2 _plsCoord, uint x)
+{
+    uint originalValue;
+    InterlockedAdd(plane[_plsCoord], x, originalValue);
+    return originalValue;
+}
+
+#define PLS_ATOMIC_ADD(PLANE, X) pls_atomic_add(PLANE, _plsCoord, X)
+
+#define PLS_PRESERVE_4F(PLANE)
+#define PLS_PRESERVE_UI(PLANE)
+
+#define VERTEX_CONTEXT_DECL
+#define VERTEX_CONTEXT_UNPACK
+
+#define VERTEX_MAIN(NAME, Attrs, attrs, _vertexID, _instanceID) \
+    cbuffer DrawUniforms : UNIFORM_BUFFER_REGISTER(PATH_BASE_INSTANCE_UNIFORM_BUFFER_IDX) \
+    { \
+        uint baseInstance; \
+        uint NAME##_pad0; \
+        uint NAME##_pad1; \
+        uint NAME##_pad2; \
+    } \
+    Varyings NAME(Attrs attrs, uint _vertexID \
+                  : SV_VertexID, uint _instanceIDWithoutBase \
+                  : SV_InstanceID) \
+    { \
+        uint _instanceID = _instanceIDWithoutBase + baseInstance; \
+        Varyings _varyings;
+
+#define IMAGE_RECT_VERTEX_MAIN(NAME, Attrs, attrs, _vertexID, _instanceID) \
+    Varyings NAME(Attrs attrs, uint _vertexID : SV_VertexID) \
+    { \
+        Varyings _varyings; \
+        float4 _pos;
+
+#define IMAGE_MESH_VERTEX_MAIN(NAME, PositionAttr, position, UVAttr, uv, _vertexID) \
+    Varyings NAME(PositionAttr position, UVAttr uv, uint _vertexID : SV_VertexID) \
+    { \
+        Varyings _varyings; \
+        float4 _pos;
+
+#define EMIT_VERTEX(POSITION) \
+    _varyings._pos = POSITION; \
+    } \
+    return _varyings;
+
+#define FRAG_DATA_MAIN(DATA_TYPE, NAME) \
+    DATA_TYPE NAME(Varyings _varyings) : SV_Target \
+    {
+
+#define EMIT_FRAG_DATA(VALUE) \
+    return VALUE; \
+    }
+
+#define FRAGMENT_CONTEXT_DECL , float2 _fragCoord
+#define FRAGMENT_CONTEXT_UNPACK , _fragCoord
+
+#define PLS_CONTEXT_DECL , int2 _plsCoord
+#define PLS_CONTEXT_UNPACK , _plsCoord
+
+#define PLS_MAIN(NAME) [earlydepthstencil] void NAME(Varyings _varyings) { \
+    float2 _fragCoord = _varyings._pos.xy; \
+    int2 _plsCoord = int2(floor(_fragCoord));
+
+#define PLS_MAIN_WITH_IMAGE_UNIFORMS(NAME) PLS_MAIN(NAME)
+
+#define EMIT_PLS }
+
+#define PLS_FRAG_COLOR_MAIN(NAME) \
+    [earlydepthstencil] half4 NAME(Varyings _varyings) : SV_Target \
+    { \
+        float2 _fragCoord = _varyings._pos.xy; \
+        int2 _plsCoord = int2(floor(_fragCoord)); \
+        half4 _fragColor;
+
+#define PLS_FRAG_COLOR_MAIN_WITH_IMAGE_UNIFORMS(NAME) PLS_FRAG_COLOR_MAIN(NAME)
+
+#define EMIT_PLS_AND_FRAG_COLOR \
+    } \
+    return _fragColor;
+
+#define uintBitsToFloat asfloat
+#define intBitsToFloat asfloat
+#define floatBitsToInt asint
+#define floatBitsToUint asuint
+#define inversesqrt rsqrt
+#define notEqual(A, B) ((A) != (B))
+#define lessThanEqual(A, B) ((A) <= (B))
+#define greaterThanEqual(A, B) ((A) >= (B))
+
+// HLSL matrices are stored in row-major order, and therefore transposed from their counterparts
+// in GLSL and Metal. We can work around this entirely by reversing the arguments to mul().
+#define MUL(A, B) mul(B, A)
+
+#define VERTEX_STORAGE_BUFFER_BLOCK_BEGIN
+#define VERTEX_STORAGE_BUFFER_BLOCK_END
+
+#define FRAG_STORAGE_BUFFER_BLOCK_BEGIN
+#define FRAG_STORAGE_BUFFER_BLOCK_END
+
+#define STORAGE_BUFFER_U32x2(IDX, GLSL_STRUCT_NAME, NAME) \
+    StructuredBuffer<uint2> NAME : register(t##IDX)
+#define STORAGE_BUFFER_U32x4(IDX, GLSL_STRUCT_NAME, NAME) \
+    StructuredBuffer<uint4> NAME : register(t##IDX)
+#define STORAGE_BUFFER_F32x4(IDX, GLSL_STRUCT_NAME, NAME) \
+    StructuredBuffer<float4> NAME : register(t##IDX)
+
+#define STORAGE_BUFFER_LOAD4(NAME, I) NAME[I]
+#define STORAGE_BUFFER_LOAD2(NAME, I) NAME[I]
+
+INLINE half2 unpackHalf2x16(uint u)
+{
+    uint y = (u >> 16);
+    uint x = u & 0xffffu;
+    return half2(f16tof32(x), f16tof32(y));
+}
+
+INLINE uint packHalf2x16(float2 v)
+{
+    uint x = f32tof16(v.x);
+    uint y = f32tof16(v.y);
+    return (y << 16) | x;
+}
+
+INLINE half4 unpackUnorm4x8(uint u)
+{
+    uint4 vals = uint4(u & 0xffu, (u >> 8) & 0xffu, (u >> 16) & 0xffu, u >> 24);
+    return half4(vals) * (1. / 255.);
+}
+
+INLINE uint packUnorm4x8(half4 color)
+{
+    uint4 vals = (uint4(color * 255.) & 0xff) << uint4(0, 8, 16, 24);
+    vals.xy |= vals.zw;
+    vals.x |= vals.y;
+    return vals.x;
+}
+
+INLINE float atan(float y, float x) { return atan2(y, x); }
+
+INLINE float2x2 inverse(float2x2 m)
+{
+    float2x2 adjoint = float2x2(m[1][1], -m[0][1], -m[1][0], m[0][0]);
+    return adjoint * (1.
/ determinant(m)); +} + +// Redirects for intrinsics that have different names in HLSL + +INLINE float mix(float x, float y, float s) { return lerp(x, y, s); } +INLINE float2 mix(float2 x, float2 y, float2 s) { return lerp(x, y, s); } +INLINE float3 mix(float3 x, float3 y, float3 s) { return lerp(x, y, s); } +INLINE float4 mix(float4 x, float4 y, float4 s) { return lerp(x, y, s); } + +INLINE half mix(half x, half y, half s) { return x + s * (y - x); } +INLINE half2 mix(half2 x, half2 y, half2 s) { return x + s * (y - x); } +INLINE half3 mix(half3 x, half3 y, half3 s) { return x + s * (y - x); } +INLINE half4 mix(half4 x, half4 y, half4 s) { return x + s * (y - x); } + +INLINE float fract(float x) { return frac(x); } +INLINE float2 fract(float2 x) { return frac(x); } +INLINE float3 fract(float3 x) { return frac(x); } +INLINE float4 fract(float4 x) { return frac(x); } + +INLINE half fract(half x) { return frac(x); } +INLINE half2 fract(half2 x) { return half2(frac(x)); } +INLINE half3 fract(half3 x) { return half3(frac(x)); } +INLINE half4 fract(half4 x) { return half4(frac(x)); } + +// Reimplement intrinsics for half types. +// This shadows the intrinsic function for floats, so we also have to declare that overload. + +INLINE half rive_sign(half x) { return sign(x); } +INLINE half2 rive_sign(half2 x) { return half2(sign(x)); } +INLINE half3 rive_sign(half3 x) { return half3(sign(x)); } +INLINE half4 rive_sign(half4 x) { return half4(sign(x)); } + +INLINE float rive_sign(float x) { return sign(x); } +INLINE float2 rive_sign(float2 x) { return sign(x); } +INLINE float3 rive_sign(float3 x) { return sign(x); } +INLINE float4 rive_sign(float4 x) { return sign(x); } + +#define sign rive_sign + +INLINE half rive_abs(half x) { return abs(x); } +INLINE half2 rive_abs(half2 x) { return half2(abs(x)); } +INLINE half3 rive_abs(half3 x) { return half3(abs(x)); } +INLINE half4 rive_abs(half4 x) { return half4(abs(x)); } + +INLINE float rive_abs(float x) { return abs(x); } +INLINE float2 rive_abs(float2 x) { return abs(x); } +INLINE float3 rive_abs(float3 x) { return abs(x); } +INLINE float4 rive_abs(float4 x) { return abs(x); } + +#define abs rive_abs + +INLINE half rive_sqrt(half x) { return sqrt(x); } +INLINE half2 rive_sqrt(half2 x) { return half2(sqrt(x)); } +INLINE half3 rive_sqrt(half3 x) { return half3(sqrt(x)); } +INLINE half4 rive_sqrt(half4 x) { return half4(sqrt(x)); } + +INLINE float rive_sqrt(float x) { return sqrt(x); } +INLINE float2 rive_sqrt(float2 x) { return sqrt(x); } +INLINE float3 rive_sqrt(float3 x) { return sqrt(x); } +INLINE float4 rive_sqrt(float4 x) { return sqrt(x); } + +#define sqrt rive_sqrt diff --git a/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/metal.exports.h b/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/metal.exports.h new file mode 100644 index 00000000..2d88d890 --- /dev/null +++ b/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/metal.exports.h @@ -0,0 +1,178 @@ +#pragma once + +#define GLSL_CLEAR_CLIP "_EXPORTED_CLEAR_CLIP" +#define GLSL_CLEAR_CLIP_raw _EXPORTED_CLEAR_CLIP +#define GLSL_CLEAR_COLOR "_EXPORTED_CLEAR_COLOR" +#define GLSL_CLEAR_COLOR_raw _EXPORTED_CLEAR_COLOR +#define GLSL_CLEAR_COVERAGE "_EXPORTED_CLEAR_COVERAGE" +#define GLSL_CLEAR_COVERAGE_raw _EXPORTED_CLEAR_COVERAGE +#define GLSL_COALESCED_PLS_RESOLVE_AND_TRANSFER "_EXPORTED_COALESCED_PLS_RESOLVE_AND_TRANSFER" +#define GLSL_COALESCED_PLS_RESOLVE_AND_TRANSFER_raw _EXPORTED_COALESCED_PLS_RESOLVE_AND_TRANSFER 
+#define GLSL_COLOR_PLANE_IDX_OVERRIDE "_EXPORTED_COLOR_PLANE_IDX_OVERRIDE" +#define GLSL_COLOR_PLANE_IDX_OVERRIDE_raw _EXPORTED_COLOR_PLANE_IDX_OVERRIDE +#define GLSL_DISABLE_SHADER_STORAGE_BUFFERS "_EXPORTED_DISABLE_SHADER_STORAGE_BUFFERS" +#define GLSL_DISABLE_SHADER_STORAGE_BUFFERS_raw _EXPORTED_DISABLE_SHADER_STORAGE_BUFFERS +#define GLSL_DRAW_IMAGE "_EXPORTED_DRAW_IMAGE" +#define GLSL_DRAW_IMAGE_raw _EXPORTED_DRAW_IMAGE +#define GLSL_DRAW_IMAGE_MESH "_EXPORTED_DRAW_IMAGE_MESH" +#define GLSL_DRAW_IMAGE_MESH_raw _EXPORTED_DRAW_IMAGE_MESH +#define GLSL_DRAW_IMAGE_RECT "_EXPORTED_DRAW_IMAGE_RECT" +#define GLSL_DRAW_IMAGE_RECT_raw _EXPORTED_DRAW_IMAGE_RECT +#define GLSL_DRAW_INTERIOR_TRIANGLES "_EXPORTED_DRAW_INTERIOR_TRIANGLES" +#define GLSL_DRAW_INTERIOR_TRIANGLES_raw _EXPORTED_DRAW_INTERIOR_TRIANGLES +#define GLSL_DRAW_PATH "_EXPORTED_DRAW_PATH" +#define GLSL_DRAW_PATH_raw _EXPORTED_DRAW_PATH +#define GLSL_DRAW_RENDER_TARGET_UPDATE_BOUNDS "_EXPORTED_DRAW_RENDER_TARGET_UPDATE_BOUNDS" +#define GLSL_DRAW_RENDER_TARGET_UPDATE_BOUNDS_raw _EXPORTED_DRAW_RENDER_TARGET_UPDATE_BOUNDS +#define GLSL_ENABLE_ADVANCED_BLEND "_EXPORTED_ENABLE_ADVANCED_BLEND" +#define GLSL_ENABLE_ADVANCED_BLEND_raw _EXPORTED_ENABLE_ADVANCED_BLEND +#define GLSL_ENABLE_BINDLESS_TEXTURES "_EXPORTED_ENABLE_BINDLESS_TEXTURES" +#define GLSL_ENABLE_BINDLESS_TEXTURES_raw _EXPORTED_ENABLE_BINDLESS_TEXTURES +#define GLSL_ENABLE_CLIPPING "_EXPORTED_ENABLE_CLIPPING" +#define GLSL_ENABLE_CLIPPING_raw _EXPORTED_ENABLE_CLIPPING +#define GLSL_ENABLE_CLIP_RECT "_EXPORTED_ENABLE_CLIP_RECT" +#define GLSL_ENABLE_CLIP_RECT_raw _EXPORTED_ENABLE_CLIP_RECT +#define GLSL_ENABLE_EVEN_ODD "_EXPORTED_ENABLE_EVEN_ODD" +#define GLSL_ENABLE_EVEN_ODD_raw _EXPORTED_ENABLE_EVEN_ODD +#define GLSL_ENABLE_HSL_BLEND_MODES "_EXPORTED_ENABLE_HSL_BLEND_MODES" +#define GLSL_ENABLE_HSL_BLEND_MODES_raw _EXPORTED_ENABLE_HSL_BLEND_MODES +#define GLSL_ENABLE_INSTANCE_INDEX "_EXPORTED_ENABLE_INSTANCE_INDEX" +#define GLSL_ENABLE_INSTANCE_INDEX_raw _EXPORTED_ENABLE_INSTANCE_INDEX +#define GLSL_ENABLE_KHR_BLEND "_EXPORTED_ENABLE_KHR_BLEND" +#define GLSL_ENABLE_KHR_BLEND_raw _EXPORTED_ENABLE_KHR_BLEND +#define GLSL_ENABLE_MIN_16_PRECISION "_EXPORTED_ENABLE_MIN_16_PRECISION" +#define GLSL_ENABLE_MIN_16_PRECISION_raw _EXPORTED_ENABLE_MIN_16_PRECISION +#define GLSL_ENABLE_NESTED_CLIPPING "_EXPORTED_ENABLE_NESTED_CLIPPING" +#define GLSL_ENABLE_NESTED_CLIPPING_raw _EXPORTED_ENABLE_NESTED_CLIPPING +#define GLSL_ENABLE_RASTERIZER_ORDERED_VIEWS "_EXPORTED_ENABLE_RASTERIZER_ORDERED_VIEWS" +#define GLSL_ENABLE_RASTERIZER_ORDERED_VIEWS_raw _EXPORTED_ENABLE_RASTERIZER_ORDERED_VIEWS +#define GLSL_ENABLE_SPIRV_CROSS_BASE_INSTANCE "_EXPORTED_ENABLE_SPIRV_CROSS_BASE_INSTANCE" +#define GLSL_ENABLE_SPIRV_CROSS_BASE_INSTANCE_raw _EXPORTED_ENABLE_SPIRV_CROSS_BASE_INSTANCE +#define GLSL_ENABLE_TYPED_UAV_LOAD_STORE "_EXPORTED_ENABLE_TYPED_UAV_LOAD_STORE" +#define GLSL_ENABLE_TYPED_UAV_LOAD_STORE_raw _EXPORTED_ENABLE_TYPED_UAV_LOAD_STORE +#define GLSL_FIXED_FUNCTION_COLOR_BLEND "_EXPORTED_FIXED_FUNCTION_COLOR_BLEND" +#define GLSL_FIXED_FUNCTION_COLOR_BLEND_raw _EXPORTED_FIXED_FUNCTION_COLOR_BLEND +#define GLSL_FRAGMENT "_EXPORTED_FRAGMENT" +#define GLSL_FRAGMENT_raw _EXPORTED_FRAGMENT +#define GLSL_FlushUniforms "_EXPORTED_FlushUniforms" +#define GLSL_FlushUniforms_raw _EXPORTED_FlushUniforms +#define GLSL_GLSL_VERSION "_EXPORTED_GLSL_VERSION" +#define GLSL_GLSL_VERSION_raw _EXPORTED_GLSL_VERSION +#define GLSL_INITIALIZE_PLS "_EXPORTED_INITIALIZE_PLS" +#define GLSL_INITIALIZE_PLS_raw 
_EXPORTED_INITIALIZE_PLS +#define GLSL_ImageDrawUniforms "_EXPORTED_ImageDrawUniforms" +#define GLSL_ImageDrawUniforms_raw _EXPORTED_ImageDrawUniforms +#define GLSL_LOAD_COLOR "_EXPORTED_LOAD_COLOR" +#define GLSL_LOAD_COLOR_raw _EXPORTED_LOAD_COLOR +#define GLSL_OPTIONALLY_FLAT "_EXPORTED_OPTIONALLY_FLAT" +#define GLSL_OPTIONALLY_FLAT_raw _EXPORTED_OPTIONALLY_FLAT +#define GLSL_PLS_IMPL_ANGLE "_EXPORTED_PLS_IMPL_ANGLE" +#define GLSL_PLS_IMPL_ANGLE_raw _EXPORTED_PLS_IMPL_ANGLE +#define GLSL_PLS_IMPL_DEVICE_BUFFER "_EXPORTED_PLS_IMPL_DEVICE_BUFFER" +#define GLSL_PLS_IMPL_DEVICE_BUFFER_raw _EXPORTED_PLS_IMPL_DEVICE_BUFFER +#define GLSL_PLS_IMPL_DEVICE_BUFFER_RASTER_ORDERED "_EXPORTED_PLS_IMPL_DEVICE_BUFFER_RASTER_ORDERED" +#define GLSL_PLS_IMPL_DEVICE_BUFFER_RASTER_ORDERED_raw _EXPORTED_PLS_IMPL_DEVICE_BUFFER_RASTER_ORDERED +#define GLSL_PLS_IMPL_EXT_NATIVE "_EXPORTED_PLS_IMPL_EXT_NATIVE" +#define GLSL_PLS_IMPL_EXT_NATIVE_raw _EXPORTED_PLS_IMPL_EXT_NATIVE +#define GLSL_PLS_IMPL_FRAMEBUFFER_FETCH "_EXPORTED_PLS_IMPL_FRAMEBUFFER_FETCH" +#define GLSL_PLS_IMPL_FRAMEBUFFER_FETCH_raw _EXPORTED_PLS_IMPL_FRAMEBUFFER_FETCH +#define GLSL_PLS_IMPL_NONE "_EXPORTED_PLS_IMPL_NONE" +#define GLSL_PLS_IMPL_NONE_raw _EXPORTED_PLS_IMPL_NONE +#define GLSL_PLS_IMPL_STORAGE_TEXTURE "_EXPORTED_PLS_IMPL_STORAGE_TEXTURE" +#define GLSL_PLS_IMPL_STORAGE_TEXTURE_raw _EXPORTED_PLS_IMPL_STORAGE_TEXTURE +#define GLSL_PLS_IMPL_SUBPASS_LOAD "_EXPORTED_PLS_IMPL_SUBPASS_LOAD" +#define GLSL_PLS_IMPL_SUBPASS_LOAD_raw _EXPORTED_PLS_IMPL_SUBPASS_LOAD +#define GLSL_RESOLVE_PLS "_EXPORTED_RESOLVE_PLS" +#define GLSL_RESOLVE_PLS_raw _EXPORTED_RESOLVE_PLS +#define GLSL_STORE_COLOR "_EXPORTED_STORE_COLOR" +#define GLSL_STORE_COLOR_raw _EXPORTED_STORE_COLOR +#define GLSL_STORE_COLOR_CLEAR "_EXPORTED_STORE_COLOR_CLEAR" +#define GLSL_STORE_COLOR_CLEAR_raw _EXPORTED_STORE_COLOR_CLEAR +#define GLSL_SWIZZLE_COLOR_BGRA_TO_RGBA "_EXPORTED_SWIZZLE_COLOR_BGRA_TO_RGBA" +#define GLSL_SWIZZLE_COLOR_BGRA_TO_RGBA_raw _EXPORTED_SWIZZLE_COLOR_BGRA_TO_RGBA +#define GLSL_TARGET_VULKAN "_EXPORTED_TARGET_VULKAN" +#define GLSL_TARGET_VULKAN_raw _EXPORTED_TARGET_VULKAN +#define GLSL_USE_GENERATED_UNIFORMS "_EXPORTED_USE_GENERATED_UNIFORMS" +#define GLSL_USE_GENERATED_UNIFORMS_raw _EXPORTED_USE_GENERATED_UNIFORMS +#define GLSL_USING_DEPTH_STENCIL "_EXPORTED_USING_DEPTH_STENCIL" +#define GLSL_USING_DEPTH_STENCIL_raw _EXPORTED_USING_DEPTH_STENCIL +#define GLSL_USING_PLS_STORAGE_TEXTURES "_EXPORTED_USING_PLS_STORAGE_TEXTURES" +#define GLSL_USING_PLS_STORAGE_TEXTURES_raw _EXPORTED_USING_PLS_STORAGE_TEXTURES +#define GLSL_VERTEX "_EXPORTED_VERTEX" +#define GLSL_VERTEX_raw _EXPORTED_VERTEX +#define GLSL_a_args "_EXPORTED_a_args" +#define GLSL_a_args_raw _EXPORTED_a_args +#define GLSL_a_args_a "_EXPORTED_a_args_a" +#define GLSL_a_args_a_raw _EXPORTED_a_args_a +#define GLSL_a_args_b "_EXPORTED_a_args_b" +#define GLSL_a_args_b_raw _EXPORTED_a_args_b +#define GLSL_a_args_c "_EXPORTED_a_args_c" +#define GLSL_a_args_c_raw _EXPORTED_a_args_c +#define GLSL_a_args_d "_EXPORTED_a_args_d" +#define GLSL_a_args_d_raw _EXPORTED_a_args_d +#define GLSL_a_imageRectVertex "_EXPORTED_a_imageRectVertex" +#define GLSL_a_imageRectVertex_raw _EXPORTED_a_imageRectVertex +#define GLSL_a_joinTan_and_ys "_EXPORTED_a_joinTan_and_ys" +#define GLSL_a_joinTan_and_ys_raw _EXPORTED_a_joinTan_and_ys +#define GLSL_a_mirroredVertexData "_EXPORTED_a_mirroredVertexData" +#define GLSL_a_mirroredVertexData_raw _EXPORTED_a_mirroredVertexData +#define GLSL_a_p0p1_ "_EXPORTED_a_p0p1_" +#define 
GLSL_a_p0p1__raw _EXPORTED_a_p0p1_ +#define GLSL_a_p2p3_ "_EXPORTED_a_p2p3_" +#define GLSL_a_p2p3__raw _EXPORTED_a_p2p3_ +#define GLSL_a_patchVertexData "_EXPORTED_a_patchVertexData" +#define GLSL_a_patchVertexData_raw _EXPORTED_a_patchVertexData +#define GLSL_a_position "_EXPORTED_a_position" +#define GLSL_a_position_raw _EXPORTED_a_position +#define GLSL_a_span "_EXPORTED_a_span" +#define GLSL_a_span_raw _EXPORTED_a_span +#define GLSL_a_span_a "_EXPORTED_a_span_a" +#define GLSL_a_span_a_raw _EXPORTED_a_span_a +#define GLSL_a_span_b "_EXPORTED_a_span_b" +#define GLSL_a_span_b_raw _EXPORTED_a_span_b +#define GLSL_a_span_c "_EXPORTED_a_span_c" +#define GLSL_a_span_c_raw _EXPORTED_a_span_c +#define GLSL_a_span_d "_EXPORTED_a_span_d" +#define GLSL_a_span_d_raw _EXPORTED_a_span_d +#define GLSL_a_texCoord "_EXPORTED_a_texCoord" +#define GLSL_a_texCoord_raw _EXPORTED_a_texCoord +#define GLSL_a_triangleVertex "_EXPORTED_a_triangleVertex" +#define GLSL_a_triangleVertex_raw _EXPORTED_a_triangleVertex +#define GLSL_blitFragmentMain "_EXPORTED_blitFragmentMain" +#define GLSL_blitFragmentMain_raw _EXPORTED_blitFragmentMain +#define GLSL_blitTextureSource "_EXPORTED_blitTextureSource" +#define GLSL_blitTextureSource_raw _EXPORTED_blitTextureSource +#define GLSL_blitVertexMain "_EXPORTED_blitVertexMain" +#define GLSL_blitVertexMain_raw _EXPORTED_blitVertexMain +#define GLSL_clearColor "_EXPORTED_clearColor" +#define GLSL_clearColor_raw _EXPORTED_clearColor +#define GLSL_colorRampFragmentMain "_EXPORTED_colorRampFragmentMain" +#define GLSL_colorRampFragmentMain_raw _EXPORTED_colorRampFragmentMain +#define GLSL_colorRampVertexMain "_EXPORTED_colorRampVertexMain" +#define GLSL_colorRampVertexMain_raw _EXPORTED_colorRampVertexMain +#define GLSL_contourBuffer "_EXPORTED_contourBuffer" +#define GLSL_contourBuffer_raw _EXPORTED_contourBuffer +#define GLSL_drawFragmentMain "_EXPORTED_drawFragmentMain" +#define GLSL_drawFragmentMain_raw _EXPORTED_drawFragmentMain +#define GLSL_drawVertexMain "_EXPORTED_drawVertexMain" +#define GLSL_drawVertexMain_raw _EXPORTED_drawVertexMain +#define GLSL_dstColorTexture "_EXPORTED_dstColorTexture" +#define GLSL_dstColorTexture_raw _EXPORTED_dstColorTexture +#define GLSL_gradTexture "_EXPORTED_gradTexture" +#define GLSL_gradTexture_raw _EXPORTED_gradTexture +#define GLSL_imageTexture "_EXPORTED_imageTexture" +#define GLSL_imageTexture_raw _EXPORTED_imageTexture +#define GLSL_paintAuxBuffer "_EXPORTED_paintAuxBuffer" +#define GLSL_paintAuxBuffer_raw _EXPORTED_paintAuxBuffer +#define GLSL_paintBuffer "_EXPORTED_paintBuffer" +#define GLSL_paintBuffer_raw _EXPORTED_paintBuffer +#define GLSL_pathBuffer "_EXPORTED_pathBuffer" +#define GLSL_pathBuffer_raw _EXPORTED_pathBuffer +#define GLSL_stencilVertexMain "_EXPORTED_stencilVertexMain" +#define GLSL_stencilVertexMain_raw _EXPORTED_stencilVertexMain +#define GLSL_tessVertexTexture "_EXPORTED_tessVertexTexture" +#define GLSL_tessVertexTexture_raw _EXPORTED_tessVertexTexture +#define GLSL_tessellateFragmentMain "_EXPORTED_tessellateFragmentMain" +#define GLSL_tessellateFragmentMain_raw _EXPORTED_tessellateFragmentMain +#define GLSL_tessellateVertexMain "_EXPORTED_tessellateVertexMain" +#define GLSL_tessellateVertexMain_raw _EXPORTED_tessellateVertexMain diff --git a/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/metal.glsl.hpp b/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/metal.glsl.hpp new file mode 100644 index 00000000..3c9d39fc --- /dev/null +++ 
b/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/metal.glsl.hpp @@ -0,0 +1,458 @@ +#pragma once + +#include "metal.exports.h" + +namespace rive { +namespace gpu { +namespace glsl { +const char metal[] = R"===(/* + * Copyright 2023 Rive + */ + +// This header provides Metal-specific #defines and declarations that enable our shaders to be +// compiled on MSL and GLSL both. + +#define METAL + +// #define native metal types if their names are being rewritten. +#define _ARE_TOKEN_NAMES_PRESERVED +#ifndef _ARE_TOKEN_NAMES_PRESERVED +#define half half +#define half2 half2 +#define half3 half3 +#define half4 half4 +#define short short +#define short2 short2 +#define short3 short3 +#define short4 short4 +#define ushort ushort +#define ushort2 ushort2 +#define ushort3 ushort3 +#define ushort4 ushort4 +#define float2 float2 +#define float3 float3 +#define packed_float3 packed_float3 +#define float4 float4 +#define bool2 bool2 +#define bool3 bool3 +#define bool4 bool4 +#define uint2 uint2 +#define uint3 uint3 +#define uint4 uint4 +#define int2 int2 +#define int3 int3 +#define int4 int4 +#define float4x2 float4x2 +#define ushort ushort +#define float2x2 float2x2 +#define half3x4 half3x4 +#endif + +#define INLINE inline +#define OUT(ARG_TYPE) thread ARG_TYPE& + +#define notEqual(A, B) ((A) != (B)) +#define lessThanEqual(A, B) ((A) <= (B)) +#define greaterThanEqual(A, B) ((A) >= (B)) +#define MUL(A, B) ((A) * (B)) +#define atan atan2 +#define inversesqrt rsqrt + +#define UNIFORM_BLOCK_BEGIN(IDX, NAME) \ + struct NAME \ + { +#define UNIFORM_BLOCK_END(NAME) \ + } \ + ; + +#define ATTR_BLOCK_BEGIN(NAME) \ + struct NAME \ + { +#define ATTR(IDX, TYPE, NAME) TYPE NAME +#define ATTR_BLOCK_END \ + } \ + ; +#define ATTR_UNPACK(ID, attrs, NAME, TYPE) TYPE NAME = attrs[ID].NAME + +#define VARYING_BLOCK_BEGIN \ + struct Varyings \ + { +#define VARYING(IDX, TYPE, NAME) TYPE NAME +#define FLAT [[flat]] +#define NO_PERSPECTIVE [[center_no_perspective]] +#ifndef _EXPORTED_OPTIONALLY_FLAT +// Don't use no-perspective interpolation for varyings that need to be flat. No-persective +// interpolation appears to break the guarantee that a varying == "x" when all barycentric values +// also == "x". Default (perspective-correct) interpolation does preserve this guarantee, and seems +// to be faster faster than flat on Apple Silicon. 
+#define _EXPORTED_OPTIONALLY_FLAT +#endif +#define VARYING_BLOCK_END \ + float4 _pos [[position]] [[invariant]]; \ + } \ + ; + +#define VARYING_INIT(NAME, TYPE) thread TYPE& NAME = _varyings.NAME +#define VARYING_PACK(NAME) +#define VARYING_UNPACK(NAME, TYPE) TYPE NAME = _varyings.NAME + +#define VERTEX_STORAGE_BUFFER_BLOCK_BEGIN \ + struct VertexStorageBuffers \ + { +#define VERTEX_STORAGE_BUFFER_BLOCK_END \ + } \ + ; + +#define FRAG_STORAGE_BUFFER_BLOCK_BEGIN \ + struct FragmentStorageBuffers \ + { +#define FRAG_STORAGE_BUFFER_BLOCK_END \ + } \ + ; + +#define STORAGE_BUFFER_U32x2(IDX, GLSL_STRUCT_NAME, NAME) constant uint2* NAME [[buffer(IDX)]] +#define STORAGE_BUFFER_U32x4(IDX, GLSL_STRUCT_NAME, NAME) constant uint4* NAME [[buffer(IDX)]] +#define STORAGE_BUFFER_F32x4(IDX, GLSL_STRUCT_NAME, NAME) constant float4* NAME [[buffer(IDX)]] +#define STORAGE_BUFFER_LOAD4(NAME, I) _buffers.NAME[I] +#define STORAGE_BUFFER_LOAD2(NAME, I) _buffers.NAME[I] + +#define VERTEX_TEXTURE_BLOCK_BEGIN \ + struct VertexTextures \ + { +#define VERTEX_TEXTURE_BLOCK_END \ + } \ + ; + +#define FRAG_TEXTURE_BLOCK_BEGIN \ + struct FragmentTextures \ + { +#define FRAG_TEXTURE_BLOCK_END \ + } \ + ; + +#define TEXTURE_RGBA32UI(SET, IDX, NAME) [[texture(IDX)]] texture2d<uint> NAME +#define TEXTURE_RGBA32F(SET, IDX, NAME) [[texture(IDX)]] texture2d<float> NAME +#define TEXTURE_RGBA8(SET, IDX, NAME) [[texture(IDX)]] texture2d<half> NAME + +#define SAMPLER_LINEAR(TEXTURE_IDX, NAME) \ + constexpr sampler NAME(filter::linear, mip_filter::none); +#define SAMPLER_MIPMAP(TEXTURE_IDX, NAME) \ + constexpr sampler NAME(filter::linear, mip_filter::linear); + +#define TEXEL_FETCH(TEXTURE, COORD) _textures.TEXTURE.read(uint2(COORD)) +#define TEXTURE_SAMPLE(TEXTURE, SAMPLER_NAME, COORD) _textures.TEXTURE.sample(SAMPLER_NAME, COORD) +#define TEXTURE_SAMPLE_LOD(TEXTURE, SAMPLER_NAME, COORD, LOD) \ + _textures.TEXTURE.sample(SAMPLER_NAME, COORD, level(LOD)) +#define TEXTURE_SAMPLE_GRAD(TEXTURE, SAMPLER_NAME, COORD, DDX, DDY) \ + _textures.TEXTURE.sample(SAMPLER_NAME, COORD, gradient2d(DDX, DDY)) + +#define VERTEX_CONTEXT_DECL , VertexTextures _textures, VertexStorageBuffers _buffers +#define VERTEX_CONTEXT_UNPACK , _textures, _buffers + +#ifdef _EXPORTED_ENABLE_INSTANCE_INDEX +#define VERTEX_MAIN(NAME, Attrs, attrs, _vertexID, _instanceID) \ + __attribute__((visibility("default"))) Varyings vertex NAME( \ + uint _vertexID [[vertex_id]], \ + uint _instanceID [[instance_id]], \ + constant uint& _baseInstance [[buffer(PATH_BASE_INSTANCE_UNIFORM_BUFFER_IDX)]], \ + constant _EXPORTED_FlushUniforms& uniforms [[buffer(FLUSH_UNIFORM_BUFFER_IDX)]], \ + constant Attrs* attrs [[buffer(0)]] VERTEX_CONTEXT_DECL) \ + { \ + _instanceID += _baseInstance; \ + Varyings _varyings; +#else +#define VERTEX_MAIN(NAME, Attrs, attrs, _vertexID, _instanceID) \ + __attribute__((visibility("default"))) Varyings vertex NAME( \ + uint _vertexID [[vertex_id]], \ + uint _instanceID [[instance_id]], \ + constant _EXPORTED_FlushUniforms& uniforms [[buffer(FLUSH_UNIFORM_BUFFER_IDX)]], \ + constant Attrs* attrs [[buffer(0)]] VERTEX_CONTEXT_DECL) \ + { \ + Varyings _varyings; +#endif + +#define IMAGE_RECT_VERTEX_MAIN(NAME, Attrs, attrs, _vertexID, _instanceID) \ + __attribute__((visibility("default"))) Varyings vertex NAME( \ + uint _vertexID [[vertex_id]], \ + constant _EXPORTED_FlushUniforms& uniforms [[buffer(FLUSH_UNIFORM_BUFFER_IDX)]], \ + constant _EXPORTED_ImageDrawUniforms& imageDrawUniforms \ + [[buffer(IMAGE_DRAW_UNIFORM_BUFFER_IDX)]], \ + constant Attrs* attrs [[buffer(0)]]
VERTEX_CONTEXT_DECL) \ + { \ + Varyings _varyings; + +#define IMAGE_MESH_VERTEX_MAIN(NAME, PositionAttr, position, UVAttr, uv, _vertexID) \ + __attribute__((visibility("default"))) Varyings vertex NAME( \ + uint _vertexID [[vertex_id]], \ + constant _EXPORTED_FlushUniforms& uniforms [[buffer(FLUSH_UNIFORM_BUFFER_IDX)]], \ + constant _EXPORTED_ImageDrawUniforms& imageDrawUniforms \ + [[buffer(IMAGE_DRAW_UNIFORM_BUFFER_IDX)]], \ + constant PositionAttr* position [[buffer(0)]], \ + constant UVAttr* uv [[buffer(1)]]) \ + { \ + Varyings _varyings; + +#define EMIT_VERTEX(POSITION) \ + _varyings._pos = POSITION; \ + } \ + return _varyings; + +#define FRAG_DATA_MAIN(DATA_TYPE, NAME) \ + DATA_TYPE __attribute__((visibility("default"))) fragment NAME(Varyings _varyings \ + [[stage_in]]) \ + { + +#define EMIT_FRAG_DATA(VALUE) \ + return VALUE; \ + } + +#define FRAGMENT_CONTEXT_DECL \ + , float2 _fragCoord, FragmentTextures _textures, FragmentStorageBuffers _buffers +#define FRAGMENT_CONTEXT_UNPACK , _fragCoord, _textures, _buffers + +#ifdef _EXPORTED_PLS_IMPL_DEVICE_BUFFER + +#define PLS_BLOCK_BEGIN \ + struct PLS \ + { +#ifdef _EXPORTED_PLS_IMPL_DEVICE_BUFFER_RASTER_ORDERED +// Apple Silicon doesn't support fragment-fragment memory barriers, so on this hardware we use +// raster order groups instead. +// Since the PLS plane indices collide with other buffer bindings, offset the binding indices of +// these buffers by DEFAULT_BINDINGS_SET_SIZE. +#define PLS_DECL4F(IDX, NAME) \ + device uint* NAME [[buffer(IDX + DEFAULT_BINDINGS_SET_SIZE), raster_order_group(0)]] +#define PLS_DECLUI(IDX, NAME) \ + device uint* NAME [[buffer(IDX + DEFAULT_BINDINGS_SET_SIZE), raster_order_group(0)]] +#define PLS_DECLUI_ATOMIC(IDX, NAME) \ + device atomic_uint* NAME [[buffer(IDX + DEFAULT_BINDINGS_SET_SIZE), raster_order_group(0)]] +#else +// Since the PLS plane indices collide with other buffer bindings, offset the binding indices of +// these buffers by DEFAULT_BINDINGS_SET_SIZE. 
+#define PLS_DECL4F(IDX, NAME) device uint* NAME [[buffer(IDX + DEFAULT_BINDINGS_SET_SIZE)]] +#define PLS_DECLUI(IDX, NAME) device uint* NAME [[buffer(IDX + DEFAULT_BINDINGS_SET_SIZE)]] +#define PLS_DECLUI_ATOMIC(IDX, NAME) \ + device atomic_uint* NAME [[buffer(IDX + DEFAULT_BINDINGS_SET_SIZE)]] +#endif // @PLS_IMPL_DEVICE_BUFFER_RASTER_ORDERED +#define PLS_BLOCK_END \ + } \ + ; +#define PLS_CONTEXT_DECL , PLS _pls, uint _plsIdx +#define PLS_CONTEXT_UNPACK , _pls, _plsIdx + +#define PLS_LOAD4F(PLANE) unpackUnorm4x8(_pls.PLANE[_plsIdx]) +#define PLS_LOADUI(PLANE) _pls.PLANE[_plsIdx] +#define PLS_LOADUI_ATOMIC(PLANE) \ + atomic_load_explicit(&_pls.PLANE[_plsIdx], memory_order::memory_order_relaxed) +#define PLS_STORE4F(PLANE, VALUE) _pls.PLANE[_plsIdx] = packUnorm4x8(VALUE) +#define PLS_STOREUI(PLANE, VALUE) _pls.PLANE[_plsIdx] = (VALUE) +#define PLS_STOREUI_ATOMIC(PLANE, VALUE) \ + atomic_store_explicit(&_pls.PLANE[_plsIdx], VALUE, memory_order::memory_order_relaxed) +#define PLS_PRESERVE_4F(PLANE) +#define PLS_PRESERVE_UI(PLANE) + +#define PLS_ATOMIC_MAX(PLANE, X) \ + atomic_fetch_max_explicit(&_pls.PLANE[_plsIdx], X, memory_order::memory_order_relaxed) + +#define PLS_ATOMIC_ADD(PLANE, X) \ + atomic_fetch_add_explicit(&_pls.PLANE[_plsIdx], X, memory_order::memory_order_relaxed) + +#define PLS_INTERLOCK_BEGIN +#define PLS_INTERLOCK_END + +#define PLS_METAL_MAIN(NAME) \ + __attribute__((visibility("default"))) fragment NAME(PLS _pls, \ + constant _EXPORTED_FlushUniforms& uniforms \ + [[buffer(FLUSH_UNIFORM_BUFFER_IDX)]], \ + Varyings _varyings [[stage_in]], \ + FragmentTextures _textures, \ + FragmentStorageBuffers _buffers) \ + { \ + float2 _fragCoord = _varyings._pos.xy; \ + uint2 _plsCoord = uint2(metal::floor(_fragCoord)); \ + uint _plsIdx = _plsCoord.y * uniforms.renderTargetWidth + _plsCoord.x; + +#define PLS_METAL_MAIN_WITH_IMAGE_UNIFORMS(NAME) \ + __attribute__((visibility("default"))) fragment NAME( \ + PLS _pls, \ + constant _EXPORTED_FlushUniforms& uniforms [[buffer(FLUSH_UNIFORM_BUFFER_IDX)]], \ + constant _EXPORTED_ImageDrawUniforms& imageDrawUniforms \ + [[buffer(IMAGE_DRAW_UNIFORM_BUFFER_IDX)]], \ + Varyings _varyings [[stage_in]], \ + FragmentTextures _textures, \ + FragmentStorageBuffers _buffers) \ + { \ + float2 _fragCoord = _varyings._pos.xy; \ + uint2 _plsCoord = uint2(metal::floor(_fragCoord)); \ + uint _plsIdx = _plsCoord.y * uniforms.renderTargetWidth + _plsCoord.x; + +#define PLS_MAIN(NAME) void PLS_METAL_MAIN(NAME) +#define PLS_MAIN_WITH_IMAGE_UNIFORMS(NAME) void PLS_METAL_MAIN_WITH_IMAGE_UNIFORMS(NAME) +#define EMIT_PLS } + +#define PLS_FRAG_COLOR_MAIN(NAME) \ + half4 PLS_METAL_MAIN(NAME) \ + { \ + half4 _fragColor; + +#define PLS_FRAG_COLOR_MAIN_WITH_IMAGE_UNIFORMS(NAME) \ + half4 PLS_METAL_MAIN_WITH_IMAGE_UNIFORMS(NAME) \ + { \ + half4 _fragColor; + +#define EMIT_PLS_AND_FRAG_COLOR \ + } \ + return _fragColor; \ + EMIT_PLS + +#else // Default implementation -- framebuffer reads. 
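// (Editorial sketch of the device-buffer declarations above, with hypothetical values
// IDX = 1 and DEFAULT_BINDINGS_SET_SIZE = 4:)
#if 0
PLS_DECL4F(1, colorBuffer)
// -> device uint* colorBuffer [[buffer(1 + 4), raster_order_group(0)]]
// i.e. PLS plane 1 occupies buffer slot 5, clear of the regular bindings, and the
// raster order group serializes overlapping fragments in place of the fragment-fragment
// memory barriers that Apple Silicon lacks.
#endif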
+ +#define PLS_BLOCK_BEGIN \ + struct PLS \ + { +#define PLS_DECL4F(IDX, NAME) [[color(IDX)]] half4 NAME +#define PLS_DECLUI(IDX, NAME) [[color(IDX)]] uint NAME +#define PLS_DECLUI_ATOMIC PLS_DECLUI +#define PLS_BLOCK_END \ + } \ + ; +#define PLS_CONTEXT_DECL , thread PLS &_inpls, thread PLS &_pls +#define PLS_CONTEXT_UNPACK , _inpls, _pls + +#define PLS_LOAD4F(PLANE) _inpls.PLANE +#define PLS_LOADUI(PLANE) _inpls.PLANE +#define PLS_LOADUI_ATOMIC(PLANE) PLS_LOADUI +#define PLS_STORE4F(PLANE, VALUE) _pls.PLANE = (VALUE) +#define PLS_STOREUI(PLANE, VALUE) _pls.PLANE = (VALUE) +#define PLS_STOREUI_ATOMIC(PLANE) PLS_STOREUI +#define PLS_PRESERVE_4F(PLANE) _pls.PLANE = _inpls.PLANE +#define PLS_PRESERVE_UI(PLANE) _pls.PLANE = _inpls.PLANE + +INLINE uint pls_atomic_max(thread uint& dst, uint x) +{ + uint originalValue = dst; + dst = metal::max(originalValue, x); + return originalValue; +} + +#define PLS_ATOMIC_MAX(PLANE, X) pls_atomic_max(_pls.PLANE, X) + +INLINE uint pls_atomic_add(thread uint& dst, uint x) +{ + uint originalValue = dst; + dst = originalValue + x; + return originalValue; +} + +#define PLS_ATOMIC_ADD(PLANE, X) pls_atomic_add(_pls.PLANE, X) + +#define PLS_INTERLOCK_BEGIN +#define PLS_INTERLOCK_END + +#define PLS_METAL_MAIN(NAME, ...) \ + PLS __attribute__((visibility("default"))) fragment NAME(__VA_ARGS__) \ + { \ + float2 _fragCoord [[maybe_unused]] = _varyings._pos.xy; \ + PLS _pls; + +#define PLS_MAIN(NAME, ...) \ + PLS_METAL_MAIN(NAME, \ + PLS _inpls, \ + Varyings _varyings [[stage_in]], \ + FragmentTextures _textures, \ + FragmentStorageBuffers _buffers) + +#define PLS_MAIN_WITH_IMAGE_UNIFORMS(NAME) \ + PLS_METAL_MAIN(NAME, \ + PLS _inpls, \ + Varyings _varyings [[stage_in]], \ + FragmentTextures _textures, \ + FragmentStorageBuffers _buffers, \ + constant _EXPORTED_ImageDrawUniforms& imageDrawUniforms \ + [[buffer(IMAGE_DRAW_UNIFORM_BUFFER_IDX)]]) + +#define EMIT_PLS \ + } \ + return _pls; + +#define PLS_FRAG_COLOR_METAL_MAIN(NAME, ...) 
\ + struct FragmentOut \ + { \ + half4 _color [[color(0)]]; \ + PLS _pls; \ + }; \ + FragmentOut __attribute__((visibility("default"))) fragment NAME(__VA_ARGS__) \ + { \ + float2 _fragCoord [[maybe_unused]] = _varyings._pos.xy; \ + half4 _fragColor; \ + PLS _pls; + +#define PLS_FRAG_COLOR_MAIN(NAME) \ + PLS_FRAG_COLOR_METAL_MAIN(NAME, \ + PLS _inpls, \ + Varyings _varyings [[stage_in]], \ + FragmentTextures _textures, \ + FragmentStorageBuffers _buffers) + +#define PLS_FRAG_COLOR_MAIN_WITH_IMAGE_UNIFORMS(NAME) \ + PLS_FRAG_COLOR_METAL_MAIN(NAME, \ + PLS _inpls, \ + Varyings _varyings [[stage_in]], \ + FragmentTextures _textures, \ + FragmentStorageBuffers _buffers, \ + __VA_ARGS__ constant _EXPORTED_ImageDrawUniforms& imageDrawUniforms \ + [[buffer(IMAGE_DRAW_UNIFORM_BUFFER_IDX)]]) + +#define EMIT_PLS_AND_FRAG_COLOR \ + } \ + return {._color = _fragColor, ._pls = _pls}; + +#endif // PLS_IMPL_DEVICE_BUFFER + +#define discard discard_fragment() + +using namespace metal; + +template <int N> INLINE vec<uint, N> floatBitsToUint(vec<float, N> x) +{ + return as_type<vec<uint, N>>(x); +} + +template <int N> INLINE vec<int, N> floatBitsToInt(vec<float, N> x) +{ + return as_type<vec<int, N>>(x); +} + +INLINE uint floatBitsToUint(float x) { return as_type<uint>(x); } + +INLINE int floatBitsToInt(float x) { return as_type<int>(x); } + +template <int N> INLINE vec<float, N> uintBitsToFloat(vec<uint, N> x) +{ + return as_type<vec<float, N>>(x); +} + +INLINE float uintBitsToFloat(uint x) { return as_type<float>(x); } +INLINE half2 unpackHalf2x16(uint x) { return as_type<half2>(x); } +INLINE uint packHalf2x16(half2 x) { return as_type<uint>(x); } +INLINE half4 unpackUnorm4x8(uint x) { return unpack_unorm4x8_to_half(x); } +INLINE uint packUnorm4x8(half4 x) { return pack_half_to_unorm4x8(x); } + +INLINE float2x2 inverse(float2x2 m) +{ + float2x2 m_ = float2x2(m[1][1], -m[0][1], -m[1][0], m[0][0]); + float det = (m_[0][0] * m[0][0]) + (m_[0][1] * m[1][0]); + return m_ * (1 / det); +} + +INLINE half3 mix(half3 a, half3 b, bool3 c) +{ + half3 result; + for (int i = 0; i < 3; ++i) + result[i] = c[i] ? b[i] : a[i]; + return result; +} +)==="; +} // namespace glsl +} // namespace gpu +} // namespace rive \ No newline at end of file diff --git a/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/metal.minified.ush b/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/metal.minified.ush new file mode 100644 index 00000000..d820477b --- /dev/null +++ b/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/metal.minified.ush @@ -0,0 +1,447 @@ +/* + * Copyright 2023 Rive + */ + +// This header provides Metal-specific #defines and declarations that enable our shaders to be +// compiled on MSL and GLSL both. + +#define METAL + +// #define native metal types if their names are being rewritten.
+#define _ARE_TOKEN_NAMES_PRESERVED +#ifndef _ARE_TOKEN_NAMES_PRESERVED +#define half half +#define half2 half2 +#define half3 half3 +#define half4 half4 +#define short short +#define short2 short2 +#define short3 short3 +#define short4 short4 +#define ushort ushort +#define ushort2 ushort2 +#define ushort3 ushort3 +#define ushort4 ushort4 +#define float2 float2 +#define float3 float3 +#define packed_float3 packed_float3 +#define float4 float4 +#define bool2 bool2 +#define bool3 bool3 +#define bool4 bool4 +#define uint2 uint2 +#define uint3 uint3 +#define uint4 uint4 +#define int2 int2 +#define int3 int3 +#define int4 int4 +#define float4x2 float4x2 +#define ushort ushort +#define float2x2 float2x2 +#define half3x4 half3x4 +#endif + +#define INLINE inline +#define OUT(ARG_TYPE) thread ARG_TYPE& + +#define notEqual(A, B) ((A) != (B)) +#define lessThanEqual(A, B) ((A) <= (B)) +#define greaterThanEqual(A, B) ((A) >= (B)) +#define MUL(A, B) ((A) * (B)) +#define atan atan2 +#define inversesqrt rsqrt + +#define UNIFORM_BLOCK_BEGIN(IDX, NAME) \ + struct NAME \ + { +#define UNIFORM_BLOCK_END(NAME) \ + } \ + ; + +#define ATTR_BLOCK_BEGIN(NAME) \ + struct NAME \ + { +#define ATTR(IDX, TYPE, NAME) TYPE NAME +#define ATTR_BLOCK_END \ + } \ + ; +#define ATTR_UNPACK(ID, attrs, NAME, TYPE) TYPE NAME = attrs[ID].NAME + +#define VARYING_BLOCK_BEGIN \ + struct Varyings \ + { +#define VARYING(IDX, TYPE, NAME) TYPE NAME +#define FLAT [[flat]] +#define NO_PERSPECTIVE [[center_no_perspective]] +#ifndef OPTIONALLY_FLAT +// Don't use no-perspective interpolation for varyings that need to be flat. No-perspective +// interpolation appears to break the guarantee that a varying == "x" when all barycentric values +// also == "x". Default (perspective-correct) interpolation does preserve this guarantee, and seems +// to be faster than flat on Apple Silicon.
+#define OPTIONALLY_FLAT +#endif +#define VARYING_BLOCK_END \ + float4 _pos [[position]] [[invariant]]; \ + } \ + ; + +#define VARYING_INIT(NAME, TYPE) thread TYPE& NAME = _varyings.NAME +#define VARYING_PACK(NAME) +#define VARYING_UNPACK(NAME, TYPE) TYPE NAME = _varyings.NAME + +#define VERTEX_STORAGE_BUFFER_BLOCK_BEGIN \ + struct VertexStorageBuffers \ + { +#define VERTEX_STORAGE_BUFFER_BLOCK_END \ + } \ + ; + +#define FRAG_STORAGE_BUFFER_BLOCK_BEGIN \ + struct FragmentStorageBuffers \ + { +#define FRAG_STORAGE_BUFFER_BLOCK_END \ + } \ + ; + +#define STORAGE_BUFFER_U32x2(IDX, GLSL_STRUCT_NAME, NAME) constant uint2* NAME [[buffer(IDX)]] +#define STORAGE_BUFFER_U32x4(IDX, GLSL_STRUCT_NAME, NAME) constant uint4* NAME [[buffer(IDX)]] +#define STORAGE_BUFFER_F32x4(IDX, GLSL_STRUCT_NAME, NAME) constant float4* NAME [[buffer(IDX)]] +#define STORAGE_BUFFER_LOAD4(NAME, I) _buffers.NAME[I] +#define STORAGE_BUFFER_LOAD2(NAME, I) _buffers.NAME[I] + +#define VERTEX_TEXTURE_BLOCK_BEGIN \ + struct VertexTextures \ + { +#define VERTEX_TEXTURE_BLOCK_END \ + } \ + ; + +#define FRAG_TEXTURE_BLOCK_BEGIN \ + struct FragmentTextures \ + { +#define FRAG_TEXTURE_BLOCK_END \ + } \ + ; + +#define TEXTURE_RGBA32UI(SET, IDX, NAME) [[texture(IDX)]] texture2d<uint> NAME +#define TEXTURE_RGBA32F(SET, IDX, NAME) [[texture(IDX)]] texture2d<float> NAME +#define TEXTURE_RGBA8(SET, IDX, NAME) [[texture(IDX)]] texture2d<half> NAME + +#define SAMPLER_LINEAR(TEXTURE_IDX, NAME) \ + constexpr sampler NAME(filter::linear, mip_filter::none); +#define SAMPLER_MIPMAP(TEXTURE_IDX, NAME) \ + constexpr sampler NAME(filter::linear, mip_filter::linear); + +#define TEXEL_FETCH(TEXTURE, COORD) _textures.TEXTURE.read(uint2(COORD)) +#define TEXTURE_SAMPLE(TEXTURE, SAMPLER_NAME, COORD) _textures.TEXTURE.sample(SAMPLER_NAME, COORD) +#define TEXTURE_SAMPLE_LOD(TEXTURE, SAMPLER_NAME, COORD, LOD) \ + _textures.TEXTURE.sample(SAMPLER_NAME, COORD, level(LOD)) +#define TEXTURE_SAMPLE_GRAD(TEXTURE, SAMPLER_NAME, COORD, DDX, DDY) \ + _textures.TEXTURE.sample(SAMPLER_NAME, COORD, gradient2d(DDX, DDY)) + +#define VERTEX_CONTEXT_DECL , VertexTextures _textures, VertexStorageBuffers _buffers +#define VERTEX_CONTEXT_UNPACK , _textures, _buffers + +#ifdef ENABLE_INSTANCE_INDEX +#define VERTEX_MAIN(NAME, Attrs, attrs, _vertexID, _instanceID) \ + __attribute__((visibility("default"))) Varyings vertex NAME( \ + uint _vertexID [[vertex_id]], \ + uint _instanceID [[instance_id]], \ + constant uint& _baseInstance [[buffer(PATH_BASE_INSTANCE_UNIFORM_BUFFER_IDX)]], \ + constant _EXPORTED_FlushUniforms& uniforms [[buffer(FLUSH_UNIFORM_BUFFER_IDX)]], \ + constant Attrs* attrs [[buffer(0)]] VERTEX_CONTEXT_DECL) \ + { \ + _instanceID += _baseInstance; \ + Varyings _varyings; +#else +#define VERTEX_MAIN(NAME, Attrs, attrs, _vertexID, _instanceID) \ + __attribute__((visibility("default"))) Varyings vertex NAME( \ + uint _vertexID [[vertex_id]], \ + uint _instanceID [[instance_id]], \ + constant _EXPORTED_FlushUniforms& uniforms [[buffer(FLUSH_UNIFORM_BUFFER_IDX)]], \ + constant Attrs* attrs [[buffer(0)]] VERTEX_CONTEXT_DECL) \ + { \ + Varyings _varyings; +#endif + +#define IMAGE_RECT_VERTEX_MAIN(NAME, Attrs, attrs, _vertexID, _instanceID) \ + __attribute__((visibility("default"))) Varyings vertex NAME( \ + uint _vertexID [[vertex_id]], \ + constant _EXPORTED_FlushUniforms& uniforms [[buffer(FLUSH_UNIFORM_BUFFER_IDX)]], \ + constant _EXPORTED_ImageDrawUniforms& imageDrawUniforms \ + [[buffer(IMAGE_DRAW_UNIFORM_BUFFER_IDX)]], \ + constant Attrs* attrs [[buffer(0)]] VERTEX_CONTEXT_DECL) \ +
{ \ + Varyings _varyings; + +#define IMAGE_MESH_VERTEX_MAIN(NAME, PositionAttr, position, UVAttr, uv, _vertexID) \ + __attribute__((visibility("default"))) Varyings vertex NAME( \ + uint _vertexID [[vertex_id]], \ + constant _EXPORTED_FlushUniforms& uniforms [[buffer(FLUSH_UNIFORM_BUFFER_IDX)]], \ + constant _EXPORTED_ImageDrawUniforms& imageDrawUniforms \ + [[buffer(IMAGE_DRAW_UNIFORM_BUFFER_IDX)]], \ + constant PositionAttr* position [[buffer(0)]], \ + constant UVAttr* uv [[buffer(1)]]) \ + { \ + Varyings _varyings; + +#define EMIT_VERTEX(POSITION) \ + _varyings._pos = POSITION; \ + } \ + return _varyings; + +#define FRAG_DATA_MAIN(DATA_TYPE, NAME) \ + DATA_TYPE __attribute__((visibility("default"))) fragment NAME(Varyings _varyings \ + [[stage_in]]) \ + { + +#define EMIT_FRAG_DATA(VALUE) \ + return VALUE; \ + } + +#define FRAGMENT_CONTEXT_DECL \ + , float2 _fragCoord, FragmentTextures _textures, FragmentStorageBuffers _buffers +#define FRAGMENT_CONTEXT_UNPACK , _fragCoord, _textures, _buffers + +#ifdef PLS_IMPL_DEVICE_BUFFER + +#define PLS_BLOCK_BEGIN \ + struct PLS \ + { +#ifdef PLS_IMPL_DEVICE_BUFFER_RASTER_ORDERED +// Apple Silicon doesn't support fragment-fragment memory barriers, so on this hardware we use +// raster order groups instead. +// Since the PLS plane indices collide with other buffer bindings, offset the binding indices of +// these buffers by DEFAULT_BINDINGS_SET_SIZE. +#define PLS_DECL4F(IDX, NAME) \ + device uint* NAME [[buffer(IDX + DEFAULT_BINDINGS_SET_SIZE), raster_order_group(0)]] +#define PLS_DECLUI(IDX, NAME) \ + device uint* NAME [[buffer(IDX + DEFAULT_BINDINGS_SET_SIZE), raster_order_group(0)]] +#define PLS_DECLUI_ATOMIC(IDX, NAME) \ + device atomic_uint* NAME [[buffer(IDX + DEFAULT_BINDINGS_SET_SIZE), raster_order_group(0)]] +#else +// Since the PLS plane indices collide with other buffer bindings, offset the binding indices of +// these buffers by DEFAULT_BINDINGS_SET_SIZE. 
+#define PLS_DECL4F(IDX, NAME) device uint* NAME [[buffer(IDX + DEFAULT_BINDINGS_SET_SIZE)]] +#define PLS_DECLUI(IDX, NAME) device uint* NAME [[buffer(IDX + DEFAULT_BINDINGS_SET_SIZE)]] +#define PLS_DECLUI_ATOMIC(IDX, NAME) \ + device atomic_uint* NAME [[buffer(IDX + DEFAULT_BINDINGS_SET_SIZE)]] +#endif // @PLS_IMPL_DEVICE_BUFFER_RASTER_ORDERED +#define PLS_BLOCK_END \ + } \ + ; +#define PLS_CONTEXT_DECL , PLS _pls, uint _plsIdx +#define PLS_CONTEXT_UNPACK , _pls, _plsIdx + +#define PLS_LOAD4F(PLANE) unpackUnorm4x8(_pls.PLANE[_plsIdx]) +#define PLS_LOADUI(PLANE) _pls.PLANE[_plsIdx] +#define PLS_LOADUI_ATOMIC(PLANE) \ + atomic_load_explicit(&_pls.PLANE[_plsIdx], memory_order::memory_order_relaxed) +#define PLS_STORE4F(PLANE, VALUE) _pls.PLANE[_plsIdx] = packUnorm4x8(VALUE) +#define PLS_STOREUI(PLANE, VALUE) _pls.PLANE[_plsIdx] = (VALUE) +#define PLS_STOREUI_ATOMIC(PLANE, VALUE) \ + atomic_store_explicit(&_pls.PLANE[_plsIdx], VALUE, memory_order::memory_order_relaxed) +#define PLS_PRESERVE_4F(PLANE) +#define PLS_PRESERVE_UI(PLANE) + +#define PLS_ATOMIC_MAX(PLANE, X) \ + atomic_fetch_max_explicit(&_pls.PLANE[_plsIdx], X, memory_order::memory_order_relaxed) + +#define PLS_ATOMIC_ADD(PLANE, X) \ + atomic_fetch_add_explicit(&_pls.PLANE[_plsIdx], X, memory_order::memory_order_relaxed) + +#define PLS_INTERLOCK_BEGIN +#define PLS_INTERLOCK_END + +#define PLS_METAL_MAIN(NAME) \ + __attribute__((visibility("default"))) fragment NAME(PLS _pls, \ + constant _EXPORTED_FlushUniforms& uniforms \ + [[buffer(FLUSH_UNIFORM_BUFFER_IDX)]], \ + Varyings _varyings [[stage_in]], \ + FragmentTextures _textures, \ + FragmentStorageBuffers _buffers) \ + { \ + float2 _fragCoord = _varyings._pos.xy; \ + uint2 _plsCoord = uint2(metal::floor(_fragCoord)); \ + uint _plsIdx = _plsCoord.y * uniforms.renderTargetWidth + _plsCoord.x; + +#define PLS_METAL_MAIN_WITH_IMAGE_UNIFORMS(NAME) \ + __attribute__((visibility("default"))) fragment NAME( \ + PLS _pls, \ + constant _EXPORTED_FlushUniforms& uniforms [[buffer(FLUSH_UNIFORM_BUFFER_IDX)]], \ + constant _EXPORTED_ImageDrawUniforms& imageDrawUniforms \ + [[buffer(IMAGE_DRAW_UNIFORM_BUFFER_IDX)]], \ + Varyings _varyings [[stage_in]], \ + FragmentTextures _textures, \ + FragmentStorageBuffers _buffers) \ + { \ + float2 _fragCoord = _varyings._pos.xy; \ + uint2 _plsCoord = uint2(metal::floor(_fragCoord)); \ + uint _plsIdx = _plsCoord.y * uniforms.renderTargetWidth + _plsCoord.x; + +#define PLS_MAIN(NAME) void PLS_METAL_MAIN(NAME) +#define PLS_MAIN_WITH_IMAGE_UNIFORMS(NAME) void PLS_METAL_MAIN_WITH_IMAGE_UNIFORMS(NAME) +#define EMIT_PLS } + +#define PLS_FRAG_COLOR_MAIN(NAME) \ + half4 PLS_METAL_MAIN(NAME) \ + { \ + half4 _fragColor; + +#define PLS_FRAG_COLOR_MAIN_WITH_IMAGE_UNIFORMS(NAME) \ + half4 PLS_METAL_MAIN_WITH_IMAGE_UNIFORMS(NAME) \ + { \ + half4 _fragColor; + +#define EMIT_PLS_AND_FRAG_COLOR \ + } \ + return _fragColor; \ + EMIT_PLS + +#else // Default implementation -- framebuffer reads. 
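// (Editorial note: the PLS_METAL_MAIN prologues in the device-buffer path above flatten the
// fragment coordinate into an index over the PLS planes. For a hypothetical render target
// 1920 pixels wide, the fragment at (x=3, y=2) gets _plsIdx = 2 * 1920 + 3 = 3843, so
// _pls.colorBuffer[3843] -- colorBuffer being an illustrative plane name -- holds that
// pixel's packed RGBA8 color.)
#if 0
uint2 _plsCoord = uint2(metal::floor(_varyings._pos.xy));                  // e.g. (3, 2)
uint _plsIdx = _plsCoord.y * uniforms.renderTargetWidth + _plsCoord.x;     // 3843
#endif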
+ +#define PLS_BLOCK_BEGIN \ + struct PLS \ + { +#define PLS_DECL4F(IDX, NAME) [[color(IDX)]] half4 NAME +#define PLS_DECLUI(IDX, NAME) [[color(IDX)]] uint NAME +#define PLS_DECLUI_ATOMIC PLS_DECLUI +#define PLS_BLOCK_END \ + } \ + ; +#define PLS_CONTEXT_DECL , thread PLS &_inpls, thread PLS &_pls +#define PLS_CONTEXT_UNPACK , _inpls, _pls + +#define PLS_LOAD4F(PLANE) _inpls.PLANE +#define PLS_LOADUI(PLANE) _inpls.PLANE +#define PLS_LOADUI_ATOMIC(PLANE) PLS_LOADUI +#define PLS_STORE4F(PLANE, VALUE) _pls.PLANE = (VALUE) +#define PLS_STOREUI(PLANE, VALUE) _pls.PLANE = (VALUE) +#define PLS_STOREUI_ATOMIC(PLANE) PLS_STOREUI +#define PLS_PRESERVE_4F(PLANE) _pls.PLANE = _inpls.PLANE +#define PLS_PRESERVE_UI(PLANE) _pls.PLANE = _inpls.PLANE + +INLINE uint pls_atomic_max(thread uint& dst, uint x) +{ + uint originalValue = dst; + dst = metal::max(originalValue, x); + return originalValue; +} + +#define PLS_ATOMIC_MAX(PLANE, X) pls_atomic_max(_pls.PLANE, X) + +INLINE uint pls_atomic_add(thread uint& dst, uint x) +{ + uint originalValue = dst; + dst = originalValue + x; + return originalValue; +} + +#define PLS_ATOMIC_ADD(PLANE, X) pls_atomic_add(_pls.PLANE, X) + +#define PLS_INTERLOCK_BEGIN +#define PLS_INTERLOCK_END + +#define PLS_METAL_MAIN(NAME, ...) \ + PLS __attribute__((visibility("default"))) fragment NAME(__VA_ARGS__) \ + { \ + float2 _fragCoord [[maybe_unused]] = _varyings._pos.xy; \ + PLS _pls; + +#define PLS_MAIN(NAME, ...) \ + PLS_METAL_MAIN(NAME, \ + PLS _inpls, \ + Varyings _varyings [[stage_in]], \ + FragmentTextures _textures, \ + FragmentStorageBuffers _buffers) + +#define PLS_MAIN_WITH_IMAGE_UNIFORMS(NAME) \ + PLS_METAL_MAIN(NAME, \ + PLS _inpls, \ + Varyings _varyings [[stage_in]], \ + FragmentTextures _textures, \ + FragmentStorageBuffers _buffers, \ + constant _EXPORTED_ImageDrawUniforms& imageDrawUniforms \ + [[buffer(IMAGE_DRAW_UNIFORM_BUFFER_IDX)]]) + +#define EMIT_PLS \ + } \ + return _pls; + +#define PLS_FRAG_COLOR_METAL_MAIN(NAME, ...) 
\ + struct FragmentOut \ + { \ + half4 _color [[color(0)]]; \ + PLS _pls; \ + }; \ + FragmentOut __attribute__((visibility("default"))) fragment NAME(__VA_ARGS__) \ + { \ + float2 _fragCoord [[maybe_unused]] = _varyings._pos.xy; \ + half4 _fragColor; \ + PLS _pls; + +#define PLS_FRAG_COLOR_MAIN(NAME) \ + PLS_FRAG_COLOR_METAL_MAIN(NAME, \ + PLS _inpls, \ + Varyings _varyings [[stage_in]], \ + FragmentTextures _textures, \ + FragmentStorageBuffers _buffers) + +#define PLS_FRAG_COLOR_MAIN_WITH_IMAGE_UNIFORMS(NAME) \ + PLS_FRAG_COLOR_METAL_MAIN(NAME, \ + PLS _inpls, \ + Varyings _varyings [[stage_in]], \ + FragmentTextures _textures, \ + FragmentStorageBuffers _buffers, \ + __VA_ARGS__ constant _EXPORTED_ImageDrawUniforms& imageDrawUniforms \ + [[buffer(IMAGE_DRAW_UNIFORM_BUFFER_IDX)]]) + +#define EMIT_PLS_AND_FRAG_COLOR \ + } \ + return {._color = _fragColor, ._pls = _pls}; + +#endif // PLS_IMPL_DEVICE_BUFFER + +#define discard discard_fragment() + +using namespace metal; + +template <int N> INLINE vec<uint, N> floatBitsToUint(vec<float, N> x) +{ + return as_type<vec<uint, N>>(x); +} + +template <int N> INLINE vec<int, N> floatBitsToInt(vec<float, N> x) +{ + return as_type<vec<int, N>>(x); +} + +INLINE uint floatBitsToUint(float x) { return as_type<uint>(x); } + +INLINE int floatBitsToInt(float x) { return as_type<int>(x); } + +template <int N> INLINE vec<float, N> uintBitsToFloat(vec<uint, N> x) +{ + return as_type<vec<float, N>>(x); +} + +INLINE float uintBitsToFloat(uint x) { return as_type<float>(x); } +INLINE half2 unpackHalf2x16(uint x) { return as_type<half2>(x); } +INLINE uint packHalf2x16(half2 x) { return as_type<uint>(x); } +INLINE half4 unpackUnorm4x8(uint x) { return unpack_unorm4x8_to_half(x); } +INLINE uint packUnorm4x8(half4 x) { return pack_half_to_unorm4x8(x); } + +INLINE float2x2 inverse(float2x2 m) +{ + float2x2 m_ = float2x2(m[1][1], -m[0][1], -m[1][0], m[0][0]); + float det = (m_[0][0] * m[0][0]) + (m_[0][1] * m[1][0]); + return m_ * (1 / det); +} + +INLINE half3 mix(half3 a, half3 b, bool3 c) +{ + half3 result; + for (int i = 0; i < 3; ++i) + result[i] = c[i] ?
b[i] : a[i]; + return result; +} diff --git a/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/pls_load_store_ext.exports.h b/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/pls_load_store_ext.exports.h new file mode 100644 index 00000000..2d88d890 --- /dev/null +++ b/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/pls_load_store_ext.exports.h @@ -0,0 +1,178 @@ +#pragma once + +#define GLSL_CLEAR_CLIP "_EXPORTED_CLEAR_CLIP" +#define GLSL_CLEAR_CLIP_raw _EXPORTED_CLEAR_CLIP +#define GLSL_CLEAR_COLOR "_EXPORTED_CLEAR_COLOR" +#define GLSL_CLEAR_COLOR_raw _EXPORTED_CLEAR_COLOR +#define GLSL_CLEAR_COVERAGE "_EXPORTED_CLEAR_COVERAGE" +#define GLSL_CLEAR_COVERAGE_raw _EXPORTED_CLEAR_COVERAGE +#define GLSL_COALESCED_PLS_RESOLVE_AND_TRANSFER "_EXPORTED_COALESCED_PLS_RESOLVE_AND_TRANSFER" +#define GLSL_COALESCED_PLS_RESOLVE_AND_TRANSFER_raw _EXPORTED_COALESCED_PLS_RESOLVE_AND_TRANSFER +#define GLSL_COLOR_PLANE_IDX_OVERRIDE "_EXPORTED_COLOR_PLANE_IDX_OVERRIDE" +#define GLSL_COLOR_PLANE_IDX_OVERRIDE_raw _EXPORTED_COLOR_PLANE_IDX_OVERRIDE +#define GLSL_DISABLE_SHADER_STORAGE_BUFFERS "_EXPORTED_DISABLE_SHADER_STORAGE_BUFFERS" +#define GLSL_DISABLE_SHADER_STORAGE_BUFFERS_raw _EXPORTED_DISABLE_SHADER_STORAGE_BUFFERS +#define GLSL_DRAW_IMAGE "_EXPORTED_DRAW_IMAGE" +#define GLSL_DRAW_IMAGE_raw _EXPORTED_DRAW_IMAGE +#define GLSL_DRAW_IMAGE_MESH "_EXPORTED_DRAW_IMAGE_MESH" +#define GLSL_DRAW_IMAGE_MESH_raw _EXPORTED_DRAW_IMAGE_MESH +#define GLSL_DRAW_IMAGE_RECT "_EXPORTED_DRAW_IMAGE_RECT" +#define GLSL_DRAW_IMAGE_RECT_raw _EXPORTED_DRAW_IMAGE_RECT +#define GLSL_DRAW_INTERIOR_TRIANGLES "_EXPORTED_DRAW_INTERIOR_TRIANGLES" +#define GLSL_DRAW_INTERIOR_TRIANGLES_raw _EXPORTED_DRAW_INTERIOR_TRIANGLES +#define GLSL_DRAW_PATH "_EXPORTED_DRAW_PATH" +#define GLSL_DRAW_PATH_raw _EXPORTED_DRAW_PATH +#define GLSL_DRAW_RENDER_TARGET_UPDATE_BOUNDS "_EXPORTED_DRAW_RENDER_TARGET_UPDATE_BOUNDS" +#define GLSL_DRAW_RENDER_TARGET_UPDATE_BOUNDS_raw _EXPORTED_DRAW_RENDER_TARGET_UPDATE_BOUNDS +#define GLSL_ENABLE_ADVANCED_BLEND "_EXPORTED_ENABLE_ADVANCED_BLEND" +#define GLSL_ENABLE_ADVANCED_BLEND_raw _EXPORTED_ENABLE_ADVANCED_BLEND +#define GLSL_ENABLE_BINDLESS_TEXTURES "_EXPORTED_ENABLE_BINDLESS_TEXTURES" +#define GLSL_ENABLE_BINDLESS_TEXTURES_raw _EXPORTED_ENABLE_BINDLESS_TEXTURES +#define GLSL_ENABLE_CLIPPING "_EXPORTED_ENABLE_CLIPPING" +#define GLSL_ENABLE_CLIPPING_raw _EXPORTED_ENABLE_CLIPPING +#define GLSL_ENABLE_CLIP_RECT "_EXPORTED_ENABLE_CLIP_RECT" +#define GLSL_ENABLE_CLIP_RECT_raw _EXPORTED_ENABLE_CLIP_RECT +#define GLSL_ENABLE_EVEN_ODD "_EXPORTED_ENABLE_EVEN_ODD" +#define GLSL_ENABLE_EVEN_ODD_raw _EXPORTED_ENABLE_EVEN_ODD +#define GLSL_ENABLE_HSL_BLEND_MODES "_EXPORTED_ENABLE_HSL_BLEND_MODES" +#define GLSL_ENABLE_HSL_BLEND_MODES_raw _EXPORTED_ENABLE_HSL_BLEND_MODES +#define GLSL_ENABLE_INSTANCE_INDEX "_EXPORTED_ENABLE_INSTANCE_INDEX" +#define GLSL_ENABLE_INSTANCE_INDEX_raw _EXPORTED_ENABLE_INSTANCE_INDEX +#define GLSL_ENABLE_KHR_BLEND "_EXPORTED_ENABLE_KHR_BLEND" +#define GLSL_ENABLE_KHR_BLEND_raw _EXPORTED_ENABLE_KHR_BLEND +#define GLSL_ENABLE_MIN_16_PRECISION "_EXPORTED_ENABLE_MIN_16_PRECISION" +#define GLSL_ENABLE_MIN_16_PRECISION_raw _EXPORTED_ENABLE_MIN_16_PRECISION +#define GLSL_ENABLE_NESTED_CLIPPING "_EXPORTED_ENABLE_NESTED_CLIPPING" +#define GLSL_ENABLE_NESTED_CLIPPING_raw _EXPORTED_ENABLE_NESTED_CLIPPING +#define GLSL_ENABLE_RASTERIZER_ORDERED_VIEWS "_EXPORTED_ENABLE_RASTERIZER_ORDERED_VIEWS" +#define 
GLSL_ENABLE_RASTERIZER_ORDERED_VIEWS_raw _EXPORTED_ENABLE_RASTERIZER_ORDERED_VIEWS +#define GLSL_ENABLE_SPIRV_CROSS_BASE_INSTANCE "_EXPORTED_ENABLE_SPIRV_CROSS_BASE_INSTANCE" +#define GLSL_ENABLE_SPIRV_CROSS_BASE_INSTANCE_raw _EXPORTED_ENABLE_SPIRV_CROSS_BASE_INSTANCE +#define GLSL_ENABLE_TYPED_UAV_LOAD_STORE "_EXPORTED_ENABLE_TYPED_UAV_LOAD_STORE" +#define GLSL_ENABLE_TYPED_UAV_LOAD_STORE_raw _EXPORTED_ENABLE_TYPED_UAV_LOAD_STORE +#define GLSL_FIXED_FUNCTION_COLOR_BLEND "_EXPORTED_FIXED_FUNCTION_COLOR_BLEND" +#define GLSL_FIXED_FUNCTION_COLOR_BLEND_raw _EXPORTED_FIXED_FUNCTION_COLOR_BLEND +#define GLSL_FRAGMENT "_EXPORTED_FRAGMENT" +#define GLSL_FRAGMENT_raw _EXPORTED_FRAGMENT +#define GLSL_FlushUniforms "_EXPORTED_FlushUniforms" +#define GLSL_FlushUniforms_raw _EXPORTED_FlushUniforms +#define GLSL_GLSL_VERSION "_EXPORTED_GLSL_VERSION" +#define GLSL_GLSL_VERSION_raw _EXPORTED_GLSL_VERSION +#define GLSL_INITIALIZE_PLS "_EXPORTED_INITIALIZE_PLS" +#define GLSL_INITIALIZE_PLS_raw _EXPORTED_INITIALIZE_PLS +#define GLSL_ImageDrawUniforms "_EXPORTED_ImageDrawUniforms" +#define GLSL_ImageDrawUniforms_raw _EXPORTED_ImageDrawUniforms +#define GLSL_LOAD_COLOR "_EXPORTED_LOAD_COLOR" +#define GLSL_LOAD_COLOR_raw _EXPORTED_LOAD_COLOR +#define GLSL_OPTIONALLY_FLAT "_EXPORTED_OPTIONALLY_FLAT" +#define GLSL_OPTIONALLY_FLAT_raw _EXPORTED_OPTIONALLY_FLAT +#define GLSL_PLS_IMPL_ANGLE "_EXPORTED_PLS_IMPL_ANGLE" +#define GLSL_PLS_IMPL_ANGLE_raw _EXPORTED_PLS_IMPL_ANGLE +#define GLSL_PLS_IMPL_DEVICE_BUFFER "_EXPORTED_PLS_IMPL_DEVICE_BUFFER" +#define GLSL_PLS_IMPL_DEVICE_BUFFER_raw _EXPORTED_PLS_IMPL_DEVICE_BUFFER +#define GLSL_PLS_IMPL_DEVICE_BUFFER_RASTER_ORDERED "_EXPORTED_PLS_IMPL_DEVICE_BUFFER_RASTER_ORDERED" +#define GLSL_PLS_IMPL_DEVICE_BUFFER_RASTER_ORDERED_raw _EXPORTED_PLS_IMPL_DEVICE_BUFFER_RASTER_ORDERED +#define GLSL_PLS_IMPL_EXT_NATIVE "_EXPORTED_PLS_IMPL_EXT_NATIVE" +#define GLSL_PLS_IMPL_EXT_NATIVE_raw _EXPORTED_PLS_IMPL_EXT_NATIVE +#define GLSL_PLS_IMPL_FRAMEBUFFER_FETCH "_EXPORTED_PLS_IMPL_FRAMEBUFFER_FETCH" +#define GLSL_PLS_IMPL_FRAMEBUFFER_FETCH_raw _EXPORTED_PLS_IMPL_FRAMEBUFFER_FETCH +#define GLSL_PLS_IMPL_NONE "_EXPORTED_PLS_IMPL_NONE" +#define GLSL_PLS_IMPL_NONE_raw _EXPORTED_PLS_IMPL_NONE +#define GLSL_PLS_IMPL_STORAGE_TEXTURE "_EXPORTED_PLS_IMPL_STORAGE_TEXTURE" +#define GLSL_PLS_IMPL_STORAGE_TEXTURE_raw _EXPORTED_PLS_IMPL_STORAGE_TEXTURE +#define GLSL_PLS_IMPL_SUBPASS_LOAD "_EXPORTED_PLS_IMPL_SUBPASS_LOAD" +#define GLSL_PLS_IMPL_SUBPASS_LOAD_raw _EXPORTED_PLS_IMPL_SUBPASS_LOAD +#define GLSL_RESOLVE_PLS "_EXPORTED_RESOLVE_PLS" +#define GLSL_RESOLVE_PLS_raw _EXPORTED_RESOLVE_PLS +#define GLSL_STORE_COLOR "_EXPORTED_STORE_COLOR" +#define GLSL_STORE_COLOR_raw _EXPORTED_STORE_COLOR +#define GLSL_STORE_COLOR_CLEAR "_EXPORTED_STORE_COLOR_CLEAR" +#define GLSL_STORE_COLOR_CLEAR_raw _EXPORTED_STORE_COLOR_CLEAR +#define GLSL_SWIZZLE_COLOR_BGRA_TO_RGBA "_EXPORTED_SWIZZLE_COLOR_BGRA_TO_RGBA" +#define GLSL_SWIZZLE_COLOR_BGRA_TO_RGBA_raw _EXPORTED_SWIZZLE_COLOR_BGRA_TO_RGBA +#define GLSL_TARGET_VULKAN "_EXPORTED_TARGET_VULKAN" +#define GLSL_TARGET_VULKAN_raw _EXPORTED_TARGET_VULKAN +#define GLSL_USE_GENERATED_UNIFORMS "_EXPORTED_USE_GENERATED_UNIFORMS" +#define GLSL_USE_GENERATED_UNIFORMS_raw _EXPORTED_USE_GENERATED_UNIFORMS +#define GLSL_USING_DEPTH_STENCIL "_EXPORTED_USING_DEPTH_STENCIL" +#define GLSL_USING_DEPTH_STENCIL_raw _EXPORTED_USING_DEPTH_STENCIL +#define GLSL_USING_PLS_STORAGE_TEXTURES "_EXPORTED_USING_PLS_STORAGE_TEXTURES" +#define GLSL_USING_PLS_STORAGE_TEXTURES_raw 
_EXPORTED_USING_PLS_STORAGE_TEXTURES +#define GLSL_VERTEX "_EXPORTED_VERTEX" +#define GLSL_VERTEX_raw _EXPORTED_VERTEX +#define GLSL_a_args "_EXPORTED_a_args" +#define GLSL_a_args_raw _EXPORTED_a_args +#define GLSL_a_args_a "_EXPORTED_a_args_a" +#define GLSL_a_args_a_raw _EXPORTED_a_args_a +#define GLSL_a_args_b "_EXPORTED_a_args_b" +#define GLSL_a_args_b_raw _EXPORTED_a_args_b +#define GLSL_a_args_c "_EXPORTED_a_args_c" +#define GLSL_a_args_c_raw _EXPORTED_a_args_c +#define GLSL_a_args_d "_EXPORTED_a_args_d" +#define GLSL_a_args_d_raw _EXPORTED_a_args_d +#define GLSL_a_imageRectVertex "_EXPORTED_a_imageRectVertex" +#define GLSL_a_imageRectVertex_raw _EXPORTED_a_imageRectVertex +#define GLSL_a_joinTan_and_ys "_EXPORTED_a_joinTan_and_ys" +#define GLSL_a_joinTan_and_ys_raw _EXPORTED_a_joinTan_and_ys +#define GLSL_a_mirroredVertexData "_EXPORTED_a_mirroredVertexData" +#define GLSL_a_mirroredVertexData_raw _EXPORTED_a_mirroredVertexData +#define GLSL_a_p0p1_ "_EXPORTED_a_p0p1_" +#define GLSL_a_p0p1__raw _EXPORTED_a_p0p1_ +#define GLSL_a_p2p3_ "_EXPORTED_a_p2p3_" +#define GLSL_a_p2p3__raw _EXPORTED_a_p2p3_ +#define GLSL_a_patchVertexData "_EXPORTED_a_patchVertexData" +#define GLSL_a_patchVertexData_raw _EXPORTED_a_patchVertexData +#define GLSL_a_position "_EXPORTED_a_position" +#define GLSL_a_position_raw _EXPORTED_a_position +#define GLSL_a_span "_EXPORTED_a_span" +#define GLSL_a_span_raw _EXPORTED_a_span +#define GLSL_a_span_a "_EXPORTED_a_span_a" +#define GLSL_a_span_a_raw _EXPORTED_a_span_a +#define GLSL_a_span_b "_EXPORTED_a_span_b" +#define GLSL_a_span_b_raw _EXPORTED_a_span_b +#define GLSL_a_span_c "_EXPORTED_a_span_c" +#define GLSL_a_span_c_raw _EXPORTED_a_span_c +#define GLSL_a_span_d "_EXPORTED_a_span_d" +#define GLSL_a_span_d_raw _EXPORTED_a_span_d +#define GLSL_a_texCoord "_EXPORTED_a_texCoord" +#define GLSL_a_texCoord_raw _EXPORTED_a_texCoord +#define GLSL_a_triangleVertex "_EXPORTED_a_triangleVertex" +#define GLSL_a_triangleVertex_raw _EXPORTED_a_triangleVertex +#define GLSL_blitFragmentMain "_EXPORTED_blitFragmentMain" +#define GLSL_blitFragmentMain_raw _EXPORTED_blitFragmentMain +#define GLSL_blitTextureSource "_EXPORTED_blitTextureSource" +#define GLSL_blitTextureSource_raw _EXPORTED_blitTextureSource +#define GLSL_blitVertexMain "_EXPORTED_blitVertexMain" +#define GLSL_blitVertexMain_raw _EXPORTED_blitVertexMain +#define GLSL_clearColor "_EXPORTED_clearColor" +#define GLSL_clearColor_raw _EXPORTED_clearColor +#define GLSL_colorRampFragmentMain "_EXPORTED_colorRampFragmentMain" +#define GLSL_colorRampFragmentMain_raw _EXPORTED_colorRampFragmentMain +#define GLSL_colorRampVertexMain "_EXPORTED_colorRampVertexMain" +#define GLSL_colorRampVertexMain_raw _EXPORTED_colorRampVertexMain +#define GLSL_contourBuffer "_EXPORTED_contourBuffer" +#define GLSL_contourBuffer_raw _EXPORTED_contourBuffer +#define GLSL_drawFragmentMain "_EXPORTED_drawFragmentMain" +#define GLSL_drawFragmentMain_raw _EXPORTED_drawFragmentMain +#define GLSL_drawVertexMain "_EXPORTED_drawVertexMain" +#define GLSL_drawVertexMain_raw _EXPORTED_drawVertexMain +#define GLSL_dstColorTexture "_EXPORTED_dstColorTexture" +#define GLSL_dstColorTexture_raw _EXPORTED_dstColorTexture +#define GLSL_gradTexture "_EXPORTED_gradTexture" +#define GLSL_gradTexture_raw _EXPORTED_gradTexture +#define GLSL_imageTexture "_EXPORTED_imageTexture" +#define GLSL_imageTexture_raw _EXPORTED_imageTexture +#define GLSL_paintAuxBuffer "_EXPORTED_paintAuxBuffer" +#define GLSL_paintAuxBuffer_raw _EXPORTED_paintAuxBuffer +#define GLSL_paintBuffer 
"_EXPORTED_paintBuffer" +#define GLSL_paintBuffer_raw _EXPORTED_paintBuffer +#define GLSL_pathBuffer "_EXPORTED_pathBuffer" +#define GLSL_pathBuffer_raw _EXPORTED_pathBuffer +#define GLSL_stencilVertexMain "_EXPORTED_stencilVertexMain" +#define GLSL_stencilVertexMain_raw _EXPORTED_stencilVertexMain +#define GLSL_tessVertexTexture "_EXPORTED_tessVertexTexture" +#define GLSL_tessVertexTexture_raw _EXPORTED_tessVertexTexture +#define GLSL_tessellateFragmentMain "_EXPORTED_tessellateFragmentMain" +#define GLSL_tessellateFragmentMain_raw _EXPORTED_tessellateFragmentMain +#define GLSL_tessellateVertexMain "_EXPORTED_tessellateVertexMain" +#define GLSL_tessellateVertexMain_raw _EXPORTED_tessellateVertexMain diff --git a/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/pls_load_store_ext.glsl.hpp b/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/pls_load_store_ext.glsl.hpp new file mode 100644 index 00000000..f0933465 --- /dev/null +++ b/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/pls_load_store_ext.glsl.hpp @@ -0,0 +1,109 @@ +#pragma once + +#include "pls_load_store_ext.exports.h" + +namespace rive { +namespace gpu { +namespace glsl { +const char pls_load_store_ext[] = R"===(/* + * Copyright 2022 Rive + */ + +// The EXT_shader_pixel_local_storage extension does not provide a mechanism to load, store, or +// clear pixel local storage contents. This shader performs custom load, store, and clear +// operations via fullscreen draws. + +#ifdef _EXPORTED_VERTEX +void main() +{ + // [-1, -1] .. [+1, +1] + gl_Position = + vec4(mix(vec2(-1, 1), vec2(1, -1), equal(gl_VertexID & ivec2(1, 2), ivec2(0))), 0, 1); +} +#endif + +#ifdef _EXPORTED_FRAGMENT + +#extension GL_EXT_shader_pixel_local_storage : enable +#extension GL_ARM_shader_framebuffer_fetch : enable +#extension GL_EXT_shader_framebuffer_fetch : enable + +#ifdef _EXPORTED_CLEAR_COLOR +#if __VERSION__ >= 310 +layout(binding = 0, std140) uniform ClearColor { uniform highp vec4 value; } +clearColor; +#else +uniform mediump vec4 _EXPORTED_clearColor; +#endif +#endif + +#ifdef GL_EXT_shader_pixel_local_storage + +#ifdef _EXPORTED_STORE_COLOR +__pixel_local_inEXT PLS +#else +__pixel_local_outEXT PLS +#endif +{ + layout(rgba8) mediump vec4 colorBuffer; +#ifdef _EXPORTED_ENABLE_CLIPPING + layout(r32ui) highp uint clipBuffer; +#endif + layout(rgba8) mediump vec4 scratchColorBuffer; + layout(r32ui) highp uint coverageCountBuffer; +}; + +#ifndef GL_ARM_shader_framebuffer_fetch +#ifdef _EXPORTED_LOAD_COLOR +layout(location = 0) inout mediump vec4 fragColor; +#endif +#endif + +#ifdef _EXPORTED_STORE_COLOR +layout(location = 0) out mediump vec4 fragColor; +#endif + +void main() +{ +#ifdef _EXPORTED_CLEAR_COLOR +#if __VERSION__ >= 310 + colorBuffer = clearColor.value; +#else + colorBuffer = _EXPORTED_clearColor; +#endif +#endif + +#ifdef _EXPORTED_LOAD_COLOR +#ifdef GL_ARM_shader_framebuffer_fetch + colorBuffer = gl_LastFragColorARM; +#else + colorBuffer = fragColor; +#endif +#endif + +#ifdef _EXPORTED_CLEAR_COVERAGE + coverageCountBuffer = 0u; +#endif + +#ifdef _EXPORTED_CLEAR_CLIP + clipBuffer = 0u; +#endif + +#ifdef _EXPORTED_STORE_COLOR + fragColor = colorBuffer; +#endif +} + +#else + +// This shader is being parsed by WebGPU for introspection purposes. 
+layout(location = 0) out mediump vec4 unused; +void main() { unused = vec4(0, 1, 0, 1); } + +#endif // GL_EXT_shader_pixel_local_storage + +#endif // FRAGMENT +)==="; +} // namespace glsl +} // namespace gpu +} // namespace rive \ No newline at end of file diff --git a/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/pls_load_store_ext.minified.ush b/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/pls_load_store_ext.minified.ush new file mode 100644 index 00000000..356f89e2 --- /dev/null +++ b/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/pls_load_store_ext.minified.ush @@ -0,0 +1,98 @@ +/* + * Copyright 2022 Rive + */ + +// The EXT_shader_pixel_local_storage extension does not provide a mechanism to load, store, or +// clear pixel local storage contents. This shader performs custom load, store, and clear +// operations via fullscreen draws. + +#ifdef VERTEX +void main() +{ + // [-1, -1] .. [+1, +1] + gl_Position = + vec4(mix(vec2(-1, 1), vec2(1, -1), equal(gl_VertexID & ivec2(1, 2), ivec2(0))), 0, 1); +} +#endif + +#ifdef FRAGMENT + +#extension GL_EXT_shader_pixel_local_storage : enable +#extension GL_ARM_shader_framebuffer_fetch : enable +#extension GL_EXT_shader_framebuffer_fetch : enable + +#ifdef CLEAR_COLOR +#if __VERSION__ >= 310 +layout(binding = 0, std140) uniform ClearColor { uniform highp vec4 value; } +clearColor; +#else +uniform mediump vec4 _EXPORTED_clearColor; +#endif +#endif + +#ifdef GL_EXT_shader_pixel_local_storage + +#ifdef STORE_COLOR +__pixel_local_inEXT PLS +#else +__pixel_local_outEXT PLS +#endif +{ + layout(rgba8) mediump vec4 colorBuffer; +#ifdef ENABLE_CLIPPING + layout(r32ui) highp uint clipBuffer; +#endif + layout(rgba8) mediump vec4 scratchColorBuffer; + layout(r32ui) highp uint coverageCountBuffer; +}; + +#ifndef GL_ARM_shader_framebuffer_fetch +#ifdef LOAD_COLOR +layout(location = 0) inout mediump vec4 fragColor; +#endif +#endif + +#ifdef STORE_COLOR +layout(location = 0) out mediump vec4 fragColor; +#endif + +void main() +{ +#ifdef CLEAR_COLOR +#if __VERSION__ >= 310 + colorBuffer = clearColor.value; +#else + colorBuffer = _EXPORTED_clearColor; +#endif +#endif + +#ifdef LOAD_COLOR +#ifdef GL_ARM_shader_framebuffer_fetch + colorBuffer = gl_LastFragColorARM; +#else + colorBuffer = fragColor; +#endif +#endif + +#ifdef CLEAR_COVERAGE + coverageCountBuffer = 0u; +#endif + +#ifdef CLEAR_CLIP + clipBuffer = 0u; +#endif + +#ifdef STORE_COLOR + fragColor = colorBuffer; +#endif +} + +#else + +// This shader is being parsed by WebGPU for introspection purposes. 
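// (Editorial worked example for the VERTEX main at the top of this file: for
// gl_VertexID = 0..3, (id & 1, id & 2) takes the values (0,0), (1,0), (0,2), (1,2);
// mix() picks x from {-1,1} and y from {1,-1} wherever equal(..., 0) is true, yielding
// positions (1,-1), (-1,-1), (1,1), (-1,1) -- a 4-vertex triangle strip that covers the
// whole clip-space screen for these fullscreen load/store/clear draws.)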
+layout(location = 0) out mediump vec4 unused; +void main() { unused = vec4(0, 1, 0, 1); } + +#endif // GL_EXT_shader_pixel_local_storage + +#endif // FRAGMENT diff --git a/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/rhi.exports.h b/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/rhi.exports.h new file mode 100644 index 00000000..2d88d890 --- /dev/null +++ b/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/rhi.exports.h @@ -0,0 +1,178 @@ +#pragma once + +#define GLSL_CLEAR_CLIP "_EXPORTED_CLEAR_CLIP" +#define GLSL_CLEAR_CLIP_raw _EXPORTED_CLEAR_CLIP +#define GLSL_CLEAR_COLOR "_EXPORTED_CLEAR_COLOR" +#define GLSL_CLEAR_COLOR_raw _EXPORTED_CLEAR_COLOR +#define GLSL_CLEAR_COVERAGE "_EXPORTED_CLEAR_COVERAGE" +#define GLSL_CLEAR_COVERAGE_raw _EXPORTED_CLEAR_COVERAGE +#define GLSL_COALESCED_PLS_RESOLVE_AND_TRANSFER "_EXPORTED_COALESCED_PLS_RESOLVE_AND_TRANSFER" +#define GLSL_COALESCED_PLS_RESOLVE_AND_TRANSFER_raw _EXPORTED_COALESCED_PLS_RESOLVE_AND_TRANSFER +#define GLSL_COLOR_PLANE_IDX_OVERRIDE "_EXPORTED_COLOR_PLANE_IDX_OVERRIDE" +#define GLSL_COLOR_PLANE_IDX_OVERRIDE_raw _EXPORTED_COLOR_PLANE_IDX_OVERRIDE +#define GLSL_DISABLE_SHADER_STORAGE_BUFFERS "_EXPORTED_DISABLE_SHADER_STORAGE_BUFFERS" +#define GLSL_DISABLE_SHADER_STORAGE_BUFFERS_raw _EXPORTED_DISABLE_SHADER_STORAGE_BUFFERS +#define GLSL_DRAW_IMAGE "_EXPORTED_DRAW_IMAGE" +#define GLSL_DRAW_IMAGE_raw _EXPORTED_DRAW_IMAGE +#define GLSL_DRAW_IMAGE_MESH "_EXPORTED_DRAW_IMAGE_MESH" +#define GLSL_DRAW_IMAGE_MESH_raw _EXPORTED_DRAW_IMAGE_MESH +#define GLSL_DRAW_IMAGE_RECT "_EXPORTED_DRAW_IMAGE_RECT" +#define GLSL_DRAW_IMAGE_RECT_raw _EXPORTED_DRAW_IMAGE_RECT +#define GLSL_DRAW_INTERIOR_TRIANGLES "_EXPORTED_DRAW_INTERIOR_TRIANGLES" +#define GLSL_DRAW_INTERIOR_TRIANGLES_raw _EXPORTED_DRAW_INTERIOR_TRIANGLES +#define GLSL_DRAW_PATH "_EXPORTED_DRAW_PATH" +#define GLSL_DRAW_PATH_raw _EXPORTED_DRAW_PATH +#define GLSL_DRAW_RENDER_TARGET_UPDATE_BOUNDS "_EXPORTED_DRAW_RENDER_TARGET_UPDATE_BOUNDS" +#define GLSL_DRAW_RENDER_TARGET_UPDATE_BOUNDS_raw _EXPORTED_DRAW_RENDER_TARGET_UPDATE_BOUNDS +#define GLSL_ENABLE_ADVANCED_BLEND "_EXPORTED_ENABLE_ADVANCED_BLEND" +#define GLSL_ENABLE_ADVANCED_BLEND_raw _EXPORTED_ENABLE_ADVANCED_BLEND +#define GLSL_ENABLE_BINDLESS_TEXTURES "_EXPORTED_ENABLE_BINDLESS_TEXTURES" +#define GLSL_ENABLE_BINDLESS_TEXTURES_raw _EXPORTED_ENABLE_BINDLESS_TEXTURES +#define GLSL_ENABLE_CLIPPING "_EXPORTED_ENABLE_CLIPPING" +#define GLSL_ENABLE_CLIPPING_raw _EXPORTED_ENABLE_CLIPPING +#define GLSL_ENABLE_CLIP_RECT "_EXPORTED_ENABLE_CLIP_RECT" +#define GLSL_ENABLE_CLIP_RECT_raw _EXPORTED_ENABLE_CLIP_RECT +#define GLSL_ENABLE_EVEN_ODD "_EXPORTED_ENABLE_EVEN_ODD" +#define GLSL_ENABLE_EVEN_ODD_raw _EXPORTED_ENABLE_EVEN_ODD +#define GLSL_ENABLE_HSL_BLEND_MODES "_EXPORTED_ENABLE_HSL_BLEND_MODES" +#define GLSL_ENABLE_HSL_BLEND_MODES_raw _EXPORTED_ENABLE_HSL_BLEND_MODES +#define GLSL_ENABLE_INSTANCE_INDEX "_EXPORTED_ENABLE_INSTANCE_INDEX" +#define GLSL_ENABLE_INSTANCE_INDEX_raw _EXPORTED_ENABLE_INSTANCE_INDEX +#define GLSL_ENABLE_KHR_BLEND "_EXPORTED_ENABLE_KHR_BLEND" +#define GLSL_ENABLE_KHR_BLEND_raw _EXPORTED_ENABLE_KHR_BLEND +#define GLSL_ENABLE_MIN_16_PRECISION "_EXPORTED_ENABLE_MIN_16_PRECISION" +#define GLSL_ENABLE_MIN_16_PRECISION_raw _EXPORTED_ENABLE_MIN_16_PRECISION +#define GLSL_ENABLE_NESTED_CLIPPING "_EXPORTED_ENABLE_NESTED_CLIPPING" +#define GLSL_ENABLE_NESTED_CLIPPING_raw _EXPORTED_ENABLE_NESTED_CLIPPING +#define GLSL_ENABLE_RASTERIZER_ORDERED_VIEWS 
"_EXPORTED_ENABLE_RASTERIZER_ORDERED_VIEWS" +#define GLSL_ENABLE_RASTERIZER_ORDERED_VIEWS_raw _EXPORTED_ENABLE_RASTERIZER_ORDERED_VIEWS +#define GLSL_ENABLE_SPIRV_CROSS_BASE_INSTANCE "_EXPORTED_ENABLE_SPIRV_CROSS_BASE_INSTANCE" +#define GLSL_ENABLE_SPIRV_CROSS_BASE_INSTANCE_raw _EXPORTED_ENABLE_SPIRV_CROSS_BASE_INSTANCE +#define GLSL_ENABLE_TYPED_UAV_LOAD_STORE "_EXPORTED_ENABLE_TYPED_UAV_LOAD_STORE" +#define GLSL_ENABLE_TYPED_UAV_LOAD_STORE_raw _EXPORTED_ENABLE_TYPED_UAV_LOAD_STORE +#define GLSL_FIXED_FUNCTION_COLOR_BLEND "_EXPORTED_FIXED_FUNCTION_COLOR_BLEND" +#define GLSL_FIXED_FUNCTION_COLOR_BLEND_raw _EXPORTED_FIXED_FUNCTION_COLOR_BLEND +#define GLSL_FRAGMENT "_EXPORTED_FRAGMENT" +#define GLSL_FRAGMENT_raw _EXPORTED_FRAGMENT +#define GLSL_FlushUniforms "_EXPORTED_FlushUniforms" +#define GLSL_FlushUniforms_raw _EXPORTED_FlushUniforms +#define GLSL_GLSL_VERSION "_EXPORTED_GLSL_VERSION" +#define GLSL_GLSL_VERSION_raw _EXPORTED_GLSL_VERSION +#define GLSL_INITIALIZE_PLS "_EXPORTED_INITIALIZE_PLS" +#define GLSL_INITIALIZE_PLS_raw _EXPORTED_INITIALIZE_PLS +#define GLSL_ImageDrawUniforms "_EXPORTED_ImageDrawUniforms" +#define GLSL_ImageDrawUniforms_raw _EXPORTED_ImageDrawUniforms +#define GLSL_LOAD_COLOR "_EXPORTED_LOAD_COLOR" +#define GLSL_LOAD_COLOR_raw _EXPORTED_LOAD_COLOR +#define GLSL_OPTIONALLY_FLAT "_EXPORTED_OPTIONALLY_FLAT" +#define GLSL_OPTIONALLY_FLAT_raw _EXPORTED_OPTIONALLY_FLAT +#define GLSL_PLS_IMPL_ANGLE "_EXPORTED_PLS_IMPL_ANGLE" +#define GLSL_PLS_IMPL_ANGLE_raw _EXPORTED_PLS_IMPL_ANGLE +#define GLSL_PLS_IMPL_DEVICE_BUFFER "_EXPORTED_PLS_IMPL_DEVICE_BUFFER" +#define GLSL_PLS_IMPL_DEVICE_BUFFER_raw _EXPORTED_PLS_IMPL_DEVICE_BUFFER +#define GLSL_PLS_IMPL_DEVICE_BUFFER_RASTER_ORDERED "_EXPORTED_PLS_IMPL_DEVICE_BUFFER_RASTER_ORDERED" +#define GLSL_PLS_IMPL_DEVICE_BUFFER_RASTER_ORDERED_raw _EXPORTED_PLS_IMPL_DEVICE_BUFFER_RASTER_ORDERED +#define GLSL_PLS_IMPL_EXT_NATIVE "_EXPORTED_PLS_IMPL_EXT_NATIVE" +#define GLSL_PLS_IMPL_EXT_NATIVE_raw _EXPORTED_PLS_IMPL_EXT_NATIVE +#define GLSL_PLS_IMPL_FRAMEBUFFER_FETCH "_EXPORTED_PLS_IMPL_FRAMEBUFFER_FETCH" +#define GLSL_PLS_IMPL_FRAMEBUFFER_FETCH_raw _EXPORTED_PLS_IMPL_FRAMEBUFFER_FETCH +#define GLSL_PLS_IMPL_NONE "_EXPORTED_PLS_IMPL_NONE" +#define GLSL_PLS_IMPL_NONE_raw _EXPORTED_PLS_IMPL_NONE +#define GLSL_PLS_IMPL_STORAGE_TEXTURE "_EXPORTED_PLS_IMPL_STORAGE_TEXTURE" +#define GLSL_PLS_IMPL_STORAGE_TEXTURE_raw _EXPORTED_PLS_IMPL_STORAGE_TEXTURE +#define GLSL_PLS_IMPL_SUBPASS_LOAD "_EXPORTED_PLS_IMPL_SUBPASS_LOAD" +#define GLSL_PLS_IMPL_SUBPASS_LOAD_raw _EXPORTED_PLS_IMPL_SUBPASS_LOAD +#define GLSL_RESOLVE_PLS "_EXPORTED_RESOLVE_PLS" +#define GLSL_RESOLVE_PLS_raw _EXPORTED_RESOLVE_PLS +#define GLSL_STORE_COLOR "_EXPORTED_STORE_COLOR" +#define GLSL_STORE_COLOR_raw _EXPORTED_STORE_COLOR +#define GLSL_STORE_COLOR_CLEAR "_EXPORTED_STORE_COLOR_CLEAR" +#define GLSL_STORE_COLOR_CLEAR_raw _EXPORTED_STORE_COLOR_CLEAR +#define GLSL_SWIZZLE_COLOR_BGRA_TO_RGBA "_EXPORTED_SWIZZLE_COLOR_BGRA_TO_RGBA" +#define GLSL_SWIZZLE_COLOR_BGRA_TO_RGBA_raw _EXPORTED_SWIZZLE_COLOR_BGRA_TO_RGBA +#define GLSL_TARGET_VULKAN "_EXPORTED_TARGET_VULKAN" +#define GLSL_TARGET_VULKAN_raw _EXPORTED_TARGET_VULKAN +#define GLSL_USE_GENERATED_UNIFORMS "_EXPORTED_USE_GENERATED_UNIFORMS" +#define GLSL_USE_GENERATED_UNIFORMS_raw _EXPORTED_USE_GENERATED_UNIFORMS +#define GLSL_USING_DEPTH_STENCIL "_EXPORTED_USING_DEPTH_STENCIL" +#define GLSL_USING_DEPTH_STENCIL_raw _EXPORTED_USING_DEPTH_STENCIL +#define GLSL_USING_PLS_STORAGE_TEXTURES "_EXPORTED_USING_PLS_STORAGE_TEXTURES" +#define 
GLSL_USING_PLS_STORAGE_TEXTURES_raw _EXPORTED_USING_PLS_STORAGE_TEXTURES +#define GLSL_VERTEX "_EXPORTED_VERTEX" +#define GLSL_VERTEX_raw _EXPORTED_VERTEX +#define GLSL_a_args "_EXPORTED_a_args" +#define GLSL_a_args_raw _EXPORTED_a_args +#define GLSL_a_args_a "_EXPORTED_a_args_a" +#define GLSL_a_args_a_raw _EXPORTED_a_args_a +#define GLSL_a_args_b "_EXPORTED_a_args_b" +#define GLSL_a_args_b_raw _EXPORTED_a_args_b +#define GLSL_a_args_c "_EXPORTED_a_args_c" +#define GLSL_a_args_c_raw _EXPORTED_a_args_c +#define GLSL_a_args_d "_EXPORTED_a_args_d" +#define GLSL_a_args_d_raw _EXPORTED_a_args_d +#define GLSL_a_imageRectVertex "_EXPORTED_a_imageRectVertex" +#define GLSL_a_imageRectVertex_raw _EXPORTED_a_imageRectVertex +#define GLSL_a_joinTan_and_ys "_EXPORTED_a_joinTan_and_ys" +#define GLSL_a_joinTan_and_ys_raw _EXPORTED_a_joinTan_and_ys +#define GLSL_a_mirroredVertexData "_EXPORTED_a_mirroredVertexData" +#define GLSL_a_mirroredVertexData_raw _EXPORTED_a_mirroredVertexData +#define GLSL_a_p0p1_ "_EXPORTED_a_p0p1_" +#define GLSL_a_p0p1__raw _EXPORTED_a_p0p1_ +#define GLSL_a_p2p3_ "_EXPORTED_a_p2p3_" +#define GLSL_a_p2p3__raw _EXPORTED_a_p2p3_ +#define GLSL_a_patchVertexData "_EXPORTED_a_patchVertexData" +#define GLSL_a_patchVertexData_raw _EXPORTED_a_patchVertexData +#define GLSL_a_position "_EXPORTED_a_position" +#define GLSL_a_position_raw _EXPORTED_a_position +#define GLSL_a_span "_EXPORTED_a_span" +#define GLSL_a_span_raw _EXPORTED_a_span +#define GLSL_a_span_a "_EXPORTED_a_span_a" +#define GLSL_a_span_a_raw _EXPORTED_a_span_a +#define GLSL_a_span_b "_EXPORTED_a_span_b" +#define GLSL_a_span_b_raw _EXPORTED_a_span_b +#define GLSL_a_span_c "_EXPORTED_a_span_c" +#define GLSL_a_span_c_raw _EXPORTED_a_span_c +#define GLSL_a_span_d "_EXPORTED_a_span_d" +#define GLSL_a_span_d_raw _EXPORTED_a_span_d +#define GLSL_a_texCoord "_EXPORTED_a_texCoord" +#define GLSL_a_texCoord_raw _EXPORTED_a_texCoord +#define GLSL_a_triangleVertex "_EXPORTED_a_triangleVertex" +#define GLSL_a_triangleVertex_raw _EXPORTED_a_triangleVertex +#define GLSL_blitFragmentMain "_EXPORTED_blitFragmentMain" +#define GLSL_blitFragmentMain_raw _EXPORTED_blitFragmentMain +#define GLSL_blitTextureSource "_EXPORTED_blitTextureSource" +#define GLSL_blitTextureSource_raw _EXPORTED_blitTextureSource +#define GLSL_blitVertexMain "_EXPORTED_blitVertexMain" +#define GLSL_blitVertexMain_raw _EXPORTED_blitVertexMain +#define GLSL_clearColor "_EXPORTED_clearColor" +#define GLSL_clearColor_raw _EXPORTED_clearColor +#define GLSL_colorRampFragmentMain "_EXPORTED_colorRampFragmentMain" +#define GLSL_colorRampFragmentMain_raw _EXPORTED_colorRampFragmentMain +#define GLSL_colorRampVertexMain "_EXPORTED_colorRampVertexMain" +#define GLSL_colorRampVertexMain_raw _EXPORTED_colorRampVertexMain +#define GLSL_contourBuffer "_EXPORTED_contourBuffer" +#define GLSL_contourBuffer_raw _EXPORTED_contourBuffer +#define GLSL_drawFragmentMain "_EXPORTED_drawFragmentMain" +#define GLSL_drawFragmentMain_raw _EXPORTED_drawFragmentMain +#define GLSL_drawVertexMain "_EXPORTED_drawVertexMain" +#define GLSL_drawVertexMain_raw _EXPORTED_drawVertexMain +#define GLSL_dstColorTexture "_EXPORTED_dstColorTexture" +#define GLSL_dstColorTexture_raw _EXPORTED_dstColorTexture +#define GLSL_gradTexture "_EXPORTED_gradTexture" +#define GLSL_gradTexture_raw _EXPORTED_gradTexture +#define GLSL_imageTexture "_EXPORTED_imageTexture" +#define GLSL_imageTexture_raw _EXPORTED_imageTexture +#define GLSL_paintAuxBuffer "_EXPORTED_paintAuxBuffer" +#define GLSL_paintAuxBuffer_raw 
_EXPORTED_paintAuxBuffer +#define GLSL_paintBuffer "_EXPORTED_paintBuffer" +#define GLSL_paintBuffer_raw _EXPORTED_paintBuffer +#define GLSL_pathBuffer "_EXPORTED_pathBuffer" +#define GLSL_pathBuffer_raw _EXPORTED_pathBuffer +#define GLSL_stencilVertexMain "_EXPORTED_stencilVertexMain" +#define GLSL_stencilVertexMain_raw _EXPORTED_stencilVertexMain +#define GLSL_tessVertexTexture "_EXPORTED_tessVertexTexture" +#define GLSL_tessVertexTexture_raw _EXPORTED_tessVertexTexture +#define GLSL_tessellateFragmentMain "_EXPORTED_tessellateFragmentMain" +#define GLSL_tessellateFragmentMain_raw _EXPORTED_tessellateFragmentMain +#define GLSL_tessellateVertexMain "_EXPORTED_tessellateVertexMain" +#define GLSL_tessellateVertexMain_raw _EXPORTED_tessellateVertexMain diff --git a/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/rhi.glsl.hpp b/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/rhi.glsl.hpp new file mode 100644 index 00000000..6814ec40 --- /dev/null +++ b/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/rhi.glsl.hpp @@ -0,0 +1,365 @@ +#pragma once + +#include "rhi.exports.h" + +namespace rive { +namespace gpu { +namespace glsl { +const char rhi[] = R"===(/* + * Copyright 2023 Rive + */ + +// This header provides GLSL-specific #defines and declarations that enable our shaders to be +// compiled on MSL and GLSL both. + +// HLSL warns that it will unroll the loops through r,g,b values in advanced_blend.glsl, but +// unrolling these loops is exactly what we want. +#pragma warning(disable : 3550) + +// Don't warn about uninitialized variables. If we leave one uninitialized it's because we know what +// we're doing and don't want to pay the cost of initializing it. +#pragma warning(disable : 4000) + +// #define native hlsl types if their names are being rewritten. 
+#define _ARE_TOKEN_NAMES_PRESERVED +#ifndef _ARE_TOKEN_NAMES_PRESERVED +#define half half +#define half2 half2 +#define half3 half3 +#define half4 half4 +#define short short +#define short2 short2 +#define short3 short3 +#define short4 short4 +#define ushort ushort +#define ushort2 ushort2 +#define ushort3 ushort3 +#define ushort4 ushort4 +#define float2 float2 +#define float3 float3 +#define float4 float4 +#define bool2 bool2 +#define bool3 bool3 +#define bool4 bool4 +#define uint2 uint2 +#define uint3 uint3 +#define uint4 uint4 +#define int2 int2 +#define int3 int3 +#define int4 int4 +#define float4x2 float4x2 +#define ushort ushort +#define float2x2 float2x2 +#define half3x4 half3x4 +#endif + +typedef float3 packed_float3; + +#ifdef _EXPORTED_ENABLE_MIN_16_PRECISION + +typedef min16uint ushort; + +#else + +typedef uint ushort; + +#endif + +#define SPLAT(A, B) A##B + +#define INLINE inline +#define OUT(ARG_TYPE) out ARG_TYPE + +#define ATTR_BLOCK_BEGIN(NAME) \ + struct NAME \ + { +#define ATTR(IDX, TYPE, NAME) TYPE NAME : SPLAT(ATTRIBUTE, IDX) +#define ATTR_BLOCK_END \ + } \ + ; +#define ATTR_LOAD(T, A, N, I) +#define ATTR_UNPACK(ID, attrs, NAME, TYPE) TYPE NAME = attrs.NAME + +#define UNIFORM_BUFFER_REGISTER(IDX) register(SPLAT(b,IDX)) + +#define UNIFORM_BLOCK_BEGIN(IDX, NAME) \ + cbuffer NAME : UNIFORM_BUFFER_REGISTER(IDX) \ + { \ + struct \ + { + +#define UNIFORM_BLOCK_END(NAME) \ + } \ + NAME; \ + } + +#define VARYING_BLOCK_BEGIN \ + struct Varyings \ + { + +#define NO_PERSPECTIVE noperspective +#define _EXPORTED_OPTIONALLY_FLAT nointerpolation +#define FLAT nointerpolation +#define VARYING(IDX, TYPE, NAME) TYPE NAME : SPLAT(TEXCOORD,IDX) + +#define VARYING_BLOCK_END \ + float4 _pos : SV_Position; \ + } \ + ; + +#define VARYING_INIT(NAME, TYPE) TYPE NAME +#define VARYING_PACK(NAME) _varyings.NAME = NAME +#define VARYING_UNPACK(NAME, TYPE) TYPE NAME = _varyings.NAME + +#ifdef _EXPORTED_VERTEX +#define VERTEX_TEXTURE_BLOCK_BEGIN +#define VERTEX_TEXTURE_BLOCK_END +#endif + +#ifdef _EXPORTED_FRAGMENT +#define FRAG_TEXTURE_BLOCK_BEGIN +#define FRAG_TEXTURE_BLOCK_END +#endif + +#define TEXTURE_RGBA32UI(SET, IDX, NAME) uniform Texture2D<uint4> NAME : register(SPLAT(t,IDX)) +#define TEXTURE_RGBA32F(SET, IDX, NAME) uniform Texture2D<float4> NAME : register(SPLAT(t,IDX)) +#define TEXTURE_RGBA8(SET, IDX, NAME) uniform Texture2D<unorm half4> NAME : register(SPLAT(t,IDX)) + +// SAMPLER_LINEAR and SAMPLER_MIPMAP are the same because in d3d11, sampler parameters are defined +// at the API level.
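// (Editorial note: i.e. the shader-side declaration below is just a register slot; the
// filtering that SAMPLER_LINEAR vs. SAMPLER_MIPMAP implies on Metal is chosen by the
// engine at draw time. A hypothetical D3D11 API-side setup:)
#if 0
D3D11_SAMPLER_DESC desc = {};                      // zero-initialized defaults
desc.Filter = D3D11_FILTER_MIN_MAG_MIP_LINEAR;     // linear min/mag/mip
desc.MaxLOD = D3D11_FLOAT32_MAX;                   // allow mip levels ("mipmap" variant)
device->CreateSamplerState(&desc, &samplerState);  // ID3D11Device
context->PSSetSamplers(0, 1, &samplerState);       // bind to register(s0)
#endif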
+#define SAMPLER(TEXTURE_IDX, NAME) SamplerState NAME : register(SPLAT(s,TEXTURE_IDX)); +#define SAMPLER_LINEAR SAMPLER +#define SAMPLER_MIPMAP SAMPLER + +#define TEXEL_FETCH(NAME, COORD) NAME[COORD] +#define TEXTURE_SAMPLE(NAME, SAMPLER_NAME, COORD) NAME.Sample(SAMPLER_NAME, COORD) +#define TEXTURE_SAMPLE_LOD(NAME, SAMPLER_NAME, COORD, LOD) \ + NAME.SampleLevel(SAMPLER_NAME, COORD, LOD) +#define TEXTURE_SAMPLE_GRAD(NAME, SAMPLER_NAME, COORD, DDX, DDY) \ + NAME.SampleGrad(SAMPLER_NAME, COORD, DDX, DDY) + +#define PLS_INTERLOCK_BEGIN +#define PLS_INTERLOCK_END + +#ifdef _EXPORTED_ENABLE_RASTERIZER_ORDERED_VIEWS +#define PLS_TEX2D RasterizerOrderedTexture2D +#else +#define PLS_TEX2D RWTexture2D +#endif + +#define PLS_BLOCK_BEGIN +#ifdef _EXPORTED_ENABLE_TYPED_UAV_LOAD_STORE +#define PLS_DECL4F(IDX, NAME) uniform PLS_TEX2D NAME : register(SPLAT(u,IDX)) +#else +#define PLS_DECL4F(IDX, NAME) uniform PLS_TEX2D NAME : register(SPLAT(u,IDX)) +#endif +#define PLS_DECLUI(IDX, NAME) uniform PLS_TEX2D NAME : register(SPLAT(u,IDX)) +#define PLS_DECLUI_ATOMIC PLS_DECLUI +#define PLS_LOADUI_ATOMIC PLS_LOADUI +#define PLS_STOREUI_ATOMIC PLS_STOREUI +#define PLS_BLOCK_END + +#ifdef _EXPORTED_ENABLE_TYPED_UAV_LOAD_STORE +#define PLS_LOAD4F(PLANE) PLANE[_plsCoord] +#else +#define PLS_LOAD4F(PLANE) unpackUnorm4x8(PLANE[_plsCoord]) +#endif +#define PLS_LOADUI(PLANE) PLANE[_plsCoord] +#ifdef _EXPORTED_ENABLE_TYPED_UAV_LOAD_STORE +#define PLS_STORE4F(PLANE, VALUE) PLANE[_plsCoord] = (VALUE) +#else +#define PLS_STORE4F(PLANE, VALUE) PLANE[_plsCoord] = packUnorm4x8(VALUE) +#endif +#define PLS_STOREUI(PLANE, VALUE) PLANE[_plsCoord] = (VALUE) + +INLINE uint pls_atomic_max(PLS_TEX2D plane, int2 _plsCoord, uint x) +{ + uint originalValue; + InterlockedMax(plane[_plsCoord], x, originalValue); + return originalValue; +} + +#define PLS_ATOMIC_MAX(PLANE, X) pls_atomic_max(PLANE, _plsCoord, X) + +INLINE uint pls_atomic_add(PLS_TEX2D plane, int2 _plsCoord, uint x) +{ + uint originalValue; + InterlockedAdd(plane[_plsCoord], x, originalValue); + return originalValue; +} + +#define PLS_ATOMIC_ADD(PLANE, X) pls_atomic_add(PLANE, _plsCoord, X) + +#define PLS_PRESERVE_4F(PLANE) +#define PLS_PRESERVE_UI(PLANE) + +#define VERTEX_CONTEXT_DECL +#define VERTEX_CONTEXT_UNPACK + +#define VERTEX_MAIN(NAME, Attrs, attrs, _vertexID, _instanceID) \ + \ + uint baseInstance; \ + \ + Varyings NAME(Attrs attrs, uint _vertexID \ + : SV_VertexID, uint _instanceIDWithoutBase \ + : SV_InstanceID) \ + { \ + uint _instanceID = _instanceIDWithoutBase + baseInstance; \ + Varyings _varyings; + +#define IMAGE_RECT_VERTEX_MAIN(NAME, Attrs, attrs, _vertexID, _instanceID) \ + Varyings NAME(Attrs attrs, uint _vertexID : SV_VertexID) \ + { \ + Varyings _varyings; \ + float4 _pos; + +#define IMAGE_MESH_VERTEX_MAIN(NAME, PositionAttr, position, UVAttr, uv, _vertexID) \ + Varyings NAME(PositionAttr position, UVAttr uv, uint _vertexID : SV_VertexID) \ + { \ + Varyings _varyings; \ + float4 _pos; + +#define EMIT_VERTEX(POSITION) \ + _varyings._pos = POSITION; \ + } \ + return _varyings; + +#define FRAG_DATA_MAIN(DATA_TYPE, NAME) \ + DATA_TYPE NAME(Varyings _varyings) : SV_Target \ + { + +#define EMIT_FRAG_DATA(VALUE) \ + return VALUE; \ + } + +#define FRAGMENT_CONTEXT_DECL , float2 _fragCoord +#define FRAGMENT_CONTEXT_UNPACK , _fragCoord + +#define PLS_CONTEXT_DECL , int2 _plsCoord +#define PLS_CONTEXT_UNPACK , _plsCoord + +#define PLS_MAIN(NAME) [earlydepthstencil] void NAME(Varyings _varyings) { \ + float2 _fragCoord = _varyings._pos.xy;\ + int2 _plsCoord = 
int2(floor(_fragCoord)); + +#define PLS_MAIN_WITH_IMAGE_UNIFORMS(NAME) PLS_MAIN(NAME) + +#define EMIT_PLS } + +#define PLS_FRAG_COLOR_MAIN(NAME) \ + [earlydepthstencil] half4 NAME(Varyings _varyings) : SV_Target \ + { \ + float2 _fragCoord = _varyings._pos.xy; \ + int2 _plsCoord = int2(floor(_fragCoord)); \ + half4 _fragColor; + +#define PLS_FRAG_COLOR_MAIN_WITH_IMAGE_UNIFORMS(NAME) PLS_FRAG_COLOR_MAIN(NAME) + +#define EMIT_PLS_AND_FRAG_COLOR \ + } \ + return _fragColor; + +#define uintBitsToFloat asfloat +#define intBitsToFloat asfloat +#define floatBitsToInt asint +#define floatBitsToUint asuint +#define inversesqrt rsqrt +#define notEqual(A, B) ((A) != (B)) +#define lessThanEqual(A, B) ((A) <= (B)) +#define greaterThanEqual(A, B) ((A) >= (B)) + +// HLSL matrices are stored in row-major order, and therefore transposed from their counterparts +// in GLSL and Metal. We can work around this entirely by reversing the arguments to mul(). +#define MUL(A, B) mul(B, A) + +#define VERTEX_STORAGE_BUFFER_BLOCK_BEGIN +#define VERTEX_STORAGE_BUFFER_BLOCK_END + +#define FRAG_STORAGE_BUFFER_BLOCK_BEGIN +#define FRAG_STORAGE_BUFFER_BLOCK_END + +#define STORAGE_BUFFER_U32x2(IDX, GLSL_STRUCT_NAME, NAME) \ + StructuredBuffer NAME : register(SPLAT(t,IDX)) +#define STORAGE_BUFFER_U32x4(IDX, GLSL_STRUCT_NAME, NAME) \ + StructuredBuffer NAME : register(SPLAT(t,IDX)) +#define STORAGE_BUFFER_F32x4(IDX, GLSL_STRUCT_NAME, NAME) \ + StructuredBuffer NAME : register(SPLAT(t,IDX)) + +#define STORAGE_BUFFER_LOAD4(NAME, I) NAME[I] +#define STORAGE_BUFFER_LOAD2(NAME, I) NAME[I] + +INLINE half2 unpackHalf2x16(uint u) +{ + uint y = (u >> 16); + uint x = u & 0xffffu; + return half2(f16tof32(x), f16tof32(y)); +} + +INLINE uint packHalf2x16(float2 v) +{ + uint x = f32tof16(v.x); + uint y = f32tof16(v.y); + return (y << 16) | x; +} + +INLINE half4 unpackUnorm4x8(uint u) +{ + uint4 vals = uint4(u & 0xffu, (u >> 8) & 0xffu, (u >> 16) & 0xffu, u >> 24); + return half4(vals) * (1. / 255.); +} + +INLINE uint packUnorm4x8(half4 color) +{ + uint4 vals = (uint4(color * 255.) & 0xff) << uint4(0, 8, 16, 24); + vals.xy |= vals.zw; + vals.x |= vals.y; + return vals.x; +} + +INLINE float atan(float y, float x) { return atan2(y, x); } + +INLINE float2x2 inverse(float2x2 m) +{ + float2x2 adjoint = float2x2(m[1][1], -m[0][1], -m[1][0], m[0][0]); + return adjoint * (1. / determinant(m)); +} + +// Redirects for intrinsics that have different names in HLSL + +INLINE float mix(float x, float y, float s) { return lerp(x, y, s); } +INLINE float2 mix(float2 x, float2 y, float2 s) { return lerp(x, y, s); } +INLINE float3 mix(float3 x, float3 y, float3 s) { return lerp(x, y, s); } +INLINE float4 mix(float4 x, float4 y, float4 s) { return lerp(x, y, s); } + +INLINE float fract(float x) { return frac(x); } +INLINE float2 fract(float2 x) { return frac(x); } +INLINE float3 fract(float3 x) { return frac(x); } +INLINE float4 fract(float4 x) { return frac(x); } + +// Reimplement intrinsics for half types. +// This shadows the intrinsic function for floats, so we also have to declare that overload. 
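+// (For instance, once "#define sign rive_sign" below takes effect, every call spelled sign(...)
+// resolves to rive_sign(); the float overloads keep pre-existing float call sites compiling,
+// and min16 "half" arguments reach them through implicit promotion.)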
+
+INLINE float rive_sign(float x) { return sign(x); }
+INLINE float2 rive_sign(float2 x) { return sign(x); }
+INLINE float3 rive_sign(float3 x) { return sign(x); }
+INLINE float4 rive_sign(float4 x) { return sign(x); }
+
+#define sign rive_sign
+
+INLINE float rive_abs(float x) { return abs(x); }
+INLINE float2 rive_abs(float2 x) { return abs(x); }
+INLINE float3 rive_abs(float3 x) { return abs(x); }
+INLINE float4 rive_abs(float4 x) { return abs(x); }
+
+#define abs rive_abs
+
+INLINE float rive_sqrt(float x) { return sqrt(x); }
+INLINE float2 rive_sqrt(float2 x) { return sqrt(x); }
+INLINE float3 rive_sqrt(float3 x) { return sqrt(x); }
+INLINE float4 rive_sqrt(float4 x) { return sqrt(x); }
+
+#define sqrt rive_sqrt
+)===";
+} // namespace glsl
+} // namespace gpu
+} // namespace rive
\ No newline at end of file
diff --git a/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/rhi.minified.ush b/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/rhi.minified.ush
new file mode 100644
index 00000000..41f7cae4
--- /dev/null
+++ b/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/rhi.minified.ush
@@ -0,0 +1,354 @@
+/*
+ * Copyright 2023 Rive
+ */
+
+// This header provides HLSL-specific #defines and declarations that enable our GLSL-based
+// shaders to be compiled as HLSL.
+
+// HLSL warns that it will unroll the loops through r,g,b values in advanced_blend.glsl, but
+// unrolling these loops is exactly what we want.
+#pragma warning(disable : 3550)
+
+// Don't warn about uninitialized variables. If we leave one uninitialized, it's because we know
+// what we're doing and don't want to pay the cost of initializing it.
+#pragma warning(disable : 4000)
+
+// #define native HLSL types if their names are being rewritten.
+#define _ARE_TOKEN_NAMES_PRESERVED +#ifndef _ARE_TOKEN_NAMES_PRESERVED +#define half half +#define half2 half2 +#define half3 half3 +#define half4 half4 +#define short short +#define short2 short2 +#define short3 short3 +#define short4 short4 +#define ushort ushort +#define ushort2 ushort2 +#define ushort3 ushort3 +#define ushort4 ushort4 +#define float2 float2 +#define float3 float3 +#define float4 float4 +#define bool2 bool2 +#define bool3 bool3 +#define bool4 bool4 +#define uint2 uint2 +#define uint3 uint3 +#define uint4 uint4 +#define int2 int2 +#define int3 int3 +#define int4 int4 +#define float4x2 float4x2 +#define ushort ushort +#define float2x2 float2x2 +#define half3x4 half3x4 +#endif + +typedef float3 packed_float3; + +#ifdef ENABLE_MIN_16_PRECISION + +typedef min16uint ushort; + +#else + +typedef uint ushort; + +#endif + +#define SPLAT(A, B) A##B + +#define INLINE inline +#define OUT(ARG_TYPE) out ARG_TYPE + +#define ATTR_BLOCK_BEGIN(NAME) \ + struct NAME \ + { +#define ATTR(IDX, TYPE, NAME) TYPE NAME : SPLAT(ATTRIBUTE, IDX) +#define ATTR_BLOCK_END \ + } \ + ; +#define ATTR_LOAD(T, A, N, I) +#define ATTR_UNPACK(ID, attrs, NAME, TYPE) TYPE NAME = attrs.NAME + +#define UNIFORM_BUFFER_REGISTER(IDX) register(SPLAT(b,IDX)) + +#define UNIFORM_BLOCK_BEGIN(IDX, NAME) \ + cbuffer NAME : UNIFORM_BUFFER_REGISTER(IDX) \ + { \ + struct \ + { + +#define UNIFORM_BLOCK_END(NAME) \ + } \ + NAME; \ + } + +#define VARYING_BLOCK_BEGIN \ + struct Varyings \ + { + +#define NO_PERSPECTIVE noperspective +#define OPTIONALLY_FLAT nointerpolation +#define FLAT nointerpolation +#define VARYING(IDX, TYPE, NAME) TYPE NAME : SPLAT(TEXCOORD,IDX) + +#define VARYING_BLOCK_END \ + float4 _pos : SV_Position; \ + } \ + ; + +#define VARYING_INIT(NAME, TYPE) TYPE NAME +#define VARYING_PACK(NAME) _varyings.NAME = NAME +#define VARYING_UNPACK(NAME, TYPE) TYPE NAME = _varyings.NAME + +#ifdef VERTEX +#define VERTEX_TEXTURE_BLOCK_BEGIN +#define VERTEX_TEXTURE_BLOCK_END +#endif + +#ifdef FRAGMENT +#define FRAG_TEXTURE_BLOCK_BEGIN +#define FRAG_TEXTURE_BLOCK_END +#endif + +#define TEXTURE_RGBA32UI(SET, IDX, NAME) uniform Texture2D NAME : register(SPLAT(t,IDX)) +#define TEXTURE_RGBA32F(SET, IDX, NAME) uniform Texture2D NAME : register(SPLAT(t,IDX)) +#define TEXTURE_RGBA8(SET, IDX, NAME) uniform Texture2D NAME : register(SPLAT(t,IDX)) + +// SAMPLER_LINEAR and SAMPLER_MIPMAP are the same because in d3d11, sampler parameters are defined +// at the API level. 
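+// (For illustration, with a hypothetical name: TEXTURE_RGBA8(0, 2, exampleTex) expands to
+// "uniform Texture2D exampleTex : register(t2)"; the SET argument exists for backends that use
+// descriptor sets and is ignored in this header.)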
+#define SAMPLER(TEXTURE_IDX, NAME) SamplerState NAME : register(SPLAT(s,TEXTURE_IDX)); +#define SAMPLER_LINEAR SAMPLER +#define SAMPLER_MIPMAP SAMPLER + +#define TEXEL_FETCH(NAME, COORD) NAME[COORD] +#define TEXTURE_SAMPLE(NAME, SAMPLER_NAME, COORD) NAME.Sample(SAMPLER_NAME, COORD) +#define TEXTURE_SAMPLE_LOD(NAME, SAMPLER_NAME, COORD, LOD) \ + NAME.SampleLevel(SAMPLER_NAME, COORD, LOD) +#define TEXTURE_SAMPLE_GRAD(NAME, SAMPLER_NAME, COORD, DDX, DDY) \ + NAME.SampleGrad(SAMPLER_NAME, COORD, DDX, DDY) + +#define PLS_INTERLOCK_BEGIN +#define PLS_INTERLOCK_END + +#ifdef ENABLE_RASTERIZER_ORDERED_VIEWS +#define PLS_TEX2D RasterizerOrderedTexture2D +#else +#define PLS_TEX2D RWTexture2D +#endif + +#define PLS_BLOCK_BEGIN +#ifdef ENABLE_TYPED_UAV_LOAD_STORE +#define PLS_DECL4F(IDX, NAME) uniform PLS_TEX2D NAME : register(SPLAT(u,IDX)) +#else +#define PLS_DECL4F(IDX, NAME) uniform PLS_TEX2D NAME : register(SPLAT(u,IDX)) +#endif +#define PLS_DECLUI(IDX, NAME) uniform PLS_TEX2D NAME : register(SPLAT(u,IDX)) +#define PLS_DECLUI_ATOMIC PLS_DECLUI +#define PLS_LOADUI_ATOMIC PLS_LOADUI +#define PLS_STOREUI_ATOMIC PLS_STOREUI +#define PLS_BLOCK_END + +#ifdef ENABLE_TYPED_UAV_LOAD_STORE +#define PLS_LOAD4F(PLANE) PLANE[_plsCoord] +#else +#define PLS_LOAD4F(PLANE) unpackUnorm4x8(PLANE[_plsCoord]) +#endif +#define PLS_LOADUI(PLANE) PLANE[_plsCoord] +#ifdef ENABLE_TYPED_UAV_LOAD_STORE +#define PLS_STORE4F(PLANE, VALUE) PLANE[_plsCoord] = (VALUE) +#else +#define PLS_STORE4F(PLANE, VALUE) PLANE[_plsCoord] = packUnorm4x8(VALUE) +#endif +#define PLS_STOREUI(PLANE, VALUE) PLANE[_plsCoord] = (VALUE) + +INLINE uint pls_atomic_max(PLS_TEX2D plane, int2 _plsCoord, uint x) +{ + uint originalValue; + InterlockedMax(plane[_plsCoord], x, originalValue); + return originalValue; +} + +#define PLS_ATOMIC_MAX(PLANE, X) pls_atomic_max(PLANE, _plsCoord, X) + +INLINE uint pls_atomic_add(PLS_TEX2D plane, int2 _plsCoord, uint x) +{ + uint originalValue; + InterlockedAdd(plane[_plsCoord], x, originalValue); + return originalValue; +} + +#define PLS_ATOMIC_ADD(PLANE, X) pls_atomic_add(PLANE, _plsCoord, X) + +#define PLS_PRESERVE_4F(PLANE) +#define PLS_PRESERVE_UI(PLANE) + +#define VERTEX_CONTEXT_DECL +#define VERTEX_CONTEXT_UNPACK + +#define VERTEX_MAIN(NAME, Attrs, attrs, _vertexID, _instanceID) \ + \ + uint baseInstance; \ + \ + Varyings NAME(Attrs attrs, uint _vertexID \ + : SV_VertexID, uint _instanceIDWithoutBase \ + : SV_InstanceID) \ + { \ + uint _instanceID = _instanceIDWithoutBase + baseInstance; \ + Varyings _varyings; + +#define IMAGE_RECT_VERTEX_MAIN(NAME, Attrs, attrs, _vertexID, _instanceID) \ + Varyings NAME(Attrs attrs, uint _vertexID : SV_VertexID) \ + { \ + Varyings _varyings; \ + float4 _pos; + +#define IMAGE_MESH_VERTEX_MAIN(NAME, PositionAttr, position, UVAttr, uv, _vertexID) \ + Varyings NAME(PositionAttr position, UVAttr uv, uint _vertexID : SV_VertexID) \ + { \ + Varyings _varyings; \ + float4 _pos; + +#define EMIT_VERTEX(POSITION) \ + _varyings._pos = POSITION; \ + } \ + return _varyings; + +#define FRAG_DATA_MAIN(DATA_TYPE, NAME) \ + DATA_TYPE NAME(Varyings _varyings) : SV_Target \ + { + +#define EMIT_FRAG_DATA(VALUE) \ + return VALUE; \ + } + +#define FRAGMENT_CONTEXT_DECL , float2 _fragCoord +#define FRAGMENT_CONTEXT_UNPACK , _fragCoord + +#define PLS_CONTEXT_DECL , int2 _plsCoord +#define PLS_CONTEXT_UNPACK , _plsCoord + +#define PLS_MAIN(NAME) [earlydepthstencil] void NAME(Varyings _varyings) { \ + float2 _fragCoord = _varyings._pos.xy;\ + int2 _plsCoord = int2(floor(_fragCoord)); + +#define 
PLS_MAIN_WITH_IMAGE_UNIFORMS(NAME) PLS_MAIN(NAME) + +#define EMIT_PLS } + +#define PLS_FRAG_COLOR_MAIN(NAME) \ + [earlydepthstencil] half4 NAME(Varyings _varyings) : SV_Target \ + { \ + float2 _fragCoord = _varyings._pos.xy; \ + int2 _plsCoord = int2(floor(_fragCoord)); \ + half4 _fragColor; + +#define PLS_FRAG_COLOR_MAIN_WITH_IMAGE_UNIFORMS(NAME) PLS_FRAG_COLOR_MAIN(NAME) + +#define EMIT_PLS_AND_FRAG_COLOR \ + } \ + return _fragColor; + +#define uintBitsToFloat asfloat +#define intBitsToFloat asfloat +#define floatBitsToInt asint +#define floatBitsToUint asuint +#define inversesqrt rsqrt +#define notEqual(A, B) ((A) != (B)) +#define lessThanEqual(A, B) ((A) <= (B)) +#define greaterThanEqual(A, B) ((A) >= (B)) + +// HLSL matrices are stored in row-major order, and therefore transposed from their counterparts +// in GLSL and Metal. We can work around this entirely by reversing the arguments to mul(). +#define MUL(A, B) mul(B, A) + +#define VERTEX_STORAGE_BUFFER_BLOCK_BEGIN +#define VERTEX_STORAGE_BUFFER_BLOCK_END + +#define FRAG_STORAGE_BUFFER_BLOCK_BEGIN +#define FRAG_STORAGE_BUFFER_BLOCK_END + +#define STORAGE_BUFFER_U32x2(IDX, GLSL_STRUCT_NAME, NAME) \ + StructuredBuffer NAME : register(SPLAT(t,IDX)) +#define STORAGE_BUFFER_U32x4(IDX, GLSL_STRUCT_NAME, NAME) \ + StructuredBuffer NAME : register(SPLAT(t,IDX)) +#define STORAGE_BUFFER_F32x4(IDX, GLSL_STRUCT_NAME, NAME) \ + StructuredBuffer NAME : register(SPLAT(t,IDX)) + +#define STORAGE_BUFFER_LOAD4(NAME, I) NAME[I] +#define STORAGE_BUFFER_LOAD2(NAME, I) NAME[I] + +INLINE half2 unpackHalf2x16(uint u) +{ + uint y = (u >> 16); + uint x = u & 0xffffu; + return half2(f16tof32(x), f16tof32(y)); +} + +INLINE uint packHalf2x16(float2 v) +{ + uint x = f32tof16(v.x); + uint y = f32tof16(v.y); + return (y << 16) | x; +} + +INLINE half4 unpackUnorm4x8(uint u) +{ + uint4 vals = uint4(u & 0xffu, (u >> 8) & 0xffu, (u >> 16) & 0xffu, u >> 24); + return half4(vals) * (1. / 255.); +} + +INLINE uint packUnorm4x8(half4 color) +{ + uint4 vals = (uint4(color * 255.) & 0xff) << uint4(0, 8, 16, 24); + vals.xy |= vals.zw; + vals.x |= vals.y; + return vals.x; +} + +INLINE float atan(float y, float x) { return atan2(y, x); } + +INLINE float2x2 inverse(float2x2 m) +{ + float2x2 adjoint = float2x2(m[1][1], -m[0][1], -m[1][0], m[0][0]); + return adjoint * (1. / determinant(m)); +} + +// Redirects for intrinsics that have different names in HLSL + +INLINE float mix(float x, float y, float s) { return lerp(x, y, s); } +INLINE float2 mix(float2 x, float2 y, float2 s) { return lerp(x, y, s); } +INLINE float3 mix(float3 x, float3 y, float3 s) { return lerp(x, y, s); } +INLINE float4 mix(float4 x, float4 y, float4 s) { return lerp(x, y, s); } + +INLINE float fract(float x) { return frac(x); } +INLINE float2 fract(float2 x) { return frac(x); } +INLINE float3 fract(float3 x) { return frac(x); } +INLINE float4 fract(float4 x) { return frac(x); } + +// Reimplement intrinsics for half types. +// This shadows the intrinsic function for floats, so we also have to declare that overload. 
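+
+// Aside: a worked example for the packing helpers above (illustrative values only):
+//   unpackUnorm4x8(0x80ff40c0u) == half4(192., 64., 255., 128.) / 255. ~= (.753, .251, 1., .502)
+//   packUnorm4x8(half4(.753, .251, 1., .502)) == 0x80ff40c0u
+// i.e. .x occupies the low byte and .w the high byte, matching GLSL's packUnorm4x8() builtin.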
+ +INLINE float rive_sign(float x) { return sign(x); } +INLINE float2 rive_sign(float2 x) { return sign(x); } +INLINE float3 rive_sign(float3 x) { return sign(x); } +INLINE float4 rive_sign(float4 x) { return sign(x); } + +#define sign rive_sign + +INLINE float rive_abs(float x) { return abs(x); } +INLINE float2 rive_abs(float2 x) { return abs(x); } +INLINE float3 rive_abs(float3 x) { return abs(x); } +INLINE float4 rive_abs(float4 x) { return abs(x); } + +#define abs rive_abs + +INLINE float rive_sqrt(float x) { return sqrt(x); } +INLINE float2 rive_sqrt(float2 x) { return sqrt(x); } +INLINE float3 rive_sqrt(float3 x) { return sqrt(x); } +INLINE float4 rive_sqrt(float4 x) { return sqrt(x); } + +#define sqrt rive_sqrt diff --git a/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/specialization.exports.h b/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/specialization.exports.h new file mode 100644 index 00000000..2d88d890 --- /dev/null +++ b/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/specialization.exports.h @@ -0,0 +1,178 @@ +#pragma once + +#define GLSL_CLEAR_CLIP "_EXPORTED_CLEAR_CLIP" +#define GLSL_CLEAR_CLIP_raw _EXPORTED_CLEAR_CLIP +#define GLSL_CLEAR_COLOR "_EXPORTED_CLEAR_COLOR" +#define GLSL_CLEAR_COLOR_raw _EXPORTED_CLEAR_COLOR +#define GLSL_CLEAR_COVERAGE "_EXPORTED_CLEAR_COVERAGE" +#define GLSL_CLEAR_COVERAGE_raw _EXPORTED_CLEAR_COVERAGE +#define GLSL_COALESCED_PLS_RESOLVE_AND_TRANSFER "_EXPORTED_COALESCED_PLS_RESOLVE_AND_TRANSFER" +#define GLSL_COALESCED_PLS_RESOLVE_AND_TRANSFER_raw _EXPORTED_COALESCED_PLS_RESOLVE_AND_TRANSFER +#define GLSL_COLOR_PLANE_IDX_OVERRIDE "_EXPORTED_COLOR_PLANE_IDX_OVERRIDE" +#define GLSL_COLOR_PLANE_IDX_OVERRIDE_raw _EXPORTED_COLOR_PLANE_IDX_OVERRIDE +#define GLSL_DISABLE_SHADER_STORAGE_BUFFERS "_EXPORTED_DISABLE_SHADER_STORAGE_BUFFERS" +#define GLSL_DISABLE_SHADER_STORAGE_BUFFERS_raw _EXPORTED_DISABLE_SHADER_STORAGE_BUFFERS +#define GLSL_DRAW_IMAGE "_EXPORTED_DRAW_IMAGE" +#define GLSL_DRAW_IMAGE_raw _EXPORTED_DRAW_IMAGE +#define GLSL_DRAW_IMAGE_MESH "_EXPORTED_DRAW_IMAGE_MESH" +#define GLSL_DRAW_IMAGE_MESH_raw _EXPORTED_DRAW_IMAGE_MESH +#define GLSL_DRAW_IMAGE_RECT "_EXPORTED_DRAW_IMAGE_RECT" +#define GLSL_DRAW_IMAGE_RECT_raw _EXPORTED_DRAW_IMAGE_RECT +#define GLSL_DRAW_INTERIOR_TRIANGLES "_EXPORTED_DRAW_INTERIOR_TRIANGLES" +#define GLSL_DRAW_INTERIOR_TRIANGLES_raw _EXPORTED_DRAW_INTERIOR_TRIANGLES +#define GLSL_DRAW_PATH "_EXPORTED_DRAW_PATH" +#define GLSL_DRAW_PATH_raw _EXPORTED_DRAW_PATH +#define GLSL_DRAW_RENDER_TARGET_UPDATE_BOUNDS "_EXPORTED_DRAW_RENDER_TARGET_UPDATE_BOUNDS" +#define GLSL_DRAW_RENDER_TARGET_UPDATE_BOUNDS_raw _EXPORTED_DRAW_RENDER_TARGET_UPDATE_BOUNDS +#define GLSL_ENABLE_ADVANCED_BLEND "_EXPORTED_ENABLE_ADVANCED_BLEND" +#define GLSL_ENABLE_ADVANCED_BLEND_raw _EXPORTED_ENABLE_ADVANCED_BLEND +#define GLSL_ENABLE_BINDLESS_TEXTURES "_EXPORTED_ENABLE_BINDLESS_TEXTURES" +#define GLSL_ENABLE_BINDLESS_TEXTURES_raw _EXPORTED_ENABLE_BINDLESS_TEXTURES +#define GLSL_ENABLE_CLIPPING "_EXPORTED_ENABLE_CLIPPING" +#define GLSL_ENABLE_CLIPPING_raw _EXPORTED_ENABLE_CLIPPING +#define GLSL_ENABLE_CLIP_RECT "_EXPORTED_ENABLE_CLIP_RECT" +#define GLSL_ENABLE_CLIP_RECT_raw _EXPORTED_ENABLE_CLIP_RECT +#define GLSL_ENABLE_EVEN_ODD "_EXPORTED_ENABLE_EVEN_ODD" +#define GLSL_ENABLE_EVEN_ODD_raw _EXPORTED_ENABLE_EVEN_ODD +#define GLSL_ENABLE_HSL_BLEND_MODES "_EXPORTED_ENABLE_HSL_BLEND_MODES" +#define GLSL_ENABLE_HSL_BLEND_MODES_raw _EXPORTED_ENABLE_HSL_BLEND_MODES +#define 
GLSL_ENABLE_INSTANCE_INDEX "_EXPORTED_ENABLE_INSTANCE_INDEX" +#define GLSL_ENABLE_INSTANCE_INDEX_raw _EXPORTED_ENABLE_INSTANCE_INDEX +#define GLSL_ENABLE_KHR_BLEND "_EXPORTED_ENABLE_KHR_BLEND" +#define GLSL_ENABLE_KHR_BLEND_raw _EXPORTED_ENABLE_KHR_BLEND +#define GLSL_ENABLE_MIN_16_PRECISION "_EXPORTED_ENABLE_MIN_16_PRECISION" +#define GLSL_ENABLE_MIN_16_PRECISION_raw _EXPORTED_ENABLE_MIN_16_PRECISION +#define GLSL_ENABLE_NESTED_CLIPPING "_EXPORTED_ENABLE_NESTED_CLIPPING" +#define GLSL_ENABLE_NESTED_CLIPPING_raw _EXPORTED_ENABLE_NESTED_CLIPPING +#define GLSL_ENABLE_RASTERIZER_ORDERED_VIEWS "_EXPORTED_ENABLE_RASTERIZER_ORDERED_VIEWS" +#define GLSL_ENABLE_RASTERIZER_ORDERED_VIEWS_raw _EXPORTED_ENABLE_RASTERIZER_ORDERED_VIEWS +#define GLSL_ENABLE_SPIRV_CROSS_BASE_INSTANCE "_EXPORTED_ENABLE_SPIRV_CROSS_BASE_INSTANCE" +#define GLSL_ENABLE_SPIRV_CROSS_BASE_INSTANCE_raw _EXPORTED_ENABLE_SPIRV_CROSS_BASE_INSTANCE +#define GLSL_ENABLE_TYPED_UAV_LOAD_STORE "_EXPORTED_ENABLE_TYPED_UAV_LOAD_STORE" +#define GLSL_ENABLE_TYPED_UAV_LOAD_STORE_raw _EXPORTED_ENABLE_TYPED_UAV_LOAD_STORE +#define GLSL_FIXED_FUNCTION_COLOR_BLEND "_EXPORTED_FIXED_FUNCTION_COLOR_BLEND" +#define GLSL_FIXED_FUNCTION_COLOR_BLEND_raw _EXPORTED_FIXED_FUNCTION_COLOR_BLEND +#define GLSL_FRAGMENT "_EXPORTED_FRAGMENT" +#define GLSL_FRAGMENT_raw _EXPORTED_FRAGMENT +#define GLSL_FlushUniforms "_EXPORTED_FlushUniforms" +#define GLSL_FlushUniforms_raw _EXPORTED_FlushUniforms +#define GLSL_GLSL_VERSION "_EXPORTED_GLSL_VERSION" +#define GLSL_GLSL_VERSION_raw _EXPORTED_GLSL_VERSION +#define GLSL_INITIALIZE_PLS "_EXPORTED_INITIALIZE_PLS" +#define GLSL_INITIALIZE_PLS_raw _EXPORTED_INITIALIZE_PLS +#define GLSL_ImageDrawUniforms "_EXPORTED_ImageDrawUniforms" +#define GLSL_ImageDrawUniforms_raw _EXPORTED_ImageDrawUniforms +#define GLSL_LOAD_COLOR "_EXPORTED_LOAD_COLOR" +#define GLSL_LOAD_COLOR_raw _EXPORTED_LOAD_COLOR +#define GLSL_OPTIONALLY_FLAT "_EXPORTED_OPTIONALLY_FLAT" +#define GLSL_OPTIONALLY_FLAT_raw _EXPORTED_OPTIONALLY_FLAT +#define GLSL_PLS_IMPL_ANGLE "_EXPORTED_PLS_IMPL_ANGLE" +#define GLSL_PLS_IMPL_ANGLE_raw _EXPORTED_PLS_IMPL_ANGLE +#define GLSL_PLS_IMPL_DEVICE_BUFFER "_EXPORTED_PLS_IMPL_DEVICE_BUFFER" +#define GLSL_PLS_IMPL_DEVICE_BUFFER_raw _EXPORTED_PLS_IMPL_DEVICE_BUFFER +#define GLSL_PLS_IMPL_DEVICE_BUFFER_RASTER_ORDERED "_EXPORTED_PLS_IMPL_DEVICE_BUFFER_RASTER_ORDERED" +#define GLSL_PLS_IMPL_DEVICE_BUFFER_RASTER_ORDERED_raw _EXPORTED_PLS_IMPL_DEVICE_BUFFER_RASTER_ORDERED +#define GLSL_PLS_IMPL_EXT_NATIVE "_EXPORTED_PLS_IMPL_EXT_NATIVE" +#define GLSL_PLS_IMPL_EXT_NATIVE_raw _EXPORTED_PLS_IMPL_EXT_NATIVE +#define GLSL_PLS_IMPL_FRAMEBUFFER_FETCH "_EXPORTED_PLS_IMPL_FRAMEBUFFER_FETCH" +#define GLSL_PLS_IMPL_FRAMEBUFFER_FETCH_raw _EXPORTED_PLS_IMPL_FRAMEBUFFER_FETCH +#define GLSL_PLS_IMPL_NONE "_EXPORTED_PLS_IMPL_NONE" +#define GLSL_PLS_IMPL_NONE_raw _EXPORTED_PLS_IMPL_NONE +#define GLSL_PLS_IMPL_STORAGE_TEXTURE "_EXPORTED_PLS_IMPL_STORAGE_TEXTURE" +#define GLSL_PLS_IMPL_STORAGE_TEXTURE_raw _EXPORTED_PLS_IMPL_STORAGE_TEXTURE +#define GLSL_PLS_IMPL_SUBPASS_LOAD "_EXPORTED_PLS_IMPL_SUBPASS_LOAD" +#define GLSL_PLS_IMPL_SUBPASS_LOAD_raw _EXPORTED_PLS_IMPL_SUBPASS_LOAD +#define GLSL_RESOLVE_PLS "_EXPORTED_RESOLVE_PLS" +#define GLSL_RESOLVE_PLS_raw _EXPORTED_RESOLVE_PLS +#define GLSL_STORE_COLOR "_EXPORTED_STORE_COLOR" +#define GLSL_STORE_COLOR_raw _EXPORTED_STORE_COLOR +#define GLSL_STORE_COLOR_CLEAR "_EXPORTED_STORE_COLOR_CLEAR" +#define GLSL_STORE_COLOR_CLEAR_raw _EXPORTED_STORE_COLOR_CLEAR +#define GLSL_SWIZZLE_COLOR_BGRA_TO_RGBA 
"_EXPORTED_SWIZZLE_COLOR_BGRA_TO_RGBA" +#define GLSL_SWIZZLE_COLOR_BGRA_TO_RGBA_raw _EXPORTED_SWIZZLE_COLOR_BGRA_TO_RGBA +#define GLSL_TARGET_VULKAN "_EXPORTED_TARGET_VULKAN" +#define GLSL_TARGET_VULKAN_raw _EXPORTED_TARGET_VULKAN +#define GLSL_USE_GENERATED_UNIFORMS "_EXPORTED_USE_GENERATED_UNIFORMS" +#define GLSL_USE_GENERATED_UNIFORMS_raw _EXPORTED_USE_GENERATED_UNIFORMS +#define GLSL_USING_DEPTH_STENCIL "_EXPORTED_USING_DEPTH_STENCIL" +#define GLSL_USING_DEPTH_STENCIL_raw _EXPORTED_USING_DEPTH_STENCIL +#define GLSL_USING_PLS_STORAGE_TEXTURES "_EXPORTED_USING_PLS_STORAGE_TEXTURES" +#define GLSL_USING_PLS_STORAGE_TEXTURES_raw _EXPORTED_USING_PLS_STORAGE_TEXTURES +#define GLSL_VERTEX "_EXPORTED_VERTEX" +#define GLSL_VERTEX_raw _EXPORTED_VERTEX +#define GLSL_a_args "_EXPORTED_a_args" +#define GLSL_a_args_raw _EXPORTED_a_args +#define GLSL_a_args_a "_EXPORTED_a_args_a" +#define GLSL_a_args_a_raw _EXPORTED_a_args_a +#define GLSL_a_args_b "_EXPORTED_a_args_b" +#define GLSL_a_args_b_raw _EXPORTED_a_args_b +#define GLSL_a_args_c "_EXPORTED_a_args_c" +#define GLSL_a_args_c_raw _EXPORTED_a_args_c +#define GLSL_a_args_d "_EXPORTED_a_args_d" +#define GLSL_a_args_d_raw _EXPORTED_a_args_d +#define GLSL_a_imageRectVertex "_EXPORTED_a_imageRectVertex" +#define GLSL_a_imageRectVertex_raw _EXPORTED_a_imageRectVertex +#define GLSL_a_joinTan_and_ys "_EXPORTED_a_joinTan_and_ys" +#define GLSL_a_joinTan_and_ys_raw _EXPORTED_a_joinTan_and_ys +#define GLSL_a_mirroredVertexData "_EXPORTED_a_mirroredVertexData" +#define GLSL_a_mirroredVertexData_raw _EXPORTED_a_mirroredVertexData +#define GLSL_a_p0p1_ "_EXPORTED_a_p0p1_" +#define GLSL_a_p0p1__raw _EXPORTED_a_p0p1_ +#define GLSL_a_p2p3_ "_EXPORTED_a_p2p3_" +#define GLSL_a_p2p3__raw _EXPORTED_a_p2p3_ +#define GLSL_a_patchVertexData "_EXPORTED_a_patchVertexData" +#define GLSL_a_patchVertexData_raw _EXPORTED_a_patchVertexData +#define GLSL_a_position "_EXPORTED_a_position" +#define GLSL_a_position_raw _EXPORTED_a_position +#define GLSL_a_span "_EXPORTED_a_span" +#define GLSL_a_span_raw _EXPORTED_a_span +#define GLSL_a_span_a "_EXPORTED_a_span_a" +#define GLSL_a_span_a_raw _EXPORTED_a_span_a +#define GLSL_a_span_b "_EXPORTED_a_span_b" +#define GLSL_a_span_b_raw _EXPORTED_a_span_b +#define GLSL_a_span_c "_EXPORTED_a_span_c" +#define GLSL_a_span_c_raw _EXPORTED_a_span_c +#define GLSL_a_span_d "_EXPORTED_a_span_d" +#define GLSL_a_span_d_raw _EXPORTED_a_span_d +#define GLSL_a_texCoord "_EXPORTED_a_texCoord" +#define GLSL_a_texCoord_raw _EXPORTED_a_texCoord +#define GLSL_a_triangleVertex "_EXPORTED_a_triangleVertex" +#define GLSL_a_triangleVertex_raw _EXPORTED_a_triangleVertex +#define GLSL_blitFragmentMain "_EXPORTED_blitFragmentMain" +#define GLSL_blitFragmentMain_raw _EXPORTED_blitFragmentMain +#define GLSL_blitTextureSource "_EXPORTED_blitTextureSource" +#define GLSL_blitTextureSource_raw _EXPORTED_blitTextureSource +#define GLSL_blitVertexMain "_EXPORTED_blitVertexMain" +#define GLSL_blitVertexMain_raw _EXPORTED_blitVertexMain +#define GLSL_clearColor "_EXPORTED_clearColor" +#define GLSL_clearColor_raw _EXPORTED_clearColor +#define GLSL_colorRampFragmentMain "_EXPORTED_colorRampFragmentMain" +#define GLSL_colorRampFragmentMain_raw _EXPORTED_colorRampFragmentMain +#define GLSL_colorRampVertexMain "_EXPORTED_colorRampVertexMain" +#define GLSL_colorRampVertexMain_raw _EXPORTED_colorRampVertexMain +#define GLSL_contourBuffer "_EXPORTED_contourBuffer" +#define GLSL_contourBuffer_raw _EXPORTED_contourBuffer +#define GLSL_drawFragmentMain "_EXPORTED_drawFragmentMain" 
+#define GLSL_drawFragmentMain_raw _EXPORTED_drawFragmentMain +#define GLSL_drawVertexMain "_EXPORTED_drawVertexMain" +#define GLSL_drawVertexMain_raw _EXPORTED_drawVertexMain +#define GLSL_dstColorTexture "_EXPORTED_dstColorTexture" +#define GLSL_dstColorTexture_raw _EXPORTED_dstColorTexture +#define GLSL_gradTexture "_EXPORTED_gradTexture" +#define GLSL_gradTexture_raw _EXPORTED_gradTexture +#define GLSL_imageTexture "_EXPORTED_imageTexture" +#define GLSL_imageTexture_raw _EXPORTED_imageTexture +#define GLSL_paintAuxBuffer "_EXPORTED_paintAuxBuffer" +#define GLSL_paintAuxBuffer_raw _EXPORTED_paintAuxBuffer +#define GLSL_paintBuffer "_EXPORTED_paintBuffer" +#define GLSL_paintBuffer_raw _EXPORTED_paintBuffer +#define GLSL_pathBuffer "_EXPORTED_pathBuffer" +#define GLSL_pathBuffer_raw _EXPORTED_pathBuffer +#define GLSL_stencilVertexMain "_EXPORTED_stencilVertexMain" +#define GLSL_stencilVertexMain_raw _EXPORTED_stencilVertexMain +#define GLSL_tessVertexTexture "_EXPORTED_tessVertexTexture" +#define GLSL_tessVertexTexture_raw _EXPORTED_tessVertexTexture +#define GLSL_tessellateFragmentMain "_EXPORTED_tessellateFragmentMain" +#define GLSL_tessellateFragmentMain_raw _EXPORTED_tessellateFragmentMain +#define GLSL_tessellateVertexMain "_EXPORTED_tessellateVertexMain" +#define GLSL_tessellateVertexMain_raw _EXPORTED_tessellateVertexMain diff --git a/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/specialization.glsl.hpp b/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/specialization.glsl.hpp new file mode 100644 index 00000000..b9e5f703 --- /dev/null +++ b/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/specialization.glsl.hpp @@ -0,0 +1,24 @@ +#pragma once + +#include "specialization.exports.h" + +namespace rive { +namespace gpu { +namespace glsl { +const char specialization[] = R"===(layout(constant_id = CLIPPING_SPECIALIZATION_IDX) const bool kEnableClipping = false; +layout(constant_id = CLIP_RECT_SPECIALIZATION_IDX) const bool kEnableClipRect = false; +layout(constant_id = ADVANCED_BLEND_SPECIALIZATION_IDX) const bool kEnableAdvancedBlend = false; +layout(constant_id = EVEN_ODD_SPECIALIZATION_IDX) const bool kEnableEvenOdd = false; +layout(constant_id = NESTED_CLIPPING_SPECIALIZATION_IDX) const bool kEnableNestedClipping = false; +layout(constant_id = HSL_BLEND_MODES_SPECIALIZATION_IDX) const bool kEnableHSLBlendModes = false; + +#define _EXPORTED_ENABLE_CLIPPING kEnableClipping +#define _EXPORTED_ENABLE_CLIP_RECT kEnableClipRect +#define _EXPORTED_ENABLE_ADVANCED_BLEND kEnableAdvancedBlend +#define _EXPORTED_ENABLE_EVEN_ODD kEnableEvenOdd +#define _EXPORTED_ENABLE_NESTED_CLIPPING kEnableNestedClipping +#define _EXPORTED_ENABLE_HSL_BLEND_MODES kEnableHSLBlendModes +)==="; +} // namespace glsl +} // namespace gpu +} // namespace rive \ No newline at end of file diff --git a/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/specialization.minified.ush b/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/specialization.minified.ush new file mode 100644 index 00000000..c39726bf --- /dev/null +++ b/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/specialization.minified.ush @@ -0,0 +1,13 @@ +layout(constant_id = CLIPPING_SPECIALIZATION_IDX) const bool kEnableClipping = false; +layout(constant_id = CLIP_RECT_SPECIALIZATION_IDX) const bool kEnableClipRect = false; +layout(constant_id = ADVANCED_BLEND_SPECIALIZATION_IDX) const bool 
kEnableAdvancedBlend = false; +layout(constant_id = EVEN_ODD_SPECIALIZATION_IDX) const bool kEnableEvenOdd = false; +layout(constant_id = NESTED_CLIPPING_SPECIALIZATION_IDX) const bool kEnableNestedClipping = false; +layout(constant_id = HSL_BLEND_MODES_SPECIALIZATION_IDX) const bool kEnableHSLBlendModes = false; + +#define ENABLE_CLIPPING kEnableClipping +#define ENABLE_CLIP_RECT kEnableClipRect +#define ENABLE_ADVANCED_BLEND kEnableAdvancedBlend +#define ENABLE_EVEN_ODD kEnableEvenOdd +#define ENABLE_NESTED_CLIPPING kEnableNestedClipping +#define ENABLE_HSL_BLEND_MODES kEnableHSLBlendModes diff --git a/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/stencil_draw.exports.h b/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/stencil_draw.exports.h new file mode 100644 index 00000000..2d88d890 --- /dev/null +++ b/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/stencil_draw.exports.h @@ -0,0 +1,178 @@ +#pragma once + +#define GLSL_CLEAR_CLIP "_EXPORTED_CLEAR_CLIP" +#define GLSL_CLEAR_CLIP_raw _EXPORTED_CLEAR_CLIP +#define GLSL_CLEAR_COLOR "_EXPORTED_CLEAR_COLOR" +#define GLSL_CLEAR_COLOR_raw _EXPORTED_CLEAR_COLOR +#define GLSL_CLEAR_COVERAGE "_EXPORTED_CLEAR_COVERAGE" +#define GLSL_CLEAR_COVERAGE_raw _EXPORTED_CLEAR_COVERAGE +#define GLSL_COALESCED_PLS_RESOLVE_AND_TRANSFER "_EXPORTED_COALESCED_PLS_RESOLVE_AND_TRANSFER" +#define GLSL_COALESCED_PLS_RESOLVE_AND_TRANSFER_raw _EXPORTED_COALESCED_PLS_RESOLVE_AND_TRANSFER +#define GLSL_COLOR_PLANE_IDX_OVERRIDE "_EXPORTED_COLOR_PLANE_IDX_OVERRIDE" +#define GLSL_COLOR_PLANE_IDX_OVERRIDE_raw _EXPORTED_COLOR_PLANE_IDX_OVERRIDE +#define GLSL_DISABLE_SHADER_STORAGE_BUFFERS "_EXPORTED_DISABLE_SHADER_STORAGE_BUFFERS" +#define GLSL_DISABLE_SHADER_STORAGE_BUFFERS_raw _EXPORTED_DISABLE_SHADER_STORAGE_BUFFERS +#define GLSL_DRAW_IMAGE "_EXPORTED_DRAW_IMAGE" +#define GLSL_DRAW_IMAGE_raw _EXPORTED_DRAW_IMAGE +#define GLSL_DRAW_IMAGE_MESH "_EXPORTED_DRAW_IMAGE_MESH" +#define GLSL_DRAW_IMAGE_MESH_raw _EXPORTED_DRAW_IMAGE_MESH +#define GLSL_DRAW_IMAGE_RECT "_EXPORTED_DRAW_IMAGE_RECT" +#define GLSL_DRAW_IMAGE_RECT_raw _EXPORTED_DRAW_IMAGE_RECT +#define GLSL_DRAW_INTERIOR_TRIANGLES "_EXPORTED_DRAW_INTERIOR_TRIANGLES" +#define GLSL_DRAW_INTERIOR_TRIANGLES_raw _EXPORTED_DRAW_INTERIOR_TRIANGLES +#define GLSL_DRAW_PATH "_EXPORTED_DRAW_PATH" +#define GLSL_DRAW_PATH_raw _EXPORTED_DRAW_PATH +#define GLSL_DRAW_RENDER_TARGET_UPDATE_BOUNDS "_EXPORTED_DRAW_RENDER_TARGET_UPDATE_BOUNDS" +#define GLSL_DRAW_RENDER_TARGET_UPDATE_BOUNDS_raw _EXPORTED_DRAW_RENDER_TARGET_UPDATE_BOUNDS +#define GLSL_ENABLE_ADVANCED_BLEND "_EXPORTED_ENABLE_ADVANCED_BLEND" +#define GLSL_ENABLE_ADVANCED_BLEND_raw _EXPORTED_ENABLE_ADVANCED_BLEND +#define GLSL_ENABLE_BINDLESS_TEXTURES "_EXPORTED_ENABLE_BINDLESS_TEXTURES" +#define GLSL_ENABLE_BINDLESS_TEXTURES_raw _EXPORTED_ENABLE_BINDLESS_TEXTURES +#define GLSL_ENABLE_CLIPPING "_EXPORTED_ENABLE_CLIPPING" +#define GLSL_ENABLE_CLIPPING_raw _EXPORTED_ENABLE_CLIPPING +#define GLSL_ENABLE_CLIP_RECT "_EXPORTED_ENABLE_CLIP_RECT" +#define GLSL_ENABLE_CLIP_RECT_raw _EXPORTED_ENABLE_CLIP_RECT +#define GLSL_ENABLE_EVEN_ODD "_EXPORTED_ENABLE_EVEN_ODD" +#define GLSL_ENABLE_EVEN_ODD_raw _EXPORTED_ENABLE_EVEN_ODD +#define GLSL_ENABLE_HSL_BLEND_MODES "_EXPORTED_ENABLE_HSL_BLEND_MODES" +#define GLSL_ENABLE_HSL_BLEND_MODES_raw _EXPORTED_ENABLE_HSL_BLEND_MODES +#define GLSL_ENABLE_INSTANCE_INDEX "_EXPORTED_ENABLE_INSTANCE_INDEX" +#define GLSL_ENABLE_INSTANCE_INDEX_raw _EXPORTED_ENABLE_INSTANCE_INDEX 
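+// Note: each exported symbol in these generated headers comes as a pair: GLSL_<name> is a string
+// literal for host code that needs the name at runtime (entry points and resource bindings), and
+// GLSL_<name>_raw is the bare token for pasting into shader source; the _EXPORTED_ prefix appears
+// to be the minifier's rewrite of the user-facing identifier.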
+#define GLSL_ENABLE_KHR_BLEND "_EXPORTED_ENABLE_KHR_BLEND" +#define GLSL_ENABLE_KHR_BLEND_raw _EXPORTED_ENABLE_KHR_BLEND +#define GLSL_ENABLE_MIN_16_PRECISION "_EXPORTED_ENABLE_MIN_16_PRECISION" +#define GLSL_ENABLE_MIN_16_PRECISION_raw _EXPORTED_ENABLE_MIN_16_PRECISION +#define GLSL_ENABLE_NESTED_CLIPPING "_EXPORTED_ENABLE_NESTED_CLIPPING" +#define GLSL_ENABLE_NESTED_CLIPPING_raw _EXPORTED_ENABLE_NESTED_CLIPPING +#define GLSL_ENABLE_RASTERIZER_ORDERED_VIEWS "_EXPORTED_ENABLE_RASTERIZER_ORDERED_VIEWS" +#define GLSL_ENABLE_RASTERIZER_ORDERED_VIEWS_raw _EXPORTED_ENABLE_RASTERIZER_ORDERED_VIEWS +#define GLSL_ENABLE_SPIRV_CROSS_BASE_INSTANCE "_EXPORTED_ENABLE_SPIRV_CROSS_BASE_INSTANCE" +#define GLSL_ENABLE_SPIRV_CROSS_BASE_INSTANCE_raw _EXPORTED_ENABLE_SPIRV_CROSS_BASE_INSTANCE +#define GLSL_ENABLE_TYPED_UAV_LOAD_STORE "_EXPORTED_ENABLE_TYPED_UAV_LOAD_STORE" +#define GLSL_ENABLE_TYPED_UAV_LOAD_STORE_raw _EXPORTED_ENABLE_TYPED_UAV_LOAD_STORE +#define GLSL_FIXED_FUNCTION_COLOR_BLEND "_EXPORTED_FIXED_FUNCTION_COLOR_BLEND" +#define GLSL_FIXED_FUNCTION_COLOR_BLEND_raw _EXPORTED_FIXED_FUNCTION_COLOR_BLEND +#define GLSL_FRAGMENT "_EXPORTED_FRAGMENT" +#define GLSL_FRAGMENT_raw _EXPORTED_FRAGMENT +#define GLSL_FlushUniforms "_EXPORTED_FlushUniforms" +#define GLSL_FlushUniforms_raw _EXPORTED_FlushUniforms +#define GLSL_GLSL_VERSION "_EXPORTED_GLSL_VERSION" +#define GLSL_GLSL_VERSION_raw _EXPORTED_GLSL_VERSION +#define GLSL_INITIALIZE_PLS "_EXPORTED_INITIALIZE_PLS" +#define GLSL_INITIALIZE_PLS_raw _EXPORTED_INITIALIZE_PLS +#define GLSL_ImageDrawUniforms "_EXPORTED_ImageDrawUniforms" +#define GLSL_ImageDrawUniforms_raw _EXPORTED_ImageDrawUniforms +#define GLSL_LOAD_COLOR "_EXPORTED_LOAD_COLOR" +#define GLSL_LOAD_COLOR_raw _EXPORTED_LOAD_COLOR +#define GLSL_OPTIONALLY_FLAT "_EXPORTED_OPTIONALLY_FLAT" +#define GLSL_OPTIONALLY_FLAT_raw _EXPORTED_OPTIONALLY_FLAT +#define GLSL_PLS_IMPL_ANGLE "_EXPORTED_PLS_IMPL_ANGLE" +#define GLSL_PLS_IMPL_ANGLE_raw _EXPORTED_PLS_IMPL_ANGLE +#define GLSL_PLS_IMPL_DEVICE_BUFFER "_EXPORTED_PLS_IMPL_DEVICE_BUFFER" +#define GLSL_PLS_IMPL_DEVICE_BUFFER_raw _EXPORTED_PLS_IMPL_DEVICE_BUFFER +#define GLSL_PLS_IMPL_DEVICE_BUFFER_RASTER_ORDERED "_EXPORTED_PLS_IMPL_DEVICE_BUFFER_RASTER_ORDERED" +#define GLSL_PLS_IMPL_DEVICE_BUFFER_RASTER_ORDERED_raw _EXPORTED_PLS_IMPL_DEVICE_BUFFER_RASTER_ORDERED +#define GLSL_PLS_IMPL_EXT_NATIVE "_EXPORTED_PLS_IMPL_EXT_NATIVE" +#define GLSL_PLS_IMPL_EXT_NATIVE_raw _EXPORTED_PLS_IMPL_EXT_NATIVE +#define GLSL_PLS_IMPL_FRAMEBUFFER_FETCH "_EXPORTED_PLS_IMPL_FRAMEBUFFER_FETCH" +#define GLSL_PLS_IMPL_FRAMEBUFFER_FETCH_raw _EXPORTED_PLS_IMPL_FRAMEBUFFER_FETCH +#define GLSL_PLS_IMPL_NONE "_EXPORTED_PLS_IMPL_NONE" +#define GLSL_PLS_IMPL_NONE_raw _EXPORTED_PLS_IMPL_NONE +#define GLSL_PLS_IMPL_STORAGE_TEXTURE "_EXPORTED_PLS_IMPL_STORAGE_TEXTURE" +#define GLSL_PLS_IMPL_STORAGE_TEXTURE_raw _EXPORTED_PLS_IMPL_STORAGE_TEXTURE +#define GLSL_PLS_IMPL_SUBPASS_LOAD "_EXPORTED_PLS_IMPL_SUBPASS_LOAD" +#define GLSL_PLS_IMPL_SUBPASS_LOAD_raw _EXPORTED_PLS_IMPL_SUBPASS_LOAD +#define GLSL_RESOLVE_PLS "_EXPORTED_RESOLVE_PLS" +#define GLSL_RESOLVE_PLS_raw _EXPORTED_RESOLVE_PLS +#define GLSL_STORE_COLOR "_EXPORTED_STORE_COLOR" +#define GLSL_STORE_COLOR_raw _EXPORTED_STORE_COLOR +#define GLSL_STORE_COLOR_CLEAR "_EXPORTED_STORE_COLOR_CLEAR" +#define GLSL_STORE_COLOR_CLEAR_raw _EXPORTED_STORE_COLOR_CLEAR +#define GLSL_SWIZZLE_COLOR_BGRA_TO_RGBA "_EXPORTED_SWIZZLE_COLOR_BGRA_TO_RGBA" +#define GLSL_SWIZZLE_COLOR_BGRA_TO_RGBA_raw _EXPORTED_SWIZZLE_COLOR_BGRA_TO_RGBA +#define 
GLSL_TARGET_VULKAN "_EXPORTED_TARGET_VULKAN" +#define GLSL_TARGET_VULKAN_raw _EXPORTED_TARGET_VULKAN +#define GLSL_USE_GENERATED_UNIFORMS "_EXPORTED_USE_GENERATED_UNIFORMS" +#define GLSL_USE_GENERATED_UNIFORMS_raw _EXPORTED_USE_GENERATED_UNIFORMS +#define GLSL_USING_DEPTH_STENCIL "_EXPORTED_USING_DEPTH_STENCIL" +#define GLSL_USING_DEPTH_STENCIL_raw _EXPORTED_USING_DEPTH_STENCIL +#define GLSL_USING_PLS_STORAGE_TEXTURES "_EXPORTED_USING_PLS_STORAGE_TEXTURES" +#define GLSL_USING_PLS_STORAGE_TEXTURES_raw _EXPORTED_USING_PLS_STORAGE_TEXTURES +#define GLSL_VERTEX "_EXPORTED_VERTEX" +#define GLSL_VERTEX_raw _EXPORTED_VERTEX +#define GLSL_a_args "_EXPORTED_a_args" +#define GLSL_a_args_raw _EXPORTED_a_args +#define GLSL_a_args_a "_EXPORTED_a_args_a" +#define GLSL_a_args_a_raw _EXPORTED_a_args_a +#define GLSL_a_args_b "_EXPORTED_a_args_b" +#define GLSL_a_args_b_raw _EXPORTED_a_args_b +#define GLSL_a_args_c "_EXPORTED_a_args_c" +#define GLSL_a_args_c_raw _EXPORTED_a_args_c +#define GLSL_a_args_d "_EXPORTED_a_args_d" +#define GLSL_a_args_d_raw _EXPORTED_a_args_d +#define GLSL_a_imageRectVertex "_EXPORTED_a_imageRectVertex" +#define GLSL_a_imageRectVertex_raw _EXPORTED_a_imageRectVertex +#define GLSL_a_joinTan_and_ys "_EXPORTED_a_joinTan_and_ys" +#define GLSL_a_joinTan_and_ys_raw _EXPORTED_a_joinTan_and_ys +#define GLSL_a_mirroredVertexData "_EXPORTED_a_mirroredVertexData" +#define GLSL_a_mirroredVertexData_raw _EXPORTED_a_mirroredVertexData +#define GLSL_a_p0p1_ "_EXPORTED_a_p0p1_" +#define GLSL_a_p0p1__raw _EXPORTED_a_p0p1_ +#define GLSL_a_p2p3_ "_EXPORTED_a_p2p3_" +#define GLSL_a_p2p3__raw _EXPORTED_a_p2p3_ +#define GLSL_a_patchVertexData "_EXPORTED_a_patchVertexData" +#define GLSL_a_patchVertexData_raw _EXPORTED_a_patchVertexData +#define GLSL_a_position "_EXPORTED_a_position" +#define GLSL_a_position_raw _EXPORTED_a_position +#define GLSL_a_span "_EXPORTED_a_span" +#define GLSL_a_span_raw _EXPORTED_a_span +#define GLSL_a_span_a "_EXPORTED_a_span_a" +#define GLSL_a_span_a_raw _EXPORTED_a_span_a +#define GLSL_a_span_b "_EXPORTED_a_span_b" +#define GLSL_a_span_b_raw _EXPORTED_a_span_b +#define GLSL_a_span_c "_EXPORTED_a_span_c" +#define GLSL_a_span_c_raw _EXPORTED_a_span_c +#define GLSL_a_span_d "_EXPORTED_a_span_d" +#define GLSL_a_span_d_raw _EXPORTED_a_span_d +#define GLSL_a_texCoord "_EXPORTED_a_texCoord" +#define GLSL_a_texCoord_raw _EXPORTED_a_texCoord +#define GLSL_a_triangleVertex "_EXPORTED_a_triangleVertex" +#define GLSL_a_triangleVertex_raw _EXPORTED_a_triangleVertex +#define GLSL_blitFragmentMain "_EXPORTED_blitFragmentMain" +#define GLSL_blitFragmentMain_raw _EXPORTED_blitFragmentMain +#define GLSL_blitTextureSource "_EXPORTED_blitTextureSource" +#define GLSL_blitTextureSource_raw _EXPORTED_blitTextureSource +#define GLSL_blitVertexMain "_EXPORTED_blitVertexMain" +#define GLSL_blitVertexMain_raw _EXPORTED_blitVertexMain +#define GLSL_clearColor "_EXPORTED_clearColor" +#define GLSL_clearColor_raw _EXPORTED_clearColor +#define GLSL_colorRampFragmentMain "_EXPORTED_colorRampFragmentMain" +#define GLSL_colorRampFragmentMain_raw _EXPORTED_colorRampFragmentMain +#define GLSL_colorRampVertexMain "_EXPORTED_colorRampVertexMain" +#define GLSL_colorRampVertexMain_raw _EXPORTED_colorRampVertexMain +#define GLSL_contourBuffer "_EXPORTED_contourBuffer" +#define GLSL_contourBuffer_raw _EXPORTED_contourBuffer +#define GLSL_drawFragmentMain "_EXPORTED_drawFragmentMain" +#define GLSL_drawFragmentMain_raw _EXPORTED_drawFragmentMain +#define GLSL_drawVertexMain "_EXPORTED_drawVertexMain" +#define 
GLSL_drawVertexMain_raw _EXPORTED_drawVertexMain +#define GLSL_dstColorTexture "_EXPORTED_dstColorTexture" +#define GLSL_dstColorTexture_raw _EXPORTED_dstColorTexture +#define GLSL_gradTexture "_EXPORTED_gradTexture" +#define GLSL_gradTexture_raw _EXPORTED_gradTexture +#define GLSL_imageTexture "_EXPORTED_imageTexture" +#define GLSL_imageTexture_raw _EXPORTED_imageTexture +#define GLSL_paintAuxBuffer "_EXPORTED_paintAuxBuffer" +#define GLSL_paintAuxBuffer_raw _EXPORTED_paintAuxBuffer +#define GLSL_paintBuffer "_EXPORTED_paintBuffer" +#define GLSL_paintBuffer_raw _EXPORTED_paintBuffer +#define GLSL_pathBuffer "_EXPORTED_pathBuffer" +#define GLSL_pathBuffer_raw _EXPORTED_pathBuffer +#define GLSL_stencilVertexMain "_EXPORTED_stencilVertexMain" +#define GLSL_stencilVertexMain_raw _EXPORTED_stencilVertexMain +#define GLSL_tessVertexTexture "_EXPORTED_tessVertexTexture" +#define GLSL_tessVertexTexture_raw _EXPORTED_tessVertexTexture +#define GLSL_tessellateFragmentMain "_EXPORTED_tessellateFragmentMain" +#define GLSL_tessellateFragmentMain_raw _EXPORTED_tessellateFragmentMain +#define GLSL_tessellateVertexMain "_EXPORTED_tessellateVertexMain" +#define GLSL_tessellateVertexMain_raw _EXPORTED_tessellateVertexMain diff --git a/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/stencil_draw.glsl.hpp b/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/stencil_draw.glsl.hpp new file mode 100644 index 00000000..58dc2a96 --- /dev/null +++ b/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/stencil_draw.glsl.hpp @@ -0,0 +1,41 @@ +#pragma once + +#include "stencil_draw.exports.h" + +namespace rive { +namespace gpu { +namespace glsl { +const char stencil_draw[] = R"===(/* + * Copyright 2024 Rive + */ + +#ifdef _EXPORTED_VERTEX +ATTR_BLOCK_BEGIN(Attrs) +ATTR(0, packed_float3, _EXPORTED_a_triangleVertex); +ATTR_BLOCK_END + +VERTEX_TEXTURE_BLOCK_BEGIN +VERTEX_TEXTURE_BLOCK_END + +VERTEX_STORAGE_BUFFER_BLOCK_BEGIN +VERTEX_STORAGE_BUFFER_BLOCK_END + +VERTEX_MAIN(_EXPORTED_stencilVertexMain, Attrs, attrs, _vertexID, _instanceID) +{ + float4 pos = RENDER_TARGET_COORD_TO_CLIP_COORD(_EXPORTED_a_triangleVertex.xy); + uint zIndex = floatBitsToUint(_EXPORTED_a_triangleVertex.z) & 0xffffu; + pos.z = normalize_z_index(zIndex); + EMIT_VERTEX(pos); +} +#endif + +#ifdef _EXPORTED_FRAGMENT +FRAG_TEXTURE_BLOCK_BEGIN +FRAG_TEXTURE_BLOCK_END + +FRAG_DATA_MAIN(half4, _EXPORTED_blitFragmentMain) { EMIT_FRAG_DATA(make_half4(.0)); } +#endif // FRAGMENT +)==="; +} // namespace glsl +} // namespace gpu +} // namespace rive \ No newline at end of file diff --git a/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/stencil_draw.minified.ush b/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/stencil_draw.minified.ush new file mode 100644 index 00000000..800c793d --- /dev/null +++ b/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/stencil_draw.minified.ush @@ -0,0 +1,30 @@ +/* + * Copyright 2024 Rive + */ + +#ifdef VERTEX +ATTR_BLOCK_BEGIN(Attrs) +ATTR(0, packed_float3, _EXPORTED_a_triangleVertex); +ATTR_BLOCK_END + +VERTEX_TEXTURE_BLOCK_BEGIN +VERTEX_TEXTURE_BLOCK_END + +VERTEX_STORAGE_BUFFER_BLOCK_BEGIN +VERTEX_STORAGE_BUFFER_BLOCK_END + +VERTEX_MAIN(_EXPORTED_stencilVertexMain, Attrs, attrs, _vertexID, _instanceID) +{ + float4 pos = RENDER_TARGET_COORD_TO_CLIP_COORD(_EXPORTED_a_triangleVertex.xy); + uint zIndex = floatBitsToUint(_EXPORTED_a_triangleVertex.z) & 0xffffu; + pos.z = 
normalize_z_index(zIndex); + EMIT_VERTEX(pos); +} +#endif + +#ifdef FRAGMENT +FRAG_TEXTURE_BLOCK_BEGIN +FRAG_TEXTURE_BLOCK_END + +FRAG_DATA_MAIN(half4, _EXPORTED_blitFragmentMain) { EMIT_FRAG_DATA(make_half4(.0)); } +#endif // FRAGMENT diff --git a/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/tessellate.exports.h b/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/tessellate.exports.h new file mode 100644 index 00000000..2d88d890 --- /dev/null +++ b/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/tessellate.exports.h @@ -0,0 +1,178 @@ +#pragma once + +#define GLSL_CLEAR_CLIP "_EXPORTED_CLEAR_CLIP" +#define GLSL_CLEAR_CLIP_raw _EXPORTED_CLEAR_CLIP +#define GLSL_CLEAR_COLOR "_EXPORTED_CLEAR_COLOR" +#define GLSL_CLEAR_COLOR_raw _EXPORTED_CLEAR_COLOR +#define GLSL_CLEAR_COVERAGE "_EXPORTED_CLEAR_COVERAGE" +#define GLSL_CLEAR_COVERAGE_raw _EXPORTED_CLEAR_COVERAGE +#define GLSL_COALESCED_PLS_RESOLVE_AND_TRANSFER "_EXPORTED_COALESCED_PLS_RESOLVE_AND_TRANSFER" +#define GLSL_COALESCED_PLS_RESOLVE_AND_TRANSFER_raw _EXPORTED_COALESCED_PLS_RESOLVE_AND_TRANSFER +#define GLSL_COLOR_PLANE_IDX_OVERRIDE "_EXPORTED_COLOR_PLANE_IDX_OVERRIDE" +#define GLSL_COLOR_PLANE_IDX_OVERRIDE_raw _EXPORTED_COLOR_PLANE_IDX_OVERRIDE +#define GLSL_DISABLE_SHADER_STORAGE_BUFFERS "_EXPORTED_DISABLE_SHADER_STORAGE_BUFFERS" +#define GLSL_DISABLE_SHADER_STORAGE_BUFFERS_raw _EXPORTED_DISABLE_SHADER_STORAGE_BUFFERS +#define GLSL_DRAW_IMAGE "_EXPORTED_DRAW_IMAGE" +#define GLSL_DRAW_IMAGE_raw _EXPORTED_DRAW_IMAGE +#define GLSL_DRAW_IMAGE_MESH "_EXPORTED_DRAW_IMAGE_MESH" +#define GLSL_DRAW_IMAGE_MESH_raw _EXPORTED_DRAW_IMAGE_MESH +#define GLSL_DRAW_IMAGE_RECT "_EXPORTED_DRAW_IMAGE_RECT" +#define GLSL_DRAW_IMAGE_RECT_raw _EXPORTED_DRAW_IMAGE_RECT +#define GLSL_DRAW_INTERIOR_TRIANGLES "_EXPORTED_DRAW_INTERIOR_TRIANGLES" +#define GLSL_DRAW_INTERIOR_TRIANGLES_raw _EXPORTED_DRAW_INTERIOR_TRIANGLES +#define GLSL_DRAW_PATH "_EXPORTED_DRAW_PATH" +#define GLSL_DRAW_PATH_raw _EXPORTED_DRAW_PATH +#define GLSL_DRAW_RENDER_TARGET_UPDATE_BOUNDS "_EXPORTED_DRAW_RENDER_TARGET_UPDATE_BOUNDS" +#define GLSL_DRAW_RENDER_TARGET_UPDATE_BOUNDS_raw _EXPORTED_DRAW_RENDER_TARGET_UPDATE_BOUNDS +#define GLSL_ENABLE_ADVANCED_BLEND "_EXPORTED_ENABLE_ADVANCED_BLEND" +#define GLSL_ENABLE_ADVANCED_BLEND_raw _EXPORTED_ENABLE_ADVANCED_BLEND +#define GLSL_ENABLE_BINDLESS_TEXTURES "_EXPORTED_ENABLE_BINDLESS_TEXTURES" +#define GLSL_ENABLE_BINDLESS_TEXTURES_raw _EXPORTED_ENABLE_BINDLESS_TEXTURES +#define GLSL_ENABLE_CLIPPING "_EXPORTED_ENABLE_CLIPPING" +#define GLSL_ENABLE_CLIPPING_raw _EXPORTED_ENABLE_CLIPPING +#define GLSL_ENABLE_CLIP_RECT "_EXPORTED_ENABLE_CLIP_RECT" +#define GLSL_ENABLE_CLIP_RECT_raw _EXPORTED_ENABLE_CLIP_RECT +#define GLSL_ENABLE_EVEN_ODD "_EXPORTED_ENABLE_EVEN_ODD" +#define GLSL_ENABLE_EVEN_ODD_raw _EXPORTED_ENABLE_EVEN_ODD +#define GLSL_ENABLE_HSL_BLEND_MODES "_EXPORTED_ENABLE_HSL_BLEND_MODES" +#define GLSL_ENABLE_HSL_BLEND_MODES_raw _EXPORTED_ENABLE_HSL_BLEND_MODES +#define GLSL_ENABLE_INSTANCE_INDEX "_EXPORTED_ENABLE_INSTANCE_INDEX" +#define GLSL_ENABLE_INSTANCE_INDEX_raw _EXPORTED_ENABLE_INSTANCE_INDEX +#define GLSL_ENABLE_KHR_BLEND "_EXPORTED_ENABLE_KHR_BLEND" +#define GLSL_ENABLE_KHR_BLEND_raw _EXPORTED_ENABLE_KHR_BLEND +#define GLSL_ENABLE_MIN_16_PRECISION "_EXPORTED_ENABLE_MIN_16_PRECISION" +#define GLSL_ENABLE_MIN_16_PRECISION_raw _EXPORTED_ENABLE_MIN_16_PRECISION +#define GLSL_ENABLE_NESTED_CLIPPING "_EXPORTED_ENABLE_NESTED_CLIPPING" +#define 
GLSL_ENABLE_NESTED_CLIPPING_raw _EXPORTED_ENABLE_NESTED_CLIPPING +#define GLSL_ENABLE_RASTERIZER_ORDERED_VIEWS "_EXPORTED_ENABLE_RASTERIZER_ORDERED_VIEWS" +#define GLSL_ENABLE_RASTERIZER_ORDERED_VIEWS_raw _EXPORTED_ENABLE_RASTERIZER_ORDERED_VIEWS +#define GLSL_ENABLE_SPIRV_CROSS_BASE_INSTANCE "_EXPORTED_ENABLE_SPIRV_CROSS_BASE_INSTANCE" +#define GLSL_ENABLE_SPIRV_CROSS_BASE_INSTANCE_raw _EXPORTED_ENABLE_SPIRV_CROSS_BASE_INSTANCE +#define GLSL_ENABLE_TYPED_UAV_LOAD_STORE "_EXPORTED_ENABLE_TYPED_UAV_LOAD_STORE" +#define GLSL_ENABLE_TYPED_UAV_LOAD_STORE_raw _EXPORTED_ENABLE_TYPED_UAV_LOAD_STORE +#define GLSL_FIXED_FUNCTION_COLOR_BLEND "_EXPORTED_FIXED_FUNCTION_COLOR_BLEND" +#define GLSL_FIXED_FUNCTION_COLOR_BLEND_raw _EXPORTED_FIXED_FUNCTION_COLOR_BLEND +#define GLSL_FRAGMENT "_EXPORTED_FRAGMENT" +#define GLSL_FRAGMENT_raw _EXPORTED_FRAGMENT +#define GLSL_FlushUniforms "_EXPORTED_FlushUniforms" +#define GLSL_FlushUniforms_raw _EXPORTED_FlushUniforms +#define GLSL_GLSL_VERSION "_EXPORTED_GLSL_VERSION" +#define GLSL_GLSL_VERSION_raw _EXPORTED_GLSL_VERSION +#define GLSL_INITIALIZE_PLS "_EXPORTED_INITIALIZE_PLS" +#define GLSL_INITIALIZE_PLS_raw _EXPORTED_INITIALIZE_PLS +#define GLSL_ImageDrawUniforms "_EXPORTED_ImageDrawUniforms" +#define GLSL_ImageDrawUniforms_raw _EXPORTED_ImageDrawUniforms +#define GLSL_LOAD_COLOR "_EXPORTED_LOAD_COLOR" +#define GLSL_LOAD_COLOR_raw _EXPORTED_LOAD_COLOR +#define GLSL_OPTIONALLY_FLAT "_EXPORTED_OPTIONALLY_FLAT" +#define GLSL_OPTIONALLY_FLAT_raw _EXPORTED_OPTIONALLY_FLAT +#define GLSL_PLS_IMPL_ANGLE "_EXPORTED_PLS_IMPL_ANGLE" +#define GLSL_PLS_IMPL_ANGLE_raw _EXPORTED_PLS_IMPL_ANGLE +#define GLSL_PLS_IMPL_DEVICE_BUFFER "_EXPORTED_PLS_IMPL_DEVICE_BUFFER" +#define GLSL_PLS_IMPL_DEVICE_BUFFER_raw _EXPORTED_PLS_IMPL_DEVICE_BUFFER +#define GLSL_PLS_IMPL_DEVICE_BUFFER_RASTER_ORDERED "_EXPORTED_PLS_IMPL_DEVICE_BUFFER_RASTER_ORDERED" +#define GLSL_PLS_IMPL_DEVICE_BUFFER_RASTER_ORDERED_raw _EXPORTED_PLS_IMPL_DEVICE_BUFFER_RASTER_ORDERED +#define GLSL_PLS_IMPL_EXT_NATIVE "_EXPORTED_PLS_IMPL_EXT_NATIVE" +#define GLSL_PLS_IMPL_EXT_NATIVE_raw _EXPORTED_PLS_IMPL_EXT_NATIVE +#define GLSL_PLS_IMPL_FRAMEBUFFER_FETCH "_EXPORTED_PLS_IMPL_FRAMEBUFFER_FETCH" +#define GLSL_PLS_IMPL_FRAMEBUFFER_FETCH_raw _EXPORTED_PLS_IMPL_FRAMEBUFFER_FETCH +#define GLSL_PLS_IMPL_NONE "_EXPORTED_PLS_IMPL_NONE" +#define GLSL_PLS_IMPL_NONE_raw _EXPORTED_PLS_IMPL_NONE +#define GLSL_PLS_IMPL_STORAGE_TEXTURE "_EXPORTED_PLS_IMPL_STORAGE_TEXTURE" +#define GLSL_PLS_IMPL_STORAGE_TEXTURE_raw _EXPORTED_PLS_IMPL_STORAGE_TEXTURE +#define GLSL_PLS_IMPL_SUBPASS_LOAD "_EXPORTED_PLS_IMPL_SUBPASS_LOAD" +#define GLSL_PLS_IMPL_SUBPASS_LOAD_raw _EXPORTED_PLS_IMPL_SUBPASS_LOAD +#define GLSL_RESOLVE_PLS "_EXPORTED_RESOLVE_PLS" +#define GLSL_RESOLVE_PLS_raw _EXPORTED_RESOLVE_PLS +#define GLSL_STORE_COLOR "_EXPORTED_STORE_COLOR" +#define GLSL_STORE_COLOR_raw _EXPORTED_STORE_COLOR +#define GLSL_STORE_COLOR_CLEAR "_EXPORTED_STORE_COLOR_CLEAR" +#define GLSL_STORE_COLOR_CLEAR_raw _EXPORTED_STORE_COLOR_CLEAR +#define GLSL_SWIZZLE_COLOR_BGRA_TO_RGBA "_EXPORTED_SWIZZLE_COLOR_BGRA_TO_RGBA" +#define GLSL_SWIZZLE_COLOR_BGRA_TO_RGBA_raw _EXPORTED_SWIZZLE_COLOR_BGRA_TO_RGBA +#define GLSL_TARGET_VULKAN "_EXPORTED_TARGET_VULKAN" +#define GLSL_TARGET_VULKAN_raw _EXPORTED_TARGET_VULKAN +#define GLSL_USE_GENERATED_UNIFORMS "_EXPORTED_USE_GENERATED_UNIFORMS" +#define GLSL_USE_GENERATED_UNIFORMS_raw _EXPORTED_USE_GENERATED_UNIFORMS +#define GLSL_USING_DEPTH_STENCIL "_EXPORTED_USING_DEPTH_STENCIL" +#define GLSL_USING_DEPTH_STENCIL_raw 
_EXPORTED_USING_DEPTH_STENCIL +#define GLSL_USING_PLS_STORAGE_TEXTURES "_EXPORTED_USING_PLS_STORAGE_TEXTURES" +#define GLSL_USING_PLS_STORAGE_TEXTURES_raw _EXPORTED_USING_PLS_STORAGE_TEXTURES +#define GLSL_VERTEX "_EXPORTED_VERTEX" +#define GLSL_VERTEX_raw _EXPORTED_VERTEX +#define GLSL_a_args "_EXPORTED_a_args" +#define GLSL_a_args_raw _EXPORTED_a_args +#define GLSL_a_args_a "_EXPORTED_a_args_a" +#define GLSL_a_args_a_raw _EXPORTED_a_args_a +#define GLSL_a_args_b "_EXPORTED_a_args_b" +#define GLSL_a_args_b_raw _EXPORTED_a_args_b +#define GLSL_a_args_c "_EXPORTED_a_args_c" +#define GLSL_a_args_c_raw _EXPORTED_a_args_c +#define GLSL_a_args_d "_EXPORTED_a_args_d" +#define GLSL_a_args_d_raw _EXPORTED_a_args_d +#define GLSL_a_imageRectVertex "_EXPORTED_a_imageRectVertex" +#define GLSL_a_imageRectVertex_raw _EXPORTED_a_imageRectVertex +#define GLSL_a_joinTan_and_ys "_EXPORTED_a_joinTan_and_ys" +#define GLSL_a_joinTan_and_ys_raw _EXPORTED_a_joinTan_and_ys +#define GLSL_a_mirroredVertexData "_EXPORTED_a_mirroredVertexData" +#define GLSL_a_mirroredVertexData_raw _EXPORTED_a_mirroredVertexData +#define GLSL_a_p0p1_ "_EXPORTED_a_p0p1_" +#define GLSL_a_p0p1__raw _EXPORTED_a_p0p1_ +#define GLSL_a_p2p3_ "_EXPORTED_a_p2p3_" +#define GLSL_a_p2p3__raw _EXPORTED_a_p2p3_ +#define GLSL_a_patchVertexData "_EXPORTED_a_patchVertexData" +#define GLSL_a_patchVertexData_raw _EXPORTED_a_patchVertexData +#define GLSL_a_position "_EXPORTED_a_position" +#define GLSL_a_position_raw _EXPORTED_a_position +#define GLSL_a_span "_EXPORTED_a_span" +#define GLSL_a_span_raw _EXPORTED_a_span +#define GLSL_a_span_a "_EXPORTED_a_span_a" +#define GLSL_a_span_a_raw _EXPORTED_a_span_a +#define GLSL_a_span_b "_EXPORTED_a_span_b" +#define GLSL_a_span_b_raw _EXPORTED_a_span_b +#define GLSL_a_span_c "_EXPORTED_a_span_c" +#define GLSL_a_span_c_raw _EXPORTED_a_span_c +#define GLSL_a_span_d "_EXPORTED_a_span_d" +#define GLSL_a_span_d_raw _EXPORTED_a_span_d +#define GLSL_a_texCoord "_EXPORTED_a_texCoord" +#define GLSL_a_texCoord_raw _EXPORTED_a_texCoord +#define GLSL_a_triangleVertex "_EXPORTED_a_triangleVertex" +#define GLSL_a_triangleVertex_raw _EXPORTED_a_triangleVertex +#define GLSL_blitFragmentMain "_EXPORTED_blitFragmentMain" +#define GLSL_blitFragmentMain_raw _EXPORTED_blitFragmentMain +#define GLSL_blitTextureSource "_EXPORTED_blitTextureSource" +#define GLSL_blitTextureSource_raw _EXPORTED_blitTextureSource +#define GLSL_blitVertexMain "_EXPORTED_blitVertexMain" +#define GLSL_blitVertexMain_raw _EXPORTED_blitVertexMain +#define GLSL_clearColor "_EXPORTED_clearColor" +#define GLSL_clearColor_raw _EXPORTED_clearColor +#define GLSL_colorRampFragmentMain "_EXPORTED_colorRampFragmentMain" +#define GLSL_colorRampFragmentMain_raw _EXPORTED_colorRampFragmentMain +#define GLSL_colorRampVertexMain "_EXPORTED_colorRampVertexMain" +#define GLSL_colorRampVertexMain_raw _EXPORTED_colorRampVertexMain +#define GLSL_contourBuffer "_EXPORTED_contourBuffer" +#define GLSL_contourBuffer_raw _EXPORTED_contourBuffer +#define GLSL_drawFragmentMain "_EXPORTED_drawFragmentMain" +#define GLSL_drawFragmentMain_raw _EXPORTED_drawFragmentMain +#define GLSL_drawVertexMain "_EXPORTED_drawVertexMain" +#define GLSL_drawVertexMain_raw _EXPORTED_drawVertexMain +#define GLSL_dstColorTexture "_EXPORTED_dstColorTexture" +#define GLSL_dstColorTexture_raw _EXPORTED_dstColorTexture +#define GLSL_gradTexture "_EXPORTED_gradTexture" +#define GLSL_gradTexture_raw _EXPORTED_gradTexture +#define GLSL_imageTexture "_EXPORTED_imageTexture" +#define GLSL_imageTexture_raw 
_EXPORTED_imageTexture +#define GLSL_paintAuxBuffer "_EXPORTED_paintAuxBuffer" +#define GLSL_paintAuxBuffer_raw _EXPORTED_paintAuxBuffer +#define GLSL_paintBuffer "_EXPORTED_paintBuffer" +#define GLSL_paintBuffer_raw _EXPORTED_paintBuffer +#define GLSL_pathBuffer "_EXPORTED_pathBuffer" +#define GLSL_pathBuffer_raw _EXPORTED_pathBuffer +#define GLSL_stencilVertexMain "_EXPORTED_stencilVertexMain" +#define GLSL_stencilVertexMain_raw _EXPORTED_stencilVertexMain +#define GLSL_tessVertexTexture "_EXPORTED_tessVertexTexture" +#define GLSL_tessVertexTexture_raw _EXPORTED_tessVertexTexture +#define GLSL_tessellateFragmentMain "_EXPORTED_tessellateFragmentMain" +#define GLSL_tessellateFragmentMain_raw _EXPORTED_tessellateFragmentMain +#define GLSL_tessellateVertexMain "_EXPORTED_tessellateVertexMain" +#define GLSL_tessellateVertexMain_raw _EXPORTED_tessellateVertexMain diff --git a/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/tessellate.glsl.hpp b/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/tessellate.glsl.hpp new file mode 100644 index 00000000..90c38f91 --- /dev/null +++ b/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/tessellate.glsl.hpp @@ -0,0 +1,436 @@ +#pragma once + +#include "tessellate.exports.h" + +namespace rive { +namespace gpu { +namespace glsl { +const char tessellate[] = R"===(/* + * Copyright 2020 Google LLC. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + * + * Initial import from skia:src/gpu/ganesh/tessellate/GrStrokeTessellationShader.cpp + * + * Copyright 2022 Rive + */ + +#define MAX_PARAMETRIC_SEGMENTS_LOG2 10 // Max 1024 segments. + +#ifdef _EXPORTED_VERTEX +ATTR_BLOCK_BEGIN(Attrs) +ATTR(0, float4, _EXPORTED_a_p0p1_); // End in '_' because D3D interprets the '1' as a semantic index. +ATTR(1, float4, _EXPORTED_a_p2p3_); +ATTR(2, float4, _EXPORTED_a_joinTan_and_ys); // [joinTangent, y, reflectionY] +#ifdef SPLIT_UINT4_ATTRIBUTES +ATTR(3, uint, _EXPORTED_a_args_a); +ATTR(4, uint, _EXPORTED_a_args_b); +ATTR(5, uint, _EXPORTED_a_args_c); +ATTR(6, uint, _EXPORTED_a_args_d); +#else +ATTR(3, uint4, _EXPORTED_a_args); // [x0x1, reflectionX0X1, segmentCounts, contourIDWithFlags] +#endif +ATTR_BLOCK_END +#endif + +VARYING_BLOCK_BEGIN +NO_PERSPECTIVE VARYING(0, float4, v_p0p1); +NO_PERSPECTIVE VARYING(1, float4, v_p2p3); +NO_PERSPECTIVE VARYING(2, float4, v_args); // [vertexIdx, totalVertexCount, joinSegmentCount, + // parametricSegmentCount, radsPerPolarSegment] +NO_PERSPECTIVE VARYING(3, float3, v_joinArgs); // [joinTangent, radsPerJoinSegment] +FLAT VARYING(4, uint, v_contourIDWithFlags); +VARYING_BLOCK_END + +// Tangent of the curve at T=0 and T=1. +INLINE float2x2 find_tangents(float2 p0, float2 p1, float2 p2, float2 p3) +{ + float2x2 t; + t[0] = (any(notEqual(p0, p1)) ? p1 : any(notEqual(p1, p2)) ? p2 : p3) - p0; + t[1] = p3 - (any(notEqual(p3, p2)) ? p2 : any(notEqual(p2, p1)) ? p1 : p0); + return t; +} + +#ifdef _EXPORTED_VERTEX +VERTEX_TEXTURE_BLOCK_BEGIN +VERTEX_TEXTURE_BLOCK_END + +VERTEX_STORAGE_BUFFER_BLOCK_BEGIN +STORAGE_BUFFER_U32x4(PATH_BUFFER_IDX, PathBuffer, _EXPORTED_pathBuffer); +STORAGE_BUFFER_U32x4(CONTOUR_BUFFER_IDX, ContourBuffer, _EXPORTED_contourBuffer); +VERTEX_STORAGE_BUFFER_BLOCK_END + +float cosine_between_vectors(float2 a, float2 b) +{ + // FIXME(crbug.com/800804,skbug.com/11268): This can overflow if we don't normalize exponents. 
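+    // What this computes: cos(theta) = dot(a, b) / (|a| * |b|), using one inversesqrt over
+    // |a|^2 * |b|^2. The clamp keeps rounding error from pushing the result outside [-1, 1]
+    // (which would make acos() at the call sites return NaN), and zero-length tangents fall
+    // back to cos(theta) = 1.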
+    float ab_cosTheta = dot(a, b);
+    float ab_pow2 = dot(a, a) * dot(b, b);
+    return (ab_pow2 == .0) ? 1. : clamp(ab_cosTheta * inversesqrt(ab_pow2), -1., 1.);
+}
+
+VERTEX_MAIN(_EXPORTED_tessellateVertexMain, Attrs, attrs, _vertexID, _instanceID)
+{
+    // Each instance repeats twice. Once for normal patch(es) and once for reflection(s).
+    ATTR_UNPACK(_instanceID, attrs, _EXPORTED_a_p0p1_, float4);
+    ATTR_UNPACK(_instanceID, attrs, _EXPORTED_a_p2p3_, float4);
+    ATTR_UNPACK(_instanceID, attrs, _EXPORTED_a_joinTan_and_ys, float4);
+
+#ifdef SPLIT_UINT4_ATTRIBUTES
+    ATTR_UNPACK(_instanceID, attrs, _EXPORTED_a_args_a, uint);
+    ATTR_UNPACK(_instanceID, attrs, _EXPORTED_a_args_b, uint);
+    ATTR_UNPACK(_instanceID, attrs, _EXPORTED_a_args_c, uint);
+    ATTR_UNPACK(_instanceID, attrs, _EXPORTED_a_args_d, uint);
+
+    uint4 _EXPORTED_a_args = uint4(_EXPORTED_a_args_a, _EXPORTED_a_args_b, _EXPORTED_a_args_c, _EXPORTED_a_args_d);
+
+#else
+    ATTR_UNPACK(_instanceID, attrs, _EXPORTED_a_args, uint4);
+
+#endif
+
+    VARYING_INIT(v_p0p1, float4);
+    VARYING_INIT(v_p2p3, float4);
+    VARYING_INIT(v_args, float4);
+    VARYING_INIT(v_joinArgs, float3);
+    VARYING_INIT(v_contourIDWithFlags, uint);
+
+    float2 p0 = _EXPORTED_a_p0p1_.xy;
+    float2 p1 = _EXPORTED_a_p0p1_.zw;
+    float2 p2 = _EXPORTED_a_p2p3_.xy;
+    float2 p3 = _EXPORTED_a_p2p3_.zw;
+    // Each instance has two spans, potentially for both a forward copy and a reflection.
+    // (If the second span isn't needed, the client will have placed it offscreen.)
+    bool isFirstSpan = _vertexID < 4;
+    float y = isFirstSpan ? _EXPORTED_a_joinTan_and_ys.z : _EXPORTED_a_joinTan_and_ys.w;
+    int x0x1 = int(isFirstSpan ? _EXPORTED_a_args.x : _EXPORTED_a_args.y);
+#ifdef GLSL
+    int x1up = x0x1 << 16;
+    if (_EXPORTED_a_args.z == 0xffffffffu)
+    {
+        // Pixel 8 with ARM Mali-G715 throws away "x0x1 << 16 >> 16". We need this in order to
+        // sign-extend the bottom 16 bits of x0x1.
+        // Create a branch that we know won't be taken, in order to convince the compiler not to
+        // throw this operation away.
+        // NOTE: we could use bitfieldExtract(), but it isn't available on ES 3.0.
+        --x1up;
+    }
+    float x0 = float(x1up >> 16);
+#else
+    float x0 = float(x0x1 << 16 >> 16);
+#endif
+    float x1 = float(x0x1 >> 16);
+    float2 coord = float2((_vertexID & 1) == 0 ? x0 : x1, (_vertexID & 2) == 0 ? y + 1. : y);
+
+    uint parametricSegmentCount = _EXPORTED_a_args.z & 0x3ffu;
+    uint polarSegmentCount = (_EXPORTED_a_args.z >> 10) & 0x3ffu;
+    uint joinSegmentCount = _EXPORTED_a_args.z >> 20;
+    uint contourIDWithFlags = _EXPORTED_a_args.w;
+    if (x1 < x0) // Reflections are drawn right to left.
+    {
+        contourIDWithFlags |= MIRRORED_CONTOUR_CONTOUR_FLAG;
+    }
+    if ((x1 - x0) * uniforms.tessInverseViewportY < .0)
+    {
+        // Make sure we always emit clockwise triangles. Swap the top and bottom vertices.
+        coord.y = 2. * y + 1. - coord.y;
+    }
+    if ((contourIDWithFlags & CULL_EXCESS_TESSELLATION_SEGMENTS_CONTOUR_FLAG) != 0u)
+    {
+        // This span may have more tessellation vertices allocated to it than necessary (e.g.,
+        // outerCurve patches all have a fixed patch size, regardless of how many segments the curve
+        // actually needs). Re-run Wang's formula to figure out how many segments we actually need,
+        // and make any excess segments degenerate by co-locating their vertices at T=0.
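+        // Wang's formula: splitting the cubic into
+        //     n = ceil(sqrt(3/(4*eps) * max(|p0 - 2*p1 + p2|, |p1 - 2*p2 + p3|)))
+        // parametric segments keeps the flattened curve within eps of the true
+        // curve. The ".75 * 4." constant below equals 3/(4*eps) for eps = 1/4 px
+        // (inferred from the constants; the tolerance isn't stated explicitly here).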
+ uint pathIDBits = + STORAGE_BUFFER_LOAD4(_EXPORTED_contourBuffer, contour_data_idx(contourIDWithFlags)).z; + float2x2 mat = + make_float2x2(uintBitsToFloat(STORAGE_BUFFER_LOAD4(_EXPORTED_pathBuffer, pathIDBits * 2u))); + float2 d0 = MUL(mat, -2. * p1 + p2 + p0); + + float2 d1 = MUL(mat, -2. * p2 + p3 + p1); + float m = max(dot(d0, d0), dot(d1, d1)); + float n = max(ceil(sqrt(.75 * 4. * sqrt(m))), 1.); + parametricSegmentCount = min(uint(n), parametricSegmentCount); + } + // Polar and parametric segments share the same beginning and ending vertices, so the merged + // *vertex* count is equal to the sum of polar and parametric *segment* counts. + uint totalVertexCount = parametricSegmentCount + polarSegmentCount + joinSegmentCount - 1u; + + float2x2 tangents = find_tangents(p0, p1, p2, p3); + float theta = acos(cosine_between_vectors(tangents[0], tangents[1])); + float radsPerPolarSegment = theta / float(polarSegmentCount); + // Adjust sign of radsPerPolarSegment to match the direction the curve turns. + // NOTE: Since the curve is not allowed to inflect, we can just check F'(.5) x F''(.5). + // NOTE: F'(.5) x F''(.5) has the same sign as (p2 - p0) x (p3 - p1). + float turn = determinant(float2x2(p2 - p0, p3 - p1)); + if (turn == .0) // This is the case for joins and cusps where points are co-located. + turn = determinant(tangents); + if (turn < .0) + radsPerPolarSegment = -radsPerPolarSegment; + + v_p0p1 = float4(p0, p1); + v_p2p3 = float4(p2, p3); + v_args = float4(float(totalVertexCount) - abs(x1 - coord.x), // vertexIdx + float(totalVertexCount), // totalVertexCount + (joinSegmentCount << 10) | parametricSegmentCount, + radsPerPolarSegment); + if (joinSegmentCount > 1u) + { + float2x2 joinTangents = float2x2(tangents[1], _EXPORTED_a_joinTan_and_ys.xy); + float joinTheta = acos(cosine_between_vectors(joinTangents[0], joinTangents[1])); + float joinSpan = float(joinSegmentCount); + if ((contourIDWithFlags & (JOIN_TYPE_MASK | EMULATED_STROKE_CAP_CONTOUR_FLAG)) == + EMULATED_STROKE_CAP_CONTOUR_FLAG) + { + // Round caps emulated as joins need to emit vertices at T=0 and T=1, unlike normal + // round joins. The fragment shader will handle most of this, but here we need to adjust + // radsPerJoinSegment to account for the fact that this join will be rotating around two + // more segments. + joinSpan -= 2.; + } + float radsPerJoinSegment = joinTheta / joinSpan; + if (determinant(joinTangents) < .0) + radsPerJoinSegment = -radsPerJoinSegment; + v_joinArgs.xy = _EXPORTED_a_joinTan_and_ys.xy; + v_joinArgs.z = radsPerJoinSegment; + } + v_contourIDWithFlags = contourIDWithFlags; + + float4 pos; + pos.x = coord.x * (2. / TESS_TEXTURE_WIDTH) - 1.; + pos.y = coord.y * uniforms.tessInverseViewportY - sign(uniforms.tessInverseViewportY); + pos.zw = float2(0, 1); + + VARYING_PACK(v_p0p1); + VARYING_PACK(v_p2p3); + VARYING_PACK(v_args); + VARYING_PACK(v_joinArgs); + VARYING_PACK(v_contourIDWithFlags); + EMIT_VERTEX(pos); +} +#endif + +#ifdef _EXPORTED_FRAGMENT +FRAG_DATA_MAIN(uint4, _EXPORTED_tessellateFragmentMain) +{ + VARYING_UNPACK(v_p0p1, float4); + VARYING_UNPACK(v_p2p3, float4); + VARYING_UNPACK(v_args, float4); + VARYING_UNPACK(v_joinArgs, float3); + VARYING_UNPACK(v_contourIDWithFlags, uint); + + float2 p0 = v_p0p1.xy; + float2 p1 = v_p0p1.zw; + float2 p2 = v_p2p3.xy; + float2 p3 = v_p2p3.zw; + float2x2 tangents = find_tangents(p0, p1, p2, p3); + // Colocate any padding vertices at T=0. 
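+    // (The vertex shader wrote v_args.x as totalVertexCount - |x1 - coord.x|, so
+    // padding vertices past the needed count arrive here with negative indices;
+    // the max() clamps them to vertex 0, which lands on T=0.)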
+ float vertexIdx = max(floor(v_args.x), .0); + float totalVertexCount = v_args.y; + uint joinSegmentCount_and_parametricSegmentCount = uint(v_args.z); + float parametricSegmentCount = float(joinSegmentCount_and_parametricSegmentCount & 0x3ffu); + float joinSegmentCount = float(joinSegmentCount_and_parametricSegmentCount >> 10); + float radsPerPolarSegment = v_args.w; + uint contourIDWithFlags = v_contourIDWithFlags; + + // mergedVertexID/mergedSegmentCount are relative to the sub-section of the instance this vertex + // belongs to (either the curve section that consists of merged polar and parametric segments, + // or the join section composed of just polar segments). + // + // Begin with the assumption that we belong to the curve section. + float mergedSegmentCount = totalVertexCount - joinSegmentCount; + float mergedVertexID = vertexIdx; + if (mergedVertexID <= mergedSegmentCount) + { + // We do belong to the curve section. Clear out any stroke join flags. + contourIDWithFlags &= ~JOIN_TYPE_MASK; + } + else + { + // We actually belong to the join section following the curve. Construct a point-cubic with + // rotation. + p0 = p1 = p2 = p3; + tangents = float2x2(tangents[1], v_joinArgs.xy /*joinTangent*/); + parametricSegmentCount = 1.; + mergedVertexID -= mergedSegmentCount; + mergedSegmentCount = joinSegmentCount; + if ((contourIDWithFlags & JOIN_TYPE_MASK) != 0u) + { + // Miter or bevel join vertices snap to either tangents[0] or tangents[1], and get + // adjusted in the shader that follows. + if (mergedVertexID < 2.5) // With 5 join segments, this branch will see IDs: 1, 2, 3, 4. + contourIDWithFlags |= JOIN_TANGENT_0_CONTOUR_FLAG; + if (mergedVertexID > 1.5 && mergedVertexID < 3.5) + contourIDWithFlags |= JOIN_TANGENT_INNER_CONTOUR_FLAG; + } + else if ((contourIDWithFlags & EMULATED_STROKE_CAP_CONTOUR_FLAG) != 0u) + { + // Round caps emulated as joins need to emit vertices at T=0 and T=1, unlike normal + // round joins. Preserve the same number of vertices (the CPU should have given us two + // extra, knowing that we are an emulated cap, and the vertex shader should have already + // accounted for this in radsPerJoinSegment), but adjust our stepping parameters so we + // begin at T=0 and end at T=1. + mergedSegmentCount -= 2.; + mergedVertexID--; + } + radsPerPolarSegment = v_joinArgs.z; // radsPerJoinSegment. + contourIDWithFlags |= + radsPerPolarSegment < .0 ? LEFT_JOIN_CONTOUR_FLAG : RIGHT_JOIN_CONTOUR_FLAG; + } + + float2 tessCoord; + float theta = .0; + if (mergedVertexID == .0 || mergedVertexID == mergedSegmentCount || + (contourIDWithFlags & JOIN_TYPE_MASK) != 0u) + { + // Tessellated vertices at the beginning and end of the strip use exact endpoints and + // tangents. This ensures crack-free seaming between instances. + bool isTan0 = mergedVertexID < mergedSegmentCount * .5; + tessCoord = isTan0 ? p0 : p3; + theta = atan2(isTan0 ? tangents[0] : tangents[1]); + } + else if ((contourIDWithFlags & RETROFITTED_TRIANGLE_CONTOUR_FLAG) != 0u) + { + // This cubic should actually be drawn as the single, non-AA triangle: [p0, p1, p3]. + // This is used to squeeze in more rare triangles, like "grout" triangles from self + // intersections on interior triangulation, where it wouldn't be worth it to put them in + // their own dedicated draw call. + tessCoord = p1; + } + else + { + float T, polarT; + if (parametricSegmentCount == mergedSegmentCount) + { + // There are no polar vertices. This is (probably) a fill. Vertices are spaced evenly in + // parametric space. 
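+            // e.g., parametricSegmentCount == 8 places vertex IDs 0..8 at
+            // T = 0, 1/8, 2/8, ..., 1.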
+ T = mergedVertexID / parametricSegmentCount; + polarT = .0; // Set polarT != T to ensure we calculate the parametric tangent later. + } + else + { + // Compute the location and tangent direction of the tessellated stroke vertex with the + // integral id "mergedVertexID", where mergedVertexID is the sorted-order index of + // parametric and polar vertices. Start by finding the tangent function's power basis + // coefficients. These define a tangent direction (scaled by some uniform value) as: + // + // |T^2| + // Tangent_Direction(T) = dx,dy = |A 2B C| * |T | + // |. . .| |1 | + float2 A, B, C = p1 - p0; + float2 D = p3 - p0; + float2 E = p2 - p1; + B = E - C; + A = -3. * E + D; + // FIXME(crbug.com/800804,skbug.com/11268): Consider normalizing the exponents in A,B,C + // at this point in order to prevent fp32 overflow. + + // Now find the coefficients that give a tangent direction from a parametric vertex ID: + // + // |parametricVertexID^2| + // Tangent_Direction(parametricVertexID) = dx,dy = |A B_ C_| * |parametricVertexID | + // |. . .| |1 | + // + float2 B_ = B * (parametricSegmentCount * 2.); + float2 C_ = C * (parametricSegmentCount * parametricSegmentCount); + + // Run a binary search to determine the highest parametric vertex that is located on or + // before the mergedVertexID. A merged ID is determined by the sum of complete + // parametric and polar segments behind it. i.e., find the highest parametric vertex + // where: + // + // parametricVertexID + floor(numPolarSegmentsAtParametricT) <= mergedVertexID + // + float lastParametricVertexID = .0; + float maxParametricVertexID = min(parametricSegmentCount - 1., mergedVertexID); + // FIXME(crbug.com/800804,skbug.com/11268): This normalize() can overflow. + float2 tan0norm = normalize(tangents[0]); + float negAbsRadsPerSegment = -abs(radsPerPolarSegment); + float maxRotation0 = (1. + mergedVertexID) * abs(radsPerPolarSegment); + for (int p = MAX_PARAMETRIC_SEGMENTS_LOG2 - 1; p >= 0; --p) + { + // Test the parametric vertex at lastParametricVertexID + 2^p. + float testParametricID = lastParametricVertexID + exp2(float(p)); + if (testParametricID <= maxParametricVertexID) + { + float2 testTan = testParametricID * A + B_; + testTan = testParametricID * testTan + C_; + float cosRotation = dot(normalize(testTan), tan0norm); + float maxRotation = testParametricID * negAbsRadsPerSegment + maxRotation0; + maxRotation = min(maxRotation, PI); + // Is rotation <= maxRotation? (i.e., is the number of complete polar segments + // behind testT, + testParametricID <= mergedVertexID?) + if (cosRotation >= cos(maxRotation)) + lastParametricVertexID = testParametricID; + } + } + + // Find the T value of the parametric vertex at lastParametricVertexID. + float parametricT = lastParametricVertexID / parametricSegmentCount; + + // Now that we've identified the highest parametric vertex on or before the + // mergedVertexID, the highest polar vertex is easy: + float lastPolarVertexID = mergedVertexID - lastParametricVertexID; + + // Find the angle of tan0, or the angle between tan0norm and the positive x axis. + float theta0 = acos(clamp(tan0norm.x, -1., 1.)); + theta0 = tan0norm.y >= .0 ? theta0 : -theta0; + + // Find the tangent vector on the vertex at lastPolarVertexID. + theta = lastPolarVertexID * radsPerPolarSegment + theta0; + float2 norm = float2(sin(theta), -cos(theta)); + + // Find the T value where the tangent is orthogonal to norm. 
This is a quadratic:
+            //
+            //     dot(norm, Tangent_Direction(T)) == 0
+            //
+            //                         |T^2|
+            //     norm * |A  2B  C| * |T  | == 0
+            //            |.   .  .|   |1  |
+            //
+            float a = dot(norm, A), b_over_2 = dot(norm, B), c = dot(norm, C);
+            float discr_over_4 = max(b_over_2 * b_over_2 - a * c, .0);
+            float q = sqrt(discr_over_4);
+            if (b_over_2 > .0)
+                q = -q;
+            q -= b_over_2;
+
+            // Roots are q/a and c/q. Since each curve section does not inflect or rotate more than
+            // 180 degrees, there can only be one tangent orthogonal to "norm" inside 0..1. Pick the
+            // root nearest .5.
+            float _5qa = -.5 * q * a;
+            float2 root = (abs(q * q + _5qa) < abs(a * c + _5qa)) ? float2(q, a) : float2(c, q);
+            polarT = (root.y != .0) ? root.x / root.y : .0;
+            polarT = clamp(polarT, .0, 1.);
+
+            // The root finder above can become unstable when lastPolarVertexID == 0 (e.g., if there
+            // are roots at exactly 0 and 1 both). polarT should always == 0 in this case.
+            if (lastPolarVertexID == .0)
+                polarT = .0;
+
+            // Now that we've identified the T values of the last parametric and polar vertices, our
+            // final T value for mergedVertexID is whichever is larger.
+            T = max(parametricT, polarT);
+        }
+
+        // Evaluate the cubic at T. Use De Casteljau's for its accuracy and stability.
+        float2 ab = unchecked_mix(p0, p1, T);
+        float2 bc = unchecked_mix(p1, p2, T);
+        float2 cd = unchecked_mix(p2, p3, T);
+        float2 abc = unchecked_mix(ab, bc, T);
+        float2 bcd = unchecked_mix(bc, cd, T);
+        tessCoord = unchecked_mix(abc, bcd, T);
+
+        // If we went with T=parametricT, then update theta. Otherwise leave it at the polar theta
+        // found previously. (In the event that parametricT == polarT, we keep the polar theta.)
+        if (T != polarT)
+            theta = atan2(bcd - abc);
+    }
+
+    EMIT_FRAG_DATA(uint4(floatBitsToUint(float3(tessCoord, theta)), contourIDWithFlags));
+}
+#endif
+)===";
+} // namespace glsl
+} // namespace gpu
+} // namespace rive
\ No newline at end of file
diff --git a/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/tessellate.minified.ush b/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/tessellate.minified.ush
new file mode 100644
index 00000000..46e35447
--- /dev/null
+++ b/Source/ThirdParty/RiveLibrary/Includes/rive/shaders/out/generated/shaders/tessellate.minified.ush
@@ -0,0 +1,425 @@
+/*
+ * Copyright 2020 Google LLC.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ *
+ * Initial import from skia:src/gpu/ganesh/tessellate/GrStrokeTessellationShader.cpp
+ *
+ * Copyright 2022 Rive
+ */
+
+#define MAX_PARAMETRIC_SEGMENTS_LOG2 10 // Max 1024 segments.
+
+#ifdef VERTEX
+ATTR_BLOCK_BEGIN(Attrs)
+ATTR(0, float4, _EXPORTED_a_p0p1_); // End in '_' because D3D interprets the '1' as a semantic index.
+ATTR(1, float4, _EXPORTED_a_p2p3_);
+ATTR(2, float4, _EXPORTED_a_joinTan_and_ys); // [joinTangent, y, reflectionY]
+#ifdef SPLIT_UINT4_ATTRIBUTES
+ATTR(3, uint, _EXPORTED_a_args_a);
+ATTR(4, uint, _EXPORTED_a_args_b);
+ATTR(5, uint, _EXPORTED_a_args_c);
+ATTR(6, uint, _EXPORTED_a_args_d);
+#else
+ATTR(3, uint4, _EXPORTED_a_args); // [x0x1, reflectionX0X1, segmentCounts, contourIDWithFlags]
+#endif
+ATTR_BLOCK_END
+#endif
+
+VARYING_BLOCK_BEGIN
+NO_PERSPECTIVE VARYING(0, float4, v_p0p1);
+NO_PERSPECTIVE VARYING(1, float4, v_p2p3);
+NO_PERSPECTIVE VARYING(2, float4, v_args); // [vertexIdx, totalVertexCount, joinSegmentCount,
+                                           //  parametricSegmentCount, radsPerPolarSegment]
+NO_PERSPECTIVE VARYING(3, float3, v_joinArgs); // [joinTangent, radsPerJoinSegment]
+FLAT VARYING(4, uint, v_contourIDWithFlags);
+VARYING_BLOCK_END
+
+// Tangent of the curve at T=0 and T=1.
+INLINE float2x2 find_tangents(float2 p0, float2 p1, float2 p2, float2 p3)
+{
+    float2x2 t;
+    t[0] = (any(notEqual(p0, p1)) ? p1 : any(notEqual(p1, p2)) ? p2 : p3) - p0;
+    t[1] = p3 - (any(notEqual(p3, p2)) ? p2 : any(notEqual(p2, p1)) ? p1 : p0);
+    return t;
+}
+
+#ifdef VERTEX
+VERTEX_TEXTURE_BLOCK_BEGIN
+VERTEX_TEXTURE_BLOCK_END
+
+VERTEX_STORAGE_BUFFER_BLOCK_BEGIN
+STORAGE_BUFFER_U32x4(PATH_BUFFER_IDX, PathBuffer, _EXPORTED_pathBuffer);
+STORAGE_BUFFER_U32x4(CONTOUR_BUFFER_IDX, ContourBuffer, _EXPORTED_contourBuffer);
+VERTEX_STORAGE_BUFFER_BLOCK_END
+
+float cosine_between_vectors(float2 a, float2 b)
+{
+    // FIXME(crbug.com/800804,skbug.com/11268): This can overflow if we don't normalize exponents.
+    float ab_cosTheta = dot(a, b);
+    float ab_pow2 = dot(a, a) * dot(b, b);
+    return (ab_pow2 == .0) ? 1. : clamp(ab_cosTheta * inversesqrt(ab_pow2), -1., 1.);
+}
+
+VERTEX_MAIN(_EXPORTED_tessellateVertexMain, Attrs, attrs, _vertexID, _instanceID)
+{
+    // Each instance repeats twice. Once for normal patch(es) and once for reflection(s).
+    ATTR_UNPACK(_instanceID, attrs, _EXPORTED_a_p0p1_, float4);
+    ATTR_UNPACK(_instanceID, attrs, _EXPORTED_a_p2p3_, float4);
+    ATTR_UNPACK(_instanceID, attrs, _EXPORTED_a_joinTan_and_ys, float4);
+
+#ifdef SPLIT_UINT4_ATTRIBUTES
+    ATTR_UNPACK(_instanceID, attrs, _EXPORTED_a_args_a, uint);
+    ATTR_UNPACK(_instanceID, attrs, _EXPORTED_a_args_b, uint);
+    ATTR_UNPACK(_instanceID, attrs, _EXPORTED_a_args_c, uint);
+    ATTR_UNPACK(_instanceID, attrs, _EXPORTED_a_args_d, uint);
+
+    uint4 _EXPORTED_a_args = uint4(_EXPORTED_a_args_a, _EXPORTED_a_args_b, _EXPORTED_a_args_c, _EXPORTED_a_args_d);
+
+#else
+    ATTR_UNPACK(_instanceID, attrs, _EXPORTED_a_args, uint4);
+
+#endif
+
+    VARYING_INIT(v_p0p1, float4);
+    VARYING_INIT(v_p2p3, float4);
+    VARYING_INIT(v_args, float4);
+    VARYING_INIT(v_joinArgs, float3);
+    VARYING_INIT(v_contourIDWithFlags, uint);
+
+    float2 p0 = _EXPORTED_a_p0p1_.xy;
+    float2 p1 = _EXPORTED_a_p0p1_.zw;
+    float2 p2 = _EXPORTED_a_p2p3_.xy;
+    float2 p3 = _EXPORTED_a_p2p3_.zw;
+    // Each instance has two spans, potentially for both a forward copy and a reflection.
+    // (If the second span isn't needed, the client will have placed it offscreen.)
+    bool isFirstSpan = _vertexID < 4;
+    float y = isFirstSpan ? _EXPORTED_a_joinTan_and_ys.z : _EXPORTED_a_joinTan_and_ys.w;
+    int x0x1 = int(isFirstSpan ? _EXPORTED_a_args.x : _EXPORTED_a_args.y);
+#ifdef GLSL
+    int x1up = x0x1 << 16;
+    if (_EXPORTED_a_args.z == 0xffffffffu)
+    {
+        // Pixel 8 with ARM Mali-G715 throws away "x0x1 << 16 >> 16". We need this in order to
+        // sign-extend the bottom 16 bits of x0x1.
+ // Create a branch that we know won't be taken, in order to convince the compiler not to + // throw this operation away. + // NOTE: we could use bitfieldExtract(), but it isn't available on ES 3.0. + --x1up; + } + float x0 = float(x1up >> 16); +#else + float x0 = float(x0x1 << 16 >> 16); +#endif + float x1 = float(x0x1 >> 16); + float2 coord = float2((_vertexID & 1) == 0 ? x0 : x1, (_vertexID & 2) == 0 ? y + 1. : y); + + uint parametricSegmentCount = _EXPORTED_a_args.z & 0x3ffu; + uint polarSegmentCount = (_EXPORTED_a_args.z >> 10) & 0x3ffu; + uint joinSegmentCount = _EXPORTED_a_args.z >> 20; + uint contourIDWithFlags = _EXPORTED_a_args.w; + if (x1 < x0) // Reflections are drawn right to left. + { + contourIDWithFlags |= MIRRORED_CONTOUR_CONTOUR_FLAG; + } + if ((x1 - x0) * uniforms.tessInverseViewportY < .0) + { + // Make sure we always emit clockwise triangles. Swap the top and bottom vertices. + coord.y = 2. * y + 1. - coord.y; + } + if ((contourIDWithFlags & CULL_EXCESS_TESSELLATION_SEGMENTS_CONTOUR_FLAG) != 0u) + { + // This span may have more tessellation vertices allocated to it than necessary (e.g., + // outerCurve patches all have a fixed patch size, regardless of how many segments the curve + // actually needs). Re-run Wang's formula to figure out how many segments we actually need, + // and make any excess segments degenerate by co-locating their vertices at T=0. + uint pathIDBits = + STORAGE_BUFFER_LOAD4(_EXPORTED_contourBuffer, contour_data_idx(contourIDWithFlags)).z; + float2x2 mat = + make_float2x2(uintBitsToFloat(STORAGE_BUFFER_LOAD4(_EXPORTED_pathBuffer, pathIDBits * 2u))); + float2 d0 = MUL(mat, -2. * p1 + p2 + p0); + + float2 d1 = MUL(mat, -2. * p2 + p3 + p1); + float m = max(dot(d0, d0), dot(d1, d1)); + float n = max(ceil(sqrt(.75 * 4. * sqrt(m))), 1.); + parametricSegmentCount = min(uint(n), parametricSegmentCount); + } + // Polar and parametric segments share the same beginning and ending vertices, so the merged + // *vertex* count is equal to the sum of polar and parametric *segment* counts. + uint totalVertexCount = parametricSegmentCount + polarSegmentCount + joinSegmentCount - 1u; + + float2x2 tangents = find_tangents(p0, p1, p2, p3); + float theta = acos(cosine_between_vectors(tangents[0], tangents[1])); + float radsPerPolarSegment = theta / float(polarSegmentCount); + // Adjust sign of radsPerPolarSegment to match the direction the curve turns. + // NOTE: Since the curve is not allowed to inflect, we can just check F'(.5) x F''(.5). + // NOTE: F'(.5) x F''(.5) has the same sign as (p2 - p0) x (p3 - p1). + float turn = determinant(float2x2(p2 - p0, p3 - p1)); + if (turn == .0) // This is the case for joins and cusps where points are co-located. + turn = determinant(tangents); + if (turn < .0) + radsPerPolarSegment = -radsPerPolarSegment; + + v_p0p1 = float4(p0, p1); + v_p2p3 = float4(p2, p3); + v_args = float4(float(totalVertexCount) - abs(x1 - coord.x), // vertexIdx + float(totalVertexCount), // totalVertexCount + (joinSegmentCount << 10) | parametricSegmentCount, + radsPerPolarSegment); + if (joinSegmentCount > 1u) + { + float2x2 joinTangents = float2x2(tangents[1], _EXPORTED_a_joinTan_and_ys.xy); + float joinTheta = acos(cosine_between_vectors(joinTangents[0], joinTangents[1])); + float joinSpan = float(joinSegmentCount); + if ((contourIDWithFlags & (JOIN_TYPE_MASK | EMULATED_STROKE_CAP_CONTOUR_FLAG)) == + EMULATED_STROKE_CAP_CONTOUR_FLAG) + { + // Round caps emulated as joins need to emit vertices at T=0 and T=1, unlike normal + // round joins. 
The fragment shader will handle most of this, but here we need to adjust + // radsPerJoinSegment to account for the fact that this join will be rotating around two + // more segments. + joinSpan -= 2.; + } + float radsPerJoinSegment = joinTheta / joinSpan; + if (determinant(joinTangents) < .0) + radsPerJoinSegment = -radsPerJoinSegment; + v_joinArgs.xy = _EXPORTED_a_joinTan_and_ys.xy; + v_joinArgs.z = radsPerJoinSegment; + } + v_contourIDWithFlags = contourIDWithFlags; + + float4 pos; + pos.x = coord.x * (2. / TESS_TEXTURE_WIDTH) - 1.; + pos.y = coord.y * uniforms.tessInverseViewportY - sign(uniforms.tessInverseViewportY); + pos.zw = float2(0, 1); + + VARYING_PACK(v_p0p1); + VARYING_PACK(v_p2p3); + VARYING_PACK(v_args); + VARYING_PACK(v_joinArgs); + VARYING_PACK(v_contourIDWithFlags); + EMIT_VERTEX(pos); +} +#endif + +#ifdef FRAGMENT +FRAG_DATA_MAIN(uint4, _EXPORTED_tessellateFragmentMain) +{ + VARYING_UNPACK(v_p0p1, float4); + VARYING_UNPACK(v_p2p3, float4); + VARYING_UNPACK(v_args, float4); + VARYING_UNPACK(v_joinArgs, float3); + VARYING_UNPACK(v_contourIDWithFlags, uint); + + float2 p0 = v_p0p1.xy; + float2 p1 = v_p0p1.zw; + float2 p2 = v_p2p3.xy; + float2 p3 = v_p2p3.zw; + float2x2 tangents = find_tangents(p0, p1, p2, p3); + // Colocate any padding vertices at T=0. + float vertexIdx = max(floor(v_args.x), .0); + float totalVertexCount = v_args.y; + uint joinSegmentCount_and_parametricSegmentCount = uint(v_args.z); + float parametricSegmentCount = float(joinSegmentCount_and_parametricSegmentCount & 0x3ffu); + float joinSegmentCount = float(joinSegmentCount_and_parametricSegmentCount >> 10); + float radsPerPolarSegment = v_args.w; + uint contourIDWithFlags = v_contourIDWithFlags; + + // mergedVertexID/mergedSegmentCount are relative to the sub-section of the instance this vertex + // belongs to (either the curve section that consists of merged polar and parametric segments, + // or the join section composed of just polar segments). + // + // Begin with the assumption that we belong to the curve section. + float mergedSegmentCount = totalVertexCount - joinSegmentCount; + float mergedVertexID = vertexIdx; + if (mergedVertexID <= mergedSegmentCount) + { + // We do belong to the curve section. Clear out any stroke join flags. + contourIDWithFlags &= ~JOIN_TYPE_MASK; + } + else + { + // We actually belong to the join section following the curve. Construct a point-cubic with + // rotation. + p0 = p1 = p2 = p3; + tangents = float2x2(tangents[1], v_joinArgs.xy /*joinTangent*/); + parametricSegmentCount = 1.; + mergedVertexID -= mergedSegmentCount; + mergedSegmentCount = joinSegmentCount; + if ((contourIDWithFlags & JOIN_TYPE_MASK) != 0u) + { + // Miter or bevel join vertices snap to either tangents[0] or tangents[1], and get + // adjusted in the shader that follows. + if (mergedVertexID < 2.5) // With 5 join segments, this branch will see IDs: 1, 2, 3, 4. + contourIDWithFlags |= JOIN_TANGENT_0_CONTOUR_FLAG; + if (mergedVertexID > 1.5 && mergedVertexID < 3.5) + contourIDWithFlags |= JOIN_TANGENT_INNER_CONTOUR_FLAG; + } + else if ((contourIDWithFlags & EMULATED_STROKE_CAP_CONTOUR_FLAG) != 0u) + { + // Round caps emulated as joins need to emit vertices at T=0 and T=1, unlike normal + // round joins. Preserve the same number of vertices (the CPU should have given us two + // extra, knowing that we are an emulated cap, and the vertex shader should have already + // accounted for this in radsPerJoinSegment), but adjust our stepping parameters so we + // begin at T=0 and end at T=1. 
+ mergedSegmentCount -= 2.; + mergedVertexID--; + } + radsPerPolarSegment = v_joinArgs.z; // radsPerJoinSegment. + contourIDWithFlags |= + radsPerPolarSegment < .0 ? LEFT_JOIN_CONTOUR_FLAG : RIGHT_JOIN_CONTOUR_FLAG; + } + + float2 tessCoord; + float theta = .0; + if (mergedVertexID == .0 || mergedVertexID == mergedSegmentCount || + (contourIDWithFlags & JOIN_TYPE_MASK) != 0u) + { + // Tessellated vertices at the beginning and end of the strip use exact endpoints and + // tangents. This ensures crack-free seaming between instances. + bool isTan0 = mergedVertexID < mergedSegmentCount * .5; + tessCoord = isTan0 ? p0 : p3; + theta = atan2(isTan0 ? tangents[0] : tangents[1]); + } + else if ((contourIDWithFlags & RETROFITTED_TRIANGLE_CONTOUR_FLAG) != 0u) + { + // This cubic should actually be drawn as the single, non-AA triangle: [p0, p1, p3]. + // This is used to squeeze in more rare triangles, like "grout" triangles from self + // intersections on interior triangulation, where it wouldn't be worth it to put them in + // their own dedicated draw call. + tessCoord = p1; + } + else + { + float T, polarT; + if (parametricSegmentCount == mergedSegmentCount) + { + // There are no polar vertices. This is (probably) a fill. Vertices are spaced evenly in + // parametric space. + T = mergedVertexID / parametricSegmentCount; + polarT = .0; // Set polarT != T to ensure we calculate the parametric tangent later. + } + else + { + // Compute the location and tangent direction of the tessellated stroke vertex with the + // integral id "mergedVertexID", where mergedVertexID is the sorted-order index of + // parametric and polar vertices. Start by finding the tangent function's power basis + // coefficients. These define a tangent direction (scaled by some uniform value) as: + // + // |T^2| + // Tangent_Direction(T) = dx,dy = |A 2B C| * |T | + // |. . .| |1 | + float2 A, B, C = p1 - p0; + float2 D = p3 - p0; + float2 E = p2 - p1; + B = E - C; + A = -3. * E + D; + // FIXME(crbug.com/800804,skbug.com/11268): Consider normalizing the exponents in A,B,C + // at this point in order to prevent fp32 overflow. + + // Now find the coefficients that give a tangent direction from a parametric vertex ID: + // + // |parametricVertexID^2| + // Tangent_Direction(parametricVertexID) = dx,dy = |A B_ C_| * |parametricVertexID | + // |. . .| |1 | + // + float2 B_ = B * (parametricSegmentCount * 2.); + float2 C_ = C * (parametricSegmentCount * parametricSegmentCount); + + // Run a binary search to determine the highest parametric vertex that is located on or + // before the mergedVertexID. A merged ID is determined by the sum of complete + // parametric and polar segments behind it. i.e., find the highest parametric vertex + // where: + // + // parametricVertexID + floor(numPolarSegmentsAtParametricT) <= mergedVertexID + // + float lastParametricVertexID = .0; + float maxParametricVertexID = min(parametricSegmentCount - 1., mergedVertexID); + // FIXME(crbug.com/800804,skbug.com/11268): This normalize() can overflow. + float2 tan0norm = normalize(tangents[0]); + float negAbsRadsPerSegment = -abs(radsPerPolarSegment); + float maxRotation0 = (1. + mergedVertexID) * abs(radsPerPolarSegment); + for (int p = MAX_PARAMETRIC_SEGMENTS_LOG2 - 1; p >= 0; --p) + { + // Test the parametric vertex at lastParametricVertexID + 2^p. 
+                float testParametricID = lastParametricVertexID + exp2(float(p));
+                if (testParametricID <= maxParametricVertexID)
+                {
+                    float2 testTan = testParametricID * A + B_;
+                    testTan = testParametricID * testTan + C_;
+                    float cosRotation = dot(normalize(testTan), tan0norm);
+                    float maxRotation = testParametricID * negAbsRadsPerSegment + maxRotation0;
+                    maxRotation = min(maxRotation, PI);
+                    // Is rotation <= maxRotation? (i.e., is the number of complete polar segments
+                    // behind testT, + testParametricID <= mergedVertexID?)
+                    if (cosRotation >= cos(maxRotation))
+                        lastParametricVertexID = testParametricID;
+                }
+            }
+
+            // Find the T value of the parametric vertex at lastParametricVertexID.
+            float parametricT = lastParametricVertexID / parametricSegmentCount;
+
+            // Now that we've identified the highest parametric vertex on or before the
+            // mergedVertexID, the highest polar vertex is easy:
+            float lastPolarVertexID = mergedVertexID - lastParametricVertexID;
+
+            // Find the angle of tan0, or the angle between tan0norm and the positive x axis.
+            float theta0 = acos(clamp(tan0norm.x, -1., 1.));
+            theta0 = tan0norm.y >= .0 ? theta0 : -theta0;
+
+            // Find the tangent vector on the vertex at lastPolarVertexID.
+            theta = lastPolarVertexID * radsPerPolarSegment + theta0;
+            float2 norm = float2(sin(theta), -cos(theta));
+
+            // Find the T value where the tangent is orthogonal to norm. This is a quadratic:
+            //
+            //     dot(norm, Tangent_Direction(T)) == 0
+            //
+            //                         |T^2|
+            //     norm * |A  2B  C| * |T  | == 0
+            //            |.   .  .|   |1  |
+            //
+            float a = dot(norm, A), b_over_2 = dot(norm, B), c = dot(norm, C);
+            float discr_over_4 = max(b_over_2 * b_over_2 - a * c, .0);
+            float q = sqrt(discr_over_4);
+            if (b_over_2 > .0)
+                q = -q;
+            q -= b_over_2;
+
+            // Roots are q/a and c/q. Since each curve section does not inflect or rotate more than
+            // 180 degrees, there can only be one tangent orthogonal to "norm" inside 0..1. Pick the
+            // root nearest .5.
+            float _5qa = -.5 * q * a;
+            float2 root = (abs(q * q + _5qa) < abs(a * c + _5qa)) ? float2(q, a) : float2(c, q);
+            polarT = (root.y != .0) ? root.x / root.y : .0;
+            polarT = clamp(polarT, .0, 1.);
+
+            // The root finder above can become unstable when lastPolarVertexID == 0 (e.g., if there
+            // are roots at exactly 0 and 1 both). polarT should always == 0 in this case.
+            if (lastPolarVertexID == .0)
+                polarT = .0;
+
+            // Now that we've identified the T values of the last parametric and polar vertices, our
+            // final T value for mergedVertexID is whichever is larger.
+            T = max(parametricT, polarT);
+        }
+
+        // Evaluate the cubic at T. Use De Casteljau's for its accuracy and stability.
+        float2 ab = unchecked_mix(p0, p1, T);
+        float2 bc = unchecked_mix(p1, p2, T);
+        float2 cd = unchecked_mix(p2, p3, T);
+        float2 abc = unchecked_mix(ab, bc, T);
+        float2 bcd = unchecked_mix(bc, cd, T);
+        tessCoord = unchecked_mix(abc, bcd, T);
+
+        // If we went with T=parametricT, then update theta. Otherwise leave it at the polar theta
+        // found previously. (In the event that parametricT == polarT, we keep the polar theta.)
+ if (T != polarT) + theta = atan2(bcd - abc); + } + + EMIT_FRAG_DATA(uint4(floatBitsToUint(float3(tessCoord, theta)), contourIDWithFlags)); +} +#endif diff --git a/Source/ThirdParty/RiveLibrary/Includes/rive/text/raw_text.hpp b/Source/ThirdParty/RiveLibrary/Includes/rive/text/raw_text.hpp index 2d76fb8c..f1e2817f 100644 --- a/Source/ThirdParty/RiveLibrary/Includes/rive/text/raw_text.hpp +++ b/Source/ThirdParty/RiveLibrary/Includes/rive/text/raw_text.hpp @@ -82,6 +82,7 @@ class RawText TextSizing m_sizing = TextSizing::autoWidth; TextOverflow m_overflow = TextOverflow::visible; TextAlign m_align = TextAlign::left; + TextWrap m_wrap = TextWrap::wrap; float m_maxWidth = 0.0f; float m_maxHeight = 0.0f; std::vector m_orderedLines; diff --git a/Source/ThirdParty/RiveLibrary/Includes/rive/text/text.hpp b/Source/ThirdParty/RiveLibrary/Includes/rive/text/text.hpp index 8dbe74a1..c60797d1 100644 --- a/Source/ThirdParty/RiveLibrary/Includes/rive/text/text.hpp +++ b/Source/ThirdParty/RiveLibrary/Includes/rive/text/text.hpp @@ -32,12 +32,6 @@ enum class TextOrigin : uint8_t baseline }; -enum class TextWrap : uint8_t -{ - wrap, - noWrap -}; - class OrderedLine; class TextModifierGroup; @@ -182,7 +176,7 @@ class Text : public TextBase void modifierShapeDirty(); void markPaintDirty(); void update(ComponentDirt value) override; - Mat2D m_fitScale; + Mat2D m_transform; TextSizing sizing() const { return (TextSizing)sizingValue(); } TextSizing effectiveSizing() const @@ -192,6 +186,7 @@ class Text : public TextBase TextOverflow overflow() const { return (TextOverflow)overflowValue(); } TextOrigin textOrigin() const { return (TextOrigin)originValue(); } TextWrap wrap() const { return (TextWrap)wrapValue(); } + VerticalTextAlign verticalAlign() const { return (VerticalTextAlign)verticalAlignValue(); } void overflow(TextOverflow value) { return overflowValue((uint32_t)value); } void buildRenderStyles(); const TextStyle* styleFromShaperId(uint16_t id) const; @@ -211,7 +206,8 @@ class Text : public TextBase const std::vector& runs() const { return m_runs; } static SimpleArray> BreakLines(const SimpleArray& paragraphs, float width, - TextAlign align); + TextAlign align, + TextWrap wrap); #endif bool haveModifiers() const diff --git a/Source/ThirdParty/RiveLibrary/Includes/rive/text_engine.hpp b/Source/ThirdParty/RiveLibrary/Includes/rive/text_engine.hpp index a0ccd637..30acd7dd 100644 --- a/Source/ThirdParty/RiveLibrary/Includes/rive/text_engine.hpp +++ b/Source/ThirdParty/RiveLibrary/Includes/rive/text_engine.hpp @@ -38,6 +38,21 @@ enum class TextAlign : uint8_t center = 2 }; +// The wrap mode. +enum class TextWrap : uint8_t +{ + wrap = 0, + noWrap = 1 +}; + +// The alignment of each word wrapped line in a paragraph. +enum class VerticalTextAlign : uint8_t +{ + top = 0, + bottom = 1, + middle = 2 +}; + // A horizontal line of text within a paragraph, after line-breaking. 
struct GlyphLine { diff --git a/Source/ThirdParty/RiveLibrary/Includes/rive/viewmodel/viewmodel_instance_trigger.hpp b/Source/ThirdParty/RiveLibrary/Includes/rive/viewmodel/viewmodel_instance_trigger.hpp new file mode 100644 index 00000000..38a84696 --- /dev/null +++ b/Source/ThirdParty/RiveLibrary/Includes/rive/viewmodel/viewmodel_instance_trigger.hpp @@ -0,0 +1,23 @@ +#ifndef _RIVE_VIEW_MODEL_INSTANCE_TRIGGER_HPP_ +#define _RIVE_VIEW_MODEL_INSTANCE_TRIGGER_HPP_ +#include "rive/generated/viewmodel/viewmodel_instance_trigger_base.hpp" +#include +namespace rive +{ +#ifdef WITH_RIVE_TOOLS +class ViewModelInstanceTrigger; +typedef void (*ViewModelTriggerChanged)(ViewModelInstanceTrigger* vmi, uint32_t value); +#endif +class ViewModelInstanceTrigger : public ViewModelInstanceTriggerBase +{ +protected: + void propertyValueChanged() override; +#ifdef WITH_RIVE_TOOLS +public: + void onChanged(ViewModelTriggerChanged callback) { m_changedCallback = callback; } + ViewModelTriggerChanged m_changedCallback = nullptr; +#endif +}; +} // namespace rive + +#endif \ No newline at end of file diff --git a/Source/ThirdParty/RiveLibrary/Includes/rive/viewmodel/viewmodel_property_trigger.hpp b/Source/ThirdParty/RiveLibrary/Includes/rive/viewmodel/viewmodel_property_trigger.hpp new file mode 100644 index 00000000..794618d1 --- /dev/null +++ b/Source/ThirdParty/RiveLibrary/Includes/rive/viewmodel/viewmodel_property_trigger.hpp @@ -0,0 +1,13 @@ +#ifndef _RIVE_VIEW_MODEL_PROPERTY_TRIGGER_HPP_ +#define _RIVE_VIEW_MODEL_PROPERTY_TRIGGER_HPP_ +#include "rive/generated/viewmodel/viewmodel_property_trigger_base.hpp" +#include +namespace rive +{ +class ViewModelPropertyTrigger : public ViewModelPropertyTriggerBase +{ +public: +}; +} // namespace rive + +#endif \ No newline at end of file diff --git a/Source/ThirdParty/RiveLibrary/Includes/webp/decode.h b/Source/ThirdParty/RiveLibrary/Includes/webp/decode.h new file mode 100644 index 00000000..d6895f5c --- /dev/null +++ b/Source/ThirdParty/RiveLibrary/Includes/webp/decode.h @@ -0,0 +1,506 @@ +// Copyright 2010 Google Inc. All Rights Reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the COPYING file in the root of the source +// tree. An additional intellectual property rights grant can be found +// in the file PATENTS. All contributing project authors may +// be found in the AUTHORS file in the root of the source tree. +// ----------------------------------------------------------------------------- +// +// Main decoding functions for WebP images. +// +// Author: Skal (pascal.massimino@gmail.com) + +#ifndef WEBP_WEBP_DECODE_H_ +#define WEBP_WEBP_DECODE_H_ + +#include "./types.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#define WEBP_DECODER_ABI_VERSION 0x0209 // MAJOR(8b) + MINOR(8b) + +// Note: forward declaring enumerations is not allowed in (strict) C and C++, +// the types are left here for reference. +// typedef enum VP8StatusCode VP8StatusCode; +// typedef enum WEBP_CSP_MODE WEBP_CSP_MODE; +typedef struct WebPRGBABuffer WebPRGBABuffer; +typedef struct WebPYUVABuffer WebPYUVABuffer; +typedef struct WebPDecBuffer WebPDecBuffer; +typedef struct WebPIDecoder WebPIDecoder; +typedef struct WebPBitstreamFeatures WebPBitstreamFeatures; +typedef struct WebPDecoderOptions WebPDecoderOptions; +typedef struct WebPDecoderConfig WebPDecoderConfig; + +// Return the decoder's version number, packed in hexadecimal using 8bits for +// each of major/minor/revision. E.g: v2.5.7 is 0x020507. 
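+// A decoding sketch (follows from the packing described above):
+//   int v = WebPGetDecoderVersion();
+//   int major = (v >> 16) & 0xff, minor = (v >> 8) & 0xff, revision = v & 0xff;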
+WEBP_EXTERN int WebPGetDecoderVersion(void); + +// Retrieve basic header information: width, height. +// This function will also validate the header, returning true on success, +// false otherwise. '*width' and '*height' are only valid on successful return. +// Pointers 'width' and 'height' can be passed NULL if deemed irrelevant. +// Note: The following chunk sequences (before the raw VP8/VP8L data) are +// considered valid by this function: +// RIFF + VP8(L) +// RIFF + VP8X + (optional chunks) + VP8(L) +// ALPH + VP8 <-- Not a valid WebP format: only allowed for internal purpose. +// VP8(L) <-- Not a valid WebP format: only allowed for internal purpose. +WEBP_NODISCARD WEBP_EXTERN int WebPGetInfo( + const uint8_t* data, size_t data_size, int* width, int* height); + +// Decodes WebP images pointed to by 'data' and returns RGBA samples, along +// with the dimensions in *width and *height. The ordering of samples in +// memory is R, G, B, A, R, G, B, A... in scan order (endian-independent). +// The returned pointer should be deleted calling WebPFree(). +// Returns NULL in case of error. +WEBP_NODISCARD WEBP_EXTERN uint8_t* WebPDecodeRGBA( + const uint8_t* data, size_t data_size, int* width, int* height); + +// Same as WebPDecodeRGBA, but returning A, R, G, B, A, R, G, B... ordered data. +WEBP_NODISCARD WEBP_EXTERN uint8_t* WebPDecodeARGB( + const uint8_t* data, size_t data_size, int* width, int* height); + +// Same as WebPDecodeRGBA, but returning B, G, R, A, B, G, R, A... ordered data. +WEBP_NODISCARD WEBP_EXTERN uint8_t* WebPDecodeBGRA( + const uint8_t* data, size_t data_size, int* width, int* height); + +// Same as WebPDecodeRGBA, but returning R, G, B, R, G, B... ordered data. +// If the bitstream contains transparency, it is ignored. +WEBP_NODISCARD WEBP_EXTERN uint8_t* WebPDecodeRGB( + const uint8_t* data, size_t data_size, int* width, int* height); + +// Same as WebPDecodeRGB, but returning B, G, R, B, G, R... ordered data. +WEBP_NODISCARD WEBP_EXTERN uint8_t* WebPDecodeBGR( + const uint8_t* data, size_t data_size, int* width, int* height); + +// Decode WebP images pointed to by 'data' to Y'UV format(*). The pointer +// returned is the Y samples buffer. Upon return, *u and *v will point to +// the U and V chroma data. These U and V buffers need NOT be passed to +// WebPFree(), unlike the returned Y luma one. The dimension of the U and V +// planes are both (*width + 1) / 2 and (*height + 1) / 2. +// Upon return, the Y buffer has a stride returned as '*stride', while U and V +// have a common stride returned as '*uv_stride'. +// 'width' and 'height' may be NULL, the other pointers must not be. +// Returns NULL in case of error. +// (*) Also named Y'CbCr. See: https://en.wikipedia.org/wiki/YCbCr +WEBP_NODISCARD WEBP_EXTERN uint8_t* WebPDecodeYUV( + const uint8_t* data, size_t data_size, int* width, int* height, + uint8_t** u, uint8_t** v, int* stride, int* uv_stride); + +// These five functions are variants of the above ones, that decode the image +// directly into a pre-allocated buffer 'output_buffer'. The maximum storage +// available in this buffer is indicated by 'output_buffer_size'. If this +// storage is not sufficient (or an error occurred), NULL is returned. +// Otherwise, output_buffer is returned, for convenience. +// The parameter 'output_stride' specifies the distance (in bytes) +// between scanlines. Hence, output_buffer_size is expected to be at least +// output_stride x picture-height. 
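+// A minimal sketch ('width', 'height' and 'buf' are the caller's own, not
+// libwebp API), decoding RGBA at 4 bytes per pixel:
+//   int stride = 4 * width;
+//   uint8_t* buf = (uint8_t*)malloc((size_t)stride * height);
+//   WebPDecodeRGBAInto(data, data_size, buf, (size_t)stride * height, stride);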
+WEBP_NODISCARD WEBP_EXTERN uint8_t* WebPDecodeRGBAInto( + const uint8_t* data, size_t data_size, + uint8_t* output_buffer, size_t output_buffer_size, int output_stride); +WEBP_NODISCARD WEBP_EXTERN uint8_t* WebPDecodeARGBInto( + const uint8_t* data, size_t data_size, + uint8_t* output_buffer, size_t output_buffer_size, int output_stride); +WEBP_NODISCARD WEBP_EXTERN uint8_t* WebPDecodeBGRAInto( + const uint8_t* data, size_t data_size, + uint8_t* output_buffer, size_t output_buffer_size, int output_stride); + +// RGB and BGR variants. Here too the transparency information, if present, +// will be dropped and ignored. +WEBP_NODISCARD WEBP_EXTERN uint8_t* WebPDecodeRGBInto( + const uint8_t* data, size_t data_size, + uint8_t* output_buffer, size_t output_buffer_size, int output_stride); +WEBP_NODISCARD WEBP_EXTERN uint8_t* WebPDecodeBGRInto( + const uint8_t* data, size_t data_size, + uint8_t* output_buffer, size_t output_buffer_size, int output_stride); + +// WebPDecodeYUVInto() is a variant of WebPDecodeYUV() that operates directly +// into pre-allocated luma/chroma plane buffers. This function requires the +// strides to be passed: one for the luma plane and one for each of the +// chroma ones. The size of each plane buffer is passed as 'luma_size', +// 'u_size' and 'v_size' respectively. +// Pointer to the luma plane ('*luma') is returned or NULL if an error occurred +// during decoding (or because some buffers were found to be too small). +WEBP_NODISCARD WEBP_EXTERN uint8_t* WebPDecodeYUVInto( + const uint8_t* data, size_t data_size, + uint8_t* luma, size_t luma_size, int luma_stride, + uint8_t* u, size_t u_size, int u_stride, + uint8_t* v, size_t v_size, int v_stride); + +//------------------------------------------------------------------------------ +// Output colorspaces and buffer + +// Colorspaces +// Note: the naming describes the byte-ordering of packed samples in memory. +// For instance, MODE_BGRA relates to samples ordered as B,G,R,A,B,G,R,A,... +// Non-capital names (e.g.:MODE_Argb) relates to pre-multiplied RGB channels. +// RGBA-4444 and RGB-565 colorspaces are represented by following byte-order: +// RGBA-4444: [r3 r2 r1 r0 g3 g2 g1 g0], [b3 b2 b1 b0 a3 a2 a1 a0], ... +// RGB-565: [r4 r3 r2 r1 r0 g5 g4 g3], [g2 g1 g0 b4 b3 b2 b1 b0], ... +// In the case WEBP_SWAP_16BITS_CSP is defined, the bytes are swapped for +// these two modes: +// RGBA-4444: [b3 b2 b1 b0 a3 a2 a1 a0], [r3 r2 r1 r0 g3 g2 g1 g0], ... +// RGB-565: [g2 g1 g0 b4 b3 b2 b1 b0], [r4 r3 r2 r1 r0 g5 g4 g3], ... + +typedef enum WEBP_CSP_MODE { + MODE_RGB = 0, MODE_RGBA = 1, + MODE_BGR = 2, MODE_BGRA = 3, + MODE_ARGB = 4, MODE_RGBA_4444 = 5, + MODE_RGB_565 = 6, + // RGB-premultiplied transparent modes (alpha value is preserved) + MODE_rgbA = 7, + MODE_bgrA = 8, + MODE_Argb = 9, + MODE_rgbA_4444 = 10, + // YUV modes must come after RGB ones. 
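+  // (WebPIsRGBMode() below depends on this ordering: it simply tests
+  // 'mode < MODE_YUV'.)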
+ MODE_YUV = 11, MODE_YUVA = 12, // yuv 4:2:0 + MODE_LAST = 13 +} WEBP_CSP_MODE; + +// Some useful macros: +static WEBP_INLINE int WebPIsPremultipliedMode(WEBP_CSP_MODE mode) { + return (mode == MODE_rgbA || mode == MODE_bgrA || mode == MODE_Argb || + mode == MODE_rgbA_4444); +} + +static WEBP_INLINE int WebPIsAlphaMode(WEBP_CSP_MODE mode) { + return (mode == MODE_RGBA || mode == MODE_BGRA || mode == MODE_ARGB || + mode == MODE_RGBA_4444 || mode == MODE_YUVA || + WebPIsPremultipliedMode(mode)); +} + +static WEBP_INLINE int WebPIsRGBMode(WEBP_CSP_MODE mode) { + return (mode < MODE_YUV); +} + +//------------------------------------------------------------------------------ +// WebPDecBuffer: Generic structure for describing the output sample buffer. + +struct WebPRGBABuffer { // view as RGBA + uint8_t* rgba; // pointer to RGBA samples + int stride; // stride in bytes from one scanline to the next. + size_t size; // total size of the *rgba buffer. +}; + +struct WebPYUVABuffer { // view as YUVA + uint8_t* y, *u, *v, *a; // pointer to luma, chroma U/V, alpha samples + int y_stride; // luma stride + int u_stride, v_stride; // chroma strides + int a_stride; // alpha stride + size_t y_size; // luma plane size + size_t u_size, v_size; // chroma planes size + size_t a_size; // alpha-plane size +}; + +// Output buffer +struct WebPDecBuffer { + WEBP_CSP_MODE colorspace; // Colorspace. + int width, height; // Dimensions. + int is_external_memory; // If non-zero, 'internal_memory' pointer is not + // used. If value is '2' or more, the external + // memory is considered 'slow' and multiple + // read/write will be avoided. + union { + WebPRGBABuffer RGBA; + WebPYUVABuffer YUVA; + } u; // Nameless union of buffer parameters. + uint32_t pad[4]; // padding for later use + + uint8_t* private_memory; // Internally allocated memory (only when + // is_external_memory is 0). Should not be used + // externally, but accessed via the buffer union. +}; + +// Internal, version-checked, entry point +WEBP_NODISCARD WEBP_EXTERN int WebPInitDecBufferInternal(WebPDecBuffer*, int); + +// Initialize the structure as empty. Must be called before any other use. +// Returns false in case of version mismatch +WEBP_NODISCARD static WEBP_INLINE int WebPInitDecBuffer(WebPDecBuffer* buffer) { + return WebPInitDecBufferInternal(buffer, WEBP_DECODER_ABI_VERSION); +} + +// Free any memory associated with the buffer. Must always be called last. +// Note: doesn't free the 'buffer' structure itself. +WEBP_EXTERN void WebPFreeDecBuffer(WebPDecBuffer* buffer); + +//------------------------------------------------------------------------------ +// Enumeration of the status codes + +typedef enum WEBP_NODISCARD VP8StatusCode { + VP8_STATUS_OK = 0, + VP8_STATUS_OUT_OF_MEMORY, + VP8_STATUS_INVALID_PARAM, + VP8_STATUS_BITSTREAM_ERROR, + VP8_STATUS_UNSUPPORTED_FEATURE, + VP8_STATUS_SUSPENDED, + VP8_STATUS_USER_ABORT, + VP8_STATUS_NOT_ENOUGH_DATA +} VP8StatusCode; + +//------------------------------------------------------------------------------ +// Incremental decoding +// +// This API allows streamlined decoding of partial data. +// Picture can be incrementally decoded as data become available thanks to the +// WebPIDecoder object. This object can be left in a SUSPENDED state if the +// picture is only partially decoded, pending additional input. +// Code example: +/* + WebPInitDecBuffer(&output_buffer); + output_buffer.colorspace = mode; + ... 
+  WebPIDecoder* idec = WebPINewDecoder(&output_buffer);
+  while (additional_data_is_available) {
+    // ... (get additional data in some new_data[] buffer)
+    status = WebPIAppend(idec, new_data, new_data_size);
+    if (status != VP8_STATUS_OK && status != VP8_STATUS_SUSPENDED) {
+      break;    // an error occurred.
+    }
+
+    // The above call decodes the current available buffer.
+    // Part of the image can now be refreshed by calling
+    // WebPIDecGetRGB()/WebPIDecGetYUVA() etc.
+  }
+  WebPIDelete(idec);
+*/
+
+// Creates a new incremental decoder with the supplied buffer parameter.
+// This output_buffer can be passed NULL, in which case a default output buffer
+// is used (with MODE_RGB). Otherwise, an internal reference to 'output_buffer'
+// is kept, which means that the lifespan of 'output_buffer' must be larger than
+// that of the returned WebPIDecoder object.
+// The supplied 'output_buffer' content MUST NOT be changed between calls to
+// WebPIAppend() or WebPIUpdate() unless 'output_buffer.is_external_memory' is
+// not set to 0. In such a case, it is allowed to modify the pointers, size and
+// stride of output_buffer.u.RGBA or output_buffer.u.YUVA, provided they remain
+// within valid bounds.
+// All other fields of WebPDecBuffer MUST remain constant between calls.
+// Returns NULL if the allocation failed.
+WEBP_NODISCARD WEBP_EXTERN WebPIDecoder* WebPINewDecoder(
+    WebPDecBuffer* output_buffer);
+
+// This function allocates and initializes an incremental-decoder object, which
+// will output the RGB/A samples specified by 'csp' into a preallocated
+// buffer 'output_buffer'. The size of this buffer is at least
+// 'output_buffer_size' and the stride (distance in bytes between two scanlines)
+// is specified by 'output_stride'.
+// Additionally, output_buffer can be passed NULL in which case the output
+// buffer will be allocated automatically when the decoding starts. The
+// colorspace 'csp' is taken into account for allocating this buffer. All other
+// parameters are ignored.
+// Returns NULL if the allocation failed, or if some parameters are invalid.
+WEBP_NODISCARD WEBP_EXTERN WebPIDecoder* WebPINewRGB(
+    WEBP_CSP_MODE csp,
+    uint8_t* output_buffer, size_t output_buffer_size, int output_stride);
+
+// This function allocates and initializes an incremental-decoder object, which
+// will output the raw luma/chroma samples into preallocated planes if
+// supplied. The luma plane is specified by its pointer 'luma', its size
+// 'luma_size' and its stride 'luma_stride'. Similarly, the chroma-u plane
+// is specified by the 'u', 'u_size' and 'u_stride' parameters, and the chroma-v
+// plane by 'v' and 'v_size'. And same for the alpha-plane. The 'a' pointer
+// can be passed NULL in case one is not interested in the transparency plane.
+// Conversely, 'luma' can be passed NULL if no preallocated planes are supplied.
+// In this case, the output buffer will be automatically allocated (using
+// MODE_YUVA) when decoding starts. All parameters are then ignored.
+// Returns NULL if the allocation failed or if a parameter is invalid.
+WEBP_NODISCARD WEBP_EXTERN WebPIDecoder* WebPINewYUVA(
+    uint8_t* luma, size_t luma_size, int luma_stride,
+    uint8_t* u, size_t u_size, int u_stride,
+    uint8_t* v, size_t v_size, int v_stride,
+    uint8_t* a, size_t a_size, int a_stride);
+
+// Deprecated version of the above, without the alpha plane.
+// Kept for backward compatibility.
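+// New code should prefer WebPINewYUVA() above.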
+WEBP_NODISCARD WEBP_EXTERN WebPIDecoder* WebPINewYUV( + uint8_t* luma, size_t luma_size, int luma_stride, + uint8_t* u, size_t u_size, int u_stride, + uint8_t* v, size_t v_size, int v_stride); + +// Deletes the WebPIDecoder object and associated memory. Must always be called +// if WebPINewDecoder, WebPINewRGB or WebPINewYUV succeeded. +WEBP_EXTERN void WebPIDelete(WebPIDecoder* idec); + +// Copies and decodes the next available data. Returns VP8_STATUS_OK when +// the image is successfully decoded. Returns VP8_STATUS_SUSPENDED when more +// data is expected. Returns error in other cases. +WEBP_EXTERN VP8StatusCode WebPIAppend( + WebPIDecoder* idec, const uint8_t* data, size_t data_size); + +// A variant of the above function to be used when data buffer contains +// partial data from the beginning. In this case data buffer is not copied +// to the internal memory. +// Note that the value of the 'data' pointer can change between calls to +// WebPIUpdate, for instance when the data buffer is resized to fit larger data. +WEBP_EXTERN VP8StatusCode WebPIUpdate( + WebPIDecoder* idec, const uint8_t* data, size_t data_size); + +// Returns the RGB/A image decoded so far. Returns NULL if output params +// are not initialized yet. The RGB/A output type corresponds to the colorspace +// specified during call to WebPINewDecoder() or WebPINewRGB(). +// *last_y is the index of last decoded row in raster scan order. Some pointers +// (*last_y, *width etc.) can be NULL if corresponding information is not +// needed. The values in these pointers are only valid on successful (non-NULL) +// return. +WEBP_NODISCARD WEBP_EXTERN uint8_t* WebPIDecGetRGB( + const WebPIDecoder* idec, int* last_y, + int* width, int* height, int* stride); + +// Same as above function to get a YUVA image. Returns pointer to the luma +// plane or NULL in case of error. If there is no alpha information +// the alpha pointer '*a' will be returned NULL. +WEBP_NODISCARD WEBP_EXTERN uint8_t* WebPIDecGetYUVA( + const WebPIDecoder* idec, int* last_y, + uint8_t** u, uint8_t** v, uint8_t** a, + int* width, int* height, int* stride, int* uv_stride, int* a_stride); + +// Deprecated alpha-less version of WebPIDecGetYUVA(): it will ignore the +// alpha information (if present). Kept for backward compatibility. +WEBP_NODISCARD static WEBP_INLINE uint8_t* WebPIDecGetYUV( + const WebPIDecoder* idec, int* last_y, uint8_t** u, uint8_t** v, + int* width, int* height, int* stride, int* uv_stride) { + return WebPIDecGetYUVA(idec, last_y, u, v, NULL, width, height, + stride, uv_stride, NULL); +} + +// Generic call to retrieve information about the displayable area. +// If non NULL, the left/right/width/height pointers are filled with the visible +// rectangular area so far. +// Returns NULL in case the incremental decoder object is in an invalid state. +// Otherwise returns the pointer to the internal representation. This structure +// is read-only, tied to WebPIDecoder's lifespan and should not be modified. +WEBP_NODISCARD WEBP_EXTERN const WebPDecBuffer* WebPIDecodedArea( + const WebPIDecoder* idec, int* left, int* top, int* width, int* height); + +//------------------------------------------------------------------------------ +// Advanced decoding parametrization +// +// Code sample for using the advanced decoding API +/* + // A) Init a configuration object + WebPDecoderConfig config; + CHECK(WebPInitDecoderConfig(&config)); + + // B) optional: retrieve the bitstream's features. 
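+  // (CHECK() stands in for whatever assertion macro the host codebase
+  // provides; it is not part of the libwebp API.)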
+ CHECK(WebPGetFeatures(data, data_size, &config.input) == VP8_STATUS_OK); + + // C) Adjust 'config', if needed + config.options.no_fancy_upsampling = 1; + config.output.colorspace = MODE_BGRA; + // etc. + + // Note that you can also make config.output point to an externally + // supplied memory buffer, provided it's big enough to store the decoded + // picture. Otherwise, config.output will just be used to allocate memory + // and store the decoded picture. + + // D) Decode! + CHECK(WebPDecode(data, data_size, &config) == VP8_STATUS_OK); + + // E) Decoded image is now in config.output (and config.output.u.RGBA) + + // F) Reclaim memory allocated in config's object. It's safe to call + // this function even if the memory is external and wasn't allocated + // by WebPDecode(). + WebPFreeDecBuffer(&config.output); +*/ + +// Features gathered from the bitstream +struct WebPBitstreamFeatures { + int width; // Width in pixels, as read from the bitstream. + int height; // Height in pixels, as read from the bitstream. + int has_alpha; // True if the bitstream contains an alpha channel. + int has_animation; // True if the bitstream is an animation. + int format; // 0 = undefined (/mixed), 1 = lossy, 2 = lossless + + uint32_t pad[5]; // padding for later use +}; + +// Internal, version-checked, entry point +WEBP_EXTERN VP8StatusCode WebPGetFeaturesInternal( + const uint8_t*, size_t, WebPBitstreamFeatures*, int); + +// Retrieve features from the bitstream. The *features structure is filled +// with information gathered from the bitstream. +// Returns VP8_STATUS_OK when the features are successfully retrieved. Returns +// VP8_STATUS_NOT_ENOUGH_DATA when more data is needed to retrieve the +// features from headers. Returns error in other cases. +// Note: The following chunk sequences (before the raw VP8/VP8L data) are +// considered valid by this function: +// RIFF + VP8(L) +// RIFF + VP8X + (optional chunks) + VP8(L) +// ALPH + VP8 <-- Not a valid WebP format: only allowed for internal purpose. +// VP8(L) <-- Not a valid WebP format: only allowed for internal purpose. +static WEBP_INLINE VP8StatusCode WebPGetFeatures( + const uint8_t* data, size_t data_size, + WebPBitstreamFeatures* features) { + return WebPGetFeaturesInternal(data, data_size, features, + WEBP_DECODER_ABI_VERSION); +} + +// Decoding options +struct WebPDecoderOptions { + int bypass_filtering; // if true, skip the in-loop filtering + int no_fancy_upsampling; // if true, use faster pointwise upsampler + int use_cropping; // if true, cropping is applied _first_ + int crop_left, crop_top; // top-left position for cropping. + // Will be snapped to even values. + int crop_width, crop_height; // dimension of the cropping area + int use_scaling; // if true, scaling is applied _afterward_ + int scaled_width, scaled_height; // final resolution + int use_threads; // if true, use multi-threaded decoding + int dithering_strength; // dithering strength (0=Off, 100=full) + int flip; // if true, flip output vertically + int alpha_dithering_strength; // alpha dithering strength in [0..100] + + uint32_t pad[5]; // padding for later use +}; + +// Main object storing the configuration for advanced decoding. 
+struct WebPDecoderConfig {
+  WebPBitstreamFeatures input;  // Immutable bitstream features (optional)
+  WebPDecBuffer output;         // Output buffer (can point to external mem)
+  WebPDecoderOptions options;   // Decoding options
+};
+
+// Internal, version-checked, entry point
+WEBP_NODISCARD WEBP_EXTERN int WebPInitDecoderConfigInternal(WebPDecoderConfig*,
+                                                             int);
+
+// Initialize the configuration as empty. This function must always be
+// called first, unless WebPGetFeatures() is to be called.
+// Returns false in case of mismatched version.
+WEBP_NODISCARD static WEBP_INLINE int WebPInitDecoderConfig(
+    WebPDecoderConfig* config) {
+  return WebPInitDecoderConfigInternal(config, WEBP_DECODER_ABI_VERSION);
+}
+
+// Instantiate a new incremental decoder object with the requested
+// configuration. The bitstream can be passed using 'data' and 'data_size'
+// parameter, in which case the features will be parsed and stored into
+// config->input. Otherwise, 'data' can be NULL and no parsing will occur.
+// Note that 'config' can be NULL too, in which case a default configuration
+// is used. If 'config' is not NULL, it must outlive the WebPIDecoder object
+// as some references to its fields will be used. No internal copy of 'config'
+// is made.
+// The returned WebPIDecoder object must always be deleted by calling
+// WebPIDelete().
+// Returns NULL in case of error (and config->status will then reflect
+// the error condition, if available).
+WEBP_NODISCARD WEBP_EXTERN WebPIDecoder* WebPIDecode(
+    const uint8_t* data, size_t data_size, WebPDecoderConfig* config);
+
+// Non-incremental version. This version decodes the full data at once, taking
+// 'config' into account. Returns decoding status (which should be VP8_STATUS_OK
+// if the decoding was successful). Note that 'config' cannot be NULL.
+WEBP_EXTERN VP8StatusCode WebPDecode(const uint8_t* data, size_t data_size,
+                                     WebPDecoderConfig* config);
+
+#ifdef __cplusplus
+}    // extern "C"
+#endif
+
+#endif  // WEBP_WEBP_DECODE_H_
diff --git a/Source/ThirdParty/RiveLibrary/Includes/webp/demux.h b/Source/ThirdParty/RiveLibrary/Includes/webp/demux.h
new file mode 100644
index 00000000..8d246550
--- /dev/null
+++ b/Source/ThirdParty/RiveLibrary/Includes/webp/demux.h
@@ -0,0 +1,367 @@
+// Copyright 2012 Google Inc. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the COPYING file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+// -----------------------------------------------------------------------------
+//
+// Demux API.
+// Enables extraction of image and extended format data from WebP files.
+
+// Code Example: Demuxing WebP data to extract all the frames, ICC profile
+// and EXIF/XMP metadata.
+/*
+  WebPDemuxer* demux = WebPDemux(&webp_data);
+
+  uint32_t width = WebPDemuxGetI(demux, WEBP_FF_CANVAS_WIDTH);
+  uint32_t height = WebPDemuxGetI(demux, WEBP_FF_CANVAS_HEIGHT);
+  // ... (Get information about the features present in the WebP file).
+  uint32_t flags = WebPDemuxGetI(demux, WEBP_FF_FORMAT_FLAGS);
+
+  // ... (Iterate over all frames).
+  WebPIterator iter;
+  if (WebPDemuxGetFrame(demux, 1, &iter)) {
+    do {
+      // ... (Consume 'iter'; e.g. Decode 'iter.fragment' with WebPDecode(),
+      // ... and get other frame properties like width, height, offsets etc.
+      // ... see 'struct WebPIterator' below for more info).
+ } while (WebPDemuxNextFrame(&iter)); + WebPDemuxReleaseIterator(&iter); + } + + // ... (Extract metadata). + WebPChunkIterator chunk_iter; + if (flags & ICCP_FLAG) WebPDemuxGetChunk(demux, "ICCP", 1, &chunk_iter); + // ... (Consume the ICC profile in 'chunk_iter.chunk'). + WebPDemuxReleaseChunkIterator(&chunk_iter); + if (flags & EXIF_FLAG) WebPDemuxGetChunk(demux, "EXIF", 1, &chunk_iter); + // ... (Consume the EXIF metadata in 'chunk_iter.chunk'). + WebPDemuxReleaseChunkIterator(&chunk_iter); + if (flags & XMP_FLAG) WebPDemuxGetChunk(demux, "XMP ", 1, &chunk_iter); + // ... (Consume the XMP metadata in 'chunk_iter.chunk'). + WebPDemuxReleaseChunkIterator(&chunk_iter); + WebPDemuxDelete(demux); +*/ + +#ifndef WEBP_WEBP_DEMUX_H_ +#define WEBP_WEBP_DEMUX_H_ + +#include "./decode.h" // for WEBP_CSP_MODE +#include "./mux_types.h" +#include "./types.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#define WEBP_DEMUX_ABI_VERSION 0x0107 // MAJOR(8b) + MINOR(8b) + +// Note: forward declaring enumerations is not allowed in (strict) C and C++, +// the types are left here for reference. +// typedef enum WebPDemuxState WebPDemuxState; +// typedef enum WebPFormatFeature WebPFormatFeature; +typedef struct WebPDemuxer WebPDemuxer; +typedef struct WebPIterator WebPIterator; +typedef struct WebPChunkIterator WebPChunkIterator; +typedef struct WebPAnimInfo WebPAnimInfo; +typedef struct WebPAnimDecoderOptions WebPAnimDecoderOptions; + +//------------------------------------------------------------------------------ + +// Returns the version number of the demux library, packed in hexadecimal using +// 8bits for each of major/minor/revision. E.g: v2.5.7 is 0x020507. +WEBP_EXTERN int WebPGetDemuxVersion(void); + +//------------------------------------------------------------------------------ +// Life of a Demux object + +typedef enum WebPDemuxState { + WEBP_DEMUX_PARSE_ERROR = -1, // An error occurred while parsing. + WEBP_DEMUX_PARSING_HEADER = 0, // Not enough data to parse full header. + WEBP_DEMUX_PARSED_HEADER = 1, // Header parsing complete, + // data may be available. + WEBP_DEMUX_DONE = 2 // Entire file has been parsed. +} WebPDemuxState; + +// Internal, version-checked, entry point +WEBP_NODISCARD WEBP_EXTERN WebPDemuxer* WebPDemuxInternal( + const WebPData*, int, WebPDemuxState*, int); + +// Parses the full WebP file given by 'data'. For single images the WebP file +// header alone or the file header and the chunk header may be absent. +// Returns a WebPDemuxer object on successful parse, NULL otherwise. +WEBP_NODISCARD static WEBP_INLINE WebPDemuxer* WebPDemux(const WebPData* data) { + return WebPDemuxInternal(data, 0, NULL, WEBP_DEMUX_ABI_VERSION); +} + +// Parses the possibly incomplete WebP file given by 'data'. +// If 'state' is non-NULL it will be set to indicate the status of the demuxer. +// Returns NULL in case of error or if there isn't enough data to start parsing; +// and a WebPDemuxer object on successful parse. +// Note that WebPDemuxer keeps internal pointers to 'data' memory segment. +// If this data is volatile, the demuxer object should be deleted (by calling +// WebPDemuxDelete()) and WebPDemuxPartial() called again on the new data. +// This is usually an inexpensive operation. +WEBP_NODISCARD static WEBP_INLINE WebPDemuxer* WebPDemuxPartial( + const WebPData* data, WebPDemuxState* state) { + return WebPDemuxInternal(data, 1, state, WEBP_DEMUX_ABI_VERSION); +} + +// Frees memory associated with 'dmux'. 
+WEBP_EXTERN void WebPDemuxDelete(WebPDemuxer* dmux);
+
+//------------------------------------------------------------------------------
+// Data/information extraction.
+
+typedef enum WebPFormatFeature {
+  WEBP_FF_FORMAT_FLAGS,      // bit-wise combination of WebPFeatureFlags
+                             // corresponding to the 'VP8X' chunk (if present).
+  WEBP_FF_CANVAS_WIDTH,
+  WEBP_FF_CANVAS_HEIGHT,
+  WEBP_FF_LOOP_COUNT,        // only relevant for animated files
+  WEBP_FF_BACKGROUND_COLOR,  // idem.
+  WEBP_FF_FRAME_COUNT        // Number of frames present in the demux object.
+                             // In case of a partial demux, this is the number
+                             // of frames seen so far, with the last frame
+                             // possibly being partial.
+} WebPFormatFeature;
+
+// Get the 'feature' value from the 'dmux'.
+// NOTE: values are only valid if WebPDemux() was used or WebPDemuxPartial()
+// returned a state > WEBP_DEMUX_PARSING_HEADER.
+// If 'feature' is WEBP_FF_FORMAT_FLAGS, the returned value is a bit-wise
+// combination of WebPFeatureFlags values.
+// If 'feature' is WEBP_FF_LOOP_COUNT or WEBP_FF_BACKGROUND_COLOR, the returned
+// value is only meaningful if the bitstream is animated.
+WEBP_EXTERN uint32_t WebPDemuxGetI(
+    const WebPDemuxer* dmux, WebPFormatFeature feature);
+
+//------------------------------------------------------------------------------
+// Frame iteration.
+
+struct WebPIterator {
+  int frame_num;
+  int num_frames;          // equivalent to WEBP_FF_FRAME_COUNT.
+  int x_offset, y_offset;  // offset relative to the canvas.
+  int width, height;       // dimensions of this frame.
+  int duration;            // display duration in milliseconds.
+  WebPMuxAnimDispose dispose_method;  // dispose method for the frame.
+  int complete;      // true if 'fragment' contains a full frame. Partial images
+                     // may still be decoded with the WebP incremental decoder.
+  WebPData fragment;  // The frame given by 'frame_num'. Note for historical
+                      // reasons this is called a fragment.
+  int has_alpha;      // True if the frame contains transparency.
+  WebPMuxAnimBlend blend_method;  // Blend operation for the frame.
+
+  uint32_t pad[2];  // padding for later use.
+  void* private_;   // for internal use only.
+};
+
+// Retrieves frame 'frame_number' from 'dmux'.
+// 'iter->fragment' points to the frame on return from this function.
+// Setting 'frame_number' equal to 0 will return the last frame of the image.
+// Returns false if 'dmux' is NULL or frame 'frame_number' is not present.
+// Call WebPDemuxReleaseIterator() when use of the iterator is complete.
+// NOTE: 'dmux' must persist for the lifetime of 'iter'.
+WEBP_NODISCARD WEBP_EXTERN int WebPDemuxGetFrame(
+    const WebPDemuxer* dmux, int frame_number, WebPIterator* iter);
+
+// Sets 'iter->fragment' to point to the next ('iter->frame_num' + 1) or
+// previous ('iter->frame_num' - 1) frame. These functions do not loop.
+// Returns true on success, false otherwise.
+WEBP_NODISCARD WEBP_EXTERN int WebPDemuxNextFrame(WebPIterator* iter);
+WEBP_NODISCARD WEBP_EXTERN int WebPDemuxPrevFrame(WebPIterator* iter);
+
+// Releases any memory associated with 'iter'.
+// Must be called before any subsequent calls to WebPDemuxGetFrame() on the same
+// iter. Also, must be called before destroying the associated WebPDemuxer with
+// WebPDemuxDelete().
+WEBP_EXTERN void WebPDemuxReleaseIterator(WebPIterator* iter);
+
+//------------------------------------------------------------------------------
+// Chunk iteration.
+
+struct WebPChunkIterator {
+  // The current and total number of chunks with the fourcc given to
+  // WebPDemuxGetChunk().
+  int chunk_num;
+  int num_chunks;
+  WebPData chunk;    // The payload of the chunk.
+
+  uint32_t pad[6];   // padding for later use
+  void* private_;
+};
+
+// Retrieves the 'chunk_number' instance of the chunk with id 'fourcc' from
+// 'dmux'.
+// 'fourcc' is a character array containing the fourcc of the chunk to return,
+// e.g., "ICCP", "XMP ", "EXIF", etc.
+// Setting 'chunk_number' equal to 0 will return the last chunk in a set.
+// Returns true if the chunk is found, false otherwise. Image related chunk
+// payloads are accessed through WebPDemuxGetFrame() and related functions.
+// Call WebPDemuxReleaseChunkIterator() when use of the iterator is complete.
+// NOTE: 'dmux' must persist for the lifetime of the iterator.
+WEBP_NODISCARD WEBP_EXTERN int WebPDemuxGetChunk(const WebPDemuxer* dmux,
+                                                 const char fourcc[4],
+                                                 int chunk_number,
+                                                 WebPChunkIterator* iter);
+
+// Sets 'iter->chunk' to point to the next ('iter->chunk_num' + 1) or previous
+// ('iter->chunk_num' - 1) chunk. These functions do not loop.
+// Returns true on success, false otherwise.
+WEBP_NODISCARD WEBP_EXTERN int WebPDemuxNextChunk(WebPChunkIterator* iter);
+WEBP_NODISCARD WEBP_EXTERN int WebPDemuxPrevChunk(WebPChunkIterator* iter);
+
+// Releases any memory associated with 'iter'.
+// Must be called before destroying the associated WebPDemuxer with
+// WebPDemuxDelete().
+WEBP_EXTERN void WebPDemuxReleaseChunkIterator(WebPChunkIterator* iter);
+
+//------------------------------------------------------------------------------
+// WebPAnimDecoder API
+//
+// This API allows decoding (possibly) animated WebP images.
+//
+// Code Example:
+/*
+  WebPAnimDecoderOptions dec_options;
+  WebPAnimDecoderOptionsInit(&dec_options);
+  // Tune 'dec_options' as needed.
+  WebPAnimDecoder* dec = WebPAnimDecoderNew(webp_data, &dec_options);
+  WebPAnimInfo anim_info;
+  WebPAnimDecoderGetInfo(dec, &anim_info);
+  for (uint32_t i = 0; i < anim_info.loop_count; ++i) {
+    while (WebPAnimDecoderHasMoreFrames(dec)) {
+      uint8_t* buf;
+      int timestamp;
+      WebPAnimDecoderGetNext(dec, &buf, &timestamp);
+      // ... (Render 'buf' based on 'timestamp').
+      // ... (Do NOT free 'buf', as it is owned by 'dec').
+    }
+    WebPAnimDecoderReset(dec);
+  }
+  const WebPDemuxer* demuxer = WebPAnimDecoderGetDemuxer(dec);
+  // ... (Do something using 'demuxer'; e.g. get EXIF/XMP/ICC data).
+  WebPAnimDecoderDelete(dec);
+*/
+
+typedef struct WebPAnimDecoder WebPAnimDecoder;  // Main opaque object.
+
+// Global options.
+struct WebPAnimDecoderOptions {
+  // Output colorspace. Only the following modes are supported:
+  // MODE_RGBA, MODE_BGRA, MODE_rgbA and MODE_bgrA.
+  WEBP_CSP_MODE color_mode;
+  int use_threads;      // If true, use multi-threaded decoding.
+  uint32_t padding[7];  // Padding for later use.
+};
+
+// Internal, version-checked, entry point.
+WEBP_NODISCARD WEBP_EXTERN int WebPAnimDecoderOptionsInitInternal(
+    WebPAnimDecoderOptions*, int);
+
+// Should always be called, to initialize a fresh WebPAnimDecoderOptions
+// structure before modification. Returns false in case of version mismatch.
+// WebPAnimDecoderOptionsInit() must have succeeded before using the
+// 'dec_options' object.
+WEBP_NODISCARD static WEBP_INLINE int WebPAnimDecoderOptionsInit(
+    WebPAnimDecoderOptions* dec_options) {
+  return WebPAnimDecoderOptionsInitInternal(dec_options,
+                                            WEBP_DEMUX_ABI_VERSION);
+}
+
+// Internal, version-checked, entry point.
+WEBP_NODISCARD WEBP_EXTERN WebPAnimDecoder* WebPAnimDecoderNewInternal(
+    const WebPData*, const WebPAnimDecoderOptions*, int);
+
+// Creates and initializes a WebPAnimDecoder object.
+// Parameters:
+//   webp_data - (in) WebP bitstream. This should remain unchanged during the
+//                    lifetime of the output WebPAnimDecoder object.
+//   dec_options - (in) decoding options. Can be passed NULL to choose
+//                      reasonable defaults (in particular, color mode MODE_RGBA
+//                      will be picked).
+// Returns:
+//   A pointer to the newly created WebPAnimDecoder object, or NULL in case of
+//   parsing error, invalid option or memory error.
+WEBP_NODISCARD static WEBP_INLINE WebPAnimDecoder* WebPAnimDecoderNew(
+    const WebPData* webp_data, const WebPAnimDecoderOptions* dec_options) {
+  return WebPAnimDecoderNewInternal(webp_data, dec_options,
+                                    WEBP_DEMUX_ABI_VERSION);
+}
+
+// Global information about the animation.
+struct WebPAnimInfo {
+  uint32_t canvas_width;
+  uint32_t canvas_height;
+  uint32_t loop_count;
+  uint32_t bgcolor;
+  uint32_t frame_count;
+  uint32_t pad[4];  // padding for later use
+};
+
+// Get global information about the animation.
+// Parameters:
+//   dec - (in) decoder instance to get information from.
+//   info - (out) global information fetched from the animation.
+// Returns:
+//   True on success.
+WEBP_NODISCARD WEBP_EXTERN int WebPAnimDecoderGetInfo(
+    const WebPAnimDecoder* dec, WebPAnimInfo* info);
+
+// Fetch the next frame from 'dec' based on options supplied to
+// WebPAnimDecoderNew(). This will be a fully reconstructed canvas of size
+// 'canvas_width * 4 * canvas_height', and not just the frame sub-rectangle. The
+// returned buffer 'buf' is valid only until the next call to
+// WebPAnimDecoderGetNext(), WebPAnimDecoderReset() or WebPAnimDecoderDelete().
+// Parameters:
+//   dec - (in/out) decoder instance from which the next frame is to be fetched.
+//   buf - (out) decoded frame.
+//   timestamp - (out) timestamp of the frame in milliseconds.
+// Returns:
+//   False if any of the arguments are NULL, or if there is a parsing or
+//   decoding error, or if there are no more frames. Otherwise, returns true.
+WEBP_NODISCARD WEBP_EXTERN int WebPAnimDecoderGetNext(WebPAnimDecoder* dec,
+                                                      uint8_t** buf,
+                                                      int* timestamp);
+
+// Check if there are more frames left to decode.
+// Parameters:
+//   dec - (in) decoder instance to be checked.
+// Returns:
+//   True if 'dec' is not NULL and some frames are yet to be decoded.
+//   Otherwise, returns false.
+WEBP_NODISCARD WEBP_EXTERN int WebPAnimDecoderHasMoreFrames(
+    const WebPAnimDecoder* dec);
+
+// Resets the WebPAnimDecoder object, so that next call to
+// WebPAnimDecoderGetNext() will restart decoding from 1st frame. This would be
+// helpful when all frames need to be decoded multiple times (e.g.
+// info.loop_count times) without destroying and recreating the 'dec' object.
+// Parameters:
+//   dec - (in/out) decoder instance to be reset
+WEBP_EXTERN void WebPAnimDecoderReset(WebPAnimDecoder* dec);
+
+// Grab the internal demuxer object.
+// Getting the demuxer object can be useful if one wants to use operations only
+// available through demuxer; e.g. to get XMP/EXIF/ICC metadata. The returned
+// demuxer object is owned by 'dec' and is valid only until the next call to
+// WebPAnimDecoderDelete().
+//
+// Parameters:
+//   dec - (in) decoder instance from which the demuxer object is to be fetched.
+WEBP_NODISCARD WEBP_EXTERN const WebPDemuxer* WebPAnimDecoderGetDemuxer(
+    const WebPAnimDecoder* dec);
+
+// Deletes the WebPAnimDecoder object.
+// Parameters: +// dec - (in/out) decoder instance to be deleted +WEBP_EXTERN void WebPAnimDecoderDelete(WebPAnimDecoder* dec); + +#ifdef __cplusplus +} // extern "C" +#endif + +#endif // WEBP_WEBP_DEMUX_H_ diff --git a/Source/ThirdParty/RiveLibrary/Includes/webp/encode.h b/Source/ThirdParty/RiveLibrary/Includes/webp/encode.h new file mode 100644 index 00000000..f3d59297 --- /dev/null +++ b/Source/ThirdParty/RiveLibrary/Includes/webp/encode.h @@ -0,0 +1,557 @@ +// Copyright 2011 Google Inc. All Rights Reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the COPYING file in the root of the source +// tree. An additional intellectual property rights grant can be found +// in the file PATENTS. All contributing project authors may +// be found in the AUTHORS file in the root of the source tree. +// ----------------------------------------------------------------------------- +// +// WebP encoder: main interface +// +// Author: Skal (pascal.massimino@gmail.com) + +#ifndef WEBP_WEBP_ENCODE_H_ +#define WEBP_WEBP_ENCODE_H_ + +#include "./types.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#define WEBP_ENCODER_ABI_VERSION 0x020f // MAJOR(8b) + MINOR(8b) + +// Note: forward declaring enumerations is not allowed in (strict) C and C++, +// the types are left here for reference. +// typedef enum WebPImageHint WebPImageHint; +// typedef enum WebPEncCSP WebPEncCSP; +// typedef enum WebPPreset WebPPreset; +// typedef enum WebPEncodingError WebPEncodingError; +typedef struct WebPConfig WebPConfig; +typedef struct WebPPicture WebPPicture; // main structure for I/O +typedef struct WebPAuxStats WebPAuxStats; +typedef struct WebPMemoryWriter WebPMemoryWriter; + +// Return the encoder's version number, packed in hexadecimal using 8bits for +// each of major/minor/revision. E.g: v2.5.7 is 0x020507. +WEBP_EXTERN int WebPGetEncoderVersion(void); + +//------------------------------------------------------------------------------ +// One-stop-shop call! No questions asked: + +// Returns the size of the compressed data (pointed to by *output), or 0 if +// an error occurred. The compressed data must be released by the caller +// using the call 'WebPFree(*output)'. +// These functions compress using the lossy format, and the quality_factor +// can go from 0 (smaller output, lower quality) to 100 (best quality, +// larger output). +WEBP_EXTERN size_t WebPEncodeRGB(const uint8_t* rgb, + int width, int height, int stride, + float quality_factor, uint8_t** output); +WEBP_EXTERN size_t WebPEncodeBGR(const uint8_t* bgr, + int width, int height, int stride, + float quality_factor, uint8_t** output); +WEBP_EXTERN size_t WebPEncodeRGBA(const uint8_t* rgba, + int width, int height, int stride, + float quality_factor, uint8_t** output); +WEBP_EXTERN size_t WebPEncodeBGRA(const uint8_t* bgra, + int width, int height, int stride, + float quality_factor, uint8_t** output); + +// These functions are the equivalent of the above, but compressing in a +// lossless manner. Files are usually larger than lossy format, but will +// not suffer any compression loss. +// Note these functions, like the lossy versions, use the library's default +// settings. For lossless this means 'exact' is disabled. RGB values in +// transparent areas will be modified to improve compression. To avoid this, +// use WebPEncode() and set WebPConfig::exact to 1. 
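+// Code sample: a minimal sketch of the one-call lossless API; 'rgba', 'w',
+// 'h' and 'stride' are assumed to be caller-supplied.
+/*
+  uint8_t* output = NULL;
+  size_t size = WebPEncodeLosslessRGBA(rgba, w, h, stride, &output);
+  if (size == 0) {
+    // ... (encoding failed; nothing was allocated).
+  } else {
+    // ... (consume output[0 .. size-1], e.g. write it to a file).
+    WebPFree(output);   // release the buffer allocated by the encoder
+  }
+*/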
+WEBP_EXTERN size_t WebPEncodeLosslessRGB(const uint8_t* rgb,
+                                         int width, int height, int stride,
+                                         uint8_t** output);
+WEBP_EXTERN size_t WebPEncodeLosslessBGR(const uint8_t* bgr,
+                                         int width, int height, int stride,
+                                         uint8_t** output);
+WEBP_EXTERN size_t WebPEncodeLosslessRGBA(const uint8_t* rgba,
+                                          int width, int height, int stride,
+                                          uint8_t** output);
+WEBP_EXTERN size_t WebPEncodeLosslessBGRA(const uint8_t* bgra,
+                                          int width, int height, int stride,
+                                          uint8_t** output);
+
+//------------------------------------------------------------------------------
+// Coding parameters
+
+// Image characteristics hint for the underlying encoder.
+typedef enum WebPImageHint {
+  WEBP_HINT_DEFAULT = 0,  // default preset.
+  WEBP_HINT_PICTURE,      // digital picture, like portrait, inner shot
+  WEBP_HINT_PHOTO,        // outdoor photograph, with natural lighting
+  WEBP_HINT_GRAPH,        // Discrete tone image (graph, map-tile etc).
+  WEBP_HINT_LAST
+} WebPImageHint;
+
+// Compression parameters.
+struct WebPConfig {
+  int lossless;           // Lossless encoding (0=lossy(default), 1=lossless).
+  float quality;          // between 0 and 100. For lossy, 0 gives the smallest
+                          // size and 100 the largest. For lossless, this
+                          // parameter is the amount of effort put into the
+                          // compression: 0 is the fastest but gives larger
+                          // files compared to the slowest, but best, 100.
+  int method;             // quality/speed trade-off (0=fast, 6=slower-better)
+
+  WebPImageHint image_hint;  // Hint for image type (lossless only for now).
+
+  int target_size;        // if non-zero, set the desired target size in bytes.
+                          // Takes precedence over the 'compression' parameter.
+  float target_PSNR;      // if non-zero, specifies the minimal distortion to
+                          // try to achieve. Takes precedence over target_size.
+  int segments;           // maximum number of segments to use, in [1..4]
+  int sns_strength;       // Spatial Noise Shaping. 0=off, 100=maximum.
+  int filter_strength;    // range: [0 = off .. 100 = strongest]
+  int filter_sharpness;   // range: [0 = off .. 7 = least sharp]
+  int filter_type;        // filtering type: 0 = simple, 1 = strong (only used
+                          // if filter_strength > 0 or autofilter > 0)
+  int autofilter;         // Auto adjust filter's strength [0 = off, 1 = on]
+  int alpha_compression;  // Algorithm for encoding the alpha plane (0 = none,
+                          // 1 = compressed with WebP lossless). Default is 1.
+  int alpha_filtering;    // Predictive filtering method for alpha plane.
+                          // 0: none, 1: fast, 2: best. Default is 1.
+  int alpha_quality;      // Between 0 (smallest size) and 100 (lossless).
+                          // Default is 100.
+  int pass;               // number of entropy-analysis passes (in [1..10]).
+
+  int show_compressed;    // if true, export the compressed picture back.
+                          // In-loop filtering is not applied.
+  int preprocessing;      // preprocessing filter:
+                          // 0=none, 1=segment-smooth, 2=pseudo-random dithering
+  int partitions;         // log2(number of token partitions) in [0..3]. Default
+                          // is set to 0 for easier progressive decoding.
+  int partition_limit;    // quality degradation allowed to fit the 512k limit
+                          // on prediction modes coding (0: no degradation,
+                          // 100: maximum possible degradation).
+  int emulate_jpeg_size;  // If true, compression parameters will be remapped
+                          // to better match the expected output size from
+                          // JPEG compression. Generally, the output size will
+                          // be similar but the degradation will be lower.
+  int thread_level;       // If non-zero, try and use multi-threaded encoding.
+  int low_memory;         // If set, reduce memory usage (but increase CPU use).
+
+  int near_lossless;      // Near lossless encoding [0 = max loss .. 100 = off
+                          // (default)].
+  int exact;              // if non-zero, preserve the exact RGB values under
+                          // transparent area. Otherwise, discard this invisible
+                          // RGB information for better compression. The default
+                          // value is 0.
+
+  int use_delta_palette;  // reserved for future lossless feature
+  int use_sharp_yuv;      // if needed, use sharp (and slow) RGB->YUV conversion
+
+  int qmin;               // minimum permissible quality factor
+  int qmax;               // maximum permissible quality factor
+};
+
+// Enumerate some predefined settings for WebPConfig, depending on the type
+// of source picture. These presets are used when calling WebPConfigPreset().
+typedef enum WebPPreset {
+  WEBP_PRESET_DEFAULT = 0,  // default preset.
+  WEBP_PRESET_PICTURE,      // digital picture, like portrait, inner shot
+  WEBP_PRESET_PHOTO,        // outdoor photograph, with natural lighting
+  WEBP_PRESET_DRAWING,      // hand or line drawing, with high-contrast details
+  WEBP_PRESET_ICON,         // small-sized colorful images
+  WEBP_PRESET_TEXT          // text-like
+} WebPPreset;
+
+// Internal, version-checked, entry point
+WEBP_NODISCARD WEBP_EXTERN int WebPConfigInitInternal(WebPConfig*, WebPPreset,
+                                                      float, int);
+
+// Should always be called, to initialize a fresh WebPConfig structure before
+// modification. Returns false in case of version mismatch. WebPConfigInit()
+// must have succeeded before using the 'config' object.
+// Note that the default values are lossless=0 and quality=75.
+WEBP_NODISCARD static WEBP_INLINE int WebPConfigInit(WebPConfig* config) {
+  return WebPConfigInitInternal(config, WEBP_PRESET_DEFAULT, 75.f,
+                                WEBP_ENCODER_ABI_VERSION);
+}
+
+// This function will initialize the configuration according to a predefined
+// set of parameters (referred to by 'preset') and a given quality factor.
+// This function can be called as a replacement for WebPConfigInit(). Will
+// return false in case of error.
+WEBP_NODISCARD static WEBP_INLINE int WebPConfigPreset(WebPConfig* config,
+                                                       WebPPreset preset,
+                                                       float quality) {
+  return WebPConfigInitInternal(config, preset, quality,
+                                WEBP_ENCODER_ABI_VERSION);
+}
+
+// Activate the lossless compression mode with the desired efficiency level
+// between 0 (fastest, lowest compression) and 9 (slower, best compression).
+// A good default level is '6', providing a fair tradeoff between compression
+// speed and final compressed size.
+// This function will overwrite several fields from config: 'method', 'quality'
+// and 'lossless'. Returns false in case of parameter error.
+WEBP_NODISCARD WEBP_EXTERN int WebPConfigLosslessPreset(WebPConfig* config,
+                                                        int level);
+
+// Returns true if 'config' is non-NULL and all configuration parameters are
+// within their valid ranges.
+WEBP_NODISCARD WEBP_EXTERN int WebPValidateConfig(const WebPConfig* config);
+
+//------------------------------------------------------------------------------
+// Input / Output
+// Structure for storing auxiliary statistics.
+
+struct WebPAuxStats {
+  int coded_size;  // final size
+
+  float PSNR[5];             // peak-signal-to-noise ratio for Y/U/V/All/Alpha
+  int block_count[3];        // number of intra4/intra16/skipped macroblocks
+  int header_bytes[2];       // approximate number of bytes spent for header
+                             // and mode-partition #0
+  int residual_bytes[3][4];  // approximate number of bytes spent for
+                             // DC/AC/uv coefficients for each (0..3) segments.
+  int segment_size[4];     // number of macroblocks in each segment
+  int segment_quant[4];    // quantizer values for each segment
+  int segment_level[4];    // filtering strength for each segment [0..63]
+
+  int alpha_data_size;     // size of the transparency data
+  int layer_data_size;     // size of the enhancement layer data
+
+  // lossless encoder statistics
+  uint32_t lossless_features;  // bit0:predictor bit1:cross-color transform
+                               // bit2:subtract-green bit3:color indexing
+  int histogram_bits;          // number of precision bits of histogram
+  int transform_bits;          // precision bits for transform
+  int cache_bits;              // number of bits for color cache lookup
+  int palette_size;            // number of colors in palette, if used
+  int lossless_size;           // final lossless size
+  int lossless_hdr_size;       // lossless header (transform, huffman etc) size
+  int lossless_data_size;      // lossless image data size
+
+  uint32_t pad[2];  // padding for later use
+};
+
+// Signature for output function. Should return true if writing was successful.
+// data/data_size is the segment of data to write, and 'picture' is for
+// reference (and so one can make use of picture->custom_ptr).
+typedef int (*WebPWriterFunction)(const uint8_t* data, size_t data_size,
+                                  const WebPPicture* picture);
+
+// WebPMemoryWrite: a special WebPWriterFunction that writes to memory using
+// the following WebPMemoryWriter object (to be set as a custom_ptr).
+struct WebPMemoryWriter {
+  uint8_t* mem;     // final buffer (of size 'max_size', larger than 'size').
+  size_t size;      // final size
+  size_t max_size;  // total capacity
+  uint32_t pad[1];  // padding for later use
+};
+
+// The following must be called first before any use.
+WEBP_EXTERN void WebPMemoryWriterInit(WebPMemoryWriter* writer);
+
+// The following must be called to deallocate writer->mem memory. The 'writer'
+// object itself is not deallocated.
+WEBP_EXTERN void WebPMemoryWriterClear(WebPMemoryWriter* writer);
+// The custom writer to be used with WebPMemoryWriter as custom_ptr. Upon
+// completion, writer.mem and writer.size will hold the coded data.
+// writer.mem must be freed by calling WebPMemoryWriterClear.
+WEBP_NODISCARD WEBP_EXTERN int WebPMemoryWrite(
+    const uint8_t* data, size_t data_size, const WebPPicture* picture);
+
+// Progress hook, called from time to time to report progress. It can return
+// false to request an abort of the encoding process, or true otherwise if
+// everything is OK.
+typedef int (*WebPProgressHook)(int percent, const WebPPicture* picture);
+
+// Color spaces.
+typedef enum WebPEncCSP {
+  // chroma sampling
+  WEBP_YUV420 = 0,        // 4:2:0
+  WEBP_YUV420A = 4,       // alpha channel variant
+  WEBP_CSP_UV_MASK = 3,   // bit-mask to get the UV sampling factors
+  WEBP_CSP_ALPHA_BIT = 4  // bit that is set if alpha is present
+} WebPEncCSP;
+
+// Encoding error conditions.
+typedef enum WebPEncodingError { + VP8_ENC_OK = 0, + VP8_ENC_ERROR_OUT_OF_MEMORY, // memory error allocating objects + VP8_ENC_ERROR_BITSTREAM_OUT_OF_MEMORY, // memory error while flushing bits + VP8_ENC_ERROR_NULL_PARAMETER, // a pointer parameter is NULL + VP8_ENC_ERROR_INVALID_CONFIGURATION, // configuration is invalid + VP8_ENC_ERROR_BAD_DIMENSION, // picture has invalid width/height + VP8_ENC_ERROR_PARTITION0_OVERFLOW, // partition is bigger than 512k + VP8_ENC_ERROR_PARTITION_OVERFLOW, // partition is bigger than 16M + VP8_ENC_ERROR_BAD_WRITE, // error while flushing bytes + VP8_ENC_ERROR_FILE_TOO_BIG, // file is bigger than 4G + VP8_ENC_ERROR_USER_ABORT, // abort request by user + VP8_ENC_ERROR_LAST // list terminator. always last. +} WebPEncodingError; + +// maximum width/height allowed (inclusive), in pixels +#define WEBP_MAX_DIMENSION 16383 + +// Main exchange structure (input samples, output bytes, statistics) +// +// Once WebPPictureInit() has been called, it's ok to make all the INPUT fields +// (use_argb, y/u/v, argb, ...) point to user-owned data, even if +// WebPPictureAlloc() has been called. Depending on the value use_argb, +// it's guaranteed that either *argb or *y/*u/*v content will be kept untouched. +struct WebPPicture { + // INPUT + ////////////// + // Main flag for encoder selecting between ARGB or YUV input. + // It is recommended to use ARGB input (*argb, argb_stride) for lossless + // compression, and YUV input (*y, *u, *v, etc.) for lossy compression + // since these are the respective native colorspace for these formats. + int use_argb; + + // YUV input (mostly used for input to lossy compression) + WebPEncCSP colorspace; // colorspace: should be YUV420 for now (=Y'CbCr). + int width, height; // dimensions (less or equal to WEBP_MAX_DIMENSION) + uint8_t* y, *u, *v; // pointers to luma/chroma planes. + int y_stride, uv_stride; // luma/chroma strides. + uint8_t* a; // pointer to the alpha plane + int a_stride; // stride of the alpha plane + uint32_t pad1[2]; // padding for later use + + // ARGB input (mostly used for input to lossless compression) + uint32_t* argb; // Pointer to argb (32 bit) plane. + int argb_stride; // This is stride in pixels units, not bytes. + uint32_t pad2[3]; // padding for later use + + // OUTPUT + /////////////// + // Byte-emission hook, to store compressed bytes as they are ready. + WebPWriterFunction writer; // can be NULL + void* custom_ptr; // can be used by the writer. + + // map for extra information (only for lossy compression mode) + int extra_info_type; // 1: intra type, 2: segment, 3: quant + // 4: intra-16 prediction mode, + // 5: chroma prediction mode, + // 6: bit cost, 7: distortion + uint8_t* extra_info; // if not NULL, points to an array of size + // ((width + 15) / 16) * ((height + 15) / 16) that + // will be filled with a macroblock map, depending + // on extra_info_type. + + // STATS AND REPORTS + /////////////////////////// + // Pointer to side statistics (updated only if not NULL) + WebPAuxStats* stats; + + // Error code for the latest error encountered during encoding + WebPEncodingError error_code; + + // If not NULL, report progress during encoding. + WebPProgressHook progress_hook; + + void* user_data; // this field is free to be set to any value and + // used during callbacks (like progress-report e.g.). 
+
+  uint32_t pad3[3];  // padding for later use
+
+  // Unused for now
+  uint8_t* pad4, *pad5;
+  uint32_t pad6[8];  // padding for later use
+
+  // PRIVATE FIELDS
+  ////////////////////
+  void* memory_;       // row chunk of memory for yuva planes
+  void* memory_argb_;  // and for argb too.
+  void* pad7[2];       // padding for later use
+};
+
+// Internal, version-checked, entry point
+WEBP_NODISCARD WEBP_EXTERN int WebPPictureInitInternal(WebPPicture*, int);
+
+// Should always be called, to initialize the structure. Returns false in case
+// of version mismatch. WebPPictureInit() must have succeeded before using the
+// 'picture' object.
+// Note that, by default, use_argb is false and colorspace is WEBP_YUV420.
+WEBP_NODISCARD static WEBP_INLINE int WebPPictureInit(WebPPicture* picture) {
+  return WebPPictureInitInternal(picture, WEBP_ENCODER_ABI_VERSION);
+}
+
+//------------------------------------------------------------------------------
+// WebPPicture utils
+
+// Convenience allocation / deallocation based on picture->width/height:
+// Allocate y/u/v buffers as per colorspace/width/height specification.
+// Note! This function will free the previous buffer if needed.
+// Returns false in case of memory error.
+WEBP_NODISCARD WEBP_EXTERN int WebPPictureAlloc(WebPPicture* picture);
+
+// Release the memory allocated by WebPPictureAlloc() or WebPPictureImport*().
+// Note that this function does _not_ free the memory used by the 'picture'
+// object itself.
+// Besides memory (which is reclaimed) all other fields of 'picture' are
+// preserved.
+WEBP_EXTERN void WebPPictureFree(WebPPicture* picture);
+
+// Copy the pixels of *src into *dst, using WebPPictureAlloc. Upon return, *dst
+// will fully own the copied pixels (this is not a view). The 'dst' picture need
+// not be initialized as its content is overwritten.
+// Returns false in case of memory allocation error.
+WEBP_NODISCARD WEBP_EXTERN int WebPPictureCopy(const WebPPicture* src,
+                                               WebPPicture* dst);
+
+// Compute the single distortion for packed planes of samples.
+// 'src' will be compared to 'ref', and the raw distortion stored into
+// '*distortion'. The refined metric (log(MSE), log(1 - ssim), ...) will be
+// stored in '*result'.
+// 'x_step' is the horizontal stride (in bytes) between samples.
+// 'src/ref_stride' is the byte distance between rows.
+// Returns false in case of error (bad parameter, memory allocation error, ...).
+WEBP_NODISCARD WEBP_EXTERN int WebPPlaneDistortion(
+    const uint8_t* src, size_t src_stride,
+    const uint8_t* ref, size_t ref_stride, int width, int height, size_t x_step,
+    int type,  // 0 = PSNR, 1 = SSIM, 2 = LSIM
+    float* distortion, float* result);
+
+// Compute PSNR, SSIM or LSIM distortion metric between two pictures. Results
+// are in dB, stored in result[] in the B/G/R/A/All order. The distortion is
+// always performed using ARGB samples. Hence if the input is YUV(A), the
+// picture will be internally converted to ARGB (just for the measurement).
+// Warning: this function is rather CPU-intensive.
+WEBP_NODISCARD WEBP_EXTERN int WebPPictureDistortion(
+    const WebPPicture* src, const WebPPicture* ref,
+    int metric_type,  // 0 = PSNR, 1 = SSIM, 2 = LSIM
+    float result[5]);
+
+// Self-crops a picture to the rectangle defined by top/left/width/height.
+// Returns false in case of memory allocation error, or if the rectangle is
+// outside of the source picture.
+// The rectangle for the view is defined by the top-left corner pixel
+// coordinates (left, top) as well as its width and height. This rectangle
+// must lie fully inside the 'src' source picture. If the source
+// picture uses the YUV420 colorspace, the top and left coordinates will be
+// snapped to even values.
+WEBP_NODISCARD WEBP_EXTERN int WebPPictureCrop(
+    WebPPicture* picture, int left, int top, int width, int height);
+
+// Extracts a view from 'src' picture into 'dst'. The rectangle for the view
+// is defined by the top-left corner pixel coordinates (left, top) as well
+// as its width and height. This rectangle must lie fully inside the 'src'
+// source picture. If the source picture uses the YUV420 colorspace, the top
+// and left coordinates will be snapped to even values.
+// Picture 'src' must outlive 'dst' picture. Self-extraction of view is allowed
+// ('src' equal to 'dst') as a means of fast-cropping (but note that doing so,
+// the original dimensions will be lost). Picture 'dst' need not be initialized
+// with WebPPictureInit() if it is different from 'src', since its content will
+// be overwritten.
+// Returns false in case of invalid parameters.
+WEBP_NODISCARD WEBP_EXTERN int WebPPictureView(
+    const WebPPicture* src, int left, int top, int width, int height,
+    WebPPicture* dst);
+
+// Returns true if the 'picture' is actually a view and therefore does
+// not own the memory for pixels.
+WEBP_EXTERN int WebPPictureIsView(const WebPPicture* picture);
+
+// Rescale a picture to new dimension width x height.
+// If either 'width' or 'height' (but not both) is 0, the corresponding
+// dimension will be calculated preserving the aspect ratio.
+// No gamma correction is applied.
+// Returns false in case of error (invalid parameter or insufficient memory).
+WEBP_NODISCARD WEBP_EXTERN int WebPPictureRescale(WebPPicture* picture,
+                                                  int width, int height);
+
+// Colorspace conversion function to import RGB samples.
+// Any previous buffer will be freed.
+// *rgb buffer should have a size of at least height * rgb_stride.
+// Returns false in case of memory error.
+WEBP_NODISCARD WEBP_EXTERN int WebPPictureImportRGB(
+    WebPPicture* picture, const uint8_t* rgb, int rgb_stride);
+// Same, but for RGBA buffer.
+WEBP_NODISCARD WEBP_EXTERN int WebPPictureImportRGBA(
+    WebPPicture* picture, const uint8_t* rgba, int rgba_stride);
+// Same, but for an RGBX buffer: imports the RGB directly from the 32-bit
+// input buffer, ignoring the alpha channel. Avoids needing to copy the data
+// to a temporary 24-bit RGB buffer to import the RGB only.
+WEBP_NODISCARD WEBP_EXTERN int WebPPictureImportRGBX(
+    WebPPicture* picture, const uint8_t* rgbx, int rgbx_stride);
+
+// Variants of the above, but taking BGR(A|X) input.
+WEBP_NODISCARD WEBP_EXTERN int WebPPictureImportBGR(
+    WebPPicture* picture, const uint8_t* bgr, int bgr_stride);
+WEBP_NODISCARD WEBP_EXTERN int WebPPictureImportBGRA(
+    WebPPicture* picture, const uint8_t* bgra, int bgra_stride);
+WEBP_NODISCARD WEBP_EXTERN int WebPPictureImportBGRX(
+    WebPPicture* picture, const uint8_t* bgrx, int bgrx_stride);
+
+// Converts picture->argb data to the YUV420A format. The 'colorspace'
+// parameter is deprecated and should be equal to WEBP_YUV420.
+// Upon return, picture->use_argb is set to false. The presence of real
+// non-opaque transparent values is detected, and 'colorspace' will be
+// adjusted accordingly. Note that this method is lossy.
+// Returns false in case of error.
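+// Code sample: a sketch of preparing a picture for lossy encoding; 'rgba',
+// 'w', 'h' and 'stride' are assumed to be caller-supplied.
+/*
+  WebPPicture pic;
+  CHECK(WebPPictureInit(&pic));   // false would mean a version mismatch
+  pic.width = w;
+  pic.height = h;
+  pic.use_argb = 1;               // import into the ARGB plane
+  CHECK(WebPPictureImportRGBA(&pic, rgba, stride));
+  // Convert to the encoder's native lossy colorspace; this step is lossy.
+  CHECK(WebPPictureARGBToYUVA(&pic, WEBP_YUV420));
+  // ... (encode 'pic' with WebPEncode(), see the main call below).
+  WebPPictureFree(&pic);          // release pixel memory owned by 'pic'
+*/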
+WEBP_NODISCARD WEBP_EXTERN int WebPPictureARGBToYUVA(
+    WebPPicture* picture, WebPEncCSP /*colorspace = WEBP_YUV420*/);
+
+// Same as WebPPictureARGBToYUVA(), but the conversion is done using
+// pseudo-random dithering with a strength 'dithering' between
+// 0.0 (no dithering) and 1.0 (maximum dithering). This is useful
+// for photographic pictures.
+WEBP_NODISCARD WEBP_EXTERN int WebPPictureARGBToYUVADithered(
+    WebPPicture* picture, WebPEncCSP colorspace, float dithering);
+
+// Performs 'sharp' RGBA->YUVA420 downsampling and colorspace conversion.
+// Downsampling is handled with extra care in case of color clipping. This
+// method is roughly 2x slower than WebPPictureARGBToYUVA() but produces better
+// and sharper YUV representation.
+// Returns false in case of error.
+WEBP_NODISCARD WEBP_EXTERN int WebPPictureSharpARGBToYUVA(WebPPicture* picture);
+// kept for backward compatibility:
+WEBP_NODISCARD WEBP_EXTERN int WebPPictureSmartARGBToYUVA(WebPPicture* picture);
+
+// Converts picture->yuv to picture->argb and sets picture->use_argb to true.
+// The input format must be YUV_420 or YUV_420A. The conversion from YUV420 to
+// ARGB incurs a small loss too.
+// Note that the use of this colorspace is discouraged if one has access to the
+// raw ARGB samples, since using YUV420 is comparatively lossy.
+// Returns false in case of error.
+WEBP_NODISCARD WEBP_EXTERN int WebPPictureYUVAToARGB(WebPPicture* picture);
+
+// Helper function: given a width x height plane of RGBA or YUV(A) samples,
+// cleans up or smooths the YUV or RGB samples under fully transparent areas,
+// to help compressibility (no guarantee, though).
+WEBP_EXTERN void WebPCleanupTransparentArea(WebPPicture* picture);
+
+// Scans the picture 'picture' for the presence of non fully opaque alpha
+// values. Returns true in such a case. Otherwise returns false (indicating
+// e.g. that the alpha plane can be ignored altogether).
+WEBP_EXTERN int WebPPictureHasTransparency(const WebPPicture* picture);
+
+// Remove the transparency information (if present) by blending the color with
+// the background color 'background_rgb' (specified as a 24bit RGB triplet).
+// After this call, all alpha values are reset to 0xff.
+WEBP_EXTERN void WebPBlendAlpha(WebPPicture* picture, uint32_t background_rgb);
+
+//------------------------------------------------------------------------------
+// Main call
+
+// Main encoding call, after config and picture have been initialized.
+// 'picture' must be less than 16384x16384 in dimension (cf WEBP_MAX_DIMENSION),
+// and the 'config' object must be a valid one.
+// Returns false in case of error, true otherwise.
+// In case of error, picture->error_code is updated accordingly.
+// 'picture' can hold the source samples in either YUV(A) or ARGB input,
+// depending on the value of 'picture->use_argb'. It is highly recommended to
+// use the former for lossy encoding, and the latter for lossless encoding
+// (when config.lossless is true). Automatic conversion from one format to
+// another is provided but they both incur some loss.
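+// Code sample: a sketch of the main call with an in-memory writer; 'pic' is
+// assumed to be an initialized, imported WebPPicture (see samples above).
+/*
+  WebPConfig config;
+  CHECK(WebPConfigPreset(&config, WEBP_PRESET_PHOTO, 75.f));
+  // ... (optionally tune 'config' fields further).
+  CHECK(WebPValidateConfig(&config));
+
+  WebPMemoryWriter writer;
+  WebPMemoryWriterInit(&writer);
+  pic.writer = WebPMemoryWrite;   // collect the output in 'writer'
+  pic.custom_ptr = &writer;
+
+  if (!WebPEncode(&config, &pic)) {
+    // ... (inspect pic.error_code for the failure reason).
+  }
+  // ... (consume writer.mem / writer.size).
+  WebPMemoryWriterClear(&writer);  // frees writer.mem
+  WebPPictureFree(&pic);
+*/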
+WEBP_NODISCARD WEBP_EXTERN int WebPEncode(const WebPConfig* config, + WebPPicture* picture); + +//------------------------------------------------------------------------------ + +#ifdef __cplusplus +} // extern "C" +#endif + +#endif // WEBP_WEBP_ENCODE_H_ diff --git a/Source/ThirdParty/RiveLibrary/Includes/webp/format_constants.h b/Source/ThirdParty/RiveLibrary/Includes/webp/format_constants.h new file mode 100644 index 00000000..999035c5 --- /dev/null +++ b/Source/ThirdParty/RiveLibrary/Includes/webp/format_constants.h @@ -0,0 +1,87 @@ +// Copyright 2012 Google Inc. All Rights Reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the COPYING file in the root of the source +// tree. An additional intellectual property rights grant can be found +// in the file PATENTS. All contributing project authors may +// be found in the AUTHORS file in the root of the source tree. +// ----------------------------------------------------------------------------- +// +// Internal header for constants related to WebP file format. +// +// Author: Urvang (urvang@google.com) + +#ifndef WEBP_WEBP_FORMAT_CONSTANTS_H_ +#define WEBP_WEBP_FORMAT_CONSTANTS_H_ + +// Create fourcc of the chunk from the chunk tag characters. +#define MKFOURCC(a, b, c, d) ((a) | (b) << 8 | (c) << 16 | (uint32_t)(d) << 24) + +// VP8 related constants. +#define VP8_SIGNATURE 0x9d012a // Signature in VP8 data. +#define VP8_MAX_PARTITION0_SIZE (1 << 19) // max size of mode partition +#define VP8_MAX_PARTITION_SIZE (1 << 24) // max size for token partition +#define VP8_FRAME_HEADER_SIZE 10 // Size of the frame header within VP8 data. + +// VP8L related constants. +#define VP8L_SIGNATURE_SIZE 1 // VP8L signature size. +#define VP8L_MAGIC_BYTE 0x2f // VP8L signature byte. +#define VP8L_IMAGE_SIZE_BITS 14 // Number of bits used to store + // width and height. +#define VP8L_VERSION_BITS 3 // 3 bits reserved for version. +#define VP8L_VERSION 0 // version 0 +#define VP8L_FRAME_HEADER_SIZE 5 // Size of the VP8L frame header. + +#define MAX_PALETTE_SIZE 256 +#define MAX_CACHE_BITS 11 +#define HUFFMAN_CODES_PER_META_CODE 5 +#define ARGB_BLACK 0xff000000 + +#define DEFAULT_CODE_LENGTH 8 +#define MAX_ALLOWED_CODE_LENGTH 15 + +#define NUM_LITERAL_CODES 256 +#define NUM_LENGTH_CODES 24 +#define NUM_DISTANCE_CODES 40 +#define CODE_LENGTH_CODES 19 + +#define MIN_HUFFMAN_BITS 2 // min number of Huffman bits +#define MAX_HUFFMAN_BITS 9 // max number of Huffman bits + +#define TRANSFORM_PRESENT 1 // The bit to be written when next data + // to be read is a transform. +#define NUM_TRANSFORMS 4 // Maximum number of allowed transform + // in a bitstream. +typedef enum { + PREDICTOR_TRANSFORM = 0, + CROSS_COLOR_TRANSFORM = 1, + SUBTRACT_GREEN_TRANSFORM = 2, + COLOR_INDEXING_TRANSFORM = 3 +} VP8LImageTransformType; + +// Alpha related constants. +#define ALPHA_HEADER_LEN 1 +#define ALPHA_NO_COMPRESSION 0 +#define ALPHA_LOSSLESS_COMPRESSION 1 +#define ALPHA_PREPROCESSED_LEVELS 1 + +// Mux related constants. +#define TAG_SIZE 4 // Size of a chunk tag (e.g. "VP8L"). +#define CHUNK_SIZE_BYTES 4 // Size needed to store chunk's size. +#define CHUNK_HEADER_SIZE 8 // Size of a chunk header. +#define RIFF_HEADER_SIZE 12 // Size of the RIFF header ("RIFFnnnnWEBP"). +#define ANMF_CHUNK_SIZE 16 // Size of an ANMF chunk. +#define ANIM_CHUNK_SIZE 6 // Size of an ANIM chunk. +#define VP8X_CHUNK_SIZE 10 // Size of a VP8X chunk. + +#define MAX_CANVAS_SIZE (1 << 24) // 24-bit max for VP8X width/height. 
+#define MAX_IMAGE_AREA (1ULL << 32) // 32-bit max for width x height. +#define MAX_LOOP_COUNT (1 << 16) // maximum value for loop-count +#define MAX_DURATION (1 << 24) // maximum duration +#define MAX_POSITION_OFFSET (1 << 24) // maximum frame x/y offset + +// Maximum chunk payload is such that adding the header and padding won't +// overflow a uint32_t. +#define MAX_CHUNK_PAYLOAD (~0U - CHUNK_HEADER_SIZE - 1) + +#endif // WEBP_WEBP_FORMAT_CONSTANTS_H_ diff --git a/Source/ThirdParty/RiveLibrary/Includes/webp/mux.h b/Source/ThirdParty/RiveLibrary/Includes/webp/mux.h new file mode 100644 index 00000000..8fb067e4 --- /dev/null +++ b/Source/ThirdParty/RiveLibrary/Includes/webp/mux.h @@ -0,0 +1,591 @@ +// Copyright 2011 Google Inc. All Rights Reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the COPYING file in the root of the source +// tree. An additional intellectual property rights grant can be found +// in the file PATENTS. All contributing project authors may +// be found in the AUTHORS file in the root of the source tree. +// ----------------------------------------------------------------------------- +// +// RIFF container manipulation and encoding for WebP images. +// +// Authors: Urvang (urvang@google.com) +// Vikas (vikasa@google.com) + +#ifndef WEBP_WEBP_MUX_H_ +#define WEBP_WEBP_MUX_H_ + +#include "./mux_types.h" +#include "./types.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#define WEBP_MUX_ABI_VERSION 0x0109 // MAJOR(8b) + MINOR(8b) + +//------------------------------------------------------------------------------ +// Mux API +// +// This API allows manipulation of WebP container images containing features +// like color profile, metadata, animation. +// +// Code Example#1: Create a WebPMux object with image data, color profile and +// XMP metadata. +/* + int copy_data = 0; + WebPMux* mux = WebPMuxNew(); + // ... (Prepare image data). + WebPMuxSetImage(mux, &image, copy_data); + // ... (Prepare ICCP color profile data). + WebPMuxSetChunk(mux, "ICCP", &icc_profile, copy_data); + // ... (Prepare XMP metadata). + WebPMuxSetChunk(mux, "XMP ", &xmp, copy_data); + // Get data from mux in WebP RIFF format. + WebPMuxAssemble(mux, &output_data); + WebPMuxDelete(mux); + // ... (Consume output_data; e.g. write output_data.bytes to file). + WebPDataClear(&output_data); +*/ + +// Code Example#2: Get image and color profile data from a WebP file. +/* + int copy_data = 0; + // ... (Read data from file). + WebPMux* mux = WebPMuxCreate(&data, copy_data); + WebPMuxGetFrame(mux, 1, &image); + // ... (Consume image; e.g. call WebPDecode() to decode the data). + WebPMuxGetChunk(mux, "ICCP", &icc_profile); + // ... (Consume icc_data). + WebPMuxDelete(mux); + WebPFree(data); +*/ + +// Note: forward declaring enumerations is not allowed in (strict) C and C++, +// the types are left here for reference. +// typedef enum WebPMuxError WebPMuxError; +// typedef enum WebPChunkId WebPChunkId; +typedef struct WebPMux WebPMux; // main opaque object. +typedef struct WebPMuxFrameInfo WebPMuxFrameInfo; +typedef struct WebPMuxAnimParams WebPMuxAnimParams; +typedef struct WebPAnimEncoderOptions WebPAnimEncoderOptions; + +// Error codes +typedef enum WEBP_NODISCARD WebPMuxError { + WEBP_MUX_OK = 1, + WEBP_MUX_NOT_FOUND = 0, + WEBP_MUX_INVALID_ARGUMENT = -1, + WEBP_MUX_BAD_DATA = -2, + WEBP_MUX_MEMORY_ERROR = -3, + WEBP_MUX_NOT_ENOUGH_DATA = -4 +} WebPMuxError; + +// IDs for different types of chunks. 
+typedef enum WebPChunkId { + WEBP_CHUNK_VP8X, // VP8X + WEBP_CHUNK_ICCP, // ICCP + WEBP_CHUNK_ANIM, // ANIM + WEBP_CHUNK_ANMF, // ANMF + WEBP_CHUNK_DEPRECATED, // (deprecated from FRGM) + WEBP_CHUNK_ALPHA, // ALPH + WEBP_CHUNK_IMAGE, // VP8/VP8L + WEBP_CHUNK_EXIF, // EXIF + WEBP_CHUNK_XMP, // XMP + WEBP_CHUNK_UNKNOWN, // Other chunks. + WEBP_CHUNK_NIL +} WebPChunkId; + +//------------------------------------------------------------------------------ + +// Returns the version number of the mux library, packed in hexadecimal using +// 8bits for each of major/minor/revision. E.g: v2.5.7 is 0x020507. +WEBP_EXTERN int WebPGetMuxVersion(void); + +//------------------------------------------------------------------------------ +// Life of a Mux object + +// Internal, version-checked, entry point +WEBP_NODISCARD WEBP_EXTERN WebPMux* WebPNewInternal(int); + +// Creates an empty mux object. +// Returns: +// A pointer to the newly created empty mux object. +// Or NULL in case of memory error. +WEBP_NODISCARD static WEBP_INLINE WebPMux* WebPMuxNew(void) { + return WebPNewInternal(WEBP_MUX_ABI_VERSION); +} + +// Deletes the mux object. +// Parameters: +// mux - (in/out) object to be deleted +WEBP_EXTERN void WebPMuxDelete(WebPMux* mux); + +//------------------------------------------------------------------------------ +// Mux creation. + +// Internal, version-checked, entry point +WEBP_NODISCARD WEBP_EXTERN WebPMux* WebPMuxCreateInternal(const WebPData*, int, + int); + +// Creates a mux object from raw data given in WebP RIFF format. +// Parameters: +// bitstream - (in) the bitstream data in WebP RIFF format +// copy_data - (in) value 1 indicates given data WILL be copied to the mux +// object and value 0 indicates data will NOT be copied. If the +// data is not copied, it must exist for the lifetime of the +// mux object. +// Returns: +// A pointer to the mux object created from given data - on success. +// NULL - In case of invalid data or memory error. +WEBP_NODISCARD static WEBP_INLINE WebPMux* WebPMuxCreate( + const WebPData* bitstream, int copy_data) { + return WebPMuxCreateInternal(bitstream, copy_data, WEBP_MUX_ABI_VERSION); +} + +//------------------------------------------------------------------------------ +// Non-image chunks. + +// Note: Only non-image related chunks should be managed through chunk APIs. +// (Image related chunks are: "ANMF", "VP8 ", "VP8L" and "ALPH"). +// To add, get and delete images, use WebPMuxSetImage(), WebPMuxPushFrame(), +// WebPMuxGetFrame() and WebPMuxDeleteFrame(). + +// Adds a chunk with id 'fourcc' and data 'chunk_data' in the mux object. +// Any existing chunk(s) with the same id will be removed. +// Parameters: +// mux - (in/out) object to which the chunk is to be added +// fourcc - (in) a character array containing the fourcc of the given chunk; +// e.g., "ICCP", "XMP ", "EXIF" etc. +// chunk_data - (in) the chunk data to be added +// copy_data - (in) value 1 indicates given data WILL be copied to the mux +// object and value 0 indicates data will NOT be copied. If the +// data is not copied, it must exist until a call to +// WebPMuxAssemble() is made. +// Returns: +// WEBP_MUX_INVALID_ARGUMENT - if mux, fourcc or chunk_data is NULL +// or if fourcc corresponds to an image chunk. +// WEBP_MUX_MEMORY_ERROR - on memory allocation error. +// WEBP_MUX_OK - on success. 
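+// Code sample: a sketch of attaching metadata with copy_data=1, so the chunk
+// bytes need not outlive the call; 'mux' and 'exif' (a WebPData) are assumed
+// to be prepared by the caller.
+/*
+  WebPMuxError err = WebPMuxSetChunk(mux, "EXIF", &exif, 1);
+  if (err != WEBP_MUX_OK) {
+    // ... (handle WEBP_MUX_INVALID_ARGUMENT or WEBP_MUX_MEMORY_ERROR).
+  }
+*/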
+WEBP_EXTERN WebPMuxError WebPMuxSetChunk( + WebPMux* mux, const char fourcc[4], const WebPData* chunk_data, + int copy_data); + +// Gets a reference to the data of the chunk with id 'fourcc' in the mux object. +// The caller should NOT free the returned data. +// Parameters: +// mux - (in) object from which the chunk data is to be fetched +// fourcc - (in) a character array containing the fourcc of the chunk; +// e.g., "ICCP", "XMP ", "EXIF" etc. +// chunk_data - (out) returned chunk data +// Returns: +// WEBP_MUX_INVALID_ARGUMENT - if mux, fourcc or chunk_data is NULL +// or if fourcc corresponds to an image chunk. +// WEBP_MUX_NOT_FOUND - If mux does not contain a chunk with the given id. +// WEBP_MUX_OK - on success. +WEBP_EXTERN WebPMuxError WebPMuxGetChunk( + const WebPMux* mux, const char fourcc[4], WebPData* chunk_data); + +// Deletes the chunk with the given 'fourcc' from the mux object. +// Parameters: +// mux - (in/out) object from which the chunk is to be deleted +// fourcc - (in) a character array containing the fourcc of the chunk; +// e.g., "ICCP", "XMP ", "EXIF" etc. +// Returns: +// WEBP_MUX_INVALID_ARGUMENT - if mux or fourcc is NULL +// or if fourcc corresponds to an image chunk. +// WEBP_MUX_NOT_FOUND - If mux does not contain a chunk with the given fourcc. +// WEBP_MUX_OK - on success. +WEBP_EXTERN WebPMuxError WebPMuxDeleteChunk( + WebPMux* mux, const char fourcc[4]); + +//------------------------------------------------------------------------------ +// Images. + +// Encapsulates data about a single frame. +struct WebPMuxFrameInfo { + WebPData bitstream; // image data: can be a raw VP8/VP8L bitstream + // or a single-image WebP file. + int x_offset; // x-offset of the frame. + int y_offset; // y-offset of the frame. + int duration; // duration of the frame (in milliseconds). + + WebPChunkId id; // frame type: should be one of WEBP_CHUNK_ANMF + // or WEBP_CHUNK_IMAGE + WebPMuxAnimDispose dispose_method; // Disposal method for the frame. + WebPMuxAnimBlend blend_method; // Blend operation for the frame. + uint32_t pad[1]; // padding for later use +}; + +// Sets the (non-animated) image in the mux object. +// Note: Any existing images (including frames) will be removed. +// Parameters: +// mux - (in/out) object in which the image is to be set +// bitstream - (in) can be a raw VP8/VP8L bitstream or a single-image +// WebP file (non-animated) +// copy_data - (in) value 1 indicates given data WILL be copied to the mux +// object and value 0 indicates data will NOT be copied. If the +// data is not copied, it must exist until a call to +// WebPMuxAssemble() is made. +// Returns: +// WEBP_MUX_INVALID_ARGUMENT - if mux is NULL or bitstream is NULL. +// WEBP_MUX_MEMORY_ERROR - on memory allocation error. +// WEBP_MUX_OK - on success. +WEBP_EXTERN WebPMuxError WebPMuxSetImage( + WebPMux* mux, const WebPData* bitstream, int copy_data); + +// Adds a frame at the end of the mux object. +// Notes: (1) frame.id should be WEBP_CHUNK_ANMF +// (2) For setting a non-animated image, use WebPMuxSetImage() instead. +// (3) Type of frame being pushed must be same as the frames in mux. +// (4) As WebP only supports even offsets, any odd offset will be snapped +// to an even location using: offset &= ~1 +// Parameters: +// mux - (in/out) object to which the frame is to be added +// frame - (in) frame data. +// copy_data - (in) value 1 indicates given data WILL be copied to the mux +// object and value 0 indicates data will NOT be copied. 
+//               If the data is not copied, it must exist until a call to
+//               WebPMuxAssemble() is made.
+// Returns:
+//   WEBP_MUX_INVALID_ARGUMENT - if mux or frame is NULL
+//                               or if content of 'frame' is invalid.
+//   WEBP_MUX_MEMORY_ERROR - on memory allocation error.
+//   WEBP_MUX_OK - on success.
+WEBP_EXTERN WebPMuxError WebPMuxPushFrame(
+    WebPMux* mux, const WebPMuxFrameInfo* frame, int copy_data);
+
+// Gets the nth frame from the mux object.
+// The content of 'frame->bitstream' is allocated using WebPMalloc(), and NOT
+// owned by the 'mux' object. It MUST be deallocated by the caller by calling
+// WebPDataClear().
+// nth=0 has a special meaning - last position.
+// Parameters:
+//   mux - (in) object from which the info is to be fetched
+//   nth - (in) index of the frame in the mux object
+//   frame - (out) data of the returned frame
+// Returns:
+//   WEBP_MUX_INVALID_ARGUMENT - if mux or frame is NULL.
+//   WEBP_MUX_NOT_FOUND - if there are fewer than nth frames in the mux object.
+//   WEBP_MUX_BAD_DATA - if the nth frame chunk in mux is invalid.
+//   WEBP_MUX_MEMORY_ERROR - on memory allocation error.
+//   WEBP_MUX_OK - on success.
+WEBP_EXTERN WebPMuxError WebPMuxGetFrame(
+    const WebPMux* mux, uint32_t nth, WebPMuxFrameInfo* frame);
+
+// Deletes a frame from the mux object.
+// nth=0 has a special meaning - last position.
+// Parameters:
+//   mux - (in/out) object from which a frame is to be deleted
+//   nth - (in) the position from which the frame is to be deleted
+// Returns:
+//   WEBP_MUX_INVALID_ARGUMENT - if mux is NULL.
+//   WEBP_MUX_NOT_FOUND - if there are fewer than nth frames in the mux object
+//                        before deletion.
+//   WEBP_MUX_OK - on success.
+WEBP_EXTERN WebPMuxError WebPMuxDeleteFrame(WebPMux* mux, uint32_t nth);
+
+//------------------------------------------------------------------------------
+// Animation.
+
+// Animation parameters.
+struct WebPMuxAnimParams {
+  uint32_t bgcolor;  // Background color of the canvas stored (in MSB order) as:
+                     // Bits 00 to 07: Alpha.
+                     // Bits 08 to 15: Red.
+                     // Bits 16 to 23: Green.
+                     // Bits 24 to 31: Blue.
+  int loop_count;    // Number of times to repeat the animation [0 = infinite].
+};
+
+// Sets the animation parameters in the mux object. Any existing ANIM chunks
+// will be removed.
+// Parameters:
+//   mux - (in/out) object in which ANIM chunk is to be set/added
+//   params - (in) animation parameters.
+// Returns:
+//   WEBP_MUX_INVALID_ARGUMENT - if mux or params is NULL.
+//   WEBP_MUX_MEMORY_ERROR - on memory allocation error.
+//   WEBP_MUX_OK - on success.
+WEBP_EXTERN WebPMuxError WebPMuxSetAnimationParams(
+    WebPMux* mux, const WebPMuxAnimParams* params);
+
+// Gets the animation parameters from the mux object.
+// Parameters:
+//   mux - (in) object from which the animation parameters are to be fetched
+//   params - (out) animation parameters extracted from the ANIM chunk
+// Returns:
+//   WEBP_MUX_INVALID_ARGUMENT - if mux or params is NULL.
+//   WEBP_MUX_NOT_FOUND - if ANIM chunk is not present in mux object.
+//   WEBP_MUX_OK - on success.
+WEBP_EXTERN WebPMuxError WebPMuxGetAnimationParams(
+    const WebPMux* mux, WebPMuxAnimParams* params);
+
+//------------------------------------------------------------------------------
+// Misc Utilities.
+
+// Sets the canvas size for the mux object. The width and height can be
+// specified explicitly or left as zero (0, 0).
+// * When width and height are specified explicitly, then this frame bound is
+//   enforced during subsequent calls to WebPMuxAssemble() and an error is
+//   reported if any animated frame does not completely fit within the canvas.
+// * When unspecified (0, 0), the constructed canvas will get the frame bounds
+//   from the bounding-box over all frames after calling WebPMuxAssemble().
+// Parameters:
+//   mux - (in) object to which the canvas size is to be set
+//   width - (in) canvas width
+//   height - (in) canvas height
+// Returns:
+//   WEBP_MUX_INVALID_ARGUMENT - if mux is NULL; or
+//                               width or height are invalid or out of bounds
+//   WEBP_MUX_OK - on success.
+WEBP_EXTERN WebPMuxError WebPMuxSetCanvasSize(WebPMux* mux,
+                                              int width, int height);
+
+// Gets the canvas size from the mux object.
+// Note: This method assumes that the VP8X chunk, if present, is up-to-date.
+// That is, the mux object hasn't been modified since the last call to
+// WebPMuxAssemble() or WebPMuxCreate().
+// Parameters:
+//   mux - (in) object from which the canvas size is to be fetched
+//   width - (out) canvas width
+//   height - (out) canvas height
+// Returns:
+//   WEBP_MUX_INVALID_ARGUMENT - if mux, width or height is NULL.
+//   WEBP_MUX_BAD_DATA - if VP8X/VP8/VP8L chunk or canvas size is invalid.
+//   WEBP_MUX_OK - on success.
+WEBP_EXTERN WebPMuxError WebPMuxGetCanvasSize(const WebPMux* mux,
+                                              int* width, int* height);
+
+// Gets the feature flags from the mux object.
+// Note: This method assumes that the VP8X chunk, if present, is up-to-date.
+// That is, the mux object hasn't been modified since the last call to
+// WebPMuxAssemble() or WebPMuxCreate().
+// Parameters:
+//   mux - (in) object from which the features are to be fetched
+//   flags - (out) the flags specifying which features are present in the
+//           mux object. This will be an OR of various flag values.
+//           Enum 'WebPFeatureFlags' can be used to test individual flag values.
+// Returns:
+//   WEBP_MUX_INVALID_ARGUMENT - if mux or flags is NULL.
+//   WEBP_MUX_BAD_DATA - if VP8X/VP8/VP8L chunk or canvas size is invalid.
+//   WEBP_MUX_OK - on success.
+WEBP_EXTERN WebPMuxError WebPMuxGetFeatures(const WebPMux* mux,
+                                            uint32_t* flags);
+
+// Gets the number of chunks with the given 'id' in the mux object.
+// Parameters:
+//   mux - (in) object from which the info is to be fetched
+//   id - (in) chunk id specifying the type of chunk
+//   num_elements - (out) number of chunks with the given chunk id
+// Returns:
+//   WEBP_MUX_INVALID_ARGUMENT - if mux or num_elements is NULL.
+//   WEBP_MUX_OK - on success.
+WEBP_EXTERN WebPMuxError WebPMuxNumChunks(const WebPMux* mux,
+                                          WebPChunkId id, int* num_elements);
+
+// Assembles all chunks in WebP RIFF format and returns in 'assembled_data'.
+// This function also validates the mux object.
+// Note: The content of 'assembled_data' will be ignored and overwritten.
+// Also, the content of 'assembled_data' is allocated using WebPMalloc(), and
+// NOT owned by the 'mux' object. It MUST be deallocated by the caller by
+// calling WebPDataClear(). It's always safe to call WebPDataClear() upon
+// return, even in case of error.
+// Parameters:
+//   mux - (in/out) object whose chunks are to be assembled
+//   assembled_data - (out) assembled WebP data
+// Returns:
+//   WEBP_MUX_BAD_DATA - if mux object is invalid.
+//   WEBP_MUX_INVALID_ARGUMENT - if mux or assembled_data is NULL.
+//   WEBP_MUX_MEMORY_ERROR - on memory allocation error.
+//   WEBP_MUX_OK - on success.
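+// Usage sketch (editor's addition, not part of the original libwebp header)
+// for WebPMuxAssemble(), declared just below; 'image' is assumed to already
+// hold a single-image WebP bitstream:
+/*
+  WebPMux* mux = WebPMuxNew();
+  WebPMuxSetImage(mux, &image, 1);  // copy_data=1: the mux owns its own copy
+  WebPData assembled;
+  WebPMuxError err = WebPMuxAssemble(mux, &assembled);
+  WebPMuxDelete(mux);
+  if (err == WEBP_MUX_OK) {
+    // Write assembled.bytes / assembled.size to a file, or re-mux further.
+  }
+  WebPDataClear(&assembled);  // safe to call even if assembly failed
+*/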
+WEBP_EXTERN WebPMuxError WebPMuxAssemble(WebPMux* mux,
+                                         WebPData* assembled_data);
+
+//------------------------------------------------------------------------------
+// WebPAnimEncoder API
+//
+// This API allows encoding (possibly) animated WebP images.
+//
+// Code Example:
+/*
+  WebPAnimEncoderOptions enc_options;
+  WebPAnimEncoderOptionsInit(&enc_options);
+  // Tune 'enc_options' as needed.
+  WebPAnimEncoder* enc = WebPAnimEncoderNew(width, height, &enc_options);
+  while(<there are more frames>) {
+    WebPConfig config;
+    WebPConfigInit(&config);
+    // Tune 'config' as needed.
+    WebPAnimEncoderAdd(enc, frame, timestamp_ms, &config);
+  }
+  WebPAnimEncoderAdd(enc, NULL, timestamp_ms, NULL);
+  WebPAnimEncoderAssemble(enc, webp_data);
+  WebPAnimEncoderDelete(enc);
+  // Write the 'webp_data' to a file, or re-mux it further.
+*/
+
+typedef struct WebPAnimEncoder WebPAnimEncoder;  // Main opaque object.
+
+// Forward declarations. Defined in encode.h.
+struct WebPPicture;
+struct WebPConfig;
+
+// Global options.
+struct WebPAnimEncoderOptions {
+  WebPMuxAnimParams anim_params;  // Animation parameters.
+  int minimize_size;  // If true, minimize the output size (slow). Implicitly
+                      // disables key-frame insertion.
+  int kmin;
+  int kmax;           // Minimum and maximum distance between consecutive key
+                      // frames in the output. The library may insert some key
+                      // frames as needed to satisfy these criteria.
+                      // Note that these conditions should hold: kmax > kmin
+                      // and kmin >= kmax / 2 + 1. Also, if kmax <= 0, then
+                      // key-frame insertion is disabled; and if kmax == 1,
+                      // then all frames will be key-frames (kmin value does
+                      // not matter for these special cases).
+  int allow_mixed;    // If true, use mixed compression mode; may choose
+                      // either lossy or lossless for each frame.
+  int verbose;        // If true, print info and warning messages to stderr.
+
+  uint32_t padding[4];  // Padding for later use.
+};
+
+// Internal, version-checked, entry point.
+WEBP_EXTERN int WebPAnimEncoderOptionsInitInternal(
+    WebPAnimEncoderOptions*, int);
+
+// Should always be called, to initialize a fresh WebPAnimEncoderOptions
+// structure before modification. Returns false in case of version mismatch.
+// WebPAnimEncoderOptionsInit() must have succeeded before using the
+// 'enc_options' object.
+WEBP_NODISCARD static WEBP_INLINE int WebPAnimEncoderOptionsInit(
+    WebPAnimEncoderOptions* enc_options) {
+  return WebPAnimEncoderOptionsInitInternal(enc_options, WEBP_MUX_ABI_VERSION);
+}
+
+// Internal, version-checked, entry point.
+WEBP_EXTERN WebPAnimEncoder* WebPAnimEncoderNewInternal(
+    int, int, const WebPAnimEncoderOptions*, int);
+
+// Creates and initializes a WebPAnimEncoder object.
+// Parameters:
+//   width/height - (in) canvas width and height of the animation.
+//   enc_options - (in) encoding options; can be passed NULL to pick
+//                 reasonable defaults.
+// Returns:
+//   A pointer to the newly created WebPAnimEncoder object.
+//   Or NULL in case of memory error.
+static WEBP_INLINE WebPAnimEncoder* WebPAnimEncoderNew(
+    int width, int height, const WebPAnimEncoderOptions* enc_options) {
+  return WebPAnimEncoderNewInternal(width, height, enc_options,
+                                    WEBP_MUX_ABI_VERSION);
+}
+
+// Optimizes the given frame for WebP, encodes it and adds it to the
+// WebPAnimEncoder object.
+// The last call to 'WebPAnimEncoderAdd' should be with frame = NULL, which
+// indicates that no more frames are to be added. This call is also used to
+// determine the duration of the last frame.
+// Parameters:
+//   enc - (in/out) object to which the frame is to be added.
+//   frame - (in/out) frame data in ARGB or YUV(A) format. If it is in YUV(A)
+//           format, it will be converted to ARGB, which incurs a small loss.
+//   timestamp_ms - (in) timestamp of this frame in milliseconds.
+//                  Duration of a frame would be calculated as
+//                  "timestamp of next frame - timestamp of this frame".
+//                  Hence, timestamps should be in non-decreasing order.
+//   config - (in) encoding options; can be passed NULL to pick
+//            reasonable defaults.
+// Returns:
+//   On error, returns false and frame->error_code is set appropriately.
+//   Otherwise, returns true.
+WEBP_NODISCARD WEBP_EXTERN int WebPAnimEncoderAdd(
+    WebPAnimEncoder* enc, struct WebPPicture* frame, int timestamp_ms,
+    const struct WebPConfig* config);
+
+// Assembles all frames added so far into a WebP bitstream.
+// This call should be preceded by a call to 'WebPAnimEncoderAdd' with
+// frame = NULL; if not, the duration of the last frame will be internally
+// estimated.
+// Parameters:
+//   enc - (in/out) object from which the frames are to be assembled.
+//   webp_data - (out) generated WebP bitstream.
+// Returns:
+//   True on success.
+WEBP_NODISCARD WEBP_EXTERN int WebPAnimEncoderAssemble(WebPAnimEncoder* enc,
+                                                       WebPData* webp_data);
+
+// Gets the error string corresponding to the most recent call using 'enc'. The
+// returned string is owned by 'enc' and is valid only until the next call to
+// WebPAnimEncoderAdd(), WebPAnimEncoderAssemble(), or WebPAnimEncoderDelete().
+// Parameters:
+//   enc - (in/out) object from which the error string is to be fetched.
+// Returns:
+//   NULL if 'enc' is NULL. Otherwise, returns the error string if the last call
+//   to 'enc' had an error, or an empty string if the last call was a success.
+WEBP_EXTERN const char* WebPAnimEncoderGetError(WebPAnimEncoder* enc);
+
+// Deletes the WebPAnimEncoder object.
+// Parameters:
+//   enc - (in/out) object to be deleted
+WEBP_EXTERN void WebPAnimEncoderDelete(WebPAnimEncoder* enc);
+
+//------------------------------------------------------------------------------
+// Non-image chunks.
+
+// Note: Only non-image related chunks should be managed through chunk APIs.
+// (Image related chunks are: "ANMF", "VP8 ", "VP8L" and "ALPH").
+
+// Adds a chunk with id 'fourcc' and data 'chunk_data' in the enc object.
+// Any existing chunk(s) with the same id will be removed.
+// Parameters:
+//   enc - (in/out) object to which the chunk is to be added
+//   fourcc - (in) a character array containing the fourcc of the given chunk;
+//            e.g., "ICCP", "XMP ", "EXIF", etc.
+//   chunk_data - (in) the chunk data to be added
+//   copy_data - (in) value 1 indicates given data WILL be copied to the enc
+//               object and value 0 indicates data will NOT be copied. If the
+//               data is not copied, it must exist until a call to
+//               WebPAnimEncoderAssemble() is made.
+// Returns:
+//   WEBP_MUX_INVALID_ARGUMENT - if enc, fourcc or chunk_data is NULL.
+//   WEBP_MUX_MEMORY_ERROR - on memory allocation error.
+//   WEBP_MUX_OK - on success.
+WEBP_EXTERN WebPMuxError WebPAnimEncoderSetChunk(
+    WebPAnimEncoder* enc, const char fourcc[4], const WebPData* chunk_data,
+    int copy_data);
+
+// Gets a reference to the data of the chunk with id 'fourcc' in the enc object.
+// The caller should NOT free the returned data.
+// Parameters:
+//   enc - (in) object from which the chunk data is to be fetched
+//   fourcc - (in) a character array containing the fourcc of the chunk;
+//            e.g., "ICCP", "XMP ", "EXIF", etc.
+//   chunk_data - (out) returned chunk data
+// Returns:
+//   WEBP_MUX_INVALID_ARGUMENT - if enc, fourcc or chunk_data is NULL.
+//   WEBP_MUX_NOT_FOUND - If enc does not contain a chunk with the given id.
+//   WEBP_MUX_OK - on success.
+WEBP_EXTERN WebPMuxError WebPAnimEncoderGetChunk(
+    const WebPAnimEncoder* enc, const char fourcc[4], WebPData* chunk_data);
+
+// Deletes the chunk with the given 'fourcc' from the enc object.
+// Parameters:
+//   enc - (in/out) object from which the chunk is to be deleted
+//   fourcc - (in) a character array containing the fourcc of the chunk;
+//            e.g., "ICCP", "XMP ", "EXIF", etc.
+// Returns:
+//   WEBP_MUX_INVALID_ARGUMENT - if enc or fourcc is NULL.
+//   WEBP_MUX_NOT_FOUND - If enc does not contain a chunk with the given fourcc.
+//   WEBP_MUX_OK - on success.
+WEBP_EXTERN WebPMuxError WebPAnimEncoderDeleteChunk(
+    WebPAnimEncoder* enc, const char fourcc[4]);
+
+//------------------------------------------------------------------------------
+
+#ifdef __cplusplus
+}  // extern "C"
+#endif
+
+#endif  // WEBP_WEBP_MUX_H_
diff --git a/Source/ThirdParty/RiveLibrary/Includes/webp/mux_types.h b/Source/ThirdParty/RiveLibrary/Includes/webp/mux_types.h
new file mode 100644
index 00000000..c585d208
--- /dev/null
+++ b/Source/ThirdParty/RiveLibrary/Includes/webp/mux_types.h
@@ -0,0 +1,99 @@
+// Copyright 2012 Google Inc. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the COPYING file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+// -----------------------------------------------------------------------------
+//
+// Data-types common to the mux and demux libraries.
+//
+// Author: Urvang (urvang@google.com)
+
+#ifndef WEBP_WEBP_MUX_TYPES_H_
+#define WEBP_WEBP_MUX_TYPES_H_
+
+#include <string.h>  // memset()
+#include "./types.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+// Note: forward declaring enumerations is not allowed in (strict) C and C++;
+// the types are left here for reference.
+// typedef enum WebPFeatureFlags WebPFeatureFlags;
+// typedef enum WebPMuxAnimDispose WebPMuxAnimDispose;
+// typedef enum WebPMuxAnimBlend WebPMuxAnimBlend;
+typedef struct WebPData WebPData;
+
+// VP8X Feature Flags.
+typedef enum WebPFeatureFlags {
+  ANIMATION_FLAG = 0x00000002,
+  XMP_FLAG       = 0x00000004,
+  EXIF_FLAG      = 0x00000008,
+  ALPHA_FLAG     = 0x00000010,
+  ICCP_FLAG      = 0x00000020,
+
+  ALL_VALID_FLAGS = 0x0000003e
+} WebPFeatureFlags;
+
+// Dispose method (animation only). Indicates how the area used by the current
+// frame is to be treated before rendering the next frame on the canvas.
+typedef enum WebPMuxAnimDispose {
+  WEBP_MUX_DISPOSE_NONE,       // Do not dispose.
+  WEBP_MUX_DISPOSE_BACKGROUND  // Dispose to background color.
+} WebPMuxAnimDispose;
+
+// Blend operation (animation only). Indicates how transparent pixels of the
+// current frame are blended with those of the previous canvas.
+typedef enum WebPMuxAnimBlend {
+  WEBP_MUX_BLEND,    // Blend.
+  WEBP_MUX_NO_BLEND  // Do not blend.
+} WebPMuxAnimBlend;
+
+// Data type used to describe 'raw' data, e.g., chunk data
+// (ICC profile, metadata) and WebP compressed image data.
+// 'bytes' memory must be allocated using WebPMalloc() and such.
+struct WebPData {
+  const uint8_t* bytes;
+  size_t size;
+};
+
+// Initializes the contents of the 'webp_data' object with default values.
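+// Usage sketch (editor's addition, not part of the original libwebp header)
+// showing a typical ownership round-trip with the WebPData helpers defined
+// below; 'some_bytes'/'some_size' are a hypothetical caller-owned buffer:
+/*
+  WebPData src = { some_bytes, some_size };  // non-owning view of the buffer
+  WebPData dst;
+  if (WebPDataCopy(&src, &dst)) {  // deep copy via WebPMalloc()
+    // ... use dst.bytes / dst.size ...
+    WebPDataClear(&dst);           // frees the copy and re-initializes 'dst'
+  }
+*/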
+static WEBP_INLINE void WebPDataInit(WebPData* webp_data) {
+  if (webp_data != NULL) {
+    memset(webp_data, 0, sizeof(*webp_data));
+  }
+}
+
+// Clears the contents of the 'webp_data' object by calling WebPFree().
+// Does not deallocate the object itself.
+static WEBP_INLINE void WebPDataClear(WebPData* webp_data) {
+  if (webp_data != NULL) {
+    WebPFree((void*)webp_data->bytes);
+    WebPDataInit(webp_data);
+  }
+}
+
+// Allocates necessary storage for 'dst' and copies the contents of 'src'.
+// Returns true on success.
+WEBP_NODISCARD static WEBP_INLINE int WebPDataCopy(const WebPData* src,
+                                                   WebPData* dst) {
+  if (src == NULL || dst == NULL) return 0;
+  WebPDataInit(dst);
+  if (src->bytes != NULL && src->size != 0) {
+    dst->bytes = (uint8_t*)WebPMalloc(src->size);
+    if (dst->bytes == NULL) return 0;
+    memcpy((void*)dst->bytes, src->bytes, src->size);
+    dst->size = src->size;
+  }
+  return 1;
+}
+
+#ifdef __cplusplus
+}  // extern "C"
+#endif
+
+#endif  // WEBP_WEBP_MUX_TYPES_H_
diff --git a/Source/ThirdParty/RiveLibrary/Includes/webp/types.h b/Source/ThirdParty/RiveLibrary/Includes/webp/types.h
new file mode 100644
index 00000000..9c17edec
--- /dev/null
+++ b/Source/ThirdParty/RiveLibrary/Includes/webp/types.h
@@ -0,0 +1,93 @@
+// Copyright 2010 Google Inc. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the COPYING file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+// -----------------------------------------------------------------------------
+//
+// Common types + memory wrappers
+//
+// Author: Skal (pascal.massimino@gmail.com)
+
+#ifndef WEBP_WEBP_TYPES_H_
+#define WEBP_WEBP_TYPES_H_
+
+#include <stddef.h>  // for size_t
+
+#ifndef _MSC_VER
+#include <inttypes.h>
+#if defined(__cplusplus) || !defined(__STRICT_ANSI__) || \
+    (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L)
+#define WEBP_INLINE inline
+#else
+#define WEBP_INLINE
+#endif
+#else
+typedef signed char int8_t;
+typedef unsigned char uint8_t;
+typedef signed short int16_t;
+typedef unsigned short uint16_t;
+typedef signed int int32_t;
+typedef unsigned int uint32_t;
+typedef unsigned long long int uint64_t;
+typedef long long int int64_t;
+#define WEBP_INLINE __forceinline
+#endif  /* _MSC_VER */
+
+#ifndef WEBP_NODISCARD
+#if defined(WEBP_ENABLE_NODISCARD) && WEBP_ENABLE_NODISCARD
+#if (defined(__cplusplus) && __cplusplus >= 201700L) || \
+    (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202311L)
+#define WEBP_NODISCARD [[nodiscard]]
+#else
+// gcc's __has_attribute does not work for enums.
+#if defined(__clang__) && defined(__has_attribute)
+#if __has_attribute(warn_unused_result)
+#define WEBP_NODISCARD __attribute__((warn_unused_result))
+#else
+#define WEBP_NODISCARD
+#endif  /* __has_attribute(warn_unused_result) */
+#else
+#define WEBP_NODISCARD
+#endif  /* defined(__clang__) && defined(__has_attribute) */
+#endif  /* (defined(__cplusplus) && __cplusplus >= 201700L) ||
+           (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202311L) */
+#else
+#define WEBP_NODISCARD
+#endif  /* defined(WEBP_ENABLE_NODISCARD) && WEBP_ENABLE_NODISCARD */
+#endif  /* WEBP_NODISCARD */
+
+#ifndef WEBP_EXTERN
+// This explicitly marks library functions and allows for changing the
+// signature for e.g., Windows DLL builds.
+# if defined(_WIN32) && defined(WEBP_DLL)
+#  define WEBP_EXTERN __declspec(dllexport)
+# elif defined(__GNUC__) && __GNUC__ >= 4
+#  define WEBP_EXTERN extern __attribute__ ((visibility ("default")))
+# else
+#  define WEBP_EXTERN extern
+# endif  /* defined(_WIN32) && defined(WEBP_DLL) */
+#endif  /* WEBP_EXTERN */
+
+// Macro to check ABI compatibility (same major revision number)
+#define WEBP_ABI_IS_INCOMPATIBLE(a, b) (((a) >> 8) != ((b) >> 8))
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+// Allocates 'size' bytes of memory. Returns NULL upon error. Memory
+// must be deallocated by calling WebPFree(). This function is made available
+// by the core 'libwebp' library.
+WEBP_NODISCARD WEBP_EXTERN void* WebPMalloc(size_t size);
+
+// Releases memory returned by the WebPDecode*() functions (from decode.h).
+WEBP_EXTERN void WebPFree(void* ptr);
+
+#ifdef __cplusplus
+}  // extern "C"
+#endif
+
+#endif  // WEBP_WEBP_TYPES_H_
diff --git a/Source/ThirdParty/RiveLibrary/RiveLibrary.Build.cs b/Source/ThirdParty/RiveLibrary/RiveLibrary.Build.cs
index 61303b15..46d4171a 100644
--- a/Source/ThirdParty/RiveLibrary/RiveLibrary.Build.cs
+++ b/Source/ThirdParty/RiveLibrary/RiveLibrary.Build.cs
@@ -43,6 +43,7 @@ public RiveLibrary(ReadOnlyTargetRules Target) : base(Target)
             PublicAdditionalLibraries.AddRange(new string[]
             {
+                Path.Combine(libDirectory, $"rive_libwebp{libSuffix}.{extension}"),
                 Path.Combine(libDirectory, $"rive_sheenbidi{libSuffix}.{extension}"),
                 Path.Combine(libDirectory, $"rive_harfbuzz{libSuffix}.{extension}"),
                 Path.Combine(libDirectory, $"rive_libwebp{libSuffix}.{extension}"),