From a412904e02ed310cedf71e8e54eb21c65727b0db Mon Sep 17 00:00:00 2001 From: Vladimir Sumarov Date: Wed, 10 Dec 2025 15:27:12 -0800 Subject: [PATCH 1/2] fix memory dump upload --- .github/workflows/build.yml | 3 +- .yarnrc.yml | 1 + README.md | 3 +- ci/build-aws-sdk.cmd | 10 +- crash-handler-process/CMakeLists.txt | 5 +- crash-handler-process/platforms/util-win.cpp | 212 +++++++++++++------ package.json | 3 +- 7 files changed, 166 insertions(+), 71 deletions(-) create mode 100644 .yarnrc.yml diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 3ebc9b8..6f1c1d2 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -21,7 +21,7 @@ env: RELEASE_BUCKET: "slobs-crash-handler" ELECTRON_VERSION: "v29.4.3" BIN_DEPENDENCIES: "dependencies2019.0" - AWS_SDK_VERSION: "1.11.500" + AWS_SDK_VERSION: "1.11.704" AWS_SDK_PATH: "aws-sdk/awsi" permissions: @@ -84,7 +84,6 @@ jobs: run: cmake -H"${{ github.workspace }}" -B"${{env.BUILD_DIRECTORY}}" -G"Visual Studio 17 2022" -A x64 -DNODEJS_VERSION="${{env.ELECTRON_VERSION}}" -DDepsPath="${PWD}\build\deps\${{env.BIN_DEPENDENCIES}}\win64" -DBOOST_ROOT="${PWD}\build\deps\boost" -DCMAKE_INSTALL_PREFIX="${{env.INSTALL_PACKAGE_PATH}}" env: INSTALL_PACKAGE_PATH: "${{env.BUILD_DIRECTORY}}/${{env.DISTRIBUTE_DIRECTORY}}/${{env.PACKAGE_DIRECTORY}}" - AWS_CRASH_UPLOAD_BUCKET_KEY: ${{secrets.AWS_CRASH_UPLOAD_BUCKET_KEY}} - name: Build run: cmake --build "${{env.BUILD_DIRECTORY}}" --target install --config ${{env.BUILD_CONFIGURATION}} - name: Put version into package.json diff --git a/.yarnrc.yml b/.yarnrc.yml new file mode 100644 index 0000000..8b757b2 --- /dev/null +++ b/.yarnrc.yml @@ -0,0 +1 @@ +nodeLinker: node-modules \ No newline at end of file diff --git a/README.md b/README.md index e43f090..df85cd1 100644 --- a/README.md +++ b/README.md @@ -9,12 +9,11 @@ yarn install set BIN_DEPENDENCIES=dependencies2019.0 ci\install-bin-deps.cmd -set AWS_SDK_VERSION="1.11.500" +set AWS_SDK_VERSION="1.11.704" 
ci\build-aws-sdk.cmd ci\localization_prepare_binaries.cmd -set AWS_CRASH_UPLOAD_BUCKET_KEY=your_aws_bucket_key_for_memory_dumps set INSTALL_PACKAGE_PATH="../desktop/node_modules/crash-handler" cmake -B"build" -G"Visual Studio 17 2022" -A x64 -DDepsPath="%CD%\build\deps\%BIN_DEPENDENCIES%\win64" -DBOOST_ROOT="%CD%\build\deps\boost" -DCMAKE_INSTALL_PREFIX="%INSTALL_PACKAGE_PATH%" diff --git a/ci/build-aws-sdk.cmd b/ci/build-aws-sdk.cmd index 15811c5..9ec078b 100644 --- a/ci/build-aws-sdk.cmd +++ b/ci/build-aws-sdk.cmd @@ -1,5 +1,13 @@ :: run before cmake of main project to build aws cpp sdk locally :: it expect AWS_SDK_VERSION env variable to be set +@echo off + +if not defined AWS_SDK_VERSION ( + echo Error: AWS_SDK_VERSION environment variable is not set + exit /b 1 +) + +echo Building AWS SDK version: %AWS_SDK_VERSION% mkdir aws-sdk cd aws-sdk @@ -10,7 +18,7 @@ mkdir build cd build cmake -G "Visual Studio 17 2022" -A x64 ^ --DBUILD_ONLY="s3;sts" ^ +-DBUILD_ONLY="s3;sts;transfer;cognito-identity;identity-management" ^ -DENABLE_TESTING=OFF ^ -DBUILD_SHARED_LIBS=OFF ^ -DSTATIC_CRT=ON ^ diff --git a/crash-handler-process/CMakeLists.txt b/crash-handler-process/CMakeLists.txt index 3ff8461..806d0ad 100644 --- a/crash-handler-process/CMakeLists.txt +++ b/crash-handler-process/CMakeLists.txt @@ -123,12 +123,12 @@ IF(WIN32) #AWSSDK gives linking error if PkgCOnfig not found explicitly before awssdk find_package find_package(PkgConfig QUIET) - find_package(AWSSDK REQUIRED COMPONENTS s3 sts) + find_package(AWSSDK REQUIRED COMPONENTS s3 sts cognito-identity identity-management) if(MSVC AND BUILD_SHARED_LIBS) add_definitions(-DUSE_IMPORT_EXPORT) - list(APPEND SERVICE_LIST s3 sts) + list(APPEND SERVICE_LIST s3 sts cognito-identity identity-management) set(CMAKE_BUILD_TYPE Release) @@ -145,7 +145,6 @@ IF(WIN32) "${CMAKE_CURRENT_BINARY_DIR}/awsi/include/" "${nlohmannjson_SOURCE_DIR}/" "${ZLIB_INCLUDE_DIRS}") - 
add_compile_definitions(AWS_CRASH_UPLOAD_BUCKET_KEY=\"$ENV{AWS_CRASH_UPLOAD_BUCKET_KEY}\") target_sources(crash-handler-process PUBLIC "${PROJECT_SOURCE_DIR}/platforms/resource-win.rc") diff --git a/crash-handler-process/platforms/util-win.cpp b/crash-handler-process/platforms/util-win.cpp index feb8214..fb301e5 100644 --- a/crash-handler-process/platforms/util-win.cpp +++ b/crash-handler-process/platforms/util-win.cpp @@ -32,11 +32,20 @@ #include #include +#include +#include #include #include #include #include #include +#include +#include +#include +#include +#include +#include +#include #pragma comment(lib, "userenv.lib") #pragma comment(lib, "ws2_32.lib") @@ -82,12 +91,6 @@ struct unhandledHandlerObj { unhandledHandlerObj unhandledHandlerObj_Impl; -#ifndef AWS_CRASH_UPLOAD_BUCKET_KEY -#define AWS_CRASH_UPLOAD_BUCKET_KEY "KEY" -#endif - -#define GET_KEY []() { return AWS_CRASH_UPLOAD_BUCKET_KEY; }() - const std::wstring appStateFileName = L"\\appState"; std::wstring appCachePath = L""; @@ -472,71 +475,149 @@ std::condition_variable upload_variable; long long total_sent_amout = 0; std::chrono::steady_clock::time_point last_progress_update; std::unique_ptr s3_client_ptr; +bool upload_aborted = false; void PutObjectAsyncFinished(const Aws::S3::S3Client *s3Client, const Aws::S3::Model::PutObjectRequest &request, const Aws::S3::Model::PutObjectOutcome &outcome, const std::shared_ptr &context) { if (outcome.IsSuccess()) { log_info << "PutObjectAsyncFinished: Finished uploading '" << context->GetUUID() << "'." 
<< std::endl; + log_info << "RequestId=" << outcome.GetResult().GetRequestId() << ", ETag=" << outcome.GetResult().GetETag() << std::endl; } else { total_sent_amout = -1; - log_error << "PutObjectAsyncFinished failed " << outcome.GetError().GetMessage() << std::endl; + const auto &err = outcome.GetError(); + log_error << "PutObjectAsyncFinished failed | Message=" << err.GetMessage() << ", ExceptionName=" << err.GetExceptionName() + << ", ResponseCode=" << (int)err.GetResponseCode() << ", Retryable=" << (err.ShouldRetry() ? "true" : "false") << std::endl; } upload_variable.notify_one(); } -bool PutObjectAsync(const Aws::S3::S3Client &s3_client, const Aws::String &bucket_name, const std::wstring &file_path, const std::wstring &file_name) +bool UploadFileMultipart(const Aws::S3::S3Client &s3_client, const Aws::String &bucket_name, const std::wstring &file_path, const std::wstring &file_name) { - log_info << "PutObjectAsync started " << std::endl; - Aws::S3::Model::PutObjectRequest request; - request.SetDataSentEventHandler([](const Aws::Http::HttpRequest *, long long amount) { - total_sent_amout += amount; - - std::chrono::steady_clock::time_point now_time = std::chrono::steady_clock::now(); - if (std::chrono::duration_cast(now_time - last_progress_update).count() > 500) { - last_progress_update = now_time; - UploadWindow::getInstance()->setUploadProgress(total_sent_amout); - } - }); + const size_t PART_SIZE = 10 * 1024 * 1024; // 10MB parts + Aws::String key = Aws::String("crash_memory_dumps/") + Aws::String(std::string(file_name.begin(), file_name.end())); - request.SetBucket(bucket_name); - - Aws::String aws_file_name = Aws::String(std::string(file_name.begin(), file_name.end())); - request.SetKey(Aws::String("crash_memory_dumps/") + aws_file_name); - - std::shared_ptr input_data; - std::fstream *fs = new std::fstream(); std::filesystem::path uploaded_file = file_path; uploaded_file.append(file_name); - input_data.reset(fs); - try { - 
fs->exceptions(std::fstream::failbit | std::fstream::badbit); - fs->open(uploaded_file, std::ios_base::in | std::ios_base::binary); - } catch (std::fstream::failure f) { - log_info << "PutObjectAsync failed open file " << uploaded_file.generic_string() << std::endl; + + std::ifstream file(uploaded_file, std::ios::binary | std::ios::ate); + if (!file.is_open()) { + log_error << "Failed to open file for multipart upload: " << uploaded_file.generic_string() << std::endl; return false; - } catch (std::exception e) { - log_info << "PutObjectAsync failed open file " << uploaded_file.generic_string() << std::endl; + } + + size_t file_size = file.tellg(); + file.seekg(0, std::ios::beg); + log_info << "Multipart upload: file size=" << file_size << " bytes, part size=" << PART_SIZE << std::endl; + + Aws::S3::Model::CreateMultipartUploadRequest create_request; + create_request.SetBucket(bucket_name); + create_request.SetKey(key); + create_request.SetContentType("application/zip"); + + auto create_outcome = s3_client.CreateMultipartUpload(create_request); + if (!create_outcome.IsSuccess()) { + log_error << "Failed to initiate multipart upload: " << create_outcome.GetError().GetMessage() << std::endl; + file.close(); + return false; + } + + Aws::String upload_id = create_outcome.GetResult().GetUploadId(); + log_info << "Multipart upload initiated: uploadId=" << std::string(upload_id.c_str()) << std::endl; + + Aws::Vector completed_parts; + int part_number = 1; + total_sent_amout = 0; + + while (file && !upload_aborted) { + std::vector buffer(PART_SIZE); + file.read(buffer.data(), PART_SIZE); + std::streamsize bytes_read = file.gcount(); + + if (bytes_read == 0) + break; + + log_info << "Uploading part " << part_number << ", size=" << bytes_read << " bytes" << std::endl; + + auto stream = Aws::MakeShared("UploadPartStream"); + stream->write(buffer.data(), bytes_read); + + Aws::S3::Model::UploadPartRequest part_request; + part_request.SetBucket(bucket_name); + 
part_request.SetKey(key); + part_request.SetUploadId(upload_id); + part_request.SetPartNumber(part_number); + part_request.SetBody(stream); + part_request.SetContentLength(bytes_read); + + auto part_outcome = s3_client.UploadPart(part_request); + if (!part_outcome.IsSuccess()) { + log_error << "Failed to upload part " << part_number << ": " << part_outcome.GetError().GetMessage() << std::endl; + + Aws::S3::Model::AbortMultipartUploadRequest abort_request; + abort_request.SetBucket(bucket_name); + abort_request.SetKey(key); + abort_request.SetUploadId(upload_id); + s3_client.AbortMultipartUpload(abort_request); + + file.close(); + return false; + } + + Aws::S3::Model::CompletedPart completed_part; + completed_part.SetPartNumber(part_number); + completed_part.SetETag(part_outcome.GetResult().GetETag()); + completed_parts.push_back(completed_part); + + total_sent_amout += bytes_read; + UploadWindow::getInstance()->setUploadProgress(total_sent_amout); + + part_number++; + } + + file.close(); + + if (upload_aborted) { + log_info << "Upload aborted by user" << std::endl; + Aws::S3::Model::AbortMultipartUploadRequest abort_request; + abort_request.SetBucket(bucket_name); + abort_request.SetKey(key); + abort_request.SetUploadId(upload_id); + s3_client.AbortMultipartUpload(abort_request); return false; } - fs->exceptions(std::fstream::goodbit); - request.SetBody(input_data); + Aws::S3::Model::CompletedMultipartUpload completed_upload; + completed_upload.SetParts(completed_parts); + + Aws::S3::Model::CompleteMultipartUploadRequest complete_request; + complete_request.SetBucket(bucket_name); + complete_request.SetKey(key); + complete_request.SetUploadId(upload_id); + complete_request.SetMultipartUpload(completed_upload); + + auto complete_outcome = s3_client.CompleteMultipartUpload(complete_request); + if (!complete_outcome.IsSuccess()) { + log_error << "Failed to complete multipart upload: " << complete_outcome.GetError().GetMessage() << std::endl; - log_info << 
"PutObjectAsync ready to call PutObjectAsync " << std::endl; - std::shared_ptr context = Aws::MakeShared("PutObjectAllocationTag"); - context->SetUUID(request.GetKey()); - s3_client.PutObjectAsync(request, PutObjectAsyncFinished, context); - log_info << "PutObjectAsync finished. Wait for async result." << std::endl; + Aws::S3::Model::AbortMultipartUploadRequest abort_request; + abort_request.SetBucket(bucket_name); + abort_request.SetKey(key); + abort_request.SetUploadId(upload_id); + s3_client.AbortMultipartUpload(abort_request); + + return false; + } + log_info << "Multipart upload completed successfully, ETag=" << std::string(complete_outcome.GetResult().GetETag().c_str()) << std::endl; return true; } void Util::abortUploadAWS() { std::lock_guard grd(s3_mutex); - + upload_aborted = true; if (s3_client_ptr != nullptr) s3_client_ptr->DisableRequestProcessing(); } @@ -545,15 +626,17 @@ bool Util::uploadToAWS(const std::wstring &wspath, const std::wstring &fileName) { UploadWindow::getInstance()->uploadStarted(); bool ret = false; + upload_aborted = false; Aws::SDKOptions options; + options.loggingOptions.logLevel = Aws::Utils::Logging::LogLevel::Warn; + options.loggingOptions.logger_create_fn = []() { + return Aws::MakeShared("AWSLog", Aws::Utils::Logging::LogLevel::Warn); + }; Aws::InitAPI(options); { const Aws::String bucket_name = "streamlabs-obs-user-cache"; const Aws::String region = "us-west-2"; - const Aws::String accessIDKey = "AKIAIAINC32O7I3KUJGQ"; - const Aws::String Key = GET_KEY; - - std::unique_lock lock(upload_mutex); + const Aws::String identityPoolId = "us-west-2:d5badad4-ff41-4a80-8677-e2757ab32263"; Aws::Client::ClientConfiguration config; @@ -563,30 +646,35 @@ bool Util::uploadToAWS(const std::wstring &wspath, const std::wstring &fileName) config.scheme = Aws::Http::Scheme::HTTPS; config.verifySSL = true; config.followRedirects = Aws::Client::FollowRedirectsPolicy::NEVER; - Aws::Auth::AWSCredentials aws_credentials; - 
aws_credentials.SetAWSAccessKeyId(accessIDKey); - aws_credentials.SetAWSSecretKey(Key); + config.enableTcpKeepAlive = true; + config.maxConnections = 25; + config.lowSpeedLimit = 1; + config.connectTimeoutMs = 30000; + config.requestTimeoutMs = 600000; + config.httpLibOverride = Aws::Http::TransferLibType::WIN_HTTP_CLIENT; + log_info << "AWS ClientConfig | region=" << std::string(config.region.c_str()) << ", connectTimeoutMs=" << config.connectTimeoutMs + << ", requestTimeoutMs=" << config.requestTimeoutMs + << ", httpLibOverride=WIN_HTTP, tcpKeepAlive=" << (config.enableTcpKeepAlive ? "true" : "false") << std::endl; + + auto cognitoClient = Aws::MakeShared("CognitoClient", config); + auto credentialsProvider = + Aws::MakeShared("CognitoProvider", identityPoolId, cognitoClient); { std::lock_guard grd(s3_mutex); - s3_client_ptr = - std::make_unique(aws_credentials, config, Aws::Client::AWSAuthV4Signer::PayloadSigningPolicy::Never, false); + s3_client_ptr = std::make_unique(credentialsProvider, config, + Aws::Client::AWSAuthV4Signer::PayloadSigningPolicy::RequestDependent, false); } - log_info << "Upload to ASW ready to start upload" << std::endl; - if (!PutObjectAsync(*s3_client_ptr, bucket_name, wspath, fileName)) { - log_info << "Upload to ASW PutObjectAsync failed" << std::endl; + log_info << "Upload to AWS ready to start multipart upload" << std::endl; + if (!UploadFileMultipart(*s3_client_ptr, bucket_name, wspath, fileName)) { + log_info << "Upload to AWS multipart upload failed" << std::endl; + UploadWindow::getInstance()->uploadFailed(); } else { - upload_variable.wait(lock); - if (total_sent_amout > 0) { - log_info << "Upload to ASW File upload attempt completed." << std::endl; - UploadWindow::getInstance()->uploadFinished(); - ret = true; - } else { - UploadWindow::getInstance()->uploadFailed(); - } + log_info << "Upload to AWS multipart upload completed successfully." 
<< std::endl; + UploadWindow::getInstance()->uploadFinished(); + ret = true; } } diff --git a/package.json b/package.json index a429544..f6eed73 100644 --- a/package.json +++ b/package.json @@ -5,5 +5,6 @@ "node-addon-api": "^7.1.1", "path": "^0.12.7", "shelljs": "^0.8.5" - } + }, + "packageManager": "yarn@4.9.1+sha512.f95ce356460e05be48d66401c1ae64ef84d163dd689964962c6888a9810865e39097a5e9de748876c2e0bf89b232d583c33982773e9903ae7a76257270986538" } From 1a32fb35e15b1d75871de156bbce982ab08287c4 Mon Sep 17 00:00:00 2001 From: Vladimir Sumarov Date: Wed, 10 Dec 2025 15:42:45 -0800 Subject: [PATCH 2/2] revert yarn change --- package.json | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/package.json b/package.json index f6eed73..a429544 100644 --- a/package.json +++ b/package.json @@ -5,6 +5,5 @@ "node-addon-api": "^7.1.1", "path": "^0.12.7", "shelljs": "^0.8.5" - }, - "packageManager": "yarn@4.9.1+sha512.f95ce356460e05be48d66401c1ae64ef84d163dd689964962c6888a9810865e39097a5e9de748876c2e0bf89b232d583c33982773e9903ae7a76257270986538" + } }