From 076d0888051047782299fba5401652579d86f003 Mon Sep 17 00:00:00 2001 From: "azure-pipelines[bot]" <36771401+azure-pipelines[bot]@users.noreply.github.com> Date: Fri, 4 Oct 2024 16:34:36 +0200 Subject: [PATCH] update canton to 20241003.14216.vfdbf1885 (#20055) Co-authored-by: Azure Pipelines Daml Build Co-authored-by: Remy Haemmerle --- sdk/canton/BUILD.bazel | 3 - .../canton/config/CantonConfig.scala | 2 +- .../config/CommunityConfigValidations.scala | 9 +- .../app/src/pack/config/participant.conf | 2 +- .../src/main/resources/rewrite-appender.xml | 29 - .../canton/concurrent/Threading.scala | 4 +- .../canton/config/CantonRequireTypes.scala | 114 +-- .../canton/config/StorageConfig.scala | 24 +- .../digitalasset/canton/crypto/Nonce.scala | 4 +- .../digitalasset/canton/crypto/Random.scala | 2 + .../store/db/DbCryptoPrivateStore.scala | 10 +- .../crypto/store/db/DbCryptoPublicStore.scala | 6 +- .../digitalasset/canton/protocol/Tags.scala | 7 - .../messages/AcsCommitmentApiHelpers.scala | 4 +- .../canton/pruning/PruningPhase.scala | 8 +- .../resource/DbExceptionRetryPolicy.scala | 49 +- .../canton/resource/DbVersionCheck.scala | 95 -- .../canton/resource/IdempotentInsert.scala | 47 +- .../canton/resource/Storage.scala | 111 +-- .../authentication/AuthenticationToken.scala | 4 +- .../store/db/DbCursorPreheadStore.scala | 51 +- .../store/db/DbIndexedStringStore.scala | 10 +- .../canton/store/db/DbPrunableByTime.scala | 28 +- .../store/db/DbSequencedEventStore.scala | 43 +- .../client/CachingDomainTopologyClient.scala | 2 +- .../canton/topology/store/TopologyStore.scala | 14 +- .../store/db/DbPartyMetadataStore.scala | 2 +- .../topology/store/db/DbTopologyStore.scala | 33 +- .../src/main/daml/CantonExamples/daml.yaml | 2 +- .../db/migration/canton/h2/dev/V999__dev.sql | 2 +- .../canton/h2/stable/V1_1__initial.sql | 158 ++-- .../canton/postgres/dev/V999__dev.sql | 2 +- .../canton/postgres/stable/V1_1__initial.sql | 162 ++-- .../postgres/stable/V1_2__initial_views.sql | 47 +- .../store/db/DbBulkUpdateProcessor.scala | 23 +- .../store/db/DbPruningSchedulerStore.scala | 2 +- .../store/db/DbSendTrackerStore_Unused.scala | 20 +- .../src/test/resources/logback-test.xml | 2 - .../com/digitalasset/canton/Generators.scala | 2 +- .../canton/SequentialTestByKey.scala | 1 - .../store/db/DatabaseDeadlockTest.scala | 2 - .../store/db/DbIndexedStringsStoreTest.scala | 8 +- .../demo/src/main/daml/ai-analysis/daml.yaml | 2 +- .../demo/src/main/daml/bank/daml.yaml | 2 +- .../demo/src/main/daml/doctor/daml.yaml | 2 +- .../src/main/daml/health-insurance/daml.yaml | 2 +- .../src/main/daml/medical-records/daml.yaml | 2 +- .../domain/block/LedgerBlockEvent.scala | 13 +- .../block/data/db/DbSequencerBlockStore.scala | 18 +- .../block/update/BlockChunkProcessor.scala | 85 +- .../block/update/BlockUpdateGenerator.scala | 30 +- .../update/SubmissionRequestValidator.scala | 2 +- .../update/TrafficControlValidator.scala | 14 +- .../canton/domain/mediator/Mediator.scala | 16 +- .../DbMediatorDomainConfigurationStore.scala | 20 - .../store/FinalizedResponseStore.scala | 22 +- .../domain/metrics/BftOrderingMetrics.scala | 1 - .../state/DbSequencerStateManagerStore.scala | 39 +- .../sequencer/SequencerFactory.scala | 1 - .../sequencer/SequencerWriterConfig.scala | 11 - .../sequencer/WriterStartupError.scala | 2 +- .../sequencer/block/BlockSequencer.scala | 2 +- .../DbSequencerDomainConfigurationStore.scala | 17 - .../sequencer/store/DbSequencerStore.scala | 191 +--- .../sequencer/store/SequencerStore.scala | 4 +- 
.../HasTopologyTransactionTestFactory.scala | 131 +++ .../update/BlockChunkProcessorTest.scala | 149 +++ .../update/BlockUpdateGeneratorImplTest.scala | 144 +++ .../sequencer/DatabaseSequencerApiTest.scala | 1 - .../DatabaseSequencerSnapshottingTest.scala | 1 - .../sequencing/sequencer/SequencerTest.scala | 1 - .../sequencer/block/BlockSequencerTest.scala | 2 +- .../store/DbSequencerStoreTest.scala | 6 - .../store/DbReferenceBlockOrderingStore.scala | 8 +- .../store/ReferenceBlockOrderingStore.scala | 2 +- .../ReferenceBlockOrderingStoreTest.scala | 2 +- .../canton/integration/HasCycleUtils.scala | 1 - .../platform/index/InMemoryStateUpdater.scala | 2 +- .../store/backend/UpdateToDbDto.scala | 881 ++++++++++-------- .../index/InMemoryStateUpdaterSpec.scala | 320 ++++--- .../src/main/daml/carbonv1/daml.yaml | 2 +- .../src/main/daml/carbonv2/daml.yaml | 2 +- .../src/main/daml/experimental/daml.yaml | 2 +- .../src/main/daml/model/daml.yaml | 2 +- .../ongoing_stream_package_upload/daml.yaml | 2 +- .../main/daml/package_management/daml.yaml | 2 +- .../src/main/daml/semantic/daml.yaml | 2 +- .../src/main/daml/upgrade/1.0.0/daml.yaml | 2 +- .../src/main/daml/upgrade/2.0.0/daml.yaml | 2 +- .../src/main/daml/upgrade/3.0.0/daml.yaml | 2 +- .../canton/http/EndpointsCompanion.scala | 15 +- .../daml/damldefinitionsservice/dep/daml.yaml | 2 +- .../damldefinitionsservice/main/daml.yaml | 2 +- .../src/test/daml/v2_1/daml.yaml | 2 +- .../src/test/daml/v2_dev/daml.yaml | 2 +- .../value/json/ApiCodecCompressedSpec.scala | 13 +- .../com/digitalasset/canton/SlickString.scala | 76 -- .../digitalasset/canton/SlickStringTest.scala | 91 -- .../participant/src/main/daml/daml.yaml | 2 +- .../canton/participant/ParticipantNode.scala | 5 +- .../admin/grpc/GrpcInspectionService.scala | 16 +- .../inspection/SyncStateInspection.scala | 22 +- .../admin/repair/RepairService.scala | 54 +- .../config/LocalParticipantConfig.scala | 50 - .../domain/DomainRegistryHelpers.scala | 5 +- .../domain/grpc/GrpcDomainRegistry.scala | 7 +- .../event/RecordOrderPublisher.scala | 15 +- .../ledger/api/LedgerApiJdbcUrl.scala | 5 - .../ledger/api/client/ValueRemapper.scala | 81 -- .../canton/participant/package.scala | 18 - .../protocol/MessageDispatcher.scala | 2 +- .../protocol/ProtocolProcessor.scala | 4 +- .../conflictdetection/LockableState.scala | 2 - .../conflictdetection/LockableStates.scala | 2 +- .../ReassignmentProcessingSteps.scala | 2 +- .../protocol/submission/ChangeId.scala | 1 - .../submission/CommandDeduplicator.scala | 8 +- .../submission/InFlightSubmission.scala | 16 +- .../InFlightSubmissionTracker.scala | 2 +- .../validation/RecipientsValidator.scala | 2 +- .../pruning/AcsCommitmentProcessor.scala | 68 +- .../pruning/PruningProcessor.scala | 2 +- .../store/ActiveContractStore.scala | 22 +- .../participant/store/DamlLfSerializers.scala | 110 --- .../participant/store/DamlPackageStore.scala | 1 - .../store/DomainConnectionConfigStore.scala | 1 - .../store/InFlightSubmissionStore.scala | 3 - .../ParticipantNodePersistentState.scala | 1 - .../participant/store/ReassignmentStore.scala | 6 +- .../store/SyncDomainEphemeralState.scala | 6 +- .../SyncDomainEphemeralStateFactory.scala | 6 +- .../store/SyncDomainPersistentState.scala | 2 +- .../store/db/DbAcsCommitmentStore.scala | 239 ++--- .../store/db/DbActiveContractStore.scala | 276 ++---- .../db/DbCommandDeduplicationStore.scala | 50 +- .../store/db/DbContractStore.scala | 150 +-- .../store/db/DbDamlPackageStore.scala | 79 +- .../db/DbDomainConnectionConfigStore.scala | 17 +- 
.../store/db/DbDomainParameterStore.scala | 15 +- .../store/db/DbInFlightSubmissionStore.scala | 100 +- .../DbParticipantPruningSchedulerStore.scala | 17 - .../store/db/DbParticipantPruningStore.scala | 10 +- .../store/db/DbParticipantSettingsStore.scala | 24 +- .../store/db/DbReassignmentStore.scala | 503 ++++++---- .../store/db/DbRequestJournalStore.scala | 88 +- .../store/db/DbSubmissionTrackerStore.scala | 46 +- .../db/DbSyncDomainPersistentState.scala | 33 +- .../memory/InMemoryActiveContractStore.scala | 2 - .../InMemoryInFlightSubmissionStore.scala | 2 +- .../memory/InMemoryReassignmentStore.scala | 2 +- .../InMemorySyncDomainPersistentState.scala | 13 +- .../store/memory/ReassignmentCache.scala | 4 +- .../participant/sync/CantonSyncService.scala | 28 +- .../canton/participant/sync/SyncDomain.scala | 4 +- .../sync/SyncDomainMigration.scala | 4 +- .../SyncDomainPersistentStateManager.scala | 12 +- .../sync/TimelyRejectNotifier.scala | 24 +- .../sync/UpstreamOffsetConvert.scala | 17 +- .../participant/topology/PackageOps.scala | 2 +- .../ParticipantTopologyDispatcher.scala | 3 +- .../participant/admin/PackageOpsTest.scala | 2 +- .../admin/inspection/AcsInspectionTest.scala | 2 +- .../protocol/MessageDispatcherTest.scala | 6 +- .../ConflictDetectionHelpers.scala | 10 +- .../ConflictDetectorTest.scala | 17 +- .../NaiveRequestTrackerTest.scala | 5 +- .../pruning/AcsCommitmentProcessorTest.scala | 4 +- .../participant/store/ContractStoreTest.scala | 2 +- .../store/DamlLfSerializersTest.scala | 36 - .../store/ReassignmentStoreTest.scala | 137 +-- .../store/db/DbActiveContractStoreTest.scala | 5 +- .../store/db/DbContractStoreTest.scala | 9 +- .../store/db/DbDamlPackageStoreTest.scala | 2 - .../db/DbInFlightSubmissionStoreTest.scala | 2 - .../store/db/DbReassignmentStoreTest.scala | 5 + .../store/db/DbRequestJournalStoreTest.scala | 2 - .../ActiveContractStoreTestInMemory.scala | 2 +- .../store/memory/ReassignmentCacheTest.scala | 29 +- .../ReassignmentStoreTestInMemory.scala | 5 +- .../sync/TimelyRejectNotifierTest.scala | 20 +- .../canton/domain/block/BlockFormat.scala | 8 +- .../canton/domain/block/SequencerDriver.scala | 3 + .../canton/store/db/DbStorageSetup.scala | 10 - .../topology/TestingIdentityFactory.scala | 5 +- .../canton/tracing/TraceContext.scala | 2 + sdk/canton/ref | 2 +- 186 files changed, 2710 insertions(+), 3457 deletions(-) create mode 100644 sdk/canton/community/domain/src/test/scala/com/digitalasset/canton/domain/HasTopologyTransactionTestFactory.scala create mode 100644 sdk/canton/community/domain/src/test/scala/com/digitalasset/canton/domain/block/update/BlockChunkProcessorTest.scala create mode 100644 sdk/canton/community/domain/src/test/scala/com/digitalasset/canton/domain/block/update/BlockUpdateGeneratorImplTest.scala delete mode 100644 sdk/canton/community/lib/wartremover/src/main/scala/com/digitalasset/canton/SlickString.scala delete mode 100644 sdk/canton/community/lib/wartremover/src/test/scala/com/digitalasset/canton/SlickStringTest.scala delete mode 100644 sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/ledger/api/client/ValueRemapper.scala delete mode 100644 sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/package.scala delete mode 100644 sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/DamlLfSerializers.scala delete mode 100644 sdk/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/store/DamlLfSerializersTest.scala 
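A recurring change throughout this patch is the removal of Oracle-specific SQL: profile dispatch and IGNORE_ROW_ON_DUPKEY_INDEX hints give way to a single `on conflict do nothing` upsert, which works on Postgres and on H2 in PostgreSQL compatibility mode (the h2Url in this patch sets MODE=PostgreSQL). A minimal standalone sketch of that pattern over plain JDBC; the URL, credentials, table, and column names are hypothetical:

    import java.sql.DriverManager

    object IdempotentInsertSketch {
      def main(args: Array[String]): Unit = {
        // Assumes a reachable Postgres and its JDBC driver on the classpath.
        val conn = DriverManager.getConnection(
          "jdbc:postgresql://localhost:5432/canton", "user", "password")
        try {
          val stmt = conn.prepareStatement(
            // Idempotent insert: a second run inserts nothing instead of
            // failing with a unique-constraint violation.
            "insert into my_table (pk_col, name) values (?, ?) on conflict do nothing")
          stmt.setString(1, "id-1")
          stmt.setString(2, "alice")
          val rowsInserted = stmt.executeUpdate() // 0 if the row already existed
          println(s"inserted $rowsInserted row(s)")
        } finally conn.close()
      }
    }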
diff --git a/sdk/canton/BUILD.bazel b/sdk/canton/BUILD.bazel index a59794e76ede..a3f1f36dbf1c 100644 --- a/sdk/canton/BUILD.bazel +++ b/sdk/canton/BUILD.bazel @@ -226,7 +226,6 @@ scala_macro_library( ], unused_dependency_checker_mode = "error", deps = [ - "@maven//:com_typesafe_slick_slick_2_13", "@maven//:org_typelevel_cats_core_2_13", "@maven//:org_typelevel_cats_kernel_2_13", "@maven//:org_wartremover_wartremover_2_13_11", @@ -1003,12 +1002,10 @@ scala_library( "@maven//:com_google_protobuf_protobuf_java", "@maven//:com_h2database_h2", "@maven//:com_lihaoyi_pprint_2_13", - "@maven//:com_oracle_database_jdbc_ojdbc8", "@maven//:com_thesamet_scalapb_scalapb_runtime_2_13", "@maven//:com_typesafe_config", "@maven//:com_typesafe_scala_logging_scala_logging_2_13", "@maven//:com_typesafe_slick_slick_2_13", - "@maven//:com_zaxxer_HikariCP", "@maven//:dev_optics_monocle_core_2_13", "@maven//:dev_optics_monocle_macro_2_13", "@maven//:io_circe_circe_core_2_13", diff --git a/sdk/canton/community/app-base/src/main/scala/com/digitalasset/canton/config/CantonConfig.scala b/sdk/canton/community/app-base/src/main/scala/com/digitalasset/canton/config/CantonConfig.scala index 87bf901435cb..3983c8e0c830 100644 --- a/sdk/canton/community/app-base/src/main/scala/com/digitalasset/canton/config/CantonConfig.scala +++ b/sdk/canton/community/app-base/src/main/scala/com/digitalasset/canton/config/CantonConfig.scala @@ -561,7 +561,7 @@ object CantonConfig { lazy implicit val lengthLimitedStringReader: ConfigReader[LengthLimitedString] = ConfigReader.fromString[LengthLimitedString] { str => Either.cond( - str.nonEmpty && str.length <= defaultMaxLength, + str.nonEmpty && str.length <= defaultMaxLength.unwrap, new LengthLimitedStringVar(str, defaultMaxLength)(), InvalidLengthString(str), ) diff --git a/sdk/canton/community/app-base/src/main/scala/com/digitalasset/canton/config/CommunityConfigValidations.scala b/sdk/canton/community/app-base/src/main/scala/com/digitalasset/canton/config/CommunityConfigValidations.scala index 97b897805021..ebcc9580b2b0 100644 --- a/sdk/canton/community/app-base/src/main/scala/com/digitalasset/canton/config/CommunityConfigValidations.scala +++ b/sdk/canton/community/app-base/src/main/scala/com/digitalasset/canton/config/CommunityConfigValidations.scala @@ -98,11 +98,10 @@ object CommunityConfigValidations server <- getPropStr("serverName") port <- getPropInt("portNumber") dbName <- getPropStr("databaseName") - url = dbConfig match { - case _: H2DbConfig => DbConfig.h2Url(dbName) - case _: PostgresDbConfig => DbConfig.postgresUrl(server, port, dbName) - // Assume Oracle - case _ => DbConfig.oracleUrl(server, port, dbName) + url <- dbConfig match { + case _: H2DbConfig => Some(DbConfig.h2Url(dbName)) + case _: PostgresDbConfig => Some(DbConfig.postgresUrl(server, port, dbName)) + case other => throw new IllegalArgumentException(s"Unsupported DbConfig: $other") } } yield url diff --git a/sdk/canton/community/app/src/pack/config/participant.conf b/sdk/canton/community/app/src/pack/config/participant.conf index fff3266e4aee..dbdda38b5a41 100644 --- a/sdk/canton/community/app/src/pack/config/participant.conf +++ b/sdk/canton/community/app/src/pack/config/participant.conf @@ -38,7 +38,7 @@ canton.participants.participant { num-writes = 6 } max-connections = 18 - // Optional define the ledger-api jdbc URL directly (used for Oracle backends) + // Optionally define the ledger-api jdbc URL directly ledger-api-jdbc-url = ${?_shared.storage.ledger-api-jdbc-url} } diff --git
a/sdk/canton/community/base/src/main/resources/rewrite-appender.xml b/sdk/canton/community/base/src/main/resources/rewrite-appender.xml index 92744e6e7f68..ac00780ba08b 100644 --- a/sdk/canton/community/base/src/main/resources/rewrite-appender.xml +++ b/sdk/canton/community/base/src/main/resources/rewrite-appender.xml @@ -122,14 +122,6 @@ true - - - com.zaxxer.hikari.pool.HikariPool - INFO - Exception during pool initialization - true - - @@ -167,27 +159,6 @@ true - - - com.digitalasset.canton.platform.indexer.RecoveringIndexer - Error while running indexer, restart scheduled - INFO - true - - - com.digitalasset.canton.platform.store.appendonlydao.DbDispatcher - Processing the request failed due to a non-transient database error: ORA-00001: unique constraint - ORA-00001: unique constraint - INFO - true - com.digitalasset.canton.platform.store.appendonlydao.DbDispatcher Processing the request failed due to a non-transient database error: Unique index or primary key violation diff --git a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/concurrent/Threading.scala b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/concurrent/Threading.scala index 80386a97de63..562c1f38838b 100644 --- a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/concurrent/Threading.scala +++ b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/concurrent/Threading.scala @@ -13,7 +13,7 @@ import com.typesafe.scalalogging.Logger import java.util.concurrent.* import java.util.concurrent.atomic.AtomicInteger import java.util.function.Predicate -import scala.concurrent.{ExecutionContext, blocking} +import scala.concurrent.{ExecutionContextExecutor, blocking} /** Factories and utilities for dealing with threading. */ @@ -225,7 +225,7 @@ object Threading { } } - def directExecutionContext(logger: Logger): ExecutionContext = DirectExecutionContext( + def directExecutionContext(logger: Logger): ExecutionContextExecutor = DirectExecutionContext( logger ) diff --git a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/config/CantonRequireTypes.scala b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/config/CantonRequireTypes.scala index 6a4c906a3c63..aa33d0fadf3b 100644 --- a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/config/CantonRequireTypes.scala +++ b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/config/CantonRequireTypes.scala @@ -11,7 +11,7 @@ import com.digitalasset.canton.ProtoDeserializationError.InvariantViolation as P import com.digitalasset.canton.checked import com.digitalasset.canton.config.CantonRequireTypes.InstanceName.InvalidInstanceName import com.digitalasset.canton.config.CantonRequireTypes.LengthLimitedString.InvalidLengthString -import com.digitalasset.canton.config.RequireTypes.InvariantViolation +import com.digitalasset.canton.config.RequireTypes.{InvariantViolation, NonNegativeInt, PositiveInt} import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult import com.digitalasset.canton.store.db.DbDeserializationException @@ -71,7 +71,7 @@ object CantonRequireTypes { * * Must not be confused with storage space, which can be up to 4*[[maxLength]] in a UTF8 encoding */ - def maxLength: Int + def maxLength: PositiveInt // optionally give a name for the type of String you are attempting to validate for nicer error messages protected def name: Option[String] = None @@ -91,7 +91,7 @@ object 
CantonRequireTypes { override def hashCode(): Int = str.hashCode() require( - str.length <= maxLength, + str.length <= maxLength.unwrap, s"The given ${name.getOrElse("string")} has a maximum length of $maxLength but a ${name .getOrElse("string")} of length ${str.length} ('$str') was given", ) @@ -108,43 +108,27 @@ object CantonRequireTypes { * Classes implementing this trait expose `create` and `tryCreate` methods to safely (and unsafely) construct * such a String. * - * The canonical use case for [[LengthLimitedString]]s is ensuring that we don't write too long strings into the database: - * Oracle has a length-limit of 1000 Unicode characters for the ordinary String type `NVARCHAR2` and we are trying to avoid - * the use of CLOB (as it has pitfalls regarding implicits). + * The canonical use case for [[LengthLimitedString]]s is ensuring that we don't write overly long strings into the database. * This validation generally occurs on the server side and not on the client side. Concretely, this means that the * Admin API and Ledger API gRPC services are the point where we validate that the received Protobuf Strings are not too long * (and convert them into [[LengthLimitedString]]s). On the client side, e.g. at the console, we generally take normal String types. * The console command `set_display_name` and service [[com.digitalasset.canton.participant.admin.grpc.GrpcPartyNameManagementService]] * validating `request.displayName` illustrate this. * - * As a rule of thumb: whenever you want to create a column that uses a NVARCHAR2 in Oracle, the value you write to - * it should use a LengthLimitedString. - * - * Some more background on the Oracle issues: - * NVARCHAR and NVARCHAR2 have both by default a 4000 byte limit, but unicode uses 4-bytes per character (and nvarchar2 uses unicode) - * Therefore, NVARCHAR has a limit of 4000 and NVARCHAR2 has a limit of 1000 characters - * If need be, we can extend this to 32 KB by setting the Oracle database string size to 'extended mode' (ALTER SYSTEM SET MAX_STRING_SIZE=EXTENDED) - * * For longer strings, directly inherit from [[AbstractLengthLimitedString]].
*/ sealed trait LengthLimitedString extends AbstractLengthLimitedString { - require( - maxLength > 0 && maxLength <= LengthLimitedString.maxOracleStringLength, - s"MaxLength needs to be positive and smaller equal than ${LengthLimitedString.maxOracleStringLength} but was $maxLength", - ) - def tryConcatenate(that: LengthLimitedString): LengthLimitedStringVar = new LengthLimitedStringVar(this.unwrap + that.unwrap, this.maxLength + that.maxLength)() def tryConcatenate(that: String): LengthLimitedStringVar = - new LengthLimitedStringVar(this.unwrap + that, this.maxLength + that.length)() + new LengthLimitedStringVar( + this.unwrap + that, + this.maxLength + NonNegativeInt.tryCreate(that.length), + )() } object LengthLimitedString { - // Max length of unicode strings we can save as String types in Oracle columns - this can be increased to - // 1000 for NVARCHAR2 but we set it to 300 for now since we don't need a higher limit and rather want to stay on the - // conservative side - val maxOracleStringLength = 300 // In general, if you would create a case class that would simply wrap a `LengthLimitedString`, use a type alias instead // Some very frequently-used classes (like `Identifier` or `DomainAlias`) are however given their 'own' case class // despite essentially being a wrapper around `LengthLimitedString255` (because the documentation UX is nicer this way, @@ -153,31 +137,34 @@ object CantonRequireTypes { type TopologyRequestId = String255 type DarName = String255 - def errorMsg(tooLongStr: String, maxLength: Int, name: Option[String] = None): String = + def errorMsg(tooLongStr: String, maxLength: PositiveInt, name: Option[String] = None): String = s"The given ${name.getOrElse("string")} has a maximum length of $maxLength but a ${name - .getOrElse("string")} of length ${tooLongStr.length} ('${tooLongStr.limit(maxLength + 50)}.') was given" + .getOrElse("string")} of length ${tooLongStr.length} ('${tooLongStr.limit(maxLength.unwrap + 50)}.') was given" - val defaultMaxLength = 255 + val defaultMaxLength: PositiveInt = PositiveInt.tryCreate(255) - def tryCreate(str: String, maxLength: Int, name: Option[String] = None): LengthLimitedString = + def tryCreate( + str: String, + maxLength: PositiveInt, + name: Option[String] = None, + ): LengthLimitedString = new LengthLimitedStringVar(str, maxLength)(name) def getUuid: String36 = String36.tryCreate(UUID.randomUUID().toString) def create( str: String, - maxLength: Int, + maxLength: PositiveInt, name: Option[String] = None, ): Either[String, LengthLimitedString] = Either.cond( - str.length <= maxLength, + str.length <= maxLength.unwrap, new LengthLimitedStringVar(str, maxLength)(name), errorMsg(str, maxLength, name), ) // Should be used rarely - most of the time SetParameter[String255] etc. 
// (defined through LengthLimitedStringCompanion) should be used - @SuppressWarnings(Array("com.digitalasset.canton.SlickString")) implicit val setParameterLengthLimitedString: SetParameter[LengthLimitedString] = (v, pp) => pp.setString(v.unwrap) // Commented out so this function never accidentally throws @@ -201,12 +188,12 @@ object CantonRequireTypes { final case class String1(str: String)(override val name: Option[String] = None) extends LengthLimitedString { - override def maxLength: Int = String1.maxLength + override def maxLength: PositiveInt = String1.maxLength } object String1 extends LengthLimitedStringCompanion[String1] { def fromChar(c: Char): String1 = checked(new String1(c.toString)(None)) - override def maxLength: Int = 1 + override def maxLength: PositiveInt = PositiveInt.one override protected def factoryMethod(str: String)(name: Option[String]): String1 = new String1(str)(name) @@ -215,11 +202,11 @@ object CantonRequireTypes { /** Limit used for enum names. */ final case class String3(str: String)(override val name: Option[String] = None) extends LengthLimitedString { - override def maxLength: Int = String3.maxLength + override def maxLength: PositiveInt = String3.maxLength } object String3 extends LengthLimitedStringCompanion[String3] { - override def maxLength: Int = 3 + override def maxLength: PositiveInt = PositiveInt.tryCreate(3) override protected def factoryMethod(str: String)(name: Option[String]): String3 = new String3(str)(name) @@ -228,13 +215,13 @@ object CantonRequireTypes { /** Limit used by a UUID. */ final case class String36(str: String)(override val name: Option[String] = None) extends LengthLimitedString { - override def maxLength: Int = String36.maxLength + override def maxLength: PositiveInt = String36.maxLength def asString255: String255 = String255.tryCreate(str, name) } object String36 extends LengthLimitedStringCompanion[String36] { - override def maxLength: Int = 36 + override def maxLength: PositiveInt = PositiveInt.tryCreate(36) override protected def factoryMethod(str: String)(name: Option[String]): String36 = new String36(str)(name) @@ -246,11 +233,11 @@ object CantonRequireTypes { */ final case class String68(str: String)(override val name: Option[String] = None) extends LengthLimitedString { - override def maxLength: Int = String68.maxLength + override def maxLength: PositiveInt = String68.maxLength } object String68 extends LengthLimitedStringCompanion[String68] { - override def maxLength: Int = 68 + override def maxLength: PositiveInt = PositiveInt.tryCreate(68) override def factoryMethod(str: String)(name: Option[String]): String68 = new String68(str)(name) @@ -259,11 +246,11 @@ object CantonRequireTypes { /** Limit used by a [[com.digitalasset.canton.sequencing.protocol.MessageId]]. 
*/ final case class String73(str: String)(override val name: Option[String] = None) extends LengthLimitedString { - override def maxLength: Int = String73.maxLength + override def maxLength: PositiveInt = String73.maxLength } object String73 extends LengthLimitedStringCompanion[String73] { - override def maxLength: Int = 73 + override def maxLength: PositiveInt = PositiveInt.tryCreate(73) override protected def factoryMethod(str: String)(name: Option[String]): String73 = new String73(str)(name) @@ -271,10 +258,10 @@ object CantonRequireTypes { final case class String100(str: String)(override val name: Option[String] = None) extends LengthLimitedString { - override def maxLength: Int = String100.maxLength + override def maxLength: PositiveInt = String100.maxLength } object String100 extends LengthLimitedStringCompanion[String100] { - override def maxLength: Int = 100 + override def maxLength: PositiveInt = PositiveInt.tryCreate(100) override protected def factoryMethod(str: String)(name: Option[String]): String100 = new String100(str)(name) } @@ -285,11 +272,11 @@ object CantonRequireTypes { */ final case class String185(str: String)(override protected val name: Option[String] = None) extends LengthLimitedString { - override def maxLength: Int = String185.maxLength + override def maxLength: PositiveInt = String185.maxLength } object String185 extends LengthLimitedStringCompanion[String185] { - override def maxLength: Int = 185 + override def maxLength: PositiveInt = PositiveInt.tryCreate(185) override def factoryMethod(str: String)(name: Option[String]): String185 = new String185(str)(name) @@ -302,14 +289,14 @@ object CantonRequireTypes { */ final case class String255(str: String)(override val name: Option[String] = None) extends LengthLimitedString { - override def maxLength: Int = String255.maxLength + override def maxLength: PositiveInt = String255.maxLength def asString300: String300 = new String300(str)(name) def asString1GB: String256M = new String256M(str)(name) } object String255 extends LengthLimitedStringCompanion[String255] { - override def maxLength = 255 + override def maxLength = PositiveInt.tryCreate(255) override def factoryMethod(str: String)(name: Option[String]): String255 = new String255(str)(name) @@ -324,11 +311,11 @@ object CantonRequireTypes { */ final case class String300(str: String)(override val name: Option[String] = None) extends LengthLimitedString { - override def maxLength: Int = String300.maxLength + override def maxLength: PositiveInt = String300.maxLength } object String300 extends LengthLimitedStringCompanion[String300] { - override def maxLength = 300 + override def maxLength = PositiveInt.tryCreate(300) override def factoryMethod(str: String)(name: Option[String]): String300 = new String300(str)(name) @@ -342,43 +329,38 @@ object CantonRequireTypes { * - Two separating dots * Each [[com.digitalasset.daml.lf.data.Ref.DottedName]] can have 1000 chars ([[com.digitalasset.daml.lf.data.Ref.DottedName.maxLength]]). * So a [[com.digitalasset.canton.protocol.LfTemplateId]] serializes to 1000 + 1000 + 64 + 2 = 2066 chars. - * - * 2066 is beyond the string size for Oracle's `NVARCHAR2` column type unless `max_string_size` is set to `extended`. - * Such strings may therefore be written into `VARCHAR2` columns using an encoding that does not exceed the 4000 bytes limit. - * UTF8 is such an encoding for ASCII-only strings, but we do not yet test that `str` really contains only ASCII characters. 
*/ final case class String2066(str: String)(override val name: Option[String] = None) extends AbstractLengthLimitedString { - override def maxLength: Int = String2066.maxLength + override def maxLength: PositiveInt = String2066.maxLength } object String2066 extends LengthLimitedStringCompanion[String2066] { - override def maxLength: Int = 4000 + override def maxLength: PositiveInt = PositiveInt.tryCreate(2066) override protected def factoryMethod(str: String)(name: Option[String]): String2066 = new String2066(str)(name) } - /** Length limitation of a `TEXT` or unbounded `VARCHAR` field in postgres or `CLOB` in Oracle. + /** Length limitation of a `TEXT` or unbounded `VARCHAR` field in postgres. * - Postgres `TEXT` or `VARCHAR` support up to 1GB storage. That is at least `2 ^ 28` characters * in UTF8 encoding as each character needs at most 4 bytes. - * - Oracle `CLOB` supports up to 4GB storage, i.e., at least `2 ^ 30` UTF8 characters * - * `TEXT`/`VARCHAR`/`CLOB` are only used for the following values (none are indices): + * `TEXT`/`VARCHAR` are only used for the following values (none are indices): * - daml_packages.source_description * - topology_transactions.ignore_reason */ final case class String256M(str: String)(override val name: Option[String] = None) extends AbstractLengthLimitedString { - override def maxLength: Int = String256M.maxLength + override def maxLength: PositiveInt = String256M.maxLength } object String256M extends LengthLimitedStringCompanion[String256M] { - override def maxLength: Int = 0x10000000 + override def maxLength: PositiveInt = PositiveInt.tryCreate(0x10000000) override protected def factoryMethod(str: String)(name: Option[String]): String256M = new String256M(str)(name) } - final case class LengthLimitedStringVar(override val str: String, maxLength: Int)( + final case class LengthLimitedStringVar(override val str: String, maxLength: PositiveInt)( override val name: Option[String] = None ) extends LengthLimitedString object LengthLimitedStringVar { @@ -392,7 +374,7 @@ object CantonRequireTypes { val empty: A = checked(factoryMethod("")(None)) /** The maximum string length. Should not be overwritten with `val` to avoid initialization issues. */ - def maxLength: Int + def maxLength: PositiveInt /** Factory method for creating a string. 
* @throws java.lang.IllegalArgumentException if `str` is longer than [[maxLength]] @@ -401,7 +383,7 @@ object CantonRequireTypes { def create(str: String, name: Option[String] = None): Either[String, A] = Either.cond( - str.length <= maxLength, + str.length <= maxLength.unwrap, factoryMethod(str)(name), LengthLimitedString.errorMsg(str, maxLength, name), ) @@ -421,13 +403,11 @@ object CantonRequireTypes { implicit val encodeLengthLimitedString: Encoder[A] = Encoder.encodeString.contramap[A](_.unwrap) - @SuppressWarnings(Array("com.digitalasset.canton.SlickString")) implicit val setParameterLengthLimitedString: SetParameter[A] = (v, pp) => pp.setString(v.unwrap) implicit val getResultLengthLimitedString: GetResult[A] = GetResult(r => tryCreate(r.nextString())) - @SuppressWarnings(Array("com.digitalasset.canton.SlickString")) implicit val setParameterOptLengthLimitedString: SetParameter[Option[A]] = (v, pp) => pp.setStringOption(v.map(_.unwrap)) implicit val getResultOptLengthLimitedString: GetResult[Option[A]] = @@ -436,7 +416,7 @@ object CantonRequireTypes { implicit val lengthLimitedStringReader: ConfigReader[A] = ConfigReader.fromString[A] { str => Either.cond( - str.nonEmpty && str.length <= maxLength, + str.nonEmpty && str.length <= maxLength.unwrap, factoryMethod(str)(None), InvalidLengthString(str), ) @@ -500,7 +480,6 @@ object CantonRequireTypes { Encoder.encodeString.contramap[Wrapper](_.unwrap) // Instances for slick (db) queries - @SuppressWarnings(Array("com.digitalasset.canton.SlickString")) implicit val setParameterWrapper: SetParameter[Wrapper] = (v, pp) => pp.setString(v.toProtoPrimitive) implicit val getResultWrapper: GetResult[Wrapper] = GetResult(r => @@ -509,7 +488,6 @@ object CantonRequireTypes { ) ) - @SuppressWarnings(Array("com.digitalasset.canton.SlickString")) implicit val setParameterOptionWrapper: SetParameter[Option[Wrapper]] = (v, pp) => pp.setStringOption(v.map(_.toProtoPrimitive)) implicit val getResultOptionWrapper: GetResult[Option[Wrapper]] = GetResult { r => diff --git a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/config/StorageConfig.scala b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/config/StorageConfig.scala index edcd1bb37a91..2f1c12758dd2 100644 --- a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/config/StorageConfig.scala +++ b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/config/StorageConfig.scala @@ -21,7 +21,7 @@ import scala.jdk.CollectionConverters.* * @param connectionAllocation Overrides for the sizes of the connection pools managed by a canton node. * @param failFastOnStartup If true, the node will fail-fast when the database cannot be connected to * If false, the node will wait indefinitely for the database to come up - * @param migrationsPaths Where should database migrations be read from. Enables specialized DDL for different database servers (e.g. Postgres, Oracle). + * @param migrationsPaths Where should database migrations be read from. Enables specialized DDL for different database servers (e.g. Postgres). * @param connectionTimeout How long to wait for acquiring a database connection * @param warnOnSlowQuery Optional time when we start logging a query as slow. * @param warnOnSlowQueryInterval How often to repeat the logging statement for slow queries. 
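The CantonRequireTypes hunks above thread `PositiveInt` through every `maxLength`, so the bound is positive by construction and callers compare against `maxLength.unwrap`. A self-contained sketch of the resulting shape, using simplified stand-ins rather than canton's actual definitions:

    // Hypothetical, reduced versions of PositiveInt and a length-limited string.
    final case class PositiveInt private (unwrap: Int) {
      def +(other: PositiveInt): PositiveInt = new PositiveInt(unwrap + other.unwrap)
    }
    object PositiveInt {
      val one: PositiveInt = new PositiveInt(1)
      def tryCreate(n: Int): PositiveInt = {
        require(n > 0, s"$n is not a positive integer")
        new PositiveInt(n)
      }
    }

    final case class LimitedString private (str: String, maxLength: PositiveInt)
    object LimitedString {
      // Mirrors the create/tryCreate split in the diff: create returns an
      // Either, tryCreate throws on overlong input.
      def create(str: String, maxLength: PositiveInt): Either[String, LimitedString] =
        Either.cond(
          str.length <= maxLength.unwrap,
          new LimitedString(str, maxLength),
          s"string of length ${str.length} exceeds maximum ${maxLength.unwrap}",
        )
      def tryCreate(str: String, maxLength: PositiveInt): LimitedString =
        create(str, maxLength).fold(err => throw new IllegalArgumentException(err), identity)
    }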
@@ -65,7 +65,7 @@ final case class DbParametersConfig( /** Various settings to control batching behaviour related to db queries * - * @param maxItemsInSqlClause maximum number of items to place in sql "in clauses" + * @param maxItemsInBatch maximum number of items in a batch * @param maxPruningBatchSize maximum number of events to prune from a participant at a time, used to break up canton participant-internal batches * @param ledgerApiPruningBatchSize Number of events to prune from the ledger api server index-database at a time during automatic background pruning. * Canton-internal store pruning happens at the smaller batch size of "maxPruningBatchSize" to minimize memory usage @@ -75,7 +75,7 @@ final case class DbParametersConfig( * @param aggregator batching configuration for DB queries */ final case class BatchingConfig( - maxItemsInSqlClause: PositiveNumeric[Int] = BatchingConfig.defaultMaxItemsInSqlClause, + maxItemsInBatch: PositiveNumeric[Int] = BatchingConfig.defaultMaxItemsBatch, maxPruningBatchSize: PositiveNumeric[Int] = BatchingConfig.defaultMaxPruningBatchSize, ledgerApiPruningBatchSize: PositiveNumeric[Int] = BatchingConfig.defaultLedgerApiPruningBatchSize, @@ -84,7 +84,7 @@ final case class BatchingConfig( ) object BatchingConfig { - private val defaultMaxItemsInSqlClause: PositiveInt = PositiveNumeric.tryCreate(100) + private val defaultMaxItemsBatch: PositiveInt = PositiveNumeric.tryCreate(100) private val defaultBatchingParallelism: PositiveInt = PositiveNumeric.tryCreate(8) private val defaultMaxPruningBatchSize: PositiveInt = PositiveNumeric.tryCreate(1000) private val defaultLedgerApiPruningBatchSize: PositiveInt = PositiveNumeric.tryCreate(50000) @@ -309,13 +309,10 @@ object DbConfig extends NoTracing { private val devDir = "dev" private val basePostgresMigrationsPath: String = "classpath:db/migration/canton/postgres/" private val baseH2MigrationsPath: String = "classpath:db/migration/canton/h2/" - private val baseOracleMigrationPath: String = "classpath:db/migration/canton/oracle/" val postgresMigrationsPathStable: String = basePostgresMigrationsPath + stableDir val h2MigrationsPathStable: String = baseH2MigrationsPath + stableDir - val oracleMigrationPathStable: String = baseOracleMigrationPath + stableDir val postgresMigrationsPathDev: String = basePostgresMigrationsPath + devDir val h2MigrationsPathDev: String = baseH2MigrationsPath + devDir - val oracleMigrationPathDev: String = baseOracleMigrationPath + devDir def postgresUrl(host: String, port: Int, dbName: String): String = s"jdbc:postgresql://$host:$port/$dbName" @@ -323,18 +320,6 @@ object DbConfig extends NoTracing { def h2Url(dbName: String): String = s"jdbc:h2:mem:$dbName;MODE=PostgreSQL;LOCK_TIMEOUT=10000;DB_CLOSE_DELAY=-1" - def oracleUrl(host: String, port: Int, dbName: String): String = - s"jdbc:oracle:thin:@$host:$port/$dbName" - - def oracleUrl( - host: String, - port: Int, - dbName: String, - username: String, - password: String, - ): String = - s"jdbc:oracle:thin:$username/$password@$host:$port/$dbName" - def toConfig(map: Map[String, Any]): Config = ConfigFactory.parseMap(map.asJava) /** Apply default values to the given db config @@ -390,7 +375,6 @@ object DbConfig extends NoTracing { enforcePgMode(enforceSingleConnection(writeH2UrlIfNotSet(h2.config))) ).withFallback(h2.defaultConfig) case postgres: PostgresDbConfig => postgres.config - // TODO(i11009): this other is a workaround for supporting oracle without referencing the oracle config case other => other.config 
}).withFallback(commonDefaults) } diff --git a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/crypto/Nonce.scala b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/crypto/Nonce.scala index 219504de25bf..6bae4bbaeb58 100644 --- a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/crypto/Nonce.scala +++ b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/crypto/Nonce.scala @@ -23,8 +23,6 @@ final case class Nonce private (private val bytes: ByteString) extends HasCrypto object Nonce { /** As of now, the database schemas can only handle nonces up to a length of 150 bytes. Thus the length of a [[Nonce]] should never exceed that. - * If we ever want to create a [[Nonce]] larger than that, we can increase it up to 500 bytes after which we are limited by Oracle length limits. - * See the documentation at [[com.digitalasset.canton.config.CantonRequireTypes.LengthLimitedString]] for more details. */ val length: Int = 20 @@ -33,7 +31,7 @@ object Nonce { implicit val getNonceResult: GetResult[Nonce] = GetResult { r => val hexString = r.nextString() - if (hexString.length > String300.maxLength) + if (hexString.length > String300.maxLength.unwrap) throw new DbDeserializationException( s"Base16-encoded authentication token of length ${hexString.length} exceeds allowed limit of ${String300.maxLength}." ) diff --git a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/crypto/Random.scala b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/crypto/Random.scala index 493be6f4e16a..99a82fe62129 100644 --- a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/crypto/Random.scala +++ b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/crypto/Random.scala @@ -64,4 +64,6 @@ object PseudoRandom { def randomUnsigned(maxValue: Int): Int = rand.between(0, maxValue) + def randomLong(n: Long): Long = rand.nextLong(n) + } diff --git a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/crypto/store/db/DbCryptoPrivateStore.scala b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/crypto/store/db/DbCryptoPrivateStore.scala index 2fd2f4b5843a..d948d8e76d36 100644 --- a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/crypto/store/db/DbCryptoPrivateStore.scala +++ b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/crypto/store/db/DbCryptoPrivateStore.scala @@ -86,17 +86,9 @@ class DbCryptoPrivateStore( private def insertKeyUpdate( key: StoredPrivateKey ): DbAction.WriteOnly[Int] = - storage.profile match { - case _: DbStorage.Profile.Oracle => - sqlu"""insert - /*+ IGNORE_ROW_ON_DUPKEY_INDEX ( CRYPTO_PRIVATE_KEYS ( key_id ) ) */ - into common_crypto_private_keys (key_id, purpose, data, name, wrapper_key_id) - values (${key.id}, ${key.purpose}, ${key.data}, ${key.name}, ${key.wrapperKeyId})""" - case _ => - sqlu"""insert into common_crypto_private_keys (key_id, purpose, data, name, wrapper_key_id) + sqlu"""insert into common_crypto_private_keys (key_id, purpose, data, name, wrapper_key_id) values (${key.id}, ${key.purpose}, ${key.data}, ${key.name}, ${key.wrapperKeyId}) on conflict do nothing""" - } private def insertKey(key: StoredPrivateKey)(implicit traceContext: TraceContext diff --git a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/crypto/store/db/DbCryptoPublicStore.scala b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/crypto/store/db/DbCryptoPublicStore.scala index b2eed8124e6a..7c7591e27b96 100644 --- 
a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/crypto/store/db/DbCryptoPublicStore.scala +++ b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/crypto/store/db/DbCryptoPublicStore.scala @@ -64,9 +64,9 @@ class DbCryptoPublicStore( ): FutureUnlessShutdown[Unit] = storage.queryAndUpdateUnlessShutdown( IdempotentInsert.insertVerifyingConflicts( - storage, - "common_crypto_public_keys ( key_id )", - sql"common_crypto_public_keys (key_id, purpose, data, name) values (${key.id}, ${key.purpose}, $key, $name)", + sql"""insert into common_crypto_public_keys (key_id, purpose, data, name) + values (${key.id}, ${key.purpose}, $key, $name) + on conflict do nothing""".asUpdate, queryKey(key.id, key.purpose), )( existingKey => existingKey.publicKey == key && existingKey.name == name, diff --git a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/protocol/Tags.scala b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/protocol/Tags.scala index 3f1c950d091f..b36d355bf45e 100644 --- a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/protocol/Tags.scala +++ b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/protocol/Tags.scala @@ -202,13 +202,6 @@ final case class ReassignmentId(sourceDomain: SourceDomainId, unassignmentTs: Ca } object ReassignmentId { - implicit val reassignmentIdGetResult: GetResult[ReassignmentId] = GetResult { r => - ReassignmentId( - SourceDomainId(GetResult[DomainId].apply(r)), - GetResult[CantonTimestamp].apply(r), - ) - } - def fromProtoV30(reassignmentIdP: v30.ReassignmentId): ParsingResult[ReassignmentId] = reassignmentIdP match { case v30.ReassignmentId(originDomainP, requestTimestampP) => diff --git a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/protocol/messages/AcsCommitmentApiHelpers.scala b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/protocol/messages/AcsCommitmentApiHelpers.scala index 8dd586b6f8dd..29ae81a52a10 100644 --- a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/protocol/messages/AcsCommitmentApiHelpers.scala +++ b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/protocol/messages/AcsCommitmentApiHelpers.scala @@ -14,13 +14,13 @@ import com.digitalasset.canton.topology.{DomainId, ParticipantId} import slick.jdbc.{GetResult, SetParameter} final case class DomainSearchCommitmentPeriod( - domain: IndexedDomain, + indexedDomain: IndexedDomain, fromExclusive: CantonTimestamp, toInclusive: CantonTimestamp, ) extends PrettyPrinting { override protected def pretty: Pretty[DomainSearchCommitmentPeriod] = prettyOfClass( - param("domainId", _.domain.domainId), + param("domainId", _.indexedDomain.domainId), param("fromExclusive", _.fromExclusive), param("toInclusive", _.toInclusive), ) diff --git a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/pruning/PruningPhase.scala b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/pruning/PruningPhase.scala index 04e31a4253be..87e753f7f862 100644 --- a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/pruning/PruningPhase.scala +++ b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/pruning/PruningPhase.scala @@ -3,7 +3,6 @@ package com.digitalasset.canton.pruning -import com.digitalasset.canton.config.CantonRequireTypes.String100 import com.digitalasset.canton.data.CantonTimestamp import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} import 
com.digitalasset.canton.store.db.DbSerializationException @@ -11,10 +10,6 @@ import slick.jdbc.{GetResult, SetParameter} sealed trait PruningPhase extends Product with Serializable with PrettyPrinting { def kind: String - // lazy val so that `kind` is initialized first in the subclasses - final lazy val toDbPrimitive: String100 = - // The Oracle DB schemas set a limit of 100 characters, Postgres and H2 map it to an enum - String100.tryCreate(kind) def index: Int override protected def pretty: Pretty[PruningPhase] = prettyOfParam(_.kind.unquoted) @@ -41,8 +36,7 @@ object PruningPhase { implicit val getResultPruningPhase: GetResult[PruningPhase] = GetResult(r => PruningPhase.tryFromDbPrimitive(r.nextString())) - implicit val setParameterPruningPhase: SetParameter[PruningPhase] = (d, pp) => - pp >> d.toDbPrimitive + implicit val setParameterPruningPhase: SetParameter[PruningPhase] = (d, pp) => pp >> d.kind } final case class PruningStatus( diff --git a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/resource/DbExceptionRetryPolicy.scala b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/resource/DbExceptionRetryPolicy.scala index 257682c52d22..c2c9c73e94b8 100644 --- a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/resource/DbExceptionRetryPolicy.scala +++ b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/resource/DbExceptionRetryPolicy.scala @@ -29,7 +29,7 @@ object DbExceptionRetryPolicy extends ExceptionRetryPolicy { * Main use case is a transient unique constraint violation due to racy merge statements. * Should go away after a very limited amount of retries. * - * Value determined empirically in UpsertTestOracle. + * Value determined empirically in the now removed UpsertTestOracle. * For single row inserts, 1 is sufficient. * For batched inserts, 3 was more than sufficient in the test. */ @@ -87,9 +87,8 @@ object DbExceptionRetryPolicy extends ExceptionRetryPolicy { } case _: SQLIntegrityConstraintViolationException => - // Both H2 and Oracle may fail with spurious constraint violations, due to racy implementation of the MERGE statements. + // H2 may fail with spurious constraint violations, due to racy implementation of the MERGE statements. // In H2, this may also occur because it does not properly implement the serializable isolation level. - // See UpsertTestOracle // See https://github.com/h2database/h2database/issues/2167 TransientErrorKind(spuriousTransientErrorMaxRetries) @@ -100,51 +99,9 @@ object DbExceptionRetryPolicy extends ExceptionRetryPolicy { // Handle SQLException and all classes that derive from it (e.g. java.sql.BatchUpdateException) // Note that if the exception is not known but has a cause, we'll base the retry on the cause case ex: SQLException => - val code = ex.getErrorCode - if (ex.getErrorCode == 1) { - // Retry on ORA-00001: unique constraint violated exception - TransientErrorKind(spuriousTransientErrorMaxRetries) - } else if (ex.getMessage == "Connection is closed") { + if (ex.getMessage == "Connection is closed") { // May fail with a "Connection is closed" message if the db has gone down TransientErrorKind() - } else if (ex.getErrorCode == 4021) { - // ORA timeout occurred while waiting to lock object - TransientErrorKind() - } else if (ex.getErrorCode == 54) { - // ORA timeout occurred while waiting to lock object or because NOWAIT has been set - // e.g. 
as part of truncate table - TransientErrorKind() - } else if (ex.getErrorCode == 60) { - // Deadlock - // See DatabaseDeadlockTestOracle - TransientErrorKind() - } else if ( - ex.getErrorCode == 604 && - List("ORA-08176", "ORA-08177").exists(ex.getMessage.contains) - ) { - // Oracle failure in a batch operation - // For Oracle, the `cause` is not always set properly for exceptions. This is a problem for batched queries. - // So, look through an exception's `message` to see if it contains a retryable problem. - TransientErrorKind() - } else if (ex.getErrorCode == 8176) { - // consistent read failure; rollback data not available - // Cause: Encountered data changed by an operation that does not generate rollback data - // Action: In read/write transactions, retry the intended operation. - TransientErrorKind() - } else if (ex.getErrorCode == 8177) { - // failure to serialize transaction with serializable isolation level - TransientErrorKind() - } else if (ex.getErrorCode == 17410) { - // No more data to read from socket, can be caused by network problems - TransientErrorKind(spuriousTransientErrorMaxRetries) - } else if (code == 17002) { - // This has been observed as either IO Error: Connection reset by peer or IO Error: Broken pipe - // when straight-up killing an Oracle database server (`kill -9 `) - TransientErrorKind() - } else if (code == 1088 || code == 1089 || code == 1090 || code == 1092) { - // Often observed for orderly Oracle shutdowns - // https://docs.oracle.com/en/database/oracle/oracle-database/19/errmg/ORA-00910.html#GUID-D9EBDFFA-88C6-4185-BD2C-E1B959A97274 - TransientErrorKind() } else if (ex.getCause != null) { logger.info("Unable to retry on exception, checking cause.") determineExceptionErrorKind(ex.getCause, logger) diff --git a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/resource/DbVersionCheck.scala b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/resource/DbVersionCheck.scala index 0363b50fe561..34eda49ea484 100644 --- a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/resource/DbVersionCheck.scala +++ b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/resource/DbVersionCheck.scala @@ -74,101 +74,6 @@ object DbVersionCheck extends HasLoggerName { } yield () either.leftMap(DbMigrations.DatabaseVersionError) - case Profile.Oracle(jdbc) => - def checkOracleVersion(): Either[String, Unit] = { - - val expectedOracleVersion = 19 - val expectedOracleVersionPrefix = - " 19." 
// Leading whitespace is intentional, see the example bannerString - - // See https://docs.oracle.com/en/database/oracle/oracle-database/18/refrn/V-VERSION.html - val oracleVersionQuery = sql"select banner from v$$version".as[String].headOption - val stringO = timeouts.network.await(functionFullName)(db.run(oracleVersionQuery)) - stringO match { - case Some(bannerString) => - // An example `bannerString` is "Oracle Database 18c Express Edition Release 18.0.0.0.0 - Production" - if (bannerString.contains(expectedOracleVersionPrefix)) { - loggingContext.debug( - s"Check for oracle version $expectedOracleVersion passed: using $bannerString" - ) - Right(()) - } else { - Left(s"Expected Oracle version $expectedOracleVersion but got $bannerString") - } - case None => - Left(s"Database version check failed: could not read Oracle version") - } - } - - // Checks that the NLS parameter `param` is set to one of the `expected` strings - // - The DB setting must be set - // - The session setting may be empty - def checkNlsParameter( - param: String, - expected: Seq[String], - ): Either[String, Unit] = { - def prettyExpected: String = - if (expected.sizeIs == 1) expected(0) - else s"one of ${expected.mkString(", ")}" - - loggingContext.debug(s"Checking NLS parameter $param") - - @SuppressWarnings(Array("com.digitalasset.canton.SlickString")) - val queryDbSetting = - sql"SELECT value from nls_database_parameters where parameter=$param" - .as[String] - .headOption - val dbSettingO = - timeouts.network.await(functionFullName + s"-database-$param")(db.run(queryDbSetting)) - - @SuppressWarnings(Array("com.digitalasset.canton.SlickString")) - val querySessionSetting = - sql"SELECT value from nls_session_parameters where parameter=$param" - .as[String] - .headOption - val sessionSettingO = timeouts.network.await(functionFullName + s"-session-$param")( - db.run(querySessionSetting) - ) - - for { - // Require to find the setting for the database, but leave it optional for the session - dbSetting <- dbSettingO.toRight( - s"Oracle NLS database parameter $param is not set, but should be $prettyExpected" - ) - _ <- Either.cond( - expected.contains(dbSetting.toUpperCase), - loggingContext.debug(s"NLS database parameter $param is set to $dbSetting"), - s"Oracle NLS database parameter $param is $dbSetting, but should be $prettyExpected", - ) - - _ <- sessionSettingO.fold( - Either.right[String, Unit]( - loggingContext.debug(s"NLS session parameter $param is unset") - ) - ) { sessionSetting => - Either.cond( - expected.contains(sessionSetting.toUpperCase), - loggingContext.debug(s"NLS session parameter $param is set to $sessionSetting"), - s"Oracle NLS session parameter $param is $sessionSetting, but should be $prettyExpected", - ) - } - } yield () - } - - // Check the NLS settings of the database so that Oracle uses the expected encodings and collations for - // string fields in tables. 
- def checkOracleNlsSetting(): Either[String, Unit] = - for { - _ <- checkNlsParameter("NLS_CHARACTERSET", Seq("AL32UTF8")) - _ <- checkNlsParameter("NLS_NCHAR_CHARACTERSET", Seq("AL32UTF8", "AL16UTF16")) - _ <- checkNlsParameter("NLS_SORT", Seq("BINARY")) - _ <- checkNlsParameter("NLS_COMP", Seq("BINARY")) - } yield () - - for { - _ <- checkOracleVersion().leftMap(DbMigrations.DatabaseVersionError) - _ <- checkOracleNlsSetting().leftMap(DbMigrations.DatabaseConfigError) - } yield () case Profile.H2(_) => // We don't perform version checks for H2 Right(()) diff --git a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/resource/IdempotentInsert.scala b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/resource/IdempotentInsert.scala index 6f0a12bae9d0..d26521561889 100644 --- a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/resource/IdempotentInsert.scala +++ b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/resource/IdempotentInsert.scala @@ -4,9 +4,7 @@ package com.digitalasset.canton.resource import com.digitalasset.canton.logging.ErrorLoggingContext -import com.digitalasset.canton.resource.DbStorage.Profile.* import com.digitalasset.canton.util.ErrorUtil -import slick.jdbc.canton.SQLActionBuilder import scala.concurrent.ExecutionContext @@ -21,23 +19,10 @@ object IdempotentInsert { * If more rows are returned than `expectedRowsInserted` a [[java.lang.IllegalStateException]] is also thrown as * this indicates the insert query is doing something contrary to the developers expectations and is likely a bug. * - * To use safely use the `oracleIgnoreIndex` field it **must** be suitable for directly interpolating as a raw string - * into the query without causing any risk of SQL injection. - * It is recommended that this value should be hard coded and never come from a value based on the environment - * or user input. - * - * This method will generate the `insert into` prefix for the insert statement, so `insertBuilder` needs to only - * contain the body of the insert statement excluding the typical `insert into` prefix (this is so for oracle we - * can generate a suitable [[https://docs.oracle.com/cd/E11882_01/server.112/e41084/sql_elements006.htm#SQLRF30052 IGNORE_ROW_ON_DUPKEY_INDEX]] - * hint. - * * Typical usage will look like: * {{{ * insertVerifyingConflicts( - * logger, - * storage, - * "my_table ( pk_col )", // callers MUST ensure this is safe to interpolate directly into the sql query - * sql"my_table (pk_col, name) values ($$id, $$name)", // note the missing `insert into` prefix or any `on conflict` postfix. this will be generated appropriately for the target db. + * sql"insert into my_table (pk_col, name) values ($$id, $$name) on conflict do nothing".asUpdate, * sql"select name from my_table where pk_col = $$id".as[String].head // query values to check successfully exist in the target store. * )( * _ == name, @@ -54,9 +39,7 @@ object IdempotentInsert { * connectivity issues, and should be rare during normal healthy operation. So typically only the insert should be run. 
*/ def insertVerifyingConflicts[A]( - storage: DbStorage, - oracleIgnoreIndex: String, - insertBuilder: => SQLActionBuilder, + insert: => DbStorage.DbAction.WriteOnly[Int], select: DbStorage.DbAction.ReadOnly[A], expectedRowsInserted: Int = 1, )(existingCondition: A => Boolean, errorMessage: A => String)(implicit @@ -69,7 +52,7 @@ object IdempotentInsert { } yield ErrorUtil.requireState(existingCondition(existing), errorMessage(existing)) for { - rowsInserted <- insertIgnoringConflicts(storage, oracleIgnoreIndex, insertBuilder) + rowsInserted <- insert _ <- if (rowsInserted < expectedRowsInserted) assertExisting() // check all of our expected rows exist @@ -85,28 +68,4 @@ object IdempotentInsert { } } yield () } - - /** Similar to [[insertVerifyingConflicts]] but without verifying that the existing data causing conflicts - * matches what we expect. Should only be use where the possibilities of conflicts is limited to retries of our insert. - */ - def insertIgnoringConflicts( - storage: DbStorage, - oracleIgnoreIndex: String, - insertBuilder: => SQLActionBuilder, - ): DbStorage.DbAction.WriteOnly[Int] = { - import DbStorage.Implicits.BuilderChain.* - import storage.api.* - - val insertInto: SQLActionBuilder = storage.profile match { - case _: Postgres | _: H2 => sql"insert into" - case _: Oracle => sql"insert /*+ IGNORE_ROW_ON_DUPKEY_INDEX ( #$oracleIgnoreIndex ) */ into" - } - - val onConflictDoNothing: SQLActionBuilder = storage.profile match { - case _: Postgres | _: H2 => sql"on conflict do nothing" - case _: Oracle => sql"" // not supported - } - - (insertInto ++ sql" " ++ insertBuilder ++ sql" " ++ onConflictDoNothing).asUpdate - } } diff --git a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/resource/Storage.scala b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/resource/Storage.scala index e7b47ce683b4..2ab892481917 100644 --- a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/resource/Storage.scala +++ b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/resource/Storage.scala @@ -10,7 +10,7 @@ import cats.{Eval, Functor, Monad} import com.daml.nameof.NameOf.functionFullName import com.daml.nonempty.NonEmpty import com.digitalasset.canton.config.CantonRequireTypes.String255 -import com.digitalasset.canton.config.RequireTypes.{PositiveInt, PositiveNumeric} +import com.digitalasset.canton.config.RequireTypes.PositiveInt import com.digitalasset.canton.config.* import com.digitalasset.canton.crypto.Salt import com.digitalasset.canton.health.{ @@ -18,13 +18,7 @@ import com.digitalasset.canton.health.{ CloseableHealthComponent, ComponentHealthState, } -import com.digitalasset.canton.lifecycle.{ - CloseContext, - FlagCloseable, - FutureUnlessShutdown, - HasCloseContext, - UnlessShutdown, -} +import com.digitalasset.canton.lifecycle.* import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} import com.digitalasset.canton.logging.{ ErrorLoggingContext, @@ -35,7 +29,7 @@ import com.digitalasset.canton.logging.{ import com.digitalasset.canton.metrics.{DbQueueMetrics, DbStorageMetrics} import com.digitalasset.canton.protocol.ContractIdSyntax.* import com.digitalasset.canton.protocol.{LfContractId, LfGlobalKey, LfHash} -import com.digitalasset.canton.resource.DbStorage.Profile.{H2, Oracle, Postgres} +import com.digitalasset.canton.resource.DbStorage.Profile.{H2, Postgres} import com.digitalasset.canton.resource.DbStorage.{DbAction, Profile} import 
com.digitalasset.canton.resource.StorageFactory.StorageCreationException import com.digitalasset.canton.serialization.ProtoConverter @@ -44,8 +38,8 @@ import com.digitalasset.canton.time.Clock import com.digitalasset.canton.time.EnrichedDurations.* import com.digitalasset.canton.tracing.TraceContext import com.digitalasset.canton.util.ShowUtil.* +import com.digitalasset.canton.util.* import com.digitalasset.canton.util.retry.RetryEither -import com.digitalasset.canton.util.{Thereafter, *} import com.digitalasset.canton.{LfPackageId, LfPartyId} import com.google.protobuf.ByteString import com.typesafe.config.{Config, ConfigValueFactory} @@ -64,7 +58,6 @@ import slick.jdbc.{ActionBasedSQLInterpolation as _, SQLActionBuilder as _, *} import slick.lifted.Aliases import slick.util.{AsyncExecutor, AsyncExecutorWithMetrics, ClassLoaderUtil} -import java.io.ByteArrayInputStream import java.sql.{Blob, SQLException, SQLTransientException, Statement} import java.util.UUID import java.util.concurrent.atomic.AtomicInteger @@ -200,39 +193,19 @@ trait DbStorage extends Storage { self: NamedLogging => object DbStorageConverters { - /** We use `bytea` in Postgres and Oracle and `binary large object` in H2. + /** We use `bytea` in Postgres and `binary large object` in H2. * The reason is that starting from version 2.0, H2 imposes a limit of 1M * for the size of a `bytea`. Hence, depending on the profile, SetParameter - * and GetResult for `Array[Byte]` are different for H2 and Oracle/Postgres. + * and GetResult for `Array[Byte]` are different for H2 and Postgres. */ private lazy val byteArraysAreBlobs = profile match { case _: H2 => true case _ => false } - /** We use .setBinaryStream for Oracle instead of .setBytes, due to an ORA-03146 code which happens sometimes with: - * - BLOB sql field - * - MERGE query - * - new field value of size > 32K - * - * Canton #11644, support case #4136 - * Solution is based on: - * https://stackoverflow.com/questions/7794197/inserting-byte-array-as-blob-in-oracle-database-getting-ora-01460-unimplement - */ - private lazy val bytesArraysSetBinaryStream = profile match { - case _: Oracle => true - case _ => false - } - implicit val setParameterByteArray: SetParameter[Array[Byte]] = (v, pp) => if (byteArraysAreBlobs) pp.setBlob(bytesToBlob(v)) - else if (bytesArraysSetBinaryStream) { - val npos = pp.pos + 1 - pp.ps.setBinaryStream(npos, new ByteArrayInputStream(v)) - pp.pos = npos - } else { - pp.setBytes(v) - } + else pp.setBytes(v) implicit val getResultByteArray: GetResult[Array[Byte]] = if (byteArraysAreBlobs) GetResult(r => blobToBytes(r.nextBlob())) @@ -262,9 +235,6 @@ trait DbStorage extends Storage { self: NamedLogging => * Safe to use in a select slick query with #$... interpolation */ def limit(numberOfItems: Int, skipItems: Long = 0L): String = profile match { - case _: DbStorage.Profile.Oracle => - (if (skipItems != 0L) s"offset $skipItems rows " - else "") + s"fetch next $numberOfItems rows only" case _ => s"limit $numberOfItems" + (if (skipItems != 0L) s" offset $skipItems" else "") } @@ -285,8 +255,8 @@ trait DbStorage extends Storage { self: NamedLogging => case _: Profile.Postgres => val syncCommit = sqlu"set local synchronous_commit=on" syncCommit.andThen(query).transactionally - case _: Profile.H2 | _: Profile.Oracle => - // Don't do anything for H2/Oracle. According to our docs it is up to the user to enforce synchronous replication. + case _: Profile.H2 => + // Don't do anything for H2. 
According to our docs it is up to the user to enforce synchronous replication. // Any changes here are on a best-effort basis, but we won't guarantee they will be sufficient. query.transactionally } @@ -585,9 +555,6 @@ object DbStorage { final case class H2(jdbc: H2Profile) extends Profile { override protected def pretty: Pretty[H2] = prettyOfObject[H2] } - final case class Oracle(jdbc: OracleProfile) extends Profile with DbLockSupport { - override protected def pretty: Pretty[Oracle] = prettyOfObject[Oracle] - } final case class Postgres(jdbc: PostgresProfile) extends Profile with DbLockSupport { override protected def pretty: Pretty[Postgres] = prettyOfObject[Postgres] } @@ -632,10 +599,8 @@ object DbStorage { } implicit val getResultUuid: GetResult[UUID] = GetResult(r => UUID.fromString(r.nextString())) - @SuppressWarnings(Array("com.digitalasset.canton.SlickString")) // UUIDs are length-limited implicit val setParameterUuid: SetParameter[UUID] = (v, pp) => pp.setString(v.toString) - @SuppressWarnings(Array("com.digitalasset.canton.SlickString")) // LfPartyIds are length-limited implicit val setParameterLfPartyId: SetParameter[LfPartyId] = (v, pp) => pp.setString(v) implicit val getResultLfPartyId: GetResult[LfPartyId] = GetResult(r => r.nextString()).andThen { LfPartyId @@ -667,7 +632,6 @@ object DbStorage { } // LfPackageIds are length-limited - @SuppressWarnings(Array("com.digitalasset.canton.SlickString")) implicit val setParameterLfPackageId: SetParameter[LfPackageId] = (v, pp) => pp.setString(v) implicit val getResultPackageId: GetResult[LfPackageId] = GetResult(r => r.nextString()).andThen { @@ -771,8 +735,7 @@ object DbStorage { config match { case _: H2DbConfig => H2(H2Profile) case _: PostgresDbConfig => Postgres(PostgresProfile) - // TODO(i11009): assume unknown config is for oracle until we have proper oracle factory support - case _ => Oracle(OracleProfile) + case other => throw new IllegalArgumentException(s"Unsupported DbConfig: $other") } def createDatabase( @@ -936,9 +899,6 @@ object DbStorage { * partial update counts therein and those update counts are not taken into consideration. * * This operation is idempotent if the statement is idempotent for each value. - * - * @throws java.lang.IllegalArgumentException if `statement` contains `"IGNORE_ROW_ON_DUPKEY_INDEX"` - * (See UpsertTestOracle for details.) */ def bulkOperation[A]( statement: String, @@ -946,17 +906,7 @@ object DbStorage { profile: Profile, )( setParams: PositionedParameters => A => Unit - )(implicit loggingContext: ErrorLoggingContext): DBIOAction[Array[Int], NoStream, Effect.All] = { - // Bail out if the statement contains IGNORE_ROW_ON_DUPKEY_INDEX, because update counts are known to be broken. - // Use MERGE instead. - // Ignoring update counts is not an option, because the JDBC driver reads them internally and may fail with - // low-level exceptions. - // See UpsertTestOracle for details. - ErrorUtil.requireArgument( - !statement.toUpperCase.contains("IGNORE_ROW_ON_DUPKEY_INDEX"), - s"Illegal usage of bulkOperation with IGNORE_ROW_ON_DUPKEY_INDEX. $statement", - ) - + )(implicit loggingContext: ErrorLoggingContext): DBIOAction[Array[Int], NoStream, Effect.All] = if (values.isEmpty) DBIOAction.successful(Array.empty) else { val action = SimpleJdbcAction { session => @@ -988,11 +938,6 @@ object DbStorage { import profile.DbStorageAPI.* profile match { - case _: Oracle => - // Oracle has the habit of not properly rolling back, if an error occurs and - // there is no transaction (i.e. autoCommit = true). 
Further details on this can be found in UpsertTestOracle. - action.transactionally - case _ if values.sizeCompare(1) <= 0 => // Disable auto-commit for better performance. action @@ -1000,7 +945,6 @@ object DbStorage { case _ => action.transactionally } } - } /** Same as [[bulkOperation]] except that no update counts are returned. */ def bulkOperation_[A]( @@ -1022,41 +966,24 @@ object DbStorage { } } - /** Construct an in clause for a given field. If there are too many elements, - * splits the clause into several ones. We need to split into several terms - * because Oracle imposes a limit on the number of elements in an - * in-clause (currently: 1000). + /** Construct an in clause for a given field. * * @return An iterable of the grouped values and the in clause for the grouped values */ @nowarn("cat=unused") // somehow, f is wrongly reported as unused by the compiler - def toInClauses[T]( + def toInClause[T]( field: String, values: NonEmpty[Seq[T]], - maxValuesInSqlList: PositiveNumeric[Int], - )(implicit f: SetParameter[T]): immutable.Iterable[(Seq[T], SQLActionBuilder)] = { + )(implicit f: SetParameter[T]): SQLActionBuilder = { import DbStorage.Implicits.BuilderChain.* + sql"#$field in (" ++ + values + .map(value => sql"$value") + .forgetNE + .intercalate(sql", ") ++ sql")" - values - .grouped(maxValuesInSqlList.unwrap) - .map { groupedValues => - val inClause = sql"#$field in (" ++ - groupedValues - .map(value => sql"$value") - .intercalate(sql", ") ++ sql")" - - groupedValues -> inClause.toActionBuilder - } - .to(immutable.Iterable) } - def toInClauses_[T]( - field: String, - values: NonEmpty[Seq[T]], - maxValuesSqlInListSize: PositiveNumeric[Int], - )(implicit f: SetParameter[T]): immutable.Iterable[SQLActionBuilder] = - toInClauses(field, values, maxValuesSqlInListSize).map { case (_, builder) => builder } - class DbStorageCreationException(message: String) extends RuntimeException(message) final case class RetryConfig( diff --git a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/authentication/AuthenticationToken.scala b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/authentication/AuthenticationToken.scala index 83f2252699c9..56fe0c7278f7 100644 --- a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/authentication/AuthenticationToken.scala +++ b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/sequencing/authentication/AuthenticationToken.scala @@ -47,8 +47,6 @@ final case class AuthenticationToken private[authentication] (private val bytes: object AuthenticationToken { /** As of now, the database schemas can only handle authentication tokens up to a length of 150 bytes. Thus the length of an [[AuthenticationToken]] should never exceed that. - * If we ever want to create an [[AuthenticationToken]] larger than that, we can increase it up to 500 bytes after which we are limited by Oracle length limits. - * See the documentation at [[com.digitalasset.canton.config.CantonRequireTypes.LengthLimitedString]] for more details. */ val length: Int = 20 @@ -74,7 +72,7 @@ object AuthenticationToken { implicit val getAuthenticationTokenResult: GetResult[AuthenticationToken] = GetResult { r => val hexString = r.nextString() - if (hexString.length > String300.maxLength) + if (hexString.length > String300.maxLength.unwrap) throw new DbDeserializationException( s"Base16-encoded authentication token of length ${hexString.length} exceeds allowed limit of ${String300.maxLength}." 
) diff --git a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/store/db/DbCursorPreheadStore.scala b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/store/db/DbCursorPreheadStore.scala index c45c5646d048..ab44e8661a15 100644 --- a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/store/db/DbCursorPreheadStore.scala +++ b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/store/db/DbCursorPreheadStore.scala @@ -20,7 +20,7 @@ import scala.concurrent.{ExecutionContext, Future} * @param cursorTable The table name to store the cursor prehead. * The table must define the following columns: *
- *                      <li>domain_id integer not null primary key</li>
+ *                      <li>domain_idx integer not null primary key</li>
  *                      <li>prehead_counter bigint not null</li>
  *                      <li>ts bigint not null</li>
  *                    </ul>
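Given this single-row-per-domain layout, the overwrite path below reduces on Postgres to one conflict-update statement; a minimal sketch, with bind and table names as in the surrounding class (`cursorTable` is the store's table name):

    val overwrite =
      sqlu"""insert into #$cursorTable (domain_idx, prehead_counter, ts)
             values ($indexedDomain, $counter, $timestamp)
             on conflict (domain_idx) do update set prehead_counter = $counter, ts = $timestamp"""

H2 reaches the same end state via `merge into`, as the hunks below show.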
@@ -42,7 +42,7 @@ class DbCursorPreheadStore[Discr]( traceContext: TraceContext ): Future[Option[CursorPrehead[Discr]]] = { val preheadQuery = - sql"""select prehead_counter, ts from #$cursorTable where domain_id = $indexedDomain order by prehead_counter desc #${storage + sql"""select prehead_counter, ts from #$cursorTable where domain_idx = $indexedDomain order by prehead_counter desc #${storage .limit(2)}""" .as[(Counter[Discr], CantonTimestamp)] storage.query(preheadQuery, functionFullName).map { @@ -66,24 +66,10 @@ class DbCursorPreheadStore[Discr]( case Some(CursorPrehead(counter, timestamp)) => val query = storage.profile match { case _: DbStorage.Profile.H2 => - sqlu"merge into #$cursorTable (domain_id, prehead_counter, ts) values ($indexedDomain, $counter, $timestamp)" + sqlu"merge into #$cursorTable (domain_idx, prehead_counter, ts) values ($indexedDomain, $counter, $timestamp)" case _: DbStorage.Profile.Postgres => - sqlu"""insert into #$cursorTable (domain_id, prehead_counter, ts) values ($indexedDomain, $counter, $timestamp) - on conflict (domain_id) do update set prehead_counter = $counter, ts = $timestamp""" - case _: DbStorage.Profile.Oracle => - sqlu"""merge into #$cursorTable ct - using ( - select - $indexedDomain domain_id, - $counter counter, - $timestamp ts - from dual - ) val - on (val.domain_id = ct.domain_id) - when matched then - update set ct.prehead_counter = val.counter, ct.ts = val.ts - when not matched then - insert (domain_id, prehead_counter, ts) values (val.domain_id, val.counter, val.ts)""" + sqlu"""insert into #$cursorTable (domain_idx, prehead_counter, ts) values ($indexedDomain, $counter, $timestamp) + on conflict (domain_idx) do update set prehead_counter = $counter, ts = $timestamp""" } storage.update_(query, functionFullName) } @@ -99,34 +85,19 @@ class DbCursorPreheadStore[Discr]( sqlu""" merge into #$cursorTable as cursor_table using dual - on cursor_table.domain_id = $indexedDomain + on cursor_table.domain_idx = $indexedDomain when matched and cursor_table.prehead_counter < $counter then update set cursor_table.prehead_counter = $counter, cursor_table.ts = $timestamp - when not matched then insert (domain_id, prehead_counter, ts) values ($indexedDomain, $counter, $timestamp) + when not matched then insert (domain_idx, prehead_counter, ts) values ($indexedDomain, $counter, $timestamp) """ case _: DbStorage.Profile.Postgres => sqlu""" - insert into #$cursorTable as cursor_table (domain_id, prehead_counter, ts) + insert into #$cursorTable as cursor_table (domain_idx, prehead_counter, ts) values ($indexedDomain, $counter, $timestamp) - on conflict (domain_id) do + on conflict (domain_idx) do update set prehead_counter = $counter, ts = $timestamp where cursor_table.prehead_counter < $counter """ - case _: DbStorage.Profile.Oracle => - sqlu""" - merge into #$cursorTable cursor_table - using ( - select - $indexedDomain domain_id - from dual - ) val - on (cursor_table.domain_id = val.domain_id) - when matched then - update set cursor_table.prehead_counter = $counter, cursor_table.ts = $timestamp - where cursor_table.prehead_counter < $counter - when not matched then - insert (domain_id, prehead_counter, ts) values (val.domain_id, $counter, $timestamp) - """ } new TransactionalStoreUpdate.DbTransactionalStoreUpdate( query, @@ -146,14 +117,14 @@ class DbCursorPreheadStore[Discr]( sqlu""" update #$cursorTable set prehead_counter = $counter, ts = $timestamp - where domain_id = $indexedDomain and prehead_counter > $counter""" + where domain_idx = $indexedDomain 
and prehead_counter > $counter""" storage.update_(query, "rewind prehead") } } private[this] def delete()(implicit traceContext: TraceContext): Future[Unit] = storage.update_( - sqlu"""delete from #$cursorTable where domain_id = $indexedDomain""", + sqlu"""delete from #$cursorTable where domain_idx = $indexedDomain""", functionFullName, ) } diff --git a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/store/db/DbIndexedStringStore.scala b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/store/db/DbIndexedStringStore.scala index 9b8553bfa030..b0e23780580c 100644 --- a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/store/db/DbIndexedStringStore.scala +++ b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/store/db/DbIndexedStringStore.scala @@ -51,14 +51,8 @@ class DbIndexedStringStore( // not sure how to get "last insert id" here in case the row was inserted // therefore, we're just querying the db again. this is a bit dorky, // but we'll hardly ever do this, so should be good - val query = storage.profile match { - case _: DbStorage.Profile.Postgres | _: DbStorage.Profile.H2 => - sqlu"insert into common_static_strings (string, source) values ($str, $dbType) ON CONFLICT DO NOTHING" - case _: DbStorage.Profile.Oracle => - sqlu"""INSERT - /*+ IGNORE_ROW_ON_DUPKEY_INDEX ( common_static_strings (string, source) ) */ - INTO common_static_strings (string, source) VALUES ($str,$dbType)""" - } + val query = + sqlu"insert into common_static_strings (string, source) values ($str, $dbType) ON CONFLICT DO NOTHING" // and now query it storage.update_(query, functionFullName) } diff --git a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/store/db/DbPrunableByTime.scala b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/store/db/DbPrunableByTime.scala index 7b4d7d5d0b38..b536894b0cdd 100644 --- a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/store/db/DbPrunableByTime.scala +++ b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/store/db/DbPrunableByTime.scala @@ -13,7 +13,7 @@ import slick.jdbc.SetParameter import scala.concurrent.{ExecutionContext, Future} -/** Mixin for an db store that stores the latest point in time when +/** Mixin for a db store that stores the latest point in time when * pruning has started or finished. 
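 *
 * A typical prune implementation brackets its work with the two phases, roughly as follows
 * (a sketch: `doPrune` stands for the store-specific deletion and is not part of this trait):
 * {{{
 * for {
 *   _ <- advancePruningTimestamp(PruningPhase.Started, timestamp)
 *   _ <- doPrune(timestamp)
 *   _ <- advancePruningTimestamp(PruningPhase.Completed, timestamp)
 * } yield ()
 * }}}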
* * The pruning method of the store must use [[advancePruningTimestamp]] to signal the start end completion @@ -35,7 +35,7 @@ trait DbPrunableByTime extends PrunableByTime { */ protected[this] def pruning_status_table: String - protected[this] def partitionColumn: String = "domain_id" + protected[this] def partitionColumn: String = "domain_idx" protected[this] def partitionKey: IndexedDomain @@ -85,24 +85,6 @@ trait DbPrunableByTime extends PrunableByTime { update set phase = CAST($phase as pruning_phase), ts = $timestamp where pruning_status.ts < $timestamp """ - case (_: DbStorage.Profile.Oracle, PruningPhase.Started) => - sqlu""" - merge into #$pruning_status_table pruning_status - using ( - select - $partitionKey partitionKey, - $phase phase, - $timestamp timestamp - from - dual - ) val - on (pruning_status.#$partitionColumn = val.partitionKey) - when matched then - update set pruning_status.phase = val.phase, pruning_status.ts = val.timestamp - where pruning_status.ts < val.timestamp - when not matched then - insert (#$partitionColumn, phase, ts) values (val.partitionKey, val.phase, val.timestamp) - """ } logger.debug( @@ -129,12 +111,12 @@ trait DbPrunableByTime extends PrunableByTime { } } -/** Specialized [[DbPrunableByTime]] that uses the [[com.digitalasset.canton.topology.DomainId]] as discriminator */ +/** Specialized [[DbPrunableByTime]] that uses the domain as discriminator */ trait DbPrunableByTimeDomain extends DbPrunableByTime { this: DbStore => - protected[this] def domainId: IndexedDomain + protected[this] def indexedDomain: IndexedDomain - override protected[this] def partitionKey: IndexedDomain = domainId + override protected[this] def partitionKey: IndexedDomain = indexedDomain } diff --git a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/store/db/DbSequencedEventStore.scala b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/store/db/DbSequencedEventStore.scala index d9adbe9eb1c1..add8002e2cac 100644 --- a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/store/db/DbSequencedEventStore.scala +++ b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/store/db/DbSequencedEventStore.scala @@ -128,21 +128,10 @@ class DbSequencedEventStore( private def bulkInsertQuery( events: Seq[PossiblyIgnoredSerializedEvent] )(implicit traceContext: TraceContext): DBIOAction[Unit, NoStream, Effect.All] = { - val insertSql = storage.profile match { - case _: DbStorage.Profile.Oracle => - """merge /*+ INDEX ( common_sequenced_events ( ts, domain_id ) ) */ - |into common_sequenced_events - |using (select ? domain_id, ? ts from dual) input - |on (sequenced_events.ts = input.ts and common_sequenced_events.domain_id = input.domain_id) - |when not matched then - | insert (domain_id, ts, sequenced_event, type, sequencer_counter, trace_context, ignore) - | values (input.domain_id, input.ts, ?, ?, ?, ?, ?)""".stripMargin - - case _ => - "insert into common_sequenced_events (domain_id, ts, sequenced_event, type, sequencer_counter, trace_context, ignore) " + - "values (?, ?, ?, ?, ?, ?, ?) " + - "on conflict do nothing" - } + val insertSql = + "insert into common_sequenced_events (domain_idx, ts, sequenced_event, type, sequencer_counter, trace_context, ignore) " + + "values (?, ?, ?, ?, ?, ?, ?) 
" + + "on conflict do nothing" DbStorage.bulkOperation_(insertSql, events, storage.profile) { pp => event => pp >> partitionKey pp >> event.timestamp @@ -162,10 +151,10 @@ class DbSequencedEventStore( // The implementation assumes that we timestamps on sequenced events increases monotonically with the sequencer counter // It therefore is fine to take the first event that we find. sql"""select type, sequencer_counter, ts, sequenced_event, trace_context, ignore from common_sequenced_events - where domain_id = $partitionKey and ts = $timestamp""" + where domain_idx = $partitionKey and ts = $timestamp""" case LatestUpto(inclusive) => sql"""select type, sequencer_counter, ts, sequenced_event, trace_context, ignore from common_sequenced_events - where domain_id = $partitionKey and ts <= $inclusive + where domain_idx = $partitionKey and ts <= $inclusive order by ts desc #${storage.limit(1)}""" } @@ -183,7 +172,7 @@ class DbSequencedEventStore( for { events <- storage.query( sql"""select type, sequencer_counter, ts, sequenced_event, trace_context, ignore from common_sequenced_events - where domain_id = $partitionKey and $lowerInclusive <= ts and ts <= $upperInclusive + where domain_idx = $partitionKey and $lowerInclusive <= ts and ts <= $upperInclusive order by ts #${limit.fold("")(storage.limit(_))}""" .as[PossiblyIgnoredSerializedEvent], functionFullName, @@ -205,7 +194,7 @@ class DbSequencedEventStore( )(implicit traceContext: TraceContext): Future[Seq[PossiblyIgnoredSerializedEvent]] = storage.query( sql"""select type, sequencer_counter, ts, sequenced_event, trace_context, ignore from common_sequenced_events - where domain_id = $partitionKey + where domain_idx = $partitionKey order by ts #${limit.fold("")(storage.limit(_))}""" .as[PossiblyIgnoredSerializedEvent], functionFullName, @@ -216,12 +205,12 @@ class DbSequencedEventStore( lastPruning: Option[CantonTimestamp], )(implicit traceContext: TraceContext): Future[Int] = { val query = - sqlu"delete from common_sequenced_events where domain_id = $partitionKey and ts <= $untilInclusive" + sqlu"delete from common_sequenced_events where domain_idx = $partitionKey and ts <= $untilInclusive" storage .queryAndUpdate(query, functionFullName) .map { nrPruned => logger.info( - s"Pruned at least $nrPruned entries from the sequenced event store of domain_id $partitionKey older or equal to $untilInclusive" + s"Pruned at least $nrPruned entries from the sequenced event store of domain_idx $partitionKey older or equal to $untilInclusive" ) nrPruned } @@ -246,7 +235,7 @@ class DbSequencedEventStore( for { lastSequencerCounterAndTimestampO <- EitherT.right( storage.query( - sql"""select sequencer_counter, ts from common_sequenced_events where domain_id = $partitionKey + sql"""select sequencer_counter, ts from common_sequenced_events where domain_idx = $partitionKey order by sequencer_counter desc #${storage.limit(1)}""" .as[(SequencerCounter, CantonTimestamp)] .headOption, @@ -282,7 +271,7 @@ class DbSequencedEventStore( traceContext: TraceContext ): Future[Unit] = storage.update_( - sqlu"update common_sequenced_events set ignore = $ignore where domain_id = $partitionKey and $fromInclusive <= sequencer_counter and sequencer_counter <= $toInclusive", + sqlu"update common_sequenced_events set ignore = $ignore where domain_idx = $partitionKey and $fromInclusive <= sequencer_counter and sequencer_counter <= $toInclusive", functionFullName, ) @@ -303,7 +292,7 @@ class DbSequencedEventStore( lastNonEmptyEventSequencerCounter <- EitherT.right( storage.query( 
sql"""select sequencer_counter from common_sequenced_events - where domain_id = $partitionKey and type != ${SequencedEventDbType.IgnoredEvent} + where domain_idx = $partitionKey and type != ${SequencedEventDbType.IgnoredEvent} order by sequencer_counter desc #${storage.limit(1)}""" .as[SequencerCounter] .headOption, @@ -316,7 +305,7 @@ class DbSequencedEventStore( lastSequencerCounter <- EitherT.right( storage.query( sql"""select sequencer_counter from common_sequenced_events - where domain_id = $partitionKey + where domain_idx = $partitionKey order by sequencer_counter desc #${storage.limit(1)}""" .as[SequencerCounter] .headOption, @@ -332,7 +321,7 @@ class DbSequencedEventStore( _ <- EitherT.right( storage.update( sqlu"""delete from common_sequenced_events - where domain_id = $partitionKey and type = ${SequencedEventDbType.IgnoredEvent} + where domain_idx = $partitionKey and type = ${SequencedEventDbType.IgnoredEvent} and $fromEffective <= sequencer_counter and sequencer_counter <= $to""", functionFullName, ) @@ -343,7 +332,7 @@ class DbSequencedEventStore( from: SequencerCounter )(implicit traceContext: TraceContext): Future[Unit] = storage.update_( - sqlu"delete from common_sequenced_events where domain_id = $partitionKey and sequencer_counter >= $from", + sqlu"delete from common_sequenced_events where domain_idx = $partitionKey and sequencer_counter >= $from", functionFullName, ) } diff --git a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/topology/client/CachingDomainTopologyClient.scala b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/topology/client/CachingDomainTopologyClient.scala index 764fa132fa49..539d5604557c 100644 --- a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/topology/client/CachingDomainTopologyClient.scala +++ b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/topology/client/CachingDomainTopologyClient.scala @@ -466,7 +466,7 @@ class CachingTopologySnapshot( // split up the request into separate chunks so that we don't block the cache for too long // when loading very large batches MonadUtil - .batchedSequentialTraverse(batchingConfig.parallelism, batchingConfig.maxItemsInSqlClause)( + .batchedSequentialTraverse(batchingConfig.parallelism, batchingConfig.maxItemsInBatch)( parties )(parties => partyCache.getAll(parties)(traceContext).map(_.toSeq)) .map(_.toMap) diff --git a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/topology/store/TopologyStore.scala b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/topology/store/TopologyStore.scala index edc268c5a6d1..ae678fd2c688 100644 --- a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/topology/store/TopologyStore.scala +++ b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/topology/store/TopologyStore.scala @@ -10,7 +10,7 @@ import com.daml.nonempty.NonEmpty import com.digitalasset.canton.ProtoDeserializationError import com.digitalasset.canton.config.CantonRequireTypes.{LengthLimitedString, String255} import com.digitalasset.canton.config.ProcessingTimeout -import com.digitalasset.canton.config.RequireTypes.PositiveInt +import com.digitalasset.canton.config.RequireTypes.{NonNegativeInt, PositiveInt} import com.digitalasset.canton.data.CantonTimestamp import com.digitalasset.canton.lifecycle.{FlagCloseable, FutureUnlessShutdown} import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} @@ -74,7 +74,10 @@ object TopologyStoreId { if (discriminator.isEmpty) 
dbStringWithoutDiscriminator else LengthLimitedString - .tryCreate(discriminator + "::", discriminator.length + 2) + .tryCreate( + discriminator + "::", + PositiveInt.two + NonNegativeInt.tryCreate(discriminator.length), + ) .tryConcatenate(dbStringWithoutDiscriminator) override protected def pretty: Pretty[this.type] = @@ -91,7 +94,10 @@ object TopologyStoreId { override def dbStringWithDaml2xUniquifier(uniquifier: String): LengthLimitedString = { require(uniquifier.nonEmpty) LengthLimitedString - .tryCreate(discriminator + uniquifier + "::", discriminator.length + uniquifier.length + 2) + .tryCreate( + discriminator + uniquifier + "::", + PositiveInt.two + NonNegativeInt.tryCreate(discriminator.length + uniquifier.length), + ) .tryConcatenate(dbStringWithoutDiscriminator) } @@ -106,7 +112,7 @@ object TopologyStoreId { override def dbStringWithDaml2xUniquifier(uniquifier: String): LengthLimitedString = { require(uniquifier.nonEmpty) LengthLimitedString - .tryCreate(uniquifier + "::", uniquifier.length + 2) + .tryCreate(uniquifier + "::", PositiveInt.two + NonNegativeInt.tryCreate(uniquifier.length)) .tryConcatenate(dbString) } diff --git a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/topology/store/db/DbPartyMetadataStore.scala b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/topology/store/db/DbPartyMetadataStore.scala index 1a4de82825ba..20cb0f7420b3 100644 --- a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/topology/store/db/DbPartyMetadataStore.scala +++ b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/topology/store/db/DbPartyMetadataStore.scala @@ -90,7 +90,7 @@ class DbPartyMetadataStore( effective_at = $effectiveTimestamp, notified = false """ - case _: DbStorage.Profile.H2 | _: DbStorage.Profile.Oracle => + case _: DbStorage.Profile.H2 => sqlu"""merge into common_party_metadata using dual on (party_id = $partyId) diff --git a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/topology/store/db/DbTopologyStore.scala b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/topology/store/db/DbTopologyStore.scala index b712be8a6ea9..33172a2d01c9 100644 --- a/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/topology/store/db/DbTopologyStore.scala +++ b/sdk/canton/community/base/src/main/scala/com/digitalasset/canton/topology/store/db/DbTopologyStore.scala @@ -284,7 +284,6 @@ class DbTopologyStore[StoreId <: TopologyStoreId]( queryForTransactions(mappingProposalsAndPreviousFilter, "inspect") } - @SuppressWarnings(Array("com.digitalasset.canton.SlickString")) override def inspectKnownParties( asOfExclusive: CantonTimestamp, filterParty: String, @@ -618,28 +617,18 @@ class DbTopologyStore[StoreId <: TopologyStoreId]( val representativeProtocolVersion = signedTx.transaction.representativeProtocolVersion val hashOfSignatures = signedTx.hashOfSignatures.toLengthLimitedHexString - storage.profile match { - case _: DbStorage.Profile.Postgres | _: DbStorage.Profile.H2 => - sql"""($transactionStoreIdName, $sequencedTs, $validFrom, $validUntil, $transactionType, $namespace, - $identifier, $mappingHash, $serial, $operation, $signedTx, $txHash, $isProposal, $reason, $representativeProtocolVersion, $hashOfSignatures)""" - case _: DbStorage.Profile.Oracle => - throw new IllegalStateException("Oracle not supported by daml 3.0 yet") - } + sql"""($transactionStoreIdName, $sequencedTs, $validFrom, $validUntil, $transactionType, $namespace, + $identifier, $mappingHash, $serial, $operation, 
$signedTx, $txHash, $isProposal, $reason, $representativeProtocolVersion, $hashOfSignatures)""" } - storage.profile match { - case _: DbStorage.Profile.Postgres | _: DbStorage.Profile.H2 => - (sql"""INSERT INTO common_topology_transactions (store_id, sequenced, valid_from, valid_until, transaction_type, namespace, + (sql"""INSERT INTO common_topology_transactions (store_id, sequenced, valid_from, valid_until, transaction_type, namespace, identifier, mapping_key_hash, serial_counter, operation, instance, tx_hash, is_proposal, rejection_reason, representative_protocol_version, hash_of_signatures) VALUES""" ++ - transactions - .map(sqlTransactionParameters) - .toList - .intercalate(sql", ") - ++ sql" ON CONFLICT DO NOTHING" // idempotency-"conflict" based on common_topology_transactions unique constraint - ).asUpdate - case _: DbStorage.Profile.Oracle => - throw new IllegalStateException("Oracle not supported by daml 3.0 yet") - } + transactions + .map(sqlTransactionParameters) + .toList + .intercalate(sql", ") + ++ sql" ON CONFLICT DO NOTHING" // idempotency-"conflict" based on common_topology_transactions unique constraint + ).asUpdate } // Helper to break up large uid-filters into batches to limit the size of sql "in-clauses". @@ -832,7 +821,7 @@ class DbTopologyStore[StoreId <: TopologyStoreId]( set watermark_ts = $timestamp """ - case _: DbStorage.Profile.H2 | _: DbStorage.Profile.Oracle => + case _: DbStorage.Profile.H2 => sqlu"""merge into common_topology_dispatching using dual on (store_id = $transactionStoreIdName) @@ -861,7 +850,6 @@ class DbTopologyStore[StoreId <: TopologyStoreId]( case None => sql" AND valid_until is NULL" } - @SuppressWarnings(Array("com.digitalasset.canton.SlickString")) private def getIdFilter( idFilter: Option[String] ): SQLActionBuilderChain = @@ -870,7 +858,6 @@ class DbTopologyStore[StoreId <: TopologyStoreId]( case _ => sql"" } - @SuppressWarnings(Array("com.digitalasset.canton.SlickString")) private def getNamespaceFilter(namespaceFilter: Option[String]): SQLActionBuilderChain = namespaceFilter match { case Some(value) if value.nonEmpty => sql" AND namespace LIKE ${value + "%"}" diff --git a/sdk/canton/community/common/src/main/daml/CantonExamples/daml.yaml b/sdk/canton/community/common/src/main/daml/CantonExamples/daml.yaml index 36f173250409..d625c5e1b0cf 100644 --- a/sdk/canton/community/common/src/main/daml/CantonExamples/daml.yaml +++ b/sdk/canton/community/common/src/main/daml/CantonExamples/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 3.2.0-snapshot.20240927.13322.0.v0ccfc472 +sdk-version: 3.2.0-snapshot.20241001.13325.0.vdeefd01c build-options: - --target=2.1 name: CantonExamples diff --git a/sdk/canton/community/common/src/main/resources/db/migration/canton/h2/dev/V999__dev.sql b/sdk/canton/community/common/src/main/resources/db/migration/canton/h2/dev/V999__dev.sql index da4f721f5385..1b11c58d1148 100644 --- a/sdk/canton/community/common/src/main/resources/db/migration/canton/h2/dev/V999__dev.sql +++ b/sdk/canton/community/common/src/main/resources/db/migration/canton/h2/dev/V999__dev.sql @@ -3,5 +3,5 @@ -- This is a dummy column we are adding in order to test that adding dev version migrations -- works properly. 
DO NOT MOVE THIS TO STABLE -ALTER TABLE common_node_id ADD COLUMN test_column INT NOT NULL DEFAULT 0; +alter table common_node_id add column test_column int not null default 0; diff --git a/sdk/canton/community/common/src/main/resources/db/migration/canton/h2/stable/V1_1__initial.sql b/sdk/canton/community/common/src/main/resources/db/migration/canton/h2/stable/V1_1__initial.sql index b5b57ce764a5..b7dd7a5eb2ef 100644 --- a/sdk/canton/community/common/src/main/resources/db/migration/canton/h2/stable/V1_1__initial.sql +++ b/sdk/canton/community/common/src/main/resources/db/migration/canton/h2/stable/V1_1__initial.sql @@ -47,7 +47,7 @@ create table common_crypto_private_keys ( ); -- Store metadata information about KMS keys -CREATE TABLE common_kms_metadata_store ( +create table common_kms_metadata_store ( fingerprint varchar(300) not null, kms_key_id varchar(300) not null, purpose smallint not null, @@ -67,8 +67,8 @@ create table common_crypto_public_keys ( -- Stores the immutable contracts, however a creation of a contract can be rolled back. create table par_contracts ( - -- As a participant can be connected to multiple domains, the transactions are stored under a domain id. - domain_id integer not null, + -- As a participant can be connected to multiple domains, the transactions are stored per domain + domain_idx integer not null, contract_id varchar(300) not null, -- The contract is serialized using the LF contract proto serializer. instance binary large object not null, @@ -85,17 +85,17 @@ create table par_contracts ( -- We store metadata of the contract instance for inspection package_id varchar(300) not null, template_id varchar not null, - primary key (domain_id, contract_id)); + primary key (domain_idx, contract_id)); -- Index to speedup ContractStore.find --- domain_id comes first, because there is always a constraint on it. +-- domain_idx comes first, because there is always a constraint on it. -- package_id comes before template_id, because queries with package_id and without template_id make more sense than vice versa. --- contract_id is left out, because a query with domain_id and contract_id can be served with the primary key. -create index idx_par_contracts_find on par_contracts(domain_id, package_id, template_id); +-- contract_id is left out, because a query with domain_idx and contract_id can be served with the primary key. +create index idx_par_contracts_find on par_contracts(domain_idx, package_id, template_id); -- Index for pruning -- Using an index on all elements because H2 does not support partial indices. -create index idx_par_contracts_request_counter on par_contracts(domain_id, request_counter); +create index idx_par_contracts_request_counter on par_contracts(domain_idx, request_counter); -- provides a serial enumeration of static strings so we don't store the same string over and over in the db -- currently only storing uids @@ -105,7 +105,7 @@ create table common_static_strings ( -- the expression string varchar(300) not null, -- the source (what kind of string are we storing here) - source int NOT NULL, + source integer not null, unique(string, source) ); @@ -153,8 +153,8 @@ create type operation_type as enum ('create', 'add', 'assign', 'archive', 'purge -- Maintains the status of contracts create table par_active_contracts ( - -- As a participant can be connected to multiple domains, the active contracts are stored under a domain id. 
- domain_id int not null, + -- As a participant can be connected to multiple domains, the active contracts are stored per domain. + domain_idx integer not null, contract_id varchar(300) not null, change change_type not null, operation operation_type not null, @@ -163,16 +163,16 @@ create table par_active_contracts ( -- Request counter of the time of change request_counter bigint not null, -- optional remote domain index in case of reassignments - remote_domain_idx int, + remote_domain_idx integer, reassignment_counter bigint default null, - primary key (domain_id, contract_id, ts, request_counter, change) + primary key (domain_idx, contract_id, ts, request_counter, change) ); -CREATE index idx_par_active_contracts_dirty_request_reset ON par_active_contracts (domain_id, request_counter); +create index idx_par_active_contracts_dirty_request_reset on par_active_contracts (domain_idx, request_counter); -CREATE index idx_par_active_contracts_contract_id ON par_active_contracts (contract_id); +create index idx_par_active_contracts_contract_id on par_active_contracts (contract_id); -CREATE index idx_par_active_contracts_ts_domain_id ON par_active_contracts (ts, domain_id); +create index idx_par_active_contracts_ts_domain_idx on par_active_contracts (ts, domain_idx); create table med_response_aggregations ( -- identified by the sequencer timestamp (UTC timestamp in microseconds relative to EPOCH) @@ -187,11 +187,11 @@ create table med_response_aggregations ( -- Stores the received sequencer messages create table common_sequenced_events ( -- discriminate between different users of the sequenced events tables - domain_id integer not null, + domain_idx integer not null, -- Proto serialized signed message sequenced_event binary large object not null, -- Explicit fields to query the messages, which are stored as blobs - type varchar(3) not null check(type IN ('del', 'err', 'ign')), + type varchar(3) not null check(type in ('del', 'err', 'ign')), -- Timestamp of the time of change in microsecond precision relative to EPOCH ts bigint not null, -- Sequencer counter of the time of change @@ -201,22 +201,22 @@ create table common_sequenced_events ( -- flag to skip problematic events ignore boolean not null, -- The sequencer ensures that the timestamp is unique - primary key (domain_id, ts) + primary key (domain_idx, ts) ); -create unique index idx_sequenced_events_sequencer_counter on common_sequenced_events(domain_id, sequencer_counter); +create unique index idx_sequenced_events_sequencer_counter on common_sequenced_events(domain_idx, sequencer_counter); -- Track what send requests we've made but have yet to observe being sequenced. -- If events are not observed by the max sequencing time we know that the send will never be processed. 
create table sequencer_client_pending_sends ( - -- domain id for distinguishing between different sequencer clients in the same node - domain_id integer not null, + -- domain (index) for distinguishing between different sequencer clients in the same node + domain_idx integer not null, -- the message id of the send being tracked (expected to be unique for the sequencer client while the send is in-flight) message_id varchar(300) not null, -- the message id should be unique for the sequencer client - primary key (domain_id, message_id), + primary key (domain_idx, message_id), -- the max sequencing time of the send request (UTC timestamp in microseconds relative to EPOCH) max_sequencing_time bigint not null @@ -237,13 +237,13 @@ create table par_domains( -- domain node id domain_id varchar(300) not null unique, status char(1) default 'A' not null, - CONSTRAINT par_domains_unique unique (alias, domain_id) + constraint par_domains_unique unique (alias, domain_id) ); create table par_reassignments ( -- reassignment id - target_domain varchar(300) not null, - origin_domain varchar(300) not null, + target_domain_idx integer not null, + source_domain_idx integer not null, -- reassignment data source_protocol_version integer not null, @@ -265,12 +265,12 @@ create table par_reassignments ( time_of_completion_request_counter bigint, -- UTC timestamp in microseconds relative to EPOCH time_of_completion_timestamp bigint, - primary key (target_domain, origin_domain, unassignment_timestamp) + primary key (target_domain_idx, source_domain_idx, unassignment_timestamp) ); -- stores all requests for the request journal create table par_journal_requests ( - domain_id integer not null, + domain_idx integer not null, request_counter bigint not null, request_state_index smallint not null, -- UTC timestamp in microseconds relative to EPOCH @@ -279,13 +279,13 @@ create table par_journal_requests ( -- is set only if the request is clean commit_time bigint, repair_context varchar(300), -- only set on manual repair requests outside of sync protocol - primary key (domain_id, request_counter)); -create index idx_par_journal_request_timestamp on par_journal_requests (domain_id, request_timestamp); -create index idx_par_journal_request_commit_time on par_journal_requests (domain_id, commit_time); + primary key (domain_idx, request_counter)); +create index idx_par_journal_request_timestamp on par_journal_requests (domain_idx, request_timestamp); +create index idx_par_journal_request_commit_time on par_journal_requests (domain_idx, commit_time); -- locally computed ACS commitments to a specific period, counter-participant and domain create table par_computed_acs_commitments ( - domain_id int not null, + domain_idx integer not null, counter_participant varchar(300) not null, -- UTC timestamp in microseconds relative to EPOCH from_exclusive bigint not null, @@ -293,13 +293,13 @@ create table par_computed_acs_commitments ( to_inclusive bigint not null, -- the "raw" cryptographic commitment (AcsCommitment.CommitmentType) in its serialized format commitment binary large object not null, - primary key (domain_id, counter_participant, from_exclusive, to_inclusive), + primary key (domain_idx, counter_participant, from_exclusive, to_inclusive), constraint check_nonempty_interval_computed check(to_inclusive > from_exclusive) ); -- ACS commitments received from counter-participants create table par_received_acs_commitments ( - domain_id int not null, + domain_idx integer not null, -- the counter-participant who sent the commitment 
sender varchar(300) not null, -- UTC timestamp in microseconds relative to EPOCH @@ -311,11 +311,11 @@ create table par_received_acs_commitments ( constraint check_to_after_from check(to_inclusive > from_exclusive) ); -create index idx_par_full_commitment on par_received_acs_commitments (domain_id, sender, from_exclusive, to_inclusive); +create index idx_par_full_commitment on par_received_acs_commitments (domain_idx, sender, from_exclusive, to_inclusive); -- the participants whose remote commitments are outstanding create table par_outstanding_acs_commitments ( - domain_id int not null, + domain_idx integer not null, -- UTC timestamp in microseconds relative to EPOCH from_exclusive bigint not null, -- UTC timestamp in microseconds relative to EPOCH @@ -325,40 +325,40 @@ create table par_outstanding_acs_commitments ( constraint check_nonempty_interval_outstanding check(to_inclusive > from_exclusive) ); -create unique index unique_interval_participant on par_outstanding_acs_commitments (domain_id, counter_participant, from_exclusive, to_inclusive); +create unique index unique_interval_participant on par_outstanding_acs_commitments (domain_idx, counter_participant, from_exclusive, to_inclusive); -create index idx_par_outstanding_acs_commitments_by_time on par_outstanding_acs_commitments (domain_id, from_exclusive); +create index idx_par_outstanding_acs_commitments_by_time on par_outstanding_acs_commitments (domain_idx, from_exclusive); -- the last timestamp for which the commitments have been computed, stored and sent create table par_last_computed_acs_commitments ( - domain_id int primary key, + domain_idx integer primary key, -- UTC timestamp in microseconds relative to EPOCH ts bigint not null ); -- Stores the snapshot ACS commitments (per stakeholder set) create table par_commitment_snapshot ( - domain_id int not null, + domain_idx integer not null, -- A stable reference to a stakeholder set, that doesn't rely on the Protobuf encoding being deterministic -- a hex-encoded hash (not binary so that hash can be indexed in all db server types) stakeholders_hash varchar(300) not null, stakeholders binary large object not null, commitment binary large object not null, - primary key (domain_id, stakeholders_hash) + primary key (domain_idx, stakeholders_hash) ); -- Stores the time (along with a tie-breaker) of the ACS commitment snapshot create table par_commitment_snapshot_time ( - domain_id int not null, + domain_idx integer not null, -- UTC timestamp in microseconds relative to EPOCH ts bigint not null, tie_breaker bigint not null, - primary key (domain_id) + primary key (domain_idx) ); -- Remote commitments that were received but could not yet be checked because the local participant is lagging behind create table par_commitment_queue ( - domain_id int not null, + domain_idx integer not null, sender varchar(300) not null, counter_participant varchar(300) not null, -- UTC timestamp in microseconds relative to EPOCH @@ -368,10 +368,10 @@ create table par_commitment_queue ( commitment binary large object not null, commitment_hash varchar(300) not null, -- A shorter hash (SHA-256) of the commitment for the primary key instead of the binary large object constraint check_nonempty_interval_queue check(to_inclusive > from_exclusive), - primary key (domain_id, sender, counter_participant, from_exclusive, to_inclusive, commitment_hash) + primary key (domain_idx, sender, counter_participant, from_exclusive, to_inclusive, commitment_hash) ); -create index idx_par_commitment_queue_by_time on 
par_commitment_queue (domain_id, to_inclusive); +create index idx_par_commitment_queue_by_time on par_commitment_queue (domain_idx, to_inclusive); -- the (current) domain parameters for the given domain create table par_static_domain_parameters ( @@ -414,42 +414,42 @@ create type pruning_phase as enum ('started', 'completed'); -- Maintains the latest timestamp (by domain) for which ACS pruning has started or finished create table par_active_contract_pruning ( - domain_id integer not null, + domain_idx integer not null, phase pruning_phase not null, -- UTC timestamp in microseconds relative to EPOCH ts bigint not null, succeeded bigint null, - primary key (domain_id) + primary key (domain_idx) ); -- Maintains the latest timestamp (by domain) for which ACS commitment pruning has started or finished create table par_commitment_pruning ( - domain_id integer not null, + domain_idx integer not null, phase pruning_phase not null, -- UTC timestamp in microseconds relative to EPOCH ts bigint not null, succeeded bigint null, - primary key (domain_id) + primary key (domain_idx) ); -- Maintains the latest timestamp (by domain) for which contract key journal pruning has started or finished create table par_contract_key_pruning ( - domain_id integer not null, + domain_idx integer not null, phase pruning_phase not null, -- UTC timestamp in microseconds relative to EPOCH ts bigint not null, succeeded bigint null, - primary key (domain_id) + primary key (domain_idx) ); -- Maintains the latest timestamp (by sequencer client) for which the sequenced event store pruning has started or finished create table common_sequenced_event_store_pruning ( - domain_id integer not null, + domain_idx integer not null, phase pruning_phase not null, -- UTC timestamp in microseconds relative to EPOCH ts bigint not null, succeeded bigint null, - primary key (domain_id) + primary key (domain_idx) ); -- table to contain the values provided by the domain to the mediator node for initialization. @@ -466,7 +466,7 @@ create table mediator_domain_configuration ( -- the last recorded head clean sequencer counter for each domain create table common_head_sequencer_counters ( -- discriminate between different users of the sequencer counter tracker tables - domain_id integer not null primary key, + domain_idx integer not null primary key, prehead_counter bigint not null, -- sequencer counter before the first unclean sequenced event -- UTC timestamp in microseconds relative to EPOCH ts bigint not null @@ -536,7 +536,7 @@ create table sequencer_events ( node_index smallint not null, -- single char to indicate the event type: D for deliver event, E for deliver error, R for deliver receipt event_type char(1) not null - constraint event_type_enum check (event_type IN ('D', 'E', 'R')), + constraint event_type_enum check (event_type in ('D', 'E', 'R')), message_id varchar null, sender integer null, -- null if event goes to everyone, otherwise specify member ids of recipients @@ -562,7 +562,7 @@ create table par_in_flight_submission ( submission_id varchar(300) null, - submission_domain varchar(300) not null, + submission_domain_id varchar(300) not null, message_id varchar(300) not null, -- Sequencer timestamp after which this submission will not be sequenced any more, in microsecond precision relative to EPOCH @@ -579,13 +579,13 @@ create table par_in_flight_submission ( -- Optional; omitted if other code paths ensure that an event is produced -- Must be null if sequencing_timeout is not set. 
tracking_data binary large object, - root_hash_hex varchar(300) DEFAULT NULL, + root_hash_hex varchar(300) default null, trace_context binary large object not null ); -CREATE INDEX idx_par_in_flight_submission_root_hash ON par_in_flight_submission (root_hash_hex); -create index idx_par_in_flight_submission_timeout on par_in_flight_submission (submission_domain, sequencing_timeout); -create index idx_par_in_flight_submission_sequencing on par_in_flight_submission (submission_domain, sequencing_time); -create index idx_par_in_flight_submission_message_id on par_in_flight_submission (submission_domain, message_id); +create index idx_par_in_flight_submission_root_hash on par_in_flight_submission (root_hash_hex); +create index idx_par_in_flight_submission_timeout on par_in_flight_submission (submission_domain_id, sequencing_timeout); +create index idx_par_in_flight_submission_sequencing on par_in_flight_submission (submission_domain_id, sequencing_time); +create index idx_par_in_flight_submission_message_id on par_in_flight_submission (submission_domain_id, message_id); create table par_settings( client integer primary key, -- dummy field to enforce at most one row @@ -643,7 +643,7 @@ create table sequencer_domain_configuration ( static_domain_parameters binary large object not null ); -CREATE TABLE common_pruning_schedules( +create table common_pruning_schedules( -- node_type is one of "MED", or "SEQ" -- since mediator and sequencer sometimes share the same db node_type varchar(3) not null primary key, @@ -653,34 +653,34 @@ CREATE TABLE common_pruning_schedules( ); -- Tables for new submission tracker -CREATE TABLE par_fresh_submitted_transaction ( - domain_id integer not null, +create table par_fresh_submitted_transaction ( + domain_idx integer not null, root_hash_hex varchar(300) not null, request_id bigint not null, max_sequencing_time bigint not null, - primary key (domain_id, root_hash_hex) + primary key (domain_idx, root_hash_hex) ); -CREATE TABLE par_fresh_submitted_transaction_pruning ( - domain_id integer not null, +create table par_fresh_submitted_transaction_pruning ( + domain_idx integer not null, phase pruning_phase not null, -- UTC timestamp in microseconds relative to EPOCH ts bigint not null, succeeded bigint null, - primary key (domain_id) + primary key (domain_idx) ); -- pruning_schedules with pruning flag specific to participant pruning -CREATE TABLE par_pruning_schedules ( +create table par_pruning_schedules ( -- this lock column ensures that there can only ever be a single row: https://stackoverflow.com/questions/3967372/sql-server-how-to-constrain-a-table-to-contain-a-single-row lock char(1) not null default 'X' primary key check (lock = 'X'), cron varchar(300) not null, max_duration bigint not null, -- positive number of seconds retention bigint not null, -- positive number of seconds - prune_internally_only boolean NOT NULL DEFAULT false -- whether to prune only canton-internal stores not visible to ledger api + prune_internally_only boolean not null default false -- whether to prune only canton-internal stores not visible to ledger api ); -CREATE TABLE seq_in_flight_aggregation( +create table seq_in_flight_aggregation( aggregation_id varchar(300) not null primary key, -- UTC timestamp in microseconds relative to EPOCH max_sequencing_time bigint not null, @@ -688,9 +688,9 @@ CREATE TABLE seq_in_flight_aggregation( aggregation_rule binary large object not null ); -CREATE INDEX idx_seq_in_flight_aggregation_max_sequencing_time on 
seq_in_flight_aggregation(max_sequencing_time); +create index idx_seq_in_flight_aggregation_max_sequencing_time on seq_in_flight_aggregation(max_sequencing_time); -CREATE TABLE seq_in_flight_aggregated_sender( +create table seq_in_flight_aggregated_sender( aggregation_id varchar(300) not null, sender varchar(300) not null, -- UTC timestamp in microseconds relative to EPOCH @@ -701,7 +701,7 @@ CREATE TABLE seq_in_flight_aggregated_sender( ); -- stores the topology-x state transactions -CREATE TABLE common_topology_transactions ( +create table common_topology_transactions ( -- serial identifier used to preserve insertion order id bigserial not null primary key, -- the id of the store @@ -710,7 +710,7 @@ CREATE TABLE common_topology_transactions ( -- UTC timestamp in microseconds relative to EPOCH sequenced bigint not null, -- type of transaction (refer to TopologyMapping.Code) - transaction_type int not null, + transaction_type integer not null, -- the namespace this transaction is operating on namespace varchar(300) not null, -- the optional identifier this transaction is operating on (yields a uid together with namespace) @@ -722,7 +722,7 @@ CREATE TABLE common_topology_transactions ( mapping_key_hash varchar(300) not null, -- the serial_counter describes the change order within transactions of the same mapping_key_hash -- (redundant also embedded in instance) - serial_counter int not null, + serial_counter integer not null, -- validity window, UTC timestamp in microseconds relative to EPOCH -- so `TopologyChangeOp.Replace` transactions have an effect for valid_from < t <= valid_until -- a `TopologyChangeOp.Remove` will have valid_from = valid_until @@ -731,7 +731,7 @@ CREATE TABLE common_topology_transactions ( -- operation -- 1: Remove -- 2: Replace (upsert/merge semantics) - operation int not null, + operation integer not null, -- The raw transaction, serialized using the proto serializer. instance binary large object not null, -- The transaction hash, to uniquify and aid efficient lookups. @@ -752,7 +752,7 @@ CREATE TABLE common_topology_transactions ( unique (store_id, mapping_key_hash, serial_counter, valid_from, operation, representative_protocol_version, hash_of_signatures, tx_hash) ); -CREATE INDEX idx_common_topology_transactions ON common_topology_transactions (store_id, transaction_type, namespace, identifier, valid_until, valid_from); +create index idx_common_topology_transactions on common_topology_transactions (store_id, transaction_type, namespace, identifier, valid_until, valid_from); -- Stores the traffic balance updates create table seq_traffic_control_balance_updates ( diff --git a/sdk/canton/community/common/src/main/resources/db/migration/canton/postgres/dev/V999__dev.sql b/sdk/canton/community/common/src/main/resources/db/migration/canton/postgres/dev/V999__dev.sql index 92234088ac14..23617c66e877 100644 --- a/sdk/canton/community/common/src/main/resources/db/migration/canton/postgres/dev/V999__dev.sql +++ b/sdk/canton/community/common/src/main/resources/db/migration/canton/postgres/dev/V999__dev.sql @@ -3,4 +3,4 @@ -- This is a dummy column we are adding in order to test that adding dev version migrations -- works properly. 
DO NOT MOVE THIS TO STABLE -ALTER TABLE common_node_id ADD COLUMN test_column INT NOT NULL DEFAULT 0; +alter table common_node_id add column test_column int not null default 0; diff --git a/sdk/canton/community/common/src/main/resources/db/migration/canton/postgres/stable/V1_1__initial.sql b/sdk/canton/community/common/src/main/resources/db/migration/canton/postgres/stable/V1_1__initial.sql index 489739250518..7e7fac7ef25f 100644 --- a/sdk/canton/community/common/src/main/resources/db/migration/canton/postgres/stable/V1_1__initial.sql +++ b/sdk/canton/community/common/src/main/resources/db/migration/canton/postgres/stable/V1_1__initial.sql @@ -47,7 +47,7 @@ create table common_crypto_private_keys ( ); -- Store metadata information about KMS keys -CREATE TABLE common_kms_metadata_store ( +create table common_kms_metadata_store ( fingerprint varchar(300) collate "C" not null, kms_key_id varchar(300) collate "C" not null, purpose smallint not null, @@ -68,8 +68,8 @@ create table common_crypto_public_keys ( -- Stores the immutable contracts, however a creation of a contract can be rolled back. create table par_contracts ( - -- As a participant can be connected to multiple domains, the transactions are stored under a domain id. - domain_id integer not null, + -- As a participant can be connected to multiple domains, the transactions are stored under a domain. + domain_idx integer not null, contract_id varchar(300) collate "C" not null, -- The contract is serialized using the LF contract proto serializer. instance bytea not null, @@ -86,17 +86,17 @@ create table par_contracts ( package_id varchar(300) collate "C" not null, template_id varchar collate "C" not null, contract_salt bytea, - primary key (domain_id, contract_id) + primary key (domain_idx, contract_id) ); -- Index to speedup ContractStore.find --- domain_id comes first, because there is always a constraint on it. +-- domain_idx comes first, because there is always a constraint on it. -- package_id comes before template_id, because queries with package_id and without template_id make more sense than vice versa. --- contract_id is left out, because a query with domain_id and contract_id can be served with the primary key. -create index idx_par_contracts_find on par_contracts(domain_id, package_id, template_id); +-- contract_id is left out, because a query with domain_idx and contract_id can be served with the primary key. +create index idx_par_contracts_find on par_contracts(domain_idx, package_id, template_id); -- Partial index for pruning -create index idx_par_contracts_request_counter on par_contracts(domain_id, request_counter) where creating_transaction_id is null; +create index idx_par_contracts_request_counter on par_contracts(domain_idx, request_counter) where creating_transaction_id is null; -- provides a serial enumeration of static strings so we don't store the same string over and over in the db -- currently only storing uids @@ -106,7 +106,7 @@ create table common_static_strings ( -- the expression string varchar(300) collate "C" not null, -- the source (what kind of string are we storing here) - source int NOT NULL, + source integer not null, unique(string, source) ); @@ -155,7 +155,7 @@ create type operation_type as enum ('create', 'add', 'assign', 'archive', 'purge -- Maintains the status of contracts create table par_active_contracts ( -- As a participant can be connected to multiple domains, the active contracts are stored under a domain id. 
- domain_id int not null, + domain_idx integer not null, contract_id varchar(300) collate "C" not null, change change_type not null, operation operation_type not null, @@ -164,34 +164,34 @@ create table par_active_contracts ( -- Request counter of the time of change request_counter bigint not null, -- optional remote domain index in case of reassignments - remote_domain_idx int, + remote_domain_idx integer, reassignment_counter bigint default null, - primary key (domain_id, contract_id, ts, request_counter, change) + primary key (domain_idx, contract_id, ts, request_counter, change) ); -CREATE index idx_par_active_contracts_dirty_request_reset ON par_active_contracts (domain_id, request_counter); -CREATE index idx_par_active_contracts_contract_id ON par_active_contracts (contract_id); -CREATE index idx_par_active_contracts_ts_domain_id ON par_active_contracts (ts, domain_id); -CREATE INDEX idx_par_active_contracts_pruning on par_active_contracts (domain_id, ts) WHERE change = 'deactivation'; +create index idx_par_active_contracts_dirty_request_reset on par_active_contracts (domain_idx, request_counter); +create index idx_par_active_contracts_contract_id on par_active_contracts (contract_id); +create index idx_par_active_contracts_ts_domain_idx on par_active_contracts (ts, domain_idx); +create index idx_par_active_contracts_pruning on par_active_contracts (domain_idx, ts) where change = 'deactivation'; -- Tables for new submission tracker -CREATE TABLE par_fresh_submitted_transaction ( - domain_id integer not null, +create table par_fresh_submitted_transaction ( + domain_idx integer not null, root_hash_hex varchar(300) collate "C" not null, request_id bigint not null, max_sequencing_time bigint not null, - primary key (domain_id, root_hash_hex) + primary key (domain_idx, root_hash_hex) ); create type pruning_phase as enum ('started', 'completed'); -CREATE TABLE par_fresh_submitted_transaction_pruning ( - domain_id integer not null, +create table par_fresh_submitted_transaction_pruning ( + domain_idx integer not null, phase pruning_phase not null, -- UTC timestamp in microseconds relative to EPOCH ts bigint not null, succeeded bigint null, - primary key (domain_id) + primary key (domain_idx) ); create table med_response_aggregations ( @@ -207,11 +207,11 @@ create table med_response_aggregations ( -- Stores the received sequencer messages create table common_sequenced_events ( -- discriminate between different users of the sequenced events tables - domain_id integer not null, + domain_idx integer not null, -- Proto serialized signed message sequenced_event bytea not null, -- Explicit fields to query the messages, which are stored as blobs - type varchar(3) collate "C" not null check(type IN ('del', 'err', 'ign')), + type varchar(3) collate "C" not null check(type in ('del', 'err', 'ign')), -- Timestamp of the time of change in microsecond precision relative to EPOCH ts bigint not null, -- Sequencer counter of the time of change @@ -221,22 +221,22 @@ create table common_sequenced_events ( -- flag to skip problematic events ignore boolean not null, -- The sequencer ensures that the timestamp is unique - primary key (domain_id, ts) + primary key (domain_idx, ts) ); -create unique index idx_common_sequenced_events_sequencer_counter on common_sequenced_events(domain_id, sequencer_counter); +create unique index idx_common_sequenced_events_sequencer_counter on common_sequenced_events(domain_idx, sequencer_counter); -- Track what send requests we've made but have yet to observe being sequenced. 
-- If events are not observed by the max sequencing time we know that the send will never be processed. create table sequencer_client_pending_sends ( - -- domain id for distinguishing between different sequencer clients in the same node - domain_id integer not null, + -- domain (index) for distinguishing between different sequencer clients in the same node + domain_idx integer not null, -- the message id of the send being tracked (expected to be unique for the sequencer client while the send is in-flight) message_id varchar(300) collate "C" not null, -- the message id should be unique for the sequencer client - primary key (domain_id, message_id), + primary key (domain_idx, message_id), -- the max sequencing time of the send request (UTC timestamp in microseconds relative to EPOCH) max_sequencing_time bigint not null @@ -245,7 +245,7 @@ create table sequencer_client_pending_sends ( create table par_domain_connection_configs( domain_alias varchar(300) collate "C" not null primary key, config bytea, -- the protobuf-serialized versioned domain connection config - status CHAR(1) DEFAULT 'A' NOT NULL + status char(1) default 'A' not null ); -- used to register all domains that a participant connects to @@ -256,16 +256,16 @@ create table par_domains( alias varchar(300) collate "C" not null unique, -- domain node id domain_id varchar(300) collate "C" not null unique, - status CHAR(1) DEFAULT 'A' NOT NULL, + status char(1) default 'A' not null, unique (alias, domain_id) ); create table par_reassignments ( -- reassignment id - target_domain varchar(300) collate "C" not null, - origin_domain varchar(300) collate "C" not null, + target_domain_idx integer not null, + source_domain_idx integer not null, - primary key (target_domain, origin_domain, unassignment_timestamp), + primary key (target_domain_idx, source_domain_idx, unassignment_timestamp), unassignment_global_offset bigint, assignment_global_offset bigint, @@ -285,12 +285,12 @@ create table par_reassignments ( time_of_completion_request_counter bigint, -- UTC timestamp in microseconds relative to EPOCH time_of_completion_timestamp bigint, - source_protocol_version integer NOT NULL + source_protocol_version integer not null ); -- stores all requests for the request journal create table par_journal_requests ( - domain_id integer not null, + domain_idx integer not null, request_counter bigint not null, request_state_index smallint not null, -- UTC timestamp in microseconds relative to EPOCH @@ -299,14 +299,14 @@ create table par_journal_requests ( -- is set only if the request is clean commit_time bigint, repair_context varchar(300) collate "C", -- only set on manual repair requests outside of sync protocol - primary key (domain_id, request_counter) + primary key (domain_idx, request_counter) ); -create index idx_journal_request_timestamp on par_journal_requests (domain_id, request_timestamp); -create index idx_journal_request_commit_time on par_journal_requests (domain_id, commit_time); +create index idx_journal_request_timestamp on par_journal_requests (domain_idx, request_timestamp); +create index idx_journal_request_commit_time on par_journal_requests (domain_idx, commit_time); -- locally computed ACS commitments to a specific period, counter-participant and domain create table par_computed_acs_commitments ( - domain_id int not null, + domain_idx integer not null, counter_participant varchar(300) collate "C" not null, -- UTC timestamp in microseconds relative to EPOCH from_exclusive bigint not null, @@ -314,13 +314,13 @@ create table 
par_computed_acs_commitments ( to_inclusive bigint not null, -- the "raw" cryptographic commitment (AcsCommitment.CommitmentType) in its serialized format commitment bytea not null, - primary key (domain_id, counter_participant, from_exclusive, to_inclusive), + primary key (domain_idx, counter_participant, from_exclusive, to_inclusive), constraint check_nonempty_interval_computed check(to_inclusive > from_exclusive) ); -- ACS commitments received from counter-participants create table par_received_acs_commitments ( - domain_id int not null, + domain_idx integer not null, -- the counter-participant who sent the commitment sender varchar(300) collate "C" not null, -- UTC timestamp in microseconds relative to EPOCH @@ -332,11 +332,11 @@ create table par_received_acs_commitments ( constraint check_to_after_from check(to_inclusive > from_exclusive) ); -create index idx_par_full_commitment on par_received_acs_commitments (domain_id, sender, from_exclusive, to_inclusive); +create index idx_par_full_commitment on par_received_acs_commitments (domain_idx, sender, from_exclusive, to_inclusive); -- the participants whose remote commitments are outstanding create table par_outstanding_acs_commitments ( - domain_id int not null, + domain_idx integer not null, -- UTC timestamp in microseconds relative to EPOCH from_exclusive bigint not null, -- UTC timestamp in microseconds relative to EPOCH @@ -346,40 +346,40 @@ create table par_outstanding_acs_commitments ( constraint check_nonempty_interval_outstanding check(to_inclusive > from_exclusive) ); -create unique index idx_par_acs_unique_interval on par_outstanding_acs_commitments(domain_id, counter_participant, from_exclusive, to_inclusive); +create unique index idx_par_acs_unique_interval on par_outstanding_acs_commitments(domain_idx, counter_participant, from_exclusive, to_inclusive); -create index idx_par_outstanding_acs_commitments_by_time on par_outstanding_acs_commitments (domain_id, from_exclusive); +create index idx_par_outstanding_acs_commitments_by_time on par_outstanding_acs_commitments (domain_idx, from_exclusive); -- the last timestamp for which the commitments have been computed, stored and sent create table par_last_computed_acs_commitments ( - domain_id int primary key, + domain_idx integer primary key, -- UTC timestamp in microseconds relative to EPOCH ts bigint not null ); -- Stores the snapshot ACS commitments (per stakeholder set) create table par_commitment_snapshot ( - domain_id int not null, + domain_idx integer not null, -- A stable reference to a stakeholder set, that doesn't rely on the Protobuf encoding being deterministic -- a hex-encoded hash (not binary so that hash can be indexed in all db server types) stakeholders_hash varchar(300) collate "C" not null, stakeholders bytea not null, commitment bytea not null, - primary key (domain_id, stakeholders_hash) + primary key (domain_idx, stakeholders_hash) ); -- Stores the time (along with a tie-breaker) of the ACS commitment snapshot create table par_commitment_snapshot_time ( - domain_id int not null, + domain_idx integer not null, -- UTC timestamp in microseconds relative to EPOCH ts bigint not null, tie_breaker bigint not null, - primary key (domain_id) + primary key (domain_idx) ); -- Remote commitments that were received but could not yet be checked because the local participant is lagging behind create table par_commitment_queue ( - domain_id int not null, + domain_idx integer not null, sender varchar(300) collate "C" not null, counter_participant varchar(300) collate "C" not 
null, -- UTC timestamp in microseconds relative to EPOCH @@ -389,10 +389,10 @@ create table par_commitment_queue ( commitment bytea not null, commitment_hash varchar(300) collate "C" not null, -- A shorter hash (SHA-256) of the commitment for the primary key instead of the bytea constraint check_nonempty_interval_queue check(to_inclusive > from_exclusive), - primary key (domain_id, sender, counter_participant, from_exclusive, to_inclusive, commitment_hash) + primary key (domain_idx, sender, counter_participant, from_exclusive, to_inclusive, commitment_hash) ); -create index idx_par_commitment_queue_by_time on par_commitment_queue (domain_id, to_inclusive); +create index idx_par_commitment_queue_by_time on par_commitment_queue (domain_idx, to_inclusive); -- the (current) domain parameters for the given domain create table par_static_domain_parameters ( @@ -429,42 +429,42 @@ create table seq_block_height ( -- Maintains the latest timestamp (by domain) for which ACS pruning has started or finished create table par_active_contract_pruning ( - domain_id integer not null, + domain_idx integer not null, phase pruning_phase not null, -- UTC timestamp in microseconds relative to EPOCH ts bigint not null, succeeded bigint null, - primary key (domain_id) + primary key (domain_idx) ); -- Maintains the latest timestamp (by domain) for which ACS commitment pruning has started or finished create table par_commitment_pruning ( - domain_id integer not null, + domain_idx integer not null, phase pruning_phase not null, -- UTC timestamp in microseconds relative to EPOCH ts bigint not null, succeeded bigint null, - primary key (domain_id) + primary key (domain_idx) ); -- Maintains the latest timestamp (by domain) for which contract key journal pruning has started or finished create table par_contract_key_pruning ( - domain_id integer not null, + domain_idx integer not null, phase pruning_phase not null, -- UTC timestamp in microseconds relative to EPOCH ts bigint not null, succeeded bigint null, - primary key (domain_id) + primary key (domain_idx) ); -- Maintains the latest timestamp (by sequencer client) for which the sequenced event store pruning has started or finished create table common_sequenced_event_store_pruning ( - domain_id integer not null, + domain_idx integer not null, phase pruning_phase not null, -- UTC timestamp in microseconds relative to EPOCH ts bigint not null, succeeded bigint null, - primary key (domain_id) + primary key (domain_idx) ); -- table to contain the values provided by the domain to the mediator node for initialization. 
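(Editor's note — an illustrative sketch, not part of the migration: the per-domain pruning-phase tables above, such as par_active_contract_pruning, pair the pruning_phase enum with a target timestamp. Assuming the store implementations follow the two-step protocol the enum suggests — record 'started' before a pruning pass, then mark it 'completed' once the pass succeeds — the driving SQL would look roughly as below. The domain index 42 and the epoch-microsecond timestamp are placeholder values; the actual statements live in the Scala store code.)

    -- 1. record that pruning of this domain up to ts has started
    insert into par_active_contract_pruning (domain_idx, phase, ts)
    values (42, 'started', 1700000000000000)
    on conflict (domain_idx) do update
      set phase = 'started', ts = excluded.ts;

    -- 2. once the pass succeeds, flip the phase and remember the timestamp
    update par_active_contract_pruning
    set phase = 'completed', succeeded = ts
    where domain_idx = 42;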
@@ -481,7 +481,7 @@ create table mediator_domain_configuration ( -- the last recorded head clean sequencer counter for each domain create table common_head_sequencer_counters ( -- discriminate between different users of the sequencer counter tracker tables - domain_id integer not null primary key, + domain_idx integer not null primary key, prehead_counter bigint not null, -- sequencer counter before the first unclean sequenced event -- UTC timestamp in microseconds relative to EPOCH ts bigint not null @@ -556,7 +556,7 @@ create table sequencer_events ( node_index smallint not null, -- single char to indicate the event type: D for deliver event, E for deliver error, R for deliver receipt event_type char(1) not null - constraint event_type_enum check (event_type IN ('D', 'E', 'R')), + constraint event_type_enum check (event_type in ('D', 'E', 'R')), message_id varchar(300) collate "C" null, sender integer null, -- null if event goes to everyone, otherwise specify member ids of recipients @@ -575,13 +575,13 @@ create table sequencer_events ( create sequence participant_event_publisher_local_offsets minvalue 0 start with 0; -- pruning_schedules with pruning flag specific to participant pruning -CREATE TABLE par_pruning_schedules ( +create table par_pruning_schedules ( -- this lock column ensures that there can only ever be a single row: https://stackoverflow.com/questions/3967372/sql-server-how-to-constrain-a-table-to-contain-a-single-row lock char(1) not null default 'X' primary key check (lock = 'X'), cron varchar(300) collate "C" not null, max_duration bigint not null, -- positive number of seconds retention bigint not null, -- positive number of seconds - prune_internally_only boolean NOT NULL DEFAULT false -- whether to prune only canton-internal stores not visible to ledger api + prune_internally_only boolean not null default false -- whether to prune only canton-internal stores not visible to ledger api ); -- store in-flight submissions @@ -591,7 +591,7 @@ create table par_in_flight_submission ( submission_id varchar(300) collate "C" null, - submission_domain varchar(300) collate "C" not null, + submission_domain_id varchar(300) collate "C" not null, message_id varchar(300) collate "C" not null, -- Sequencer timestamp after which this submission will not be sequenced any more, in microsecond precision relative to EPOCH @@ -610,15 +610,15 @@ create table par_in_flight_submission ( tracking_data bytea, -- Add root hash to in-flight submission tracker store - root_hash_hex varchar(300) collate "C" DEFAULT NULL, + root_hash_hex varchar(300) collate "C" default null, trace_context bytea not null ); -create index idx_par_in_flight_submission_root_hash ON par_in_flight_submission (root_hash_hex); -create index idx_par_in_flight_submission_timeout on par_in_flight_submission (submission_domain, sequencing_timeout); -create index idx_par_in_flight_submission_sequencing on par_in_flight_submission (submission_domain, sequencing_time); -create index idx_par_in_flight_submission_message_id on par_in_flight_submission (submission_domain, message_id); +create index idx_par_in_flight_submission_root_hash on par_in_flight_submission (root_hash_hex); +create index idx_par_in_flight_submission_timeout on par_in_flight_submission (submission_domain_id, sequencing_timeout); +create index idx_par_in_flight_submission_sequencing on par_in_flight_submission (submission_domain_id, sequencing_time); +create index idx_par_in_flight_submission_message_id on par_in_flight_submission (submission_domain_id, 
message_id); create table par_settings( client integer primary key, -- dummy field to enforce at most one row @@ -684,7 +684,7 @@ create table mediator_deduplication_store ( ); create index idx_mediator_deduplication_store_expire_after on mediator_deduplication_store(expire_after, mediator_id); -CREATE TABLE common_pruning_schedules( +create table common_pruning_schedules( -- node_type is one of "MED", or "SEQ" -- since mediator and sequencer sometimes share the same db node_type varchar(3) collate "C" not null primary key, @@ -693,7 +693,7 @@ CREATE TABLE common_pruning_schedules( retention bigint not null -- positive number of seconds ); -CREATE TABLE seq_in_flight_aggregation( +create table seq_in_flight_aggregation( aggregation_id varchar(300) collate "C" not null primary key, -- UTC timestamp in microseconds relative to EPOCH max_sequencing_time bigint not null, @@ -701,9 +701,9 @@ CREATE TABLE seq_in_flight_aggregation( aggregation_rule bytea not null ); -CREATE INDEX idx_seq_in_flight_aggregation_max_sequencing_time on seq_in_flight_aggregation(max_sequencing_time); +create index idx_seq_in_flight_aggregation_max_sequencing_time on seq_in_flight_aggregation(max_sequencing_time); -CREATE TABLE seq_in_flight_aggregated_sender( +create table seq_in_flight_aggregated_sender( aggregation_id varchar(300) collate "C" not null, sender varchar(300) collate "C" not null, -- UTC timestamp in microseconds relative to EPOCH @@ -714,7 +714,7 @@ CREATE TABLE seq_in_flight_aggregated_sender( ); -- stores the topology-x state transactions -CREATE TABLE common_topology_transactions ( +create table common_topology_transactions ( -- serial identifier used to preserve insertion order id bigserial not null primary key, -- the id of the store @@ -723,7 +723,7 @@ CREATE TABLE common_topology_transactions ( -- UTC timestamp in microseconds relative to EPOCH sequenced bigint not null, -- type of transaction (refer to TopologyMapping.Code) - transaction_type int not null, + transaction_type integer not null, -- the namespace this transaction is operating on namespace varchar(300) collate "C" not null, -- the optional identifier this transaction is operating on (yields a uid together with namespace) @@ -735,7 +735,7 @@ CREATE TABLE common_topology_transactions ( mapping_key_hash varchar(300) collate "C" not null, -- the serial_counter describes the change order within transactions of the same mapping_key_hash -- (redundant also embedded in instance) - serial_counter int not null, + serial_counter integer not null, -- validity window, UTC timestamp in microseconds relative to EPOCH -- so `TopologyChangeOp.Replace` transactions have an effect for valid_from < t <= valid_until -- a `TopologyChangeOp.Remove` will have valid_from = valid_until @@ -744,7 +744,7 @@ CREATE TABLE common_topology_transactions ( -- operation -- 1: Remove -- 2: Replace (upsert/merge semantics) - operation int not null, + operation integer not null, -- The raw transaction, serialized using the proto serializer. instance bytea not null, -- The transaction hash, to uniquify and aid efficient lookups. 
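(Editor's note — an illustrative query, not part of the migration: per the validity-window comments above, a Replace transaction is in effect at time t when valid_from < t <= valid_until. Assuming the usual convention that a null valid_until marks a transaction still in effect, and that store_id is a textual identifier, the state of one mapping at a given time could be read roughly as below; the store id, mapping hash, and timestamp literals are all placeholders.)

    select instance, operation, serial_counter
    from common_topology_transactions
    where store_id = 'domain-store'        -- placeholder store id
      and mapping_key_hash = '1220abcd'    -- placeholder mapping hash
      and valid_from < 1700000000000000
      and (valid_until is null or 1700000000000000 <= valid_until)
    order by serial_counter desc
    limit 1;
    -- per the comments above: operation = 2 (Replace) means the mapping is
    -- present at t, operation = 1 (Remove) means it was removed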
@@ -764,7 +764,7 @@ CREATE TABLE common_topology_transactions ( -- index used for idempotency during crash recovery unique (store_id, mapping_key_hash, serial_counter, valid_from, operation, representative_protocol_version, hash_of_signatures, tx_hash) ); -CREATE INDEX idx_common_topology_transactions ON common_topology_transactions (store_id, transaction_type, namespace, identifier, valid_until, valid_from); +create index idx_common_topology_transactions on common_topology_transactions (store_id, transaction_type, namespace, identifier, valid_until, valid_from); -- Stores the traffic purchased entry updates create table seq_traffic_control_balance_updates ( diff --git a/sdk/canton/community/common/src/main/resources/db/migration/canton/postgres/stable/V1_2__initial_views.sql b/sdk/canton/community/common/src/main/resources/db/migration/canton/postgres/stable/V1_2__initial_views.sql index dc7e63b1cf16..05ff00fd1236 100644 --- a/sdk/canton/community/common/src/main/resources/db/migration/canton/postgres/stable/V1_2__initial_views.sql +++ b/sdk/canton/community/common/src/main/resources/db/migration/canton/postgres/stable/V1_2__initial_views.sql @@ -154,7 +154,7 @@ create or replace view debug.common_crypto_public_keys as create or replace view debug.par_contracts as select - debug.resolve_common_static_string(domain_id) as domain_id, + debug.resolve_common_static_string(domain_idx) as domain_idx, contract_id, instance, metadata, @@ -190,17 +190,18 @@ create or replace view debug.common_topology_dispatching as create or replace view debug.par_active_contracts as select - debug.resolve_common_static_string(domain_id) as domain_id, + debug.resolve_common_static_string(domain_idx) as domain_idx, contract_id, change, operation, debug.canton_timestamp(ts) as ts, request_counter, - debug.resolve_common_static_string(remote_domain_idx) as remote_domain_idx, reassignment_counter + debug.resolve_common_static_string(remote_domain_idx) as remote_domain_idx, + reassignment_counter from par_active_contracts; create or replace view debug.par_fresh_submitted_transaction as select - debug.resolve_common_static_string(domain_id) as domain_id, + debug.resolve_common_static_string(domain_idx) as domain_idx, root_hash_hex, debug.canton_timestamp(request_id) as request_id, debug.canton_timestamp(max_sequencing_time) as max_sequencing_time @@ -208,7 +209,7 @@ create or replace view debug.par_fresh_submitted_transaction as create or replace view debug.par_fresh_submitted_transaction_pruning as select - debug.resolve_common_static_string(domain_id) as domain_id, + debug.resolve_common_static_string(domain_idx) as domain_idx, phase, debug.canton_timestamp(ts) as ts, debug.canton_timestamp(succeeded) as succeeded @@ -225,7 +226,7 @@ create or replace view debug.med_response_aggregations as create or replace view debug.common_sequenced_events as select - debug.resolve_common_static_string(domain_id) as domain_id, + debug.resolve_common_static_string(domain_idx) as domain_idx, sequenced_event, type, debug.canton_timestamp(ts) as ts, @@ -236,7 +237,7 @@ create or replace view debug.common_sequenced_events as create or replace view debug.sequencer_client_pending_sends as select - debug.resolve_common_static_string(domain_id) as domain_id, + debug.resolve_common_static_string(domain_idx) as domain_idx, message_id, debug.canton_timestamp(max_sequencing_time) as max_sequencing_time from sequencer_client_pending_sends; @@ -258,8 +259,8 @@ create or replace view debug.par_domains as create or replace view 
debug.par_reassignments as select - target_domain, - origin_domain, + debug.resolve_common_static_string(target_domain_idx) as target_domain_idx, + debug.resolve_common_static_string(source_domain_idx) as source_domain_idx, unassignment_global_offset, assignment_global_offset, debug.canton_timestamp(unassignment_timestamp) as unassignment_timestamp, @@ -277,7 +278,7 @@ create or replace view debug.par_reassignments as create or replace view debug.par_journal_requests as select - debug.resolve_common_static_string(domain_id) as domain_id, + debug.resolve_common_static_string(domain_idx) as domain_idx, request_counter, request_state_index, debug.canton_timestamp(request_timestamp) as request_timestamp, @@ -287,7 +288,7 @@ create or replace view debug.par_journal_requests as create or replace view debug.par_computed_acs_commitments as select - debug.resolve_common_static_string(domain_id) as domain_id, + debug.resolve_common_static_string(domain_idx) as domain_idx, counter_participant, debug.canton_timestamp(from_exclusive) as from_exclusive, debug.canton_timestamp(to_inclusive) as to_inclusive, @@ -297,7 +298,7 @@ create or replace view debug.par_computed_acs_commitments as create or replace view debug.par_received_acs_commitments as select - debug.resolve_common_static_string(domain_id) as domain_id, + debug.resolve_common_static_string(domain_idx) as domain_idx, sender, debug.canton_timestamp(from_exclusive) as from_exclusive, debug.canton_timestamp(to_inclusive) as to_inclusive, @@ -306,7 +307,7 @@ create or replace view debug.par_received_acs_commitments as create or replace view debug.par_outstanding_acs_commitments as select - debug.resolve_common_static_string(domain_id) as domain_id, + debug.resolve_common_static_string(domain_idx) as domain_idx, counter_participant, debug.canton_timestamp(from_exclusive) as from_exclusive, debug.canton_timestamp(to_inclusive) as to_inclusive, @@ -315,13 +316,13 @@ create or replace view debug.par_outstanding_acs_commitments as create or replace view debug.par_last_computed_acs_commitments as select - debug.resolve_common_static_string(domain_id) as domain_id, + debug.resolve_common_static_string(domain_idx) as domain_idx, debug.canton_timestamp(ts) as ts from par_last_computed_acs_commitments; create or replace view debug.par_commitment_snapshot as select - debug.resolve_common_static_string(domain_id) as domain_id, + debug.resolve_common_static_string(domain_idx) as domain_idx, stakeholders_hash, stakeholders, commitment @@ -329,14 +330,14 @@ create or replace view debug.par_commitment_snapshot as create or replace view debug.par_commitment_snapshot_time as select - debug.resolve_common_static_string(domain_id) as domain_id, + debug.resolve_common_static_string(domain_idx) as domain_idx, debug.canton_timestamp(ts) as ts, tie_breaker from par_commitment_snapshot_time; create or replace view debug.par_commitment_queue as select - debug.resolve_common_static_string(domain_id) as domain_id, + debug.resolve_common_static_string(domain_idx) as domain_idx, sender, counter_participant, debug.canton_timestamp(from_exclusive) as from_exclusive, @@ -367,7 +368,7 @@ create or replace view debug.seq_block_height as create or replace view debug.par_active_contract_pruning as select - debug.resolve_common_static_string(domain_id) as domain_id, + debug.resolve_common_static_string(domain_idx) as domain_idx, phase, debug.canton_timestamp(ts) as ts, debug.canton_timestamp(succeeded) as succeeded @@ -375,7 +376,7 @@ create or replace view 
debug.par_active_contract_pruning as create or replace view debug.par_commitment_pruning as select - debug.resolve_common_static_string(domain_id) as domain_id, + debug.resolve_common_static_string(domain_idx) as domain_idx, phase, debug.canton_timestamp(ts) as ts, debug.canton_timestamp(succeeded) as succeeded @@ -383,7 +384,7 @@ create or replace view debug.par_commitment_pruning as create or replace view debug.par_contract_key_pruning as select - debug.resolve_common_static_string(domain_id) as domain_id, + debug.resolve_common_static_string(domain_idx) as domain_idx, phase, debug.canton_timestamp(ts) as ts, debug.canton_timestamp(succeeded) as succeeded @@ -391,7 +392,7 @@ create or replace view debug.par_contract_key_pruning as create or replace view debug.common_sequenced_event_store_pruning as select - debug.resolve_common_static_string(domain_id) as domain_id, + debug.resolve_common_static_string(domain_idx) as domain_idx, phase, debug.canton_timestamp(ts) as ts, debug.canton_timestamp(succeeded) as succeeded @@ -407,7 +408,7 @@ create or replace view debug.mediator_domain_configuration as create or replace view debug.common_head_sequencer_counters as select - debug.resolve_common_static_string(domain_id) as domain_id, + debug.resolve_common_static_string(domain_idx) as domain_idx, prehead_counter, debug.canton_timestamp(ts) as ts from common_head_sequencer_counters; @@ -481,7 +482,7 @@ create or replace view debug.par_in_flight_submission as select change_id_hash, submission_id, - submission_domain, + submission_domain_id, message_id, debug.canton_timestamp(sequencing_timeout) as sequencing_timeout, sequencer_counter, diff --git a/sdk/canton/community/common/src/main/scala/com/digitalasset/canton/store/db/DbBulkUpdateProcessor.scala b/sdk/canton/community/common/src/main/scala/com/digitalasset/canton/store/db/DbBulkUpdateProcessor.scala index 304fc55a2902..fd32fbdfb5d0 100644 --- a/sdk/canton/community/common/src/main/scala/com/digitalasset/canton/store/db/DbBulkUpdateProcessor.scala +++ b/sdk/canton/community/common/src/main/scala/com/digitalasset/canton/store/db/DbBulkUpdateProcessor.scala @@ -124,17 +124,16 @@ trait DbBulkUpdateProcessor[A, B] extends BatchAggregator.Processor[A, Try[B]] { case None => Future.unit case Some(toCheckNE) => val ids = toCheckNE.map(x => itemIdentifier(x.target.value)) - val lookupQueries = checkQuery(ids) - storage.sequentialQueryAndCombine(lookupQueries, s"$queryBaseName lookup").map { - foundDatas => - val foundData = foundDatas.map(data => dataIdentifier(data) -> data).toMap - toCheck.foreach { case BulkUpdatePendingCheck(item, cell) => - val response = - analyzeFoundData(item.value, foundData.get(itemIdentifier(item.value)))( - item.traceContext - ) - cell.putIfAbsent(response).discard[Option[Try[B]]] - } + val lookupQuery = checkQuery(ids) + storage.query(lookupQuery, s"$queryBaseName lookup").map { foundDatas => + val foundData = foundDatas.map(data => dataIdentifier(data) -> data).toMap + toCheck.foreach { case BulkUpdatePendingCheck(item, cell) => + val response = + analyzeFoundData(item.value, foundData.get(itemIdentifier(item.value)))( + item.traceContext + ) + cell.putIfAbsent(response).discard[Option[Try[B]]] + } } } @@ -153,7 +152,7 @@ trait DbBulkUpdateProcessor[A, B] extends BatchAggregator.Processor[A, Try[B]] { /** A list of queries for the items that we want to check for */ protected def checkQuery(itemsToCheck: NonEmpty[Seq[ItemIdentifier]])(implicit batchTraceContext: TraceContext - ): 
immutable.Iterable[DbAction.ReadOnly[immutable.Iterable[CheckData]]] + ): DbAction.ReadOnly[immutable.Iterable[CheckData]] /** Compare the item against the data that was found in the store and produce a result. * It is called for each item that the update command returned an update counter not equal to 1. diff --git a/sdk/canton/community/common/src/main/scala/com/digitalasset/canton/store/db/DbPruningSchedulerStore.scala b/sdk/canton/community/common/src/main/scala/com/digitalasset/canton/store/db/DbPruningSchedulerStore.scala index 5411c9df7da1..9bda23f6bcf9 100644 --- a/sdk/canton/community/common/src/main/scala/com/digitalasset/canton/store/db/DbPruningSchedulerStore.scala +++ b/sdk/canton/community/common/src/main/scala/com/digitalasset/canton/store/db/DbPruningSchedulerStore.scala @@ -39,7 +39,7 @@ final class DbPruningSchedulerStore( on conflict (node_type) do update set cron = ${schedule.cron}, max_duration = ${schedule.maxDuration}, retention = ${schedule.retention} """ - case _: Profile.Oracle | _: Profile.H2 => + case _: Profile.H2 => sqlu"""merge into common_pruning_schedules using dual on (node_type = $nodeCode) when matched then diff --git a/sdk/canton/community/common/src/main/scala/com/digitalasset/canton/store/db/DbSendTrackerStore_Unused.scala b/sdk/canton/community/common/src/main/scala/com/digitalasset/canton/store/db/DbSendTrackerStore_Unused.scala index a452ab931b24..a744327a6695 100644 --- a/sdk/canton/community/common/src/main/scala/com/digitalasset/canton/store/db/DbSendTrackerStore_Unused.scala +++ b/sdk/canton/community/common/src/main/scala/com/digitalasset/canton/store/db/DbSendTrackerStore_Unused.scala @@ -34,17 +34,9 @@ class DbSendTrackerStore_Unused( for { rowsUpdated <- EitherT.right( storage.update( - storage.profile match { - case _: DbStorage.Profile.Oracle => - sqlu"""insert - /*+ IGNORE_ROW_ON_DUPKEY_INDEX ( sequencer_client_pending_sends ( message_id, domain_id ) ) */ - into sequencer_client_pending_sends (domain_id, message_id, max_sequencing_time) - values ($indexedDomain, $messageId, $maxSequencingTime)""" - case _ => - sqlu"""insert into sequencer_client_pending_sends (domain_id, message_id, max_sequencing_time) - values ($indexedDomain, $messageId, $maxSequencingTime) - on conflict do nothing""" - }, + sqlu"""insert into sequencer_client_pending_sends (domain_idx, message_id, max_sequencing_time) + values ($indexedDomain, $messageId, $maxSequencingTime) + on conflict do nothing""", operationName = s"${this.getClass}: save pending send", ) ) @@ -56,7 +48,7 @@ class DbSendTrackerStore_Unused( EitherT(for { existingMaxSequencingTimeO <- storage.query( sql"""select max_sequencing_time from sequencer_client_pending_sends - where domain_id = $indexedDomain and message_id = $messageId""" + where domain_idx = $indexedDomain and message_id = $messageId""" .as[CantonTimestamp] .headOption, functionFullName, @@ -76,7 +68,7 @@ class DbSendTrackerStore_Unused( ): Future[Map[MessageId, CantonTimestamp]] = for { items <- storage.query( - sql"select message_id, max_sequencing_time from sequencer_client_pending_sends where domain_id = $indexedDomain" + sql"select message_id, max_sequencing_time from sequencer_client_pending_sends where domain_idx = $indexedDomain" .as[(MessageId, CantonTimestamp)], functionFullName, ) @@ -86,7 +78,7 @@ class DbSendTrackerStore_Unused( messageId: MessageId )(implicit traceContext: TraceContext): Future[Unit] = storage.update_( - sqlu"delete from sequencer_client_pending_sends where domain_id = $indexedDomain and message_id = 
$messageId", + sqlu"delete from sequencer_client_pending_sends where domain_idx = $indexedDomain and message_id = $messageId", functionFullName, ) diff --git a/sdk/canton/community/common/src/test/resources/logback-test.xml b/sdk/canton/community/common/src/test/resources/logback-test.xml index c7398dee1a04..840d6edaf3fe 100644 --- a/sdk/canton/community/common/src/test/resources/logback-test.xml +++ b/sdk/canton/community/common/src/test/resources/logback-test.xml @@ -57,8 +57,6 @@ - - diff --git a/sdk/canton/community/common/src/test/scala/com/digitalasset/canton/Generators.scala b/sdk/canton/community/common/src/test/scala/com/digitalasset/canton/Generators.scala index 51491b3c1a40..89683fe87082 100644 --- a/sdk/canton/community/common/src/test/scala/com/digitalasset/canton/Generators.scala +++ b/sdk/canton/community/common/src/test/scala/com/digitalasset/canton/Generators.scala @@ -37,7 +37,7 @@ object Generators { def lengthLimitedStringGen[A <: AbstractLengthLimitedString]( companion: LengthLimitedStringCompanion[A] ): Gen[A] = for { - length <- Gen.choose(1, companion.maxLength) + length <- Gen.choose(1, companion.maxLength.unwrap) str <- Gen.stringOfN(length, Gen.alphaNumChar) } yield companion.tryCreate(str) diff --git a/sdk/canton/community/common/src/test/scala/com/digitalasset/canton/SequentialTestByKey.scala b/sdk/canton/community/common/src/test/scala/com/digitalasset/canton/SequentialTestByKey.scala index ef660bc1d8d2..c456bfb316d4 100644 --- a/sdk/canton/community/common/src/test/scala/com/digitalasset/canton/SequentialTestByKey.scala +++ b/sdk/canton/community/common/src/test/scala/com/digitalasset/canton/SequentialTestByKey.scala @@ -43,5 +43,4 @@ object TestSemaphoreUtil { // pre-defined semaphore keys here val SEQUENCER_DB_H2 = Some("sequencer-db-h2") val SEQUENCER_DB_PG = Some("sequencer-db-pg") - val SEQUENCER_DB_ORACLE = Some("sequencer-db-oracle") } diff --git a/sdk/canton/community/common/src/test/scala/com/digitalasset/canton/store/db/DatabaseDeadlockTest.scala b/sdk/canton/community/common/src/test/scala/com/digitalasset/canton/store/db/DatabaseDeadlockTest.scala index 86f9d1339fd7..db9ffbebdf87 100644 --- a/sdk/canton/community/common/src/test/scala/com/digitalasset/canton/store/db/DatabaseDeadlockTest.scala +++ b/sdk/canton/community/common/src/test/scala/com/digitalasset/canton/store/db/DatabaseDeadlockTest.scala @@ -154,8 +154,6 @@ trait DatabaseDeadlockTest def assertSQLException(body: => Try[_]): Assertion = forAtLeast(1, 0 until roundsNegative) { _ => - // Note that we can also hit spurious constraint violation errors here, as the query may be MERGE (see UpsertTestOracle). - // This is no problem as long as there is at least one deadlock. 
inside(body) { case Failure(e: SQLException) => assertDeadlock(e) } diff --git a/sdk/canton/community/common/src/test/scala/com/digitalasset/canton/store/db/DbIndexedStringsStoreTest.scala b/sdk/canton/community/common/src/test/scala/com/digitalasset/canton/store/db/DbIndexedStringsStoreTest.scala index f36d3293978e..4e44adabbf9b 100644 --- a/sdk/canton/community/common/src/test/scala/com/digitalasset/canton/store/db/DbIndexedStringsStoreTest.scala +++ b/sdk/canton/community/common/src/test/scala/com/digitalasset/canton/store/db/DbIndexedStringsStoreTest.scala @@ -24,12 +24,8 @@ trait DbIndexedStringsStoreTest override def cleanDb(storage: DbStorage): Future[Unit] = { import storage.api.* - val query = storage.profile match { - case _: DbStorage.Profile.Postgres | _: DbStorage.Profile.H2 => - sqlu"truncate table common_static_strings restart identity" - case _: DbStorage.Profile.Oracle => - sqlu"truncate table common_static_strings" - } + val query = + sqlu"truncate table common_static_strings restart identity" storage.update( DBIO.seq(query), functionFullName, diff --git a/sdk/canton/community/demo/src/main/daml/ai-analysis/daml.yaml b/sdk/canton/community/demo/src/main/daml/ai-analysis/daml.yaml index fd6a7abdd7ee..24d34d8189e8 100644 --- a/sdk/canton/community/demo/src/main/daml/ai-analysis/daml.yaml +++ b/sdk/canton/community/demo/src/main/daml/ai-analysis/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 3.2.0-snapshot.20240927.13322.0.v0ccfc472 +sdk-version: 3.2.0-snapshot.20241001.13325.0.vdeefd01c build-options: - --target=2.1 name: ai-analysis diff --git a/sdk/canton/community/demo/src/main/daml/bank/daml.yaml b/sdk/canton/community/demo/src/main/daml/bank/daml.yaml index 12a76ab4e1a8..da6fa31e0da0 100644 --- a/sdk/canton/community/demo/src/main/daml/bank/daml.yaml +++ b/sdk/canton/community/demo/src/main/daml/bank/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 3.2.0-snapshot.20240927.13322.0.v0ccfc472 +sdk-version: 3.2.0-snapshot.20241001.13325.0.vdeefd01c build-options: - --target=2.1 name: bank diff --git a/sdk/canton/community/demo/src/main/daml/doctor/daml.yaml b/sdk/canton/community/demo/src/main/daml/doctor/daml.yaml index de5840e96b97..fb5af6838893 100644 --- a/sdk/canton/community/demo/src/main/daml/doctor/daml.yaml +++ b/sdk/canton/community/demo/src/main/daml/doctor/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 3.2.0-snapshot.20240927.13322.0.v0ccfc472 +sdk-version: 3.2.0-snapshot.20241001.13325.0.vdeefd01c build-options: - --target=2.1 name: doctor diff --git a/sdk/canton/community/demo/src/main/daml/health-insurance/daml.yaml b/sdk/canton/community/demo/src/main/daml/health-insurance/daml.yaml index 62428fb29663..023ab9670f10 100644 --- a/sdk/canton/community/demo/src/main/daml/health-insurance/daml.yaml +++ b/sdk/canton/community/demo/src/main/daml/health-insurance/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 3.2.0-snapshot.20240927.13322.0.v0ccfc472 +sdk-version: 3.2.0-snapshot.20241001.13325.0.vdeefd01c build-options: - --target=2.1 name: health-insurance diff --git a/sdk/canton/community/demo/src/main/daml/medical-records/daml.yaml b/sdk/canton/community/demo/src/main/daml/medical-records/daml.yaml index b821b097ac5a..7985fbe56a84 100644 --- a/sdk/canton/community/demo/src/main/daml/medical-records/daml.yaml +++ b/sdk/canton/community/demo/src/main/daml/medical-records/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 3.2.0-snapshot.20240927.13322.0.v0ccfc472 +sdk-version: 3.2.0-snapshot.20241001.13325.0.vdeefd01c build-options: - --target=2.1 name: medical-records diff --git 
a/sdk/canton/community/domain/src/main/scala/com/digitalasset/canton/domain/block/LedgerBlockEvent.scala b/sdk/canton/community/domain/src/main/scala/com/digitalasset/canton/domain/block/LedgerBlockEvent.scala index 7ed32e661e09..4f3591e93790 100644 --- a/sdk/canton/community/domain/src/main/scala/com/digitalasset/canton/domain/block/LedgerBlockEvent.scala +++ b/sdk/canton/community/domain/src/main/scala/com/digitalasset/canton/domain/block/LedgerBlockEvent.scala @@ -24,9 +24,7 @@ import com.digitalasset.canton.version.ProtocolVersion import com.digitalasset.canton.{LfTimestamp, ProtoDeserializationError} import com.google.protobuf.ByteString -/** Trait that generalizes over the kind of events that could be observed in a blockchain integration. - * - * Used by Ethereum and Fabric. +/** Trait that generalizes over the kind of events that could be observed from a [[com.digitalasset.canton.domain.sequencing.sequencer.block.BlockOrderer]]. */ sealed trait LedgerBlockEvent extends Product with Serializable @@ -38,7 +36,8 @@ object LedgerBlockEvent extends HasLoggerName { originalPayloadSize: Int = 0, // default is 0 for testing as this value is only used for metrics ) extends LedgerBlockEvent { - lazy val signedSubmissionRequest = signedOrderingRequest.signedSubmissionRequest + lazy val signedSubmissionRequest: SignedContent[SubmissionRequest] = + signedOrderingRequest.signedSubmissionRequest } final case class Acknowledgment(request: SignedContent[AcknowledgeRequest]) extends LedgerBlockEvent @@ -105,4 +104,8 @@ object LedgerBlockEvent extends HasLoggerName { ) } -final case class BlockEvents(height: Long, events: Seq[Traced[LedgerBlockEvent]]) +final case class BlockEvents( + height: Long, + events: Seq[Traced[LedgerBlockEvent]], + tickTopology: Boolean, +) diff --git a/sdk/canton/community/domain/src/main/scala/com/digitalasset/canton/domain/block/data/db/DbSequencerBlockStore.scala b/sdk/canton/community/domain/src/main/scala/com/digitalasset/canton/domain/block/data/db/DbSequencerBlockStore.scala index 5f12065a1568..cc38a8cf2acb 100644 --- a/sdk/canton/community/domain/src/main/scala/com/digitalasset/canton/domain/block/data/db/DbSequencerBlockStore.scala +++ b/sdk/canton/community/domain/src/main/scala/com/digitalasset/canton/domain/block/data/db/DbSequencerBlockStore.scala @@ -21,7 +21,6 @@ import com.digitalasset.canton.domain.sequencing.sequencer.{ SequencerInitialState, } import com.digitalasset.canton.logging.NamedLoggerFactory -import com.digitalasset.canton.resource.DbStorage.Profile.{H2, Oracle, Postgres} import com.digitalasset.canton.resource.IdempotentInsert.insertVerifyingConflicts import com.digitalasset.canton.resource.{DbStorage, DbStore} import com.digitalasset.canton.tracing.TraceContext @@ -77,13 +76,9 @@ class DbSequencerBlockStore( ) private def safeWaterMarkDBIO: DBIOAction[Option[CantonTimestamp], NoStream, Effect.Read] = { - val query = storage.profile match { - case _: H2 | _: Postgres => - // TODO(#18401): Below only works for a single instance database sequencer - sql"select min(watermark_ts) from sequencer_watermarks" - case _: Oracle => - sql"select min(watermark_ts) from sequencer_watermarks" - } + val query = + // TODO(#18401): Below only works for a single instance database sequencer + sql"select min(watermark_ts) from sequencer_watermarks" // `min` may return null that is wrapped into None query.as[Option[CantonTimestamp]].headOption.map(_.flatten) } @@ -154,10 +149,9 @@ class DbSequencerBlockStore( private def updateBlockHeightDBIO(block: 
BlockInfo)(implicit traceContext: TraceContext) = insertVerifyingConflicts( - storage, - "seq_block_height ( height )", - sql"""seq_block_height (height, latest_event_ts, latest_sequencer_event_ts) - values (${block.height}, ${block.lastTs}, ${block.latestSequencerEventTimestamp})""", + sql"""insert into seq_block_height (height, latest_event_ts, latest_sequencer_event_ts) + values (${block.height}, ${block.lastTs}, ${block.latestSequencerEventTimestamp}) + on conflict do nothing""".asUpdate, sql"select latest_event_ts, latest_sequencer_event_ts from seq_block_height where height = ${block.height}" .as[(CantonTimestamp, Option[CantonTimestamp])] .head, diff --git a/sdk/canton/community/domain/src/main/scala/com/digitalasset/canton/domain/block/update/BlockChunkProcessor.scala b/sdk/canton/community/domain/src/main/scala/com/digitalasset/canton/domain/block/update/BlockChunkProcessor.scala index 91c993b9ceca..9a46b3a2fd6d 100644 --- a/sdk/canton/community/domain/src/main/scala/com/digitalasset/canton/domain/block/update/BlockChunkProcessor.scala +++ b/sdk/canton/community/domain/src/main/scala/com/digitalasset/canton/domain/block/update/BlockChunkProcessor.scala @@ -34,6 +34,7 @@ import com.digitalasset.canton.domain.sequencing.sequencer.traffic.SequencerRate import com.digitalasset.canton.error.BaseAlarm import com.digitalasset.canton.lifecycle.{CloseContext, FutureUnlessShutdown} import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} +import com.digitalasset.canton.sequencing.GroupAddressResolver import com.digitalasset.canton.sequencing.client.SequencedEventValidator import com.digitalasset.canton.sequencing.protocol.* import com.digitalasset.canton.topology.* @@ -56,6 +57,7 @@ private[update] final class BlockChunkProcessor( override val loggerFactory: NamedLoggerFactory, metrics: SequencerMetrics, memberValidator: SequencerMemberValidator, + createTopologyTickMessageId: () => MessageId = () => MessageId.randomMessageId(), // For testing )(implicit closeContext: CloseContext) extends NamedLogging { @@ -71,7 +73,7 @@ private[update] final class BlockChunkProcessor( memberValidator = memberValidator, ) - def processChunk( + def processDataChunk( state: BlockUpdateGeneratorImpl.State, height: Long, index: Int, @@ -85,7 +87,7 @@ private[update] final class BlockChunkProcessor( // TODO(i18438): verify the signature of the sequencer on the SendEvent val orderingRequests = fixedTsChanges.collect { case (ts, ev @ Traced(sendEvent: Send)) => - // Discard the timestamp of the `Send` event as this one is obsolete + // Discard the timestamp of the `Send` event as we're using the adjusted timestamp (ts, ev.map(_ => sendEvent.signedOrderingRequest)) } @@ -154,6 +156,84 @@ private[update] final class BlockChunkProcessor( } yield (newState, chunkUpdate) } + def emitTick( + state: BlockUpdateGeneratorImpl.State + )(implicit ec: ExecutionContext, tc: TraceContext): FutureUnlessShutdown[(State, ChunkUpdate)] = + // The block orderer marks a block to request a topology tick only when it assesses that it may need to retrieve an + // up-to-date topology; this will result in a single `TopologyTick` ledger block event in the chunk events of the + // last chunk (associated with the last part of the block). 
+ if (state.latestSequencerEventTimestamp.contains(state.lastChunkTs)) { + // If the latest sequencer event timestamp is the same as the last chunk's final timestamp + // then the last chunk's event was sequencer-addressed, and it passed validation, + // so it's safe for the block orderer to query the topology snapshot on its sequencing timestamp, + // and we don't need to add a `Deliver` for the tick. + FutureUnlessShutdown.pure( + ( + state, + ChunkUpdate( + acknowledgements = Map.empty, + invalidAcknowledgements = Seq.empty, + inFlightAggregationUpdates = Map.empty, + lastSequencerEventTimestamp = Some(state.lastChunkTs), + inFlightAggregations = state.inFlightAggregations, + submissionsOutcomes = Seq.empty, + ), + ) + ) + } else { + // TODO(#19118) Adjust BFT time to avoid collisions (or let it assign one and propagate it) + val tickSequencingTimestamp = state.lastChunkTs.immediateSuccessor + for { + snapshot <- SyncCryptoClient.getSnapshotForTimestampUS( + domainSyncCryptoApi, + tickSequencingTimestamp, + state.latestSequencerEventTimestamp, + protocolVersion, + warnIfApproximate = false, + ) + sequencerRecipients <- FutureUnlessShutdown.outcomeF( + GroupAddressResolver.resolveGroupsToMembers( + Set(SequencersOfDomain), + snapshot.ipsSnapshot, + ) + ) + + tickSubmissionOutcome = SubmissionRequestOutcome( + Map.empty, // Sequenced events are legacy and will be removed, so no need to generate them + None, + outcome = SubmissionOutcome.Deliver( + SubmissionRequest.tryCreate( + sender = sequencerId, + messageId = createTopologyTickMessageId(), + batch = Batch.empty(protocolVersion), + maxSequencingTime = tickSequencingTimestamp, + topologyTimestamp = None, + aggregationRule = None, + submissionCost = None, + protocolVersion = protocolVersion, + ), + sequencingTime = tickSequencingTimestamp, + deliverToMembers = sequencerRecipients(SequencersOfDomain), + batch = Batch.empty(protocolVersion), + submissionTraceContext = TraceContext.createNew(), + ), + ) + } yield ( + state.copy( + lastChunkTs = tickSequencingTimestamp, + latestSequencerEventTimestamp = Some(tickSequencingTimestamp), + ), + ChunkUpdate( + acknowledgements = Map.empty, + invalidAcknowledgements = Seq.empty, + inFlightAggregationUpdates = Map.empty, + lastSequencerEventTimestamp = Some(tickSequencingTimestamp), + inFlightAggregations = state.inFlightAggregations, + submissionsOutcomes = Seq(tickSubmissionOutcome), + ), + ) + } + private def fixTimestamps( height: Long, index: Int, @@ -343,6 +423,7 @@ private[update] final class BlockChunkProcessor( ) metrics.block.blockEvents.mark()(mc) metrics.block.blockEventBytes.mark(payloadSize.longValue)(mc) + case LedgerBlockEvent.Acknowledgment(request) => // record the event metrics.block.blockEvents diff --git a/sdk/canton/community/domain/src/main/scala/com/digitalasset/canton/domain/block/update/BlockUpdateGenerator.scala b/sdk/canton/community/domain/src/main/scala/com/digitalasset/canton/domain/block/update/BlockUpdateGenerator.scala index 613097df6e79..e4cae9f138a3 100644 --- a/sdk/canton/community/domain/src/main/scala/com/digitalasset/canton/domain/block/update/BlockUpdateGenerator.scala +++ b/sdk/canton/community/domain/src/main/scala/com/digitalasset/canton/domain/block/update/BlockUpdateGenerator.scala @@ -84,6 +84,7 @@ object BlockUpdateGenerator { chunkIndex: Int, events: NonEmpty[Seq[Traced[LedgerBlockEvent]]], ) extends BlockChunk + case object TopologyTickChunk extends BlockChunk final case class EndOfBlock(blockHeight: Long) extends BlockChunk } @@ -136,23 +137,34 @@ 
diff --git a/sdk/canton/community/domain/src/main/scala/com/digitalasset/canton/domain/block/update/BlockUpdateGenerator.scala b/sdk/canton/community/domain/src/main/scala/com/digitalasset/canton/domain/block/update/BlockUpdateGenerator.scala
index 613097df6e79..e4cae9f138a3 100644
--- a/sdk/canton/community/domain/src/main/scala/com/digitalasset/canton/domain/block/update/BlockUpdateGenerator.scala
+++ b/sdk/canton/community/domain/src/main/scala/com/digitalasset/canton/domain/block/update/BlockUpdateGenerator.scala
@@ -84,6 +84,7 @@ object BlockUpdateGenerator {
       chunkIndex: Int,
       events: NonEmpty[Seq[Traced[LedgerBlockEvent]]],
   ) extends BlockChunk
+  case object TopologyTickChunk extends BlockChunk
   final case class EndOfBlock(blockHeight: Long) extends BlockChunk
 }
 
@@ -136,23 +137,34 @@ class BlockUpdateGeneratorImpl(
           Some(Traced(value))
         }
       }
-    BlockEvents(block.blockHeight, ledgerBlockEvents)
+
+    BlockEvents(
+      block.blockHeight,
+      ledgerBlockEvents,
+      block.tickTopology,
+    )
   }
 
   override def chunkBlock(
-      block: BlockEvents
+      blockEvents: BlockEvents
   )(implicit traceContext: TraceContext): immutable.Iterable[BlockChunk] = {
-    val blockHeight = block.height
+    val blockHeight = blockEvents.height
     metrics.block.height.updateValue(blockHeight)
-    // We must start a new chunk whenever the chunk processing advances lastSequencerEventTimestamp
-    // Otherwise the logic for retrieving a topology snapshot or traffic state could deadlock
+    val tickChunk =
+      Option.when(blockEvents.tickTopology) {
+        Some(TopologyTickChunk)
+      }
+
+    // We must start a new chunk whenever the chunk processing advances lastSequencerEventTimestamp,
+    // otherwise the logic for retrieving a topology snapshot or traffic state could deadlock.
+
     IterableUtil
-      .splitAfter(block.events)(event => isAddressingSequencers(event.value))
+      .splitAfter(blockEvents.events)(event => isAddressingSequencers(event.value))
       .zipWithIndex
       .map { case (events, index) =>
        NextChunk(blockHeight, index, events)
-      } ++ Seq(EndOfBlock(blockHeight))
+      } ++ tickChunk.flatten ++ Seq(EndOfBlock(blockHeight))
   }
 
   private def isAddressingSequencers(event: LedgerBlockEvent): Boolean =
@@ -177,7 +189,9 @@ class BlockUpdateGeneratorImpl(
         )
         FutureUnlessShutdown.pure(newState -> update)
       case NextChunk(height, index, chunksEvents) =>
-        blockChunkProcessor.processChunk(state, height, index, chunksEvents)
+        blockChunkProcessor.processDataChunk(state, height, index, chunksEvents)
+      case TopologyTickChunk =>
+        blockChunkProcessor.emitTick(state)
     }
   }
 
diff --git a/sdk/canton/community/domain/src/main/scala/com/digitalasset/canton/domain/block/update/SubmissionRequestValidator.scala b/sdk/canton/community/domain/src/main/scala/com/digitalasset/canton/domain/block/update/SubmissionRequestValidator.scala
index e4e28716078d..8d52936a26b7 100644
--- a/sdk/canton/community/domain/src/main/scala/com/digitalasset/canton/domain/block/update/SubmissionRequestValidator.scala
+++ b/sdk/canton/community/domain/src/main/scala/com/digitalasset/canton/domain/block/update/SubmissionRequestValidator.scala
@@ -55,7 +55,7 @@ private[update] final class SubmissionRequestValidator(
 )(implicit closeContext: CloseContext)
     extends NamedLogging {
 
-  val trafficControlValidator = new TrafficControlValidator(
+  private val trafficControlValidator = new TrafficControlValidator(
     domainId,
     protocolVersion,
     rateLimitManager,
diff --git a/sdk/canton/community/domain/src/main/scala/com/digitalasset/canton/domain/block/update/TrafficControlValidator.scala b/sdk/canton/community/domain/src/main/scala/com/digitalasset/canton/domain/block/update/TrafficControlValidator.scala
index 60cfa524a3f0..685771682c9b 100644
--- a/sdk/canton/community/domain/src/main/scala/com/digitalasset/canton/domain/block/update/TrafficControlValidator.scala
+++ b/sdk/canton/community/domain/src/main/scala/com/digitalasset/canton/domain/block/update/TrafficControlValidator.scala
@@ -127,9 +127,17 @@ private[update] class TrafficControlValidator(
     // we replace it with the failed outcome from traffic validation
     val updated = result.outcome.outcome match {
       case _: DeliverableSubmissionOutcome =>
-        result.copy(outcome = trafficConsumptionErrorOutcome)
+        result.copy(
+          outcome = trafficConsumptionErrorOutcome,
+          latestSequencerEventTimestamp = None,
+        )
       // Otherwise we keep the existing outcome
-      case 
SubmissionOutcome.Discard => result + case SubmissionOutcome.Discard => result.copy(latestSequencerEventTimestamp = None) + } + if (result.latestSequencerEventTimestamp.isDefined) { + logger.debug( + s"An event addressed to the sequencer (likely a topology event) was rejected due to a traffic control error. For that reason the lastSequencerEventTimestamp was not updated, as the event will not be delivered to the sequencer. ${trafficConsumptionErrorOutcome.outcome}" + ) } recordSequencingWasted( signedOrderingRequest, @@ -195,7 +203,7 @@ private[update] class TrafficControlValidator( case error: SequencerRateLimitError.OutdatedEventCost => logger.info( s"Event cost for event at $sequencingTimestamp from sender ${request.content.sender} sent" + - s" to sequencer ${orderingRequest.signature.signedBy} was outdated: $error." + s" to sequencer ${orderingRequest.content.sequencerId} was outdated: $error." ) invalidSubmissionRequest( request.content, diff --git a/sdk/canton/community/domain/src/main/scala/com/digitalasset/canton/domain/mediator/Mediator.scala b/sdk/canton/community/domain/src/main/scala/com/digitalasset/canton/domain/mediator/Mediator.scala index 9a318f7f2aa5..6fbd462f6aaf 100644 --- a/sdk/canton/community/domain/src/main/scala/com/digitalasset/canton/domain/mediator/Mediator.scala +++ b/sdk/canton/community/domain/src/main/scala/com/digitalasset/canton/domain/mediator/Mediator.scala @@ -30,12 +30,7 @@ import com.digitalasset.canton.protocol.{DynamicDomainParametersWithValidity, Re import com.digitalasset.canton.sequencing.* import com.digitalasset.canton.sequencing.client.RichSequencerClient import com.digitalasset.canton.sequencing.handlers.DiscardIgnoredEvents -import com.digitalasset.canton.sequencing.protocol.{ - ClosedEnvelope, - Envelope, - OpenEnvelope, - SequencedEvent, -} +import com.digitalasset.canton.sequencing.protocol.{ClosedEnvelope, OpenEnvelope, SequencedEvent} import com.digitalasset.canton.store.CursorPrehead.SequencerCounterCursorPrehead import com.digitalasset.canton.store.SequencedEventStore.OrdinarySequencedEvent import com.digitalasset.canton.store.{SequencedEventStore, SequencerCounterTrackerStore} @@ -243,12 +238,9 @@ private[mediator] class Mediator( } yield () } - private def handler: ApplicationHandler[Lambda[ - `+X <: Envelope[_]` => Traced[Seq[OrdinarySequencedEvent[X]]] - ], ClosedEnvelope] = - new ApplicationHandler[Lambda[ - `+X <: Envelope[_]` => Traced[Seq[OrdinarySequencedEvent[X]]] - ], ClosedEnvelope] { + private def handler: ApplicationHandler[OrdinaryEnvelopeBox, ClosedEnvelope] = + new ApplicationHandler[OrdinaryEnvelopeBox, ClosedEnvelope] { + override def name: String = s"mediator-$mediatorId" override def subscriptionStartsAt( diff --git a/sdk/canton/community/domain/src/main/scala/com/digitalasset/canton/domain/mediator/store/DbMediatorDomainConfigurationStore.scala b/sdk/canton/community/domain/src/main/scala/com/digitalasset/canton/domain/mediator/store/DbMediatorDomainConfigurationStore.scala index 45710f8a3d41..0a87133b6f0e 100644 --- a/sdk/canton/community/domain/src/main/scala/com/digitalasset/canton/domain/mediator/store/DbMediatorDomainConfigurationStore.scala +++ b/sdk/canton/community/domain/src/main/scala/com/digitalasset/canton/domain/mediator/store/DbMediatorDomainConfigurationStore.scala @@ -79,26 +79,6 @@ class DbMediatorDomainConfigurationStore( domain_id = excluded.domain_id, static_domain_parameters = excluded.static_domain_parameters, sequencer_connection = excluded.sequencer_connection""" - case _: 
DbStorage.Profile.Oracle => - sqlu"""merge into mediator_domain_configuration mdc - using ( - select - $domainId domain_id, - $domainParameters static_domain_parameters, - $sequencerConnection sequencer_connection - from dual - ) excluded - on (mdc."LOCK" = 'X') - when matched then - update set - mdc.domain_id = excluded.domain_id, - mdc.static_domain_parameters = excluded.static_domain_parameters, - mdc.sequencer_connection = excluded.sequencer_connection - when not matched then - insert (domain_id, static_domain_parameters, sequencer_connection) - values (excluded.domain_id, excluded.static_domain_parameters, excluded.sequencer_connection) - """ - }, "save-configuration", ) diff --git a/sdk/canton/community/domain/src/main/scala/com/digitalasset/canton/domain/mediator/store/FinalizedResponseStore.scala b/sdk/canton/community/domain/src/main/scala/com/digitalasset/canton/domain/mediator/store/FinalizedResponseStore.scala index 9537cf0d5251..7f9b736cd084 100644 --- a/sdk/canton/community/domain/src/main/scala/com/digitalasset/canton/domain/mediator/store/FinalizedResponseStore.scala +++ b/sdk/canton/community/domain/src/main/scala/com/digitalasset/canton/domain/mediator/store/FinalizedResponseStore.scala @@ -208,22 +208,12 @@ private[mediator] class DbFinalizedResponseStore( traceContext: TraceContext, callerCloseContext: CloseContext, ): FutureUnlessShutdown[Unit] = { - val insert = storage.profile match { - case _: DbStorage.Profile.Oracle => - sqlu"""insert - /*+ IGNORE_ROW_ON_DUPKEY_INDEX ( med_response_aggregations ( request_id ) ) */ - into med_response_aggregations(request_id, mediator_confirmation_request, version, verdict, request_trace_context) - values ( - ${finalizedResponse.requestId},${finalizedResponse.request},${finalizedResponse.version},${finalizedResponse.verdict}, - ${SerializableTraceContext(finalizedResponse.requestTraceContext)} - )""" - case _ => - sqlu"""insert into med_response_aggregations(request_id, mediator_confirmation_request, version, verdict, request_trace_context) - values ( - ${finalizedResponse.requestId},${finalizedResponse.request},${finalizedResponse.version},${finalizedResponse.verdict}, - ${SerializableTraceContext(finalizedResponse.requestTraceContext)} - ) on conflict do nothing""" - } + val insert = + sqlu"""insert into med_response_aggregations(request_id, mediator_confirmation_request, version, verdict, request_trace_context) + values ( + ${finalizedResponse.requestId},${finalizedResponse.request},${finalizedResponse.version},${finalizedResponse.verdict}, + ${SerializableTraceContext(finalizedResponse.requestTraceContext)} + ) on conflict do nothing""" CloseContext.withCombinedContext(callerCloseContext, closeContext, timeouts, logger) { closeContext => diff --git a/sdk/canton/community/domain/src/main/scala/com/digitalasset/canton/domain/metrics/BftOrderingMetrics.scala b/sdk/canton/community/domain/src/main/scala/com/digitalasset/canton/domain/metrics/BftOrderingMetrics.scala index 02b0c318b668..168e98fb2774 100644 --- a/sdk/canton/community/domain/src/main/scala/com/digitalasset/canton/domain/metrics/BftOrderingMetrics.scala +++ b/sdk/canton/community/domain/src/main/scala/com/digitalasset/canton/domain/metrics/BftOrderingMetrics.scala @@ -193,7 +193,6 @@ class BftOrderingMetrics( object labels { val Tag: String = "tag" val Sender: String = "sender" - val ForSequencer: String = "for-sequencer" object outcome { val Key: String = "outcome" diff --git 
a/sdk/canton/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/integrations/state/DbSequencerStateManagerStore.scala b/sdk/canton/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/integrations/state/DbSequencerStateManagerStore.scala index f2a064606dec..6dd83b18c460 100644 --- a/sdk/canton/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/integrations/state/DbSequencerStateManagerStore.scala +++ b/sdk/canton/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/integrations/state/DbSequencerStateManagerStore.scala @@ -111,22 +111,10 @@ class DbSequencerStateManagerStore( // First add all aggregation ids with their expiry timestamp and rules, // then add the information about the aggregated senders. - val addAggregationIdsQ = storage.profile match { - case _: DbStorage.Profile.H2 | _: DbStorage.Profile.Postgres => - """insert into seq_in_flight_aggregation(aggregation_id, max_sequencing_time, aggregation_rule) - values (?, ?, ?) - on conflict do nothing - """ - case _: DbStorage.Profile.Oracle => - """merge /*+ INDEX ( seq_in_flight_aggregation ( aggregation_id ) ) */ - into seq_in_flight_aggregation ifa - using (select ? aggregation_id, ? max_sequencing_time, ? aggregation_rule from dual) input - on (ifa.aggregation_id = input.aggregation_id) - when not matched then - insert (aggregation_id, max_sequencing_time, aggregation_rule) - values (input.aggregation_id, input.max_sequencing_time, input.aggregation_rule) - """ - } + val addAggregationIdsQ = + """insert into seq_in_flight_aggregation(aggregation_id, max_sequencing_time, aggregation_rule) + values (?, ?, ?) + on conflict do nothing""" implicit val setParameterAggregationRule: SetParameter[AggregationRule] = AggregationRule.getVersionedSetParameter val freshAggregations = updates @@ -143,21 +131,10 @@ class DbSequencerStateManagerStore( pp.>>(rule) } - val addSendersQ = storage.profile match { - case _: DbStorage.Profile.H2 | _: DbStorage.Profile.Postgres => - """insert into seq_in_flight_aggregated_sender(aggregation_id, sender, sequencing_timestamp, signatures) - values (?, ?, ?, ?) - on conflict do nothing""" - case _: DbStorage.Profile.Oracle => - """merge /*+ INDEX ( seq_in_flight_aggregated_sender ( aggregation_id, sender ) ) */ - into seq_in_flight_aggregated_sender ifas - using (select ? aggregation_id, ? sender, ? sequencing_timestamp, ? signatures from dual) input - on (ifas.aggregation_id = input.aggregation_id and ifas.sender = input.sender) - when not matched then - insert (aggregation_id, sender, sequencing_timestamp, signatures) - values (input.aggregation_id, input.sender, input.sequencing_timestamp, input.signatures) - """ - } + val addSendersQ = + """insert into seq_in_flight_aggregated_sender(aggregation_id, sender, sequencing_timestamp, signatures) + values (?, ?, ?, ?) 
+ on conflict do nothing""" implicit val setParameterAggregatedSignaturesOfSender : SetParameter[AggregatedSignaturesOfSender] = AggregatedSignaturesOfSender.getVersionedSetParameter diff --git a/sdk/canton/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/SequencerFactory.scala b/sdk/canton/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/SequencerFactory.scala index cbe6e5a78839..ad47435c75b7 100644 --- a/sdk/canton/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/SequencerFactory.scala +++ b/sdk/canton/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/SequencerFactory.scala @@ -65,7 +65,6 @@ abstract class DatabaseSequencerFactory( SequencerStore( storage, protocolVersion, - config.writer.maxSqlInListSize, timeouts, loggerFactory, sequencerId, diff --git a/sdk/canton/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/SequencerWriterConfig.scala b/sdk/canton/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/SequencerWriterConfig.scala index 21529c49e9cc..b6aa82d52108 100644 --- a/sdk/canton/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/SequencerWriterConfig.scala +++ b/sdk/canton/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/SequencerWriterConfig.scala @@ -7,7 +7,6 @@ import cats.syntax.option.* import com.daml.nonempty.NonEmpty import com.digitalasset.canton.config import com.digitalasset.canton.config.NonNegativeFiniteDuration -import com.digitalasset.canton.config.RequireTypes.PositiveNumeric sealed trait CommitMode { private[sequencer] val postgresSettings: NonEmpty[Seq[String]] @@ -41,7 +40,6 @@ object CommitMode { * @param eventWriteBatchMaxSize max event batch size to flush to the database. * @param eventWriteBatchMaxDuration max duration to collect events for a batch before triggering a write. * @param commitMode optional commit mode that if set will be validated to ensure that the connection/db settings have been configured. Defaults to [[CommitMode.Synchronous]]. - * @param maxSqlInListSize will limit the number of items in a SQL in clause. useful for databases where this may have a low limit (e.g. Oracle). * @param checkpointInterval an interval at which to generate sequencer counter checkpoints for all members. 
*/ sealed trait SequencerWriterConfig { @@ -55,7 +53,6 @@ sealed trait SequencerWriterConfig { eventWriteBatchMaxSize: Int, eventWriteBatchMaxDuration: NonNegativeFiniteDuration, commitModeValidation: Option[CommitMode], - maxSqlInListSize: PositiveNumeric[Int], checkpointInterval: NonNegativeFiniteDuration, ): SequencerWriterConfig } => @@ -68,7 +65,6 @@ sealed trait SequencerWriterConfig { val eventWriteBatchMaxSize: Int val eventWriteBatchMaxDuration: NonNegativeFiniteDuration val commitModeValidation: Option[CommitMode] - val maxSqlInListSize: PositiveNumeric[Int] /** how frequently to generate counter checkpoints for all members */ val checkpointInterval: NonNegativeFiniteDuration @@ -82,7 +78,6 @@ sealed trait SequencerWriterConfig { eventWriteBatchMaxSize: Int = this.eventWriteBatchMaxSize, eventWriteBatchMaxDuration: NonNegativeFiniteDuration = this.eventWriteBatchMaxDuration, commitModeValidation: Option[CommitMode] = this.commitModeValidation, - maxSqlInListSize: PositiveNumeric[Int] = this.maxSqlInListSize, checkpointInterval: NonNegativeFiniteDuration = this.checkpointInterval, ): SequencerWriterConfig = copy( @@ -94,7 +89,6 @@ sealed trait SequencerWriterConfig { eventWriteBatchMaxSize, eventWriteBatchMaxDuration, commitModeValidation, - maxSqlInListSize, checkpointInterval, ) } @@ -106,9 +100,6 @@ sealed trait SequencerWriterConfig { object SequencerWriterConfig { val DefaultPayloadTimestampMargin: NonNegativeFiniteDuration = NonNegativeFiniteDuration.ofSeconds(60L) - // the Oracle limit is likely 1000 however this is currently only used for payload lookups on conflicts (savePayloads) - // so just set a bit above the default max payload batch size (50) - val DefaultMaxSqlInListSize: PositiveNumeric[Int] = PositiveNumeric.tryCreate(250) val DefaultCheckpointInterval: config.NonNegativeFiniteDuration = config.NonNegativeFiniteDuration.ofSeconds(30) @@ -127,7 +118,6 @@ object SequencerWriterConfig { override val eventWriteBatchMaxDuration: NonNegativeFiniteDuration = NonNegativeFiniteDuration.ofMillis(20), override val commitModeValidation: Option[CommitMode] = CommitMode.Default.some, - override val maxSqlInListSize: PositiveNumeric[Int] = DefaultMaxSqlInListSize, override val checkpointInterval: NonNegativeFiniteDuration = DefaultCheckpointInterval, ) extends SequencerWriterConfig @@ -146,7 +136,6 @@ object SequencerWriterConfig { override val eventWriteBatchMaxDuration: NonNegativeFiniteDuration = NonNegativeFiniteDuration.ofMillis(5), override val commitModeValidation: Option[CommitMode] = CommitMode.Default.some, - override val maxSqlInListSize: PositiveNumeric[Int] = DefaultMaxSqlInListSize, override val checkpointInterval: NonNegativeFiniteDuration = DefaultCheckpointInterval, ) extends SequencerWriterConfig } diff --git a/sdk/canton/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/WriterStartupError.scala b/sdk/canton/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/WriterStartupError.scala index 4973ca0f2296..d188917f134b 100644 --- a/sdk/canton/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/WriterStartupError.scala +++ b/sdk/canton/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/WriterStartupError.scala @@ -27,7 +27,7 @@ object WriterStartupError { false // likely set at the database or user level, so retrying won't make a difference } - /** We only support running some features with certain types of databases (locking for HA 
requires postgres or oracle), + /** We only support running some features with certain types of databases (locking for HA requires postgres), * an enterprise config validation should prevent starting a node with a bad configuration however if we reach creating * a Writer with this config this error will be returned. */ diff --git a/sdk/canton/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/block/BlockSequencer.scala b/sdk/canton/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/block/BlockSequencer.scala index a950c5543f30..d1a39bdde35d 100644 --- a/sdk/canton/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/block/BlockSequencer.scala +++ b/sdk/canton/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/block/BlockSequencer.scala @@ -286,7 +286,7 @@ class BlockSequencer( SendAsyncError.TrafficControlError( TrafficControlErrorReason.Error( TrafficControlErrorReason.Error.Reason.OutdatedTrafficCost( - s"Submission was refused because traffic cost was outdated. Re-submit after the having observed the validation timestamp and processed its topology state: $outdated" + s"Submission was refused because traffic cost was outdated. Re-submit after having observed the validation timestamp and processed its topology state: $outdated" ) ) ): SendAsyncError diff --git a/sdk/canton/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/store/DbSequencerDomainConfigurationStore.scala b/sdk/canton/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/store/DbSequencerDomainConfigurationStore.scala index 46c80322db2c..511e64b0d589 100644 --- a/sdk/canton/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/store/DbSequencerDomainConfigurationStore.scala +++ b/sdk/canton/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/store/DbSequencerDomainConfigurationStore.scala @@ -73,23 +73,6 @@ class DbSequencerDomainConfigurationStore( values ($domainId, $domainParameters) on conflict (lock) do update set domain_id = excluded.domain_id, static_domain_parameters = excluded.static_domain_parameters""" - case _: DbStorage.Profile.Oracle => - sqlu"""merge into sequencer_domain_configuration mdc - using ( - select - $domainId domain_id, - $domainParameters static_domain_parameters - from dual - ) excluded - on (mdc."LOCK" = 'X') - when matched then - update set mdc.domain_id = excluded.domain_id, - mdc.static_domain_parameters = excluded.static_domain_parameters - when not matched then - insert (domain_id, static_domain_parameters) - values (excluded.domain_id, excluded.static_domain_parameters) - """ - }, "save-configuration", ) diff --git a/sdk/canton/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/store/DbSequencerStore.scala b/sdk/canton/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/store/DbSequencerStore.scala index 12a616c610ef..c18e9337cd8d 100644 --- a/sdk/canton/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/store/DbSequencerStore.scala +++ b/sdk/canton/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/store/DbSequencerStore.scala @@ -15,7 +15,7 @@ import com.daml.nonempty.catsinstances.* import com.daml.nonempty.{NonEmpty, NonEmptyUtil} import com.digitalasset.canton.SequencerCounter import 
com.digitalasset.canton.config.ProcessingTimeout -import com.digitalasset.canton.config.RequireTypes.{NonNegativeInt, PositiveNumeric} +import com.digitalasset.canton.config.RequireTypes.NonNegativeInt import com.digitalasset.canton.data.CantonTimestamp import com.digitalasset.canton.domain.block.UninitializedBlockHeight import com.digitalasset.canton.domain.sequencing.sequencer.{ @@ -29,7 +29,7 @@ import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} import com.digitalasset.canton.resource.DbStorage import com.digitalasset.canton.resource.DbStorage.DbAction.ReadOnly import com.digitalasset.canton.resource.DbStorage.Implicits.BuilderChain.* -import com.digitalasset.canton.resource.DbStorage.Profile.{H2, Oracle, Postgres} +import com.digitalasset.canton.resource.DbStorage.Profile.{H2, Postgres} import com.digitalasset.canton.resource.DbStorage.* import com.digitalasset.canton.sequencing.protocol.MessageId import com.digitalasset.canton.store.db.DbDeserializationException @@ -40,13 +40,11 @@ import com.digitalasset.canton.util.{EitherTUtil, retry} import com.digitalasset.canton.version.ProtocolVersion import com.google.common.annotations.VisibleForTesting import com.google.protobuf.ByteString -import com.zaxxer.hikari.pool.HikariProxyConnection -import oracle.jdbc.{OracleArray, OracleConnection} import org.h2.api.ErrorCode as H2ErrorCode import org.postgresql.util.PSQLState import slick.jdbc.* -import java.sql.{Connection, JDBCType, SQLException, SQLNonTransientException} +import java.sql.{JDBCType, SQLException, SQLNonTransientException} import java.util.UUID import scala.annotation.tailrec import scala.collection.immutable.SortedSet @@ -59,7 +57,6 @@ import scala.util.{Failure, Success} class DbSequencerStore( storage: DbStorage, protocolVersion: ProtocolVersion, - maxInClauseSize: PositiveNumeric[Int], override protected val timeouts: ProcessingTimeout, override protected val loggerFactory: NamedLoggerFactory, sequencerMember: Member, @@ -82,46 +79,11 @@ class DbSequencerStore( private implicit val setRecipientsArrayOParameter : SetParameter[Option[NonEmpty[SortedSet[SequencerMemberId]]]] = (v, pp) => { - storage.profile match { - case _: Oracle => - val OracleIntegerArray = "INTEGER_ARRAY" - - val maybeArray: Option[Array[Int]] = v.map(_.toArray.map(_.unwrap)) - - // make sure we do the right thing whether we are using a connection pooled connection or not - val jdbcArray = maybeArray.map { - pp.ps.getConnection match { - case hikari: HikariProxyConnection => - hikari.unwrap(classOf[OracleConnection]).createARRAY(OracleIntegerArray, _) - case oracle: OracleConnection => - oracle.createARRAY(OracleIntegerArray, _) - case c: Connection => - sys.error( - s"Unsupported connection type for creating Oracle integer array: ${c.getClass.getSimpleName}" - ) - } - } + val jdbcArray = v + .map(_.toArray.map(id => Int.box(id.unwrap): AnyRef)) + .map(pp.ps.getConnection.createArrayOf("integer", _)) - // we need to bypass the slick wrapper because we need to call the setNull method below tailored for - // user defined types since we are using a custom oracle array - def setOracleArrayOption(value: Option[AnyRef], sqlType: Int): Unit = { - val npos = pp.pos + 1 - value match { - case Some(v) => pp.ps.setObject(npos, v, sqlType) - case None => pp.ps.setNull(npos, sqlType, OracleIntegerArray) - } - pp.pos = npos - } - setOracleArrayOption(jdbcArray, JDBCType.ARRAY.getVendorTypeNumber) - - case _ => - val jdbcArray = v - .map(_.toArray.map(id => Int.box(id.unwrap): AnyRef)) - 
.map(pp.ps.getConnection.createArrayOf("integer", _)) - - pp.setObjectOption(jdbcArray, JDBCType.ARRAY.getVendorTypeNumber) - - } + pp.setObjectOption(jdbcArray, JDBCType.ARRAY.getVendorTypeNumber) } @SuppressWarnings(Array("org.wartremover.warts.AsInstanceOf", "org.wartremover.warts.Null")) @@ -139,11 +101,6 @@ class DbSequencerStore( } storage.profile match { - case _: Oracle => - GetResult(r => Option(r.rs.getArray(r.skip.currentPos))) - .andThen(_.map(_.asInstanceOf[OracleArray].getIntArray)) - .andThen(_.map(_.map(SequencerMemberId(_)))) - .andThen(_.map(arr => NonEmptyUtil.fromUnsafe(SortedSet(arr.toSeq*)))) case _: H2 => GetResult(r => Option(r.rs.getArray(r.skip.currentPos))) .andThen(_.map(_.getArray.asInstanceOf[Array[AnyRef]].map(toInt))) @@ -175,7 +132,6 @@ class DbSequencerStore( all.find(_.value == value).toRight(s"Event type discriminator for value [$value] not found") } - @SuppressWarnings(Array("com.digitalasset.canton.SlickString")) private implicit val setEventTypeDiscriminatorParameter: SetParameter[EventTypeDiscriminator] = (etd, pp) => pp >> etd.value.toString @@ -361,7 +317,6 @@ class DbSequencerStore( ("", " = any(events.recipients)") case _: H2 => ("array_contains(events.recipients, ", ")") - case _: Oracle => sys.error("Oracle no longer supported") } override def registerMember(member: Member, timestamp: CantonTimestamp)(implicit @@ -381,11 +336,6 @@ class DbSequencerStore( values ($member, $timestamp) on conflict (member) do nothing """ - case _: Oracle => - sqlu"""insert /*+ IGNORE_ROW_ON_DUPKEY_INDEX ( sequencer_members ( member ) ) */ - into sequencer_members (member, registered_ts) - values ($member, $timestamp) - """ } id <- sql"select id from sequencer_members where member = $member" .as[SequencerMemberId] @@ -473,10 +423,6 @@ class DbSequencerStore( def insert(payloadsToInsert: NonEmpty[Seq[Payload]]): Future[Boolean] = { def isConstraintViolation(batchUpdateException: SQLException): Boolean = profile match { case Postgres(_) => batchUpdateException.getSQLState == PSQLState.UNIQUE_VIOLATION.getState - case Oracle(_) => - // error code for a unique constraint violation - // see: https://docs.oracle.com/en/database/oracle/oracle-database/19/errmg/ORA-00000.html#GUID-27437B7F-F0C3-4F1F-9C6E-6780706FB0F6 - batchUpdateException.getMessage.contains("ORA-00001") case H2(_) => batchUpdateException.getSQLState == H2ErrorCode.DUPLICATE_KEY_1.toString } @@ -527,19 +473,14 @@ class DbSequencerStore( // has inserted a conflicting value. 
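The comment above describes the conflict check behind savePayloads: each writer stamps its rows with a process-level instance discriminator, so after a unique-key violation it can separate its own earlier (retried) writes from rows written by a conflicting process. A minimal sketch of that check with simplified, hypothetical types (payload ids as String, the discriminator as UUID; not Canton's API):

import java.util.UUID

def conflictingPayloadIds(
    ourDiscriminator: UUID,
    attemptedIds: Seq[String],
    stored: Map[String, UUID], // rows read back after the unique-key violation
): Seq[String] =
  // Ids stored under a foreign discriminator were written by another process (a
  // real conflict); ids missing from `stored` were simply not committed yet and
  // can safely be retried by this writer.
  attemptedIds.filter(id => stored.get(id).exists(_ != ourDiscriminator))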
def listMissing(): EitherT[Future, SavePayloadsError, Seq[Payload]] = { val payloadIds = payloads.map(_.id) - // the max default config for number of payloads is around 50 and the max number of clauses that oracle supports is around 1000 - // so we're really unlikely to need to this IN clause splitting, but lets support it just in case as Matthias has - // already done the heavy lifting :) - val queries = DbStorage - .toInClauses_("id", payloadIds, maxInClauseSize) - .map { in => - (sql"select id, instance_discriminator from sequencer_payloads where " ++ in) - .as[(PayloadId, UUID)] - } + val query = + (sql"select id, instance_discriminator from sequencer_payloads where " ++ DbStorage + .toInClause("id", payloadIds)) + .as[(PayloadId, UUID)] for { inserted <- EitherT.right { - storage.sequentialQueryAndCombine(queries, functionFullName) + storage.query(query, functionFullName) } map (_.toMap) // take all payloads we were expecting and then look up from inserted whether they are present and if they have // a matching instance discriminator (meaning we put them there) @@ -596,24 +537,13 @@ class DbSequencerStore( override def saveEvents(instanceIndex: Int, events: NonEmpty[Seq[Sequenced[PayloadId]]])(implicit traceContext: TraceContext ): Future[Unit] = { - val saveSql = storage.profile match { - case _: H2 | _: Postgres => - """insert into sequencer_events ( - | ts, node_index, event_type, message_id, sender, recipients, - | payload_id, topology_timestamp, trace_context, error - |) - | values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?) - | on conflict do nothing""".stripMargin - case _: Oracle => - """merge /*+ INDEX ( sequencer_events ( ts ) ) */ - |into sequencer_events - |using (select ? ts from dual) input - |on (sequencer_events.ts = input.ts) - |when not matched then - | insert (ts, node_index, event_type, message_id, sender, recipients, payload_id, - | topology_timestamp, trace_context, error) - | values (input.ts, ?, ?, ?, ?, ?, ?, ?, ?, ?)""".stripMargin - } + val saveSql = + """insert into sequencer_events ( + | ts, node_index, event_type, message_id, sender, recipients, + | payload_id, topology_timestamp, trace_context, error + |) + | values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?) 
+ | on conflict do nothing""".stripMargin storage.queryAndUpdate( DbStorage.bulkOperation_(saveSql, events, storage.profile) { pp => event => @@ -667,14 +597,6 @@ class DbSequencerStore( when not matched then insert (node_index, watermark_ts, sequencer_online) values ($instanceIndex, $ts, ${false}) """ - case _: Oracle => - sqlu"""merge into sequencer_watermarks using dual - on (node_index = $instanceIndex) - when matched and watermark_ts >= $ts then - update set watermark_ts = $ts, sequencer_online = ${false} - when not matched then - insert (node_index, watermark_ts, sequencer_online) values ($instanceIndex, $ts, ${false}) - """ } for { @@ -717,14 +639,6 @@ class DbSequencerStore( when not matched then insert (node_index, watermark_ts, sequencer_online) values ($instanceIndex, $ts, ${true}) """ - case _: Oracle => - sqlu"""merge into sequencer_watermarks using dual - on (node_index = $instanceIndex) - when matched then - update set watermark_ts = $ts where sequencer_online = ${true} - when not matched then - insert (node_index, watermark_ts, sequencer_online) values ($instanceIndex, $ts, ${true}) - """ } for { @@ -764,18 +678,7 @@ class DbSequencerStore( from sequencer_watermarks where node_index = $instanceIndex""" def watermark(row: (CantonTimestamp, Boolean)) = Watermark(row._1, row._2) - - profile match { - case _: H2 | _: Postgres => - query.as[(CantonTimestamp, Boolean)].headOption.map(_.map(watermark)) - case _: Oracle => - query - .as[(CantonTimestamp, Int)] - .headOption - .map(_.map { case (ts, onlineN) => - watermark((ts, onlineN != 0)) - }) - } + query.as[(CantonTimestamp, Boolean)].headOption.map(_.map(watermark)) }, functionFullName, maxRetries, @@ -784,12 +687,7 @@ class DbSequencerStore( override def goOffline(instanceIndex: Int)(implicit traceContext: TraceContext): Future[Unit] = storage.update_( - profile match { - case _: H2 | _: Postgres => - sqlu"update sequencer_watermarks set sequencer_online = false where node_index = $instanceIndex" - case _: Oracle => - sqlu"update sequencer_watermarks set sequencer_online = 0 where node_index = $instanceIndex" - }, + sqlu"update sequencer_watermarks set sequencer_online = false where node_index = $instanceIndex", functionFullName, ) @@ -810,7 +708,7 @@ class DbSequencerStore( on conflict (node_index) do update set watermark_ts = $watermark, sequencer_online = true """ - case _: H2 | _: Oracle => + case _: H2 => sqlu"""merge into sequencer_watermarks using dual on (node_index = $instanceIndex) when matched then @@ -885,29 +783,6 @@ class DbSequencerStore( case _: H2 => h2PostgresQueryEvents("array_contains(events.recipients, ", ")", safeWatermark) - - case _: Oracle => - sql""" - select events.ts, events.node_index, events.event_type, events.message_id, events.sender, - events.recipients, payloads.id, payloads.content, events.topology_timestamp, - events.trace_context, events.error - from sequencer_events events - left join sequencer_payloads payloads - on events.payload_id = payloads.id - inner join sequencer_watermarks watermarks - on events.node_index = watermarks.node_index - where - ((events.recipients is null) or $memberId IN (SELECT * FROM TABLE(events.recipients))) - and ( - -- inclusive timestamp bound that defaults to MinValue if unset - events.ts >= $inclusiveFromTimestamp - -- only consider events within the safe watermark - and events.ts <= $safeWatermark - -- if the sequencer that produced the event is offline, only consider up until its offline watermark - and (watermarks.sequencer_online <> 0 or events.ts <= 
watermarks.watermark_ts) - ) - order by events.ts asc - fetch next $limit rows only""".stripMargin } query.as[Sequenced[Payload]] @@ -930,8 +805,6 @@ class DbSequencerStore( val query = profile match { case _: H2 | _: Postgres => sql"select min(watermark_ts) from sequencer_watermarks where sequencer_online = true" - case _: Oracle => - sql"select min(watermark_ts) from sequencer_watermarks where sequencer_online <> 0" } // `min` may return null that is wrapped into None query.as[Option[CantonTimestamp]].headOption.map(_.flatten) @@ -1261,15 +1134,6 @@ class DbSequencerStore( when matched and ts <= $ts then update set ts = $ts, latest_sequencer_event_ts = $latestSequencerEventTimestamp """ - case _: Oracle => - sqlu"""merge into sequencer_counter_checkpoints using dual - on (member = $memberId and counter = $counter) - when matched and ts <= $ts then - update set ts = $ts, latest_sequencer_event_ts = $latestSequencerEventTimestamp - when not matched then - insert (member, counter, ts, latest_sequencer_event_ts) - values ($memberId, $counter, $ts, $latestSequencerEventTimestamp) - """ } } @@ -1357,14 +1221,6 @@ class DbSequencerStore( when not matched then insert values ($member, $timestamp) """ - case _: Oracle => - sqlu"""merge into sequencer_acknowledgements using dual - on (member = $member) - when matched then - update set ts = $timestamp where $timestamp > ts - when not matched then - insert (member, ts) values ($member, $timestamp) - """ }, functionFullName, ) @@ -1542,9 +1398,6 @@ class DbSequencerStore( case H2(_) => // we don't worry about replicas or commit modes in h2 EitherTUtil.unit - case Oracle(_) => - // TODO(#6942): unknown how to query the current commit mode for oracle - EitherTUtil.unit case Postgres(_) => for { settingO <- EitherT.right( diff --git a/sdk/canton/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/store/SequencerStore.scala b/sdk/canton/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/store/SequencerStore.scala index c157103260be..792abcd7e4ad 100644 --- a/sdk/canton/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/store/SequencerStore.scala +++ b/sdk/canton/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/store/SequencerStore.scala @@ -11,7 +11,7 @@ import cats.syntax.parallel.* import cats.{Functor, Show} import com.daml.nonempty.NonEmpty import com.digitalasset.canton.config.ProcessingTimeout -import com.digitalasset.canton.config.RequireTypes.{NonNegativeInt, PositiveNumeric} +import com.digitalasset.canton.config.RequireTypes.NonNegativeInt import com.digitalasset.canton.data.CantonTimestamp import com.digitalasset.canton.domain.sequencing.sequencer.PruningError.UnsafePruningPoint import com.digitalasset.canton.domain.sequencing.sequencer.* @@ -814,7 +814,6 @@ object SequencerStore { def apply( storage: Storage, protocolVersion: ProtocolVersion, - maxInClauseSize: PositiveNumeric[Int], timeouts: ProcessingTimeout, loggerFactory: NamedLoggerFactory, sequencerMember: Member, @@ -833,7 +832,6 @@ object SequencerStore { new DbSequencerStore( dbStorage, protocolVersion, - maxInClauseSize, timeouts, loggerFactory, sequencerMember, diff --git a/sdk/canton/community/domain/src/test/scala/com/digitalasset/canton/domain/HasTopologyTransactionTestFactory.scala b/sdk/canton/community/domain/src/test/scala/com/digitalasset/canton/domain/HasTopologyTransactionTestFactory.scala new file mode 100644 index 000000000000..51fe6c2f3f41 
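Before the new test files below, a runnable sketch of the watermark read-horizon that the DbSequencerStore queries above encode (NodeWatermark and Long timestamps are simplified stand-ins, not Canton's types):

final case class NodeWatermark(nodeIndex: Int, ts: Long, online: Boolean)

// Mirrors "select min(watermark_ts) from sequencer_watermarks where sequencer_online = true":
// reads must not go past the lowest watermark of any online writer.
def safeWatermark(watermarks: Seq[NodeWatermark]): Option[Long] = {
  val online = watermarks.filter(_.online).map(_.ts)
  if (online.isEmpty) None else Some(online.min)
}

// An event is readable iff it is within the safe watermark and, when its writer
// is offline, not beyond that writer's final watermark.
def isReadable(eventTs: Long, writer: NodeWatermark, safe: Long): Boolean =
  eventTs <= safe && (writer.online || eventTs <= writer.ts)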
--- /dev/null
+++ b/sdk/canton/community/domain/src/test/scala/com/digitalasset/canton/domain/HasTopologyTransactionTestFactory.scala
@@ -0,0 +1,131 @@
+// Copyright (c) 2024 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
+// SPDX-License-Identifier: Apache-2.0
+
+package com.digitalasset.canton.domain
+
+import com.digitalasset.canton.crypto.{Fingerprint, HashPurpose, Signature}
+import com.digitalasset.canton.data.CantonTimestamp
+import com.digitalasset.canton.domain.sequencing.sequencer.OrderingRequest
+import com.digitalasset.canton.domain.sequencing.sequencer.Sequencer.SignedOrderingRequest
+import com.digitalasset.canton.sequencing.protocol.*
+import com.digitalasset.canton.topology.processing.TopologyTransactionTestFactory
+import com.digitalasset.canton.topology.{DefaultTestIdentities, Member}
+import com.digitalasset.canton.{BaseTest, HasExecutionContext, HasExecutorService}
+import com.google.protobuf.ByteString
+
+import scala.concurrent.Future
+
+trait HasTopologyTransactionTestFactory {
+  self: BaseTest & HasExecutionContext & HasExecutorService =>
+
+  protected final lazy val topologyTransactionFactory =
+    new TopologyTransactionTestFactory(loggerFactory, executorService)
+  // using lazy vals to avoid eager initialization of topologyTransactionFactory, in case the protocol version is < dev
+  protected final lazy val SigningKeys = topologyTransactionFactory.SigningKeys
+  protected final lazy val participant1Key = topologyTransactionFactory.SigningKeys.key5
+
+  protected final val ts0 = CantonTimestamp.Epoch
+  protected final val ts1 = ts0.plusSeconds(10)
+
+  protected final def sequencerSignedAndSenderSignedSubmissionRequest(
+      sender: Member
+  ): Future[SignedOrderingRequest] =
+    sequencerSignedAndSenderSignedSubmissionRequest(sender, Recipients.cc(sender))
+
+  protected final def sequencerSignedAndSenderSignedSubmissionRequest(
+      sender: Member,
+      recipients: Recipients,
+      messageId: MessageId = MessageId.randomMessageId(),
+      topologyTimestamp: Option[CantonTimestamp] = None,
+      badEnvelopeSignature: Boolean = false,
+      signingKey: Fingerprint = participant1Key.fingerprint,
+      maxSequencingTime: CantonTimestamp = CantonTimestamp.MaxValue,
+      aggregationRule: Option[AggregationRule] = None,
+  ): Future[SignedOrderingRequest] =
+    for {
+      request <- submissionRequest(
+        sender,
+        recipients,
+        messageId,
+        topologyTimestamp,
+        badEnvelopeSignature,
+        signingKey,
+        maxSequencingTime,
+        aggregationRule,
+      )
+      hash =
+        topologyTransactionFactory.cryptoApi.crypto.pureCrypto.digest(
+          HashPurpose.SubmissionRequestSignature,
+          request.getCryptographicEvidence,
+        )
+      signed <- topologyTransactionFactory.cryptoApi.crypto.privateCrypto
+        .sign(hash, signingKey)
+        .map(signature =>
+          SignedContent(
+            request,
+            signature,
+            Some(ts1),
+            testedProtocolVersion,
+          )
+        )
+        .leftMap(_.toString)
+        .value
+        .failOnShutdown
+        .map(_.value)
+    } yield SignedContent(
+      OrderingRequest.create(DefaultTestIdentities.sequencerId, signed, testedProtocolVersion),
+      Signature.noSignature,
+      Some(ts0.immediateSuccessor),
+      testedProtocolVersion,
+    )
+
+  protected final def submissionRequest(
+      sender: Member,
+      recipients: Recipients,
+      messageId: MessageId = MessageId.randomMessageId(),
+      topologyTimestamp: Option[CantonTimestamp] = None,
+      badEnvelopeSignature: Boolean = false,
+      envelopeSigningKey: Fingerprint = topologyTransactionFactory.SigningKeys.key5.fingerprint,
+      maxSequencingTime: CantonTimestamp = CantonTimestamp.MaxValue,
+      aggregationRule: Option[AggregationRule] = None,
+  ): Future[SubmissionRequest] =
+    for {
+      envelope <- signEnvelope(
+        ClosedEnvelope.create(ByteString.EMPTY, recipients, Seq.empty, testedProtocolVersion),
+        badEnvelopeSignature,
+        envelopeSigningKey,
+      )
+    } yield SubmissionRequest.tryCreate(
+      sender,
+      messageId,
+      Batch[ClosedEnvelope](
+        List(
+          envelope
+        ),
+        testedProtocolVersion,
+      ),
+      maxSequencingTime,
+      topologyTimestamp,
+      aggregationRule,
+      Option.empty[SequencingSubmissionCost],
+      testedProtocolVersion,
+    )
+
+  protected final def signEnvelope(
+      envelope: ClosedEnvelope,
+      badEnvelopeSignature: Boolean = false,
+      envelopeSigningKey: Fingerprint = topologyTransactionFactory.SigningKeys.key5.fingerprint,
+  ): Future[ClosedEnvelope] = {
+    val bytes = if (badEnvelopeSignature) {
+      ByteString.copyFromUtf8("wrong content")
+    } else {
+      envelope.bytes
+    }
+    val hash = topologyTransactionFactory.cryptoApi.crypto.pureCrypto
+      .digest(HashPurpose.SignedProtocolMessageSignature, bytes)
+    topologyTransactionFactory.cryptoApi.crypto.privateCrypto
+      .sign(hash, envelopeSigningKey)
+      .valueOrFailShutdown(s"Failed to sign $envelope")
+      .map(sig => envelope.copy(signatures = Seq(sig)))
+  }
+}
diff --git a/sdk/canton/community/domain/src/test/scala/com/digitalasset/canton/domain/block/update/BlockChunkProcessorTest.scala b/sdk/canton/community/domain/src/test/scala/com/digitalasset/canton/domain/block/update/BlockChunkProcessorTest.scala
new file mode 100644
index 000000000000..6c5379d23ac1
--- /dev/null
+++ b/sdk/canton/community/domain/src/test/scala/com/digitalasset/canton/domain/block/update/BlockChunkProcessorTest.scala
@@ -0,0 +1,149 @@
+// Copyright (c) 2024 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
+// SPDX-License-Identifier: Apache-2.0
+
+package com.digitalasset.canton.domain.block.update
+
+import com.digitalasset.canton.BaseTest
+import com.digitalasset.canton.config.ProcessingTimeout
+import com.digitalasset.canton.data.CantonTimestamp
+import com.digitalasset.canton.domain.metrics.SequencerTestMetrics
+import com.digitalasset.canton.domain.sequencing.sequencer.block.BlockSequencerFactory.OrderingTimeFixMode
+import com.digitalasset.canton.domain.sequencing.sequencer.store.SequencerMemberValidator
+import com.digitalasset.canton.domain.sequencing.sequencer.traffic.SequencerRateLimitManager
+import com.digitalasset.canton.domain.sequencing.sequencer.{
+  SubmissionOutcome,
+  SubmissionRequestOutcome,
+}
+import com.digitalasset.canton.lifecycle.{CloseContext, FlagCloseable}
+import com.digitalasset.canton.sequencing.protocol.{MessageId, SubmissionRequest}
+import com.digitalasset.canton.topology.DefaultTestIdentities.{domainId, sequencerId}
+import com.digitalasset.canton.topology.TestingIdentityFactory
+import org.scalatest.wordspec.AsyncWordSpec
+
+import java.time.Instant
+
+class BlockChunkProcessorTest extends AsyncWordSpec with BaseTest {
+
+  implicit val closeContext: CloseContext = CloseContext(
+    FlagCloseable.withCloseContext(logger, ProcessingTimeout())
+  )
+
+  private val aTimestamp =
+    CantonTimestamp.assertFromInstant(Instant.parse("2024-03-08T12:00:00.000Z"))
+  private val aMessageId = MessageId.randomMessageId()
+
+  "BlockChunkProcessor.processBlockChunk" should {
+
+    "create the correct chunked update for the tick" when {
+      "processing a tick chunk and the last validated event was not addressed to the sequencer" in {
+        val tickSequencingTimestamp = aTimestamp.immediateSuccessor
+        val syncCryptoApiFake =
+          TestingIdentityFactory(loggerFactory).forOwnerAndDomain(
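The helpers above always sign a purpose-tagged digest of the canonical byte representation rather than the raw payload. A self-contained sketch of that pattern (SHA-256 and the function parameters are illustrative stand-ins, not Canton's crypto API):

import java.security.MessageDigest

final case class Signed[A](content: A, signature: Vector[Byte])

def signContent[A](
    content: A,
    serialize: A => Array[Byte],     // stands in for getCryptographicEvidence
    sign: Array[Byte] => Array[Byte] // stands in for privateCrypto.sign
): Signed[A] = {
  // Hash first, then sign the digest, mirroring digest(HashPurpose..., bytes) above.
  val digest = MessageDigest.getInstance("SHA-256").digest(serialize(content))
  Signed(content, sign(digest).toVector)
}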
sequencerId, + domainId, + tickSequencingTimestamp, + ) + val rateLimitManagerMock = mock[SequencerRateLimitManager] + val memberValidatorMock = mock[SequencerMemberValidator] + + val blockChunkProcessor = + new BlockChunkProcessor( + domainId, + testedProtocolVersion, + syncCryptoApiFake, + sequencerId, + rateLimitManagerMock, + OrderingTimeFixMode.ValidateOnly, + loggerFactory, + SequencerTestMetrics, + memberValidatorMock, + () => aMessageId, + ) + + blockChunkProcessor + .emitTick( + state = BlockUpdateGeneratorImpl.State( + lastBlockTs = aTimestamp, + lastChunkTs = aTimestamp, + latestSequencerEventTimestamp = None, + inFlightAggregations = Map.empty, + ) + ) + .map { case (state, update) => + state.lastChunkTs shouldBe tickSequencingTimestamp + state.latestSequencerEventTimestamp shouldBe Some(tickSequencingTimestamp) + update.submissionsOutcomes should matchPattern { + case Seq( + SubmissionRequestOutcome( + _, + None, + SubmissionOutcome.Deliver( + SubmissionRequest( + `sequencerId`, + `aMessageId`, + _, + `tickSequencingTimestamp`, + None, + None, + None, + ), + `tickSequencingTimestamp`, + deliverToMembers, + batch, + _, + ), + ) + ) + if deliverToMembers == Set(sequencerId) && + batch.envelopes.isEmpty => + } + } + .failOnShutdown + } + } + } + + "create an empty update" when { + "processing a tick chunk but the last validated event was addressed to the sequencer" in { + val tickSequencingTimestamp = aTimestamp.immediateSuccessor + val syncCryptoApiFake = + TestingIdentityFactory(loggerFactory).forOwnerAndDomain( + sequencerId, + domainId, + tickSequencingTimestamp, + ) + val rateLimitManagerMock = mock[SequencerRateLimitManager] + val memberValidatorMock = mock[SequencerMemberValidator] + + val blockChunkProcessor = + new BlockChunkProcessor( + domainId, + testedProtocolVersion, + syncCryptoApiFake, + sequencerId, + rateLimitManagerMock, + OrderingTimeFixMode.ValidateOnly, + loggerFactory, + SequencerTestMetrics, + memberValidatorMock, + () => aMessageId, + ) + + blockChunkProcessor + .emitTick( + state = BlockUpdateGeneratorImpl.State( + lastBlockTs = aTimestamp, + lastChunkTs = aTimestamp, + latestSequencerEventTimestamp = Some(aTimestamp), + inFlightAggregations = Map.empty, + ) + ) + .map { case (state, update) => + state.lastChunkTs shouldBe aTimestamp + state.latestSequencerEventTimestamp shouldBe Some(aTimestamp) + update.submissionsOutcomes shouldBe empty + } + .failOnShutdown + } + } +} diff --git a/sdk/canton/community/domain/src/test/scala/com/digitalasset/canton/domain/block/update/BlockUpdateGeneratorImplTest.scala b/sdk/canton/community/domain/src/test/scala/com/digitalasset/canton/domain/block/update/BlockUpdateGeneratorImplTest.scala new file mode 100644 index 000000000000..b82eb664d201 --- /dev/null +++ b/sdk/canton/community/domain/src/test/scala/com/digitalasset/canton/domain/block/update/BlockUpdateGeneratorImplTest.scala @@ -0,0 +1,144 @@ +// Copyright (c) 2024 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.domain.block.update + +import com.digitalasset.canton.config.ProcessingTimeout +import com.digitalasset.canton.data.CantonTimestamp +import com.digitalasset.canton.domain.HasTopologyTransactionTestFactory +import com.digitalasset.canton.domain.block.update.BlockUpdateGenerator.{ + EndOfBlock, + NextChunk, + TopologyTickChunk, +} +import com.digitalasset.canton.domain.block.{BlockEvents, LedgerBlockEvent, RawLedgerBlock} +import com.digitalasset.canton.domain.metrics.SequencerTestMetrics +import com.digitalasset.canton.domain.sequencing.sequencer.block.BlockSequencerFactory.OrderingTimeFixMode +import com.digitalasset.canton.domain.sequencing.sequencer.store.SequencerMemberValidator +import com.digitalasset.canton.domain.sequencing.sequencer.traffic.SequencerRateLimitManager +import com.digitalasset.canton.lifecycle.{CloseContext, FlagCloseable, FutureUnlessShutdown} +import com.digitalasset.canton.sequencing.protocol.{AllMembersOfDomain, Recipients} +import com.digitalasset.canton.topology.DefaultTestIdentities.{domainId, sequencerId} +import com.digitalasset.canton.topology.TestingIdentityFactory +import com.digitalasset.canton.tracing.{TraceContext, Traced} +import com.digitalasset.canton.{BaseTest, HasExecutionContext, HasExecutorService} +import org.scalatest.wordspec.AsyncWordSpec + +import java.time.Instant + +class BlockUpdateGeneratorImplTest + extends AsyncWordSpec + with BaseTest + with HasExecutionContext + with HasExecutorService + with HasTopologyTransactionTestFactory { + + implicit val closeContext: CloseContext = CloseContext( + FlagCloseable.withCloseContext(logger, ProcessingTimeout()) + ) + + private val aTimestamp = + CantonTimestamp.assertFromInstant(Instant.parse("2024-03-08T12:00:00.000Z")) + + "BlockUpdateGeneratorImpl.extractBlockEvents" should { + "append a topology tick event only" when { + "the block requires one" in { + val rateLimitManagerMock = mock[SequencerRateLimitManager] + val memberValidatorMock = mock[SequencerMemberValidator] + val syncCryptoApiFake = + TestingIdentityFactory(loggerFactory).forOwnerAndDomain( + sequencerId, + domainId, + aTimestamp, + ) + + val blockUpdateGenerator = + new BlockUpdateGeneratorImpl( + domainId, + testedProtocolVersion, + syncCryptoApiFake, + sequencerId, + rateLimitManagerMock, + OrderingTimeFixMode.ValidateOnly, + SequencerTestMetrics, + loggerFactory, + memberValidatorMock, + ) + + blockUpdateGenerator.extractBlockEvents( + RawLedgerBlock(1L, Seq.empty, tickTopology = true) + ) shouldBe BlockEvents(1L, Seq.empty, tickTopology = true) + + blockUpdateGenerator.extractBlockEvents( + RawLedgerBlock(1L, Seq.empty, tickTopology = false) + ) shouldBe BlockEvents(1L, Seq.empty, tickTopology = false) + } + } + } + + "BlockUpdateGeneratorImpl.chunkBlock" should { + "append a tick chunk" when { + "the block requires one" in { + val sequencerAddressedEventTimestamp = aTimestamp.immediateSuccessor + val topologyTickEventTimestamp = sequencerAddressedEventTimestamp.immediateSuccessor + val rateLimitManagerMock = mock[SequencerRateLimitManager] + val memberValidatorMock = mock[SequencerMemberValidator] + val syncCryptoApiFake = + TestingIdentityFactory(loggerFactory).forOwnerAndDomain( + sequencerId, + domainId, + topologyTickEventTimestamp, + ) + + val blockUpdateGenerator = + new BlockUpdateGeneratorImpl( + domainId, + testedProtocolVersion, + syncCryptoApiFake, + sequencerId, + rateLimitManagerMock, + OrderingTimeFixMode.ValidateOnly, + 
SequencerTestMetrics, + loggerFactory, + memberValidatorMock, + ) + + for { + signedSubmissionRequest <- FutureUnlessShutdown.outcomeF( + sequencerSignedAndSenderSignedSubmissionRequest( + topologyTransactionFactory.participant1, + Recipients.cc(AllMembersOfDomain), + ) + ) + chunks = blockUpdateGenerator.chunkBlock( + BlockEvents( + height = 1L, + Seq( + Traced( + LedgerBlockEvent.Send(sequencerAddressedEventTimestamp, signedSubmissionRequest) + )(TraceContext.empty) + ), + tickTopology = true, + ) + ) + } yield { + chunks match { + case Seq( + NextChunk(1L, 0, chunkEvents), + TopologyTickChunk, + EndOfBlock(1L), + ) => + chunkEvents.forgetNE should matchPattern { + case Seq( + Traced( + LedgerBlockEvent.Send(`sequencerAddressedEventTimestamp`, _, _) + ) + ) => + } + case _ => fail(s"Unexpected chunks $chunks") + } + } + }.failOnShutdown + } + } +} diff --git a/sdk/canton/community/domain/src/test/scala/com/digitalasset/canton/domain/sequencing/sequencer/DatabaseSequencerApiTest.scala b/sdk/canton/community/domain/src/test/scala/com/digitalasset/canton/domain/sequencing/sequencer/DatabaseSequencerApiTest.scala index 161330ebb066..a8ceb8794051 100644 --- a/sdk/canton/community/domain/src/test/scala/com/digitalasset/canton/domain/sequencing/sequencer/DatabaseSequencerApiTest.scala +++ b/sdk/canton/community/domain/src/test/scala/com/digitalasset/canton/domain/sequencing/sequencer/DatabaseSequencerApiTest.scala @@ -39,7 +39,6 @@ abstract class DatabaseSequencerApiTest extends SequencerApiTest { val sequencerStore = SequencerStore( storage, testedProtocolVersion, - dbConfig.writer.maxSqlInListSize, timeouts, loggerFactory, sequencerId, diff --git a/sdk/canton/community/domain/src/test/scala/com/digitalasset/canton/domain/sequencing/sequencer/DatabaseSequencerSnapshottingTest.scala b/sdk/canton/community/domain/src/test/scala/com/digitalasset/canton/domain/sequencing/sequencer/DatabaseSequencerSnapshottingTest.scala index bbf3a97dcbed..2e90f31ef49b 100644 --- a/sdk/canton/community/domain/src/test/scala/com/digitalasset/canton/domain/sequencing/sequencer/DatabaseSequencerSnapshottingTest.scala +++ b/sdk/canton/community/domain/src/test/scala/com/digitalasset/canton/domain/sequencing/sequencer/DatabaseSequencerSnapshottingTest.scala @@ -45,7 +45,6 @@ class DatabaseSequencerSnapshottingTest extends SequencerApiTest { val sequencerStore = SequencerStore( storage, testedProtocolVersion, - dbConfig.writer.maxSqlInListSize, timeouts, loggerFactory, sequencerId, diff --git a/sdk/canton/community/domain/src/test/scala/com/digitalasset/canton/domain/sequencing/sequencer/SequencerTest.scala b/sdk/canton/community/domain/src/test/scala/com/digitalasset/canton/domain/sequencing/sequencer/SequencerTest.scala index 4340e42f0412..d3e3ae831222 100644 --- a/sdk/canton/community/domain/src/test/scala/com/digitalasset/canton/domain/sequencing/sequencer/SequencerTest.scala +++ b/sdk/canton/community/domain/src/test/scala/com/digitalasset/canton/domain/sequencing/sequencer/SequencerTest.scala @@ -102,7 +102,6 @@ class SequencerTest extends FixtureAsyncWordSpec with BaseTest with HasExecution val sequencerStore = SequencerStore( storage, testedProtocolVersion, - dbConfig.writer.maxSqlInListSize, timeouts, loggerFactory, topologyClientMember, diff --git a/sdk/canton/community/domain/src/test/scala/com/digitalasset/canton/domain/sequencing/sequencer/block/BlockSequencerTest.scala b/sdk/canton/community/domain/src/test/scala/com/digitalasset/canton/domain/sequencing/sequencer/block/BlockSequencerTest.scala index 
7bd491c7171b..9ab74a1de5d1 100644 --- a/sdk/canton/community/domain/src/test/scala/com/digitalasset/canton/domain/sequencing/sequencer/block/BlockSequencerTest.scala +++ b/sdk/canton/community/domain/src/test/scala/com/digitalasset/canton/domain/sequencing/sequencer/block/BlockSequencerTest.scala @@ -218,7 +218,7 @@ class BlockSequencerTest .map { i => if (n == i + 1) completed.success(()) - RawLedgerBlock(i.toLong, Seq.empty) + RawLedgerBlock(i.toLong, Seq.empty, tickTopology = false) } .iterator } diff --git a/sdk/canton/community/domain/src/test/scala/com/digitalasset/canton/domain/sequencing/sequencer/store/DbSequencerStoreTest.scala b/sdk/canton/community/domain/src/test/scala/com/digitalasset/canton/domain/sequencing/sequencer/store/DbSequencerStoreTest.scala index a602741a7e62..491f20e34d1c 100644 --- a/sdk/canton/community/domain/src/test/scala/com/digitalasset/canton/domain/sequencing/sequencer/store/DbSequencerStoreTest.scala +++ b/sdk/canton/community/domain/src/test/scala/com/digitalasset/canton/domain/sequencing/sequencer/store/DbSequencerStoreTest.scala @@ -4,8 +4,6 @@ package com.digitalasset.canton.domain.sequencing.sequencer.store import com.daml.nameof.NameOf.functionFullName -import com.digitalasset.canton.config.RequireTypes.PositiveNumeric -import com.digitalasset.canton.domain.sequencing.sequencer.store.DbSequencerStoreTest.MaxInClauseSize import com.digitalasset.canton.lifecycle.CloseContext import com.digitalasset.canton.resource.DbStorage import com.digitalasset.canton.store.db.{DbTest, H2Test, PostgresTest} @@ -24,7 +22,6 @@ trait DbSequencerStoreTest extends SequencerStoreTest with MultiTenantedSequence new DbSequencerStore( storage, testedProtocolVersion, - MaxInClauseSize, timeouts, loggerFactory, sequencerMember, @@ -35,7 +32,6 @@ trait DbSequencerStoreTest extends SequencerStoreTest with MultiTenantedSequence new DbSequencerStore( storage, testedProtocolVersion, - MaxInClauseSize, timeouts, loggerFactory, sequencerMember, @@ -46,8 +42,6 @@ trait DbSequencerStoreTest extends SequencerStoreTest with MultiTenantedSequence } object DbSequencerStoreTest { - // intentionally low to expose any problems with usage of IN builder - val MaxInClauseSize = PositiveNumeric.tryCreate(2) def cleanSequencerTables( storage: DbStorage diff --git a/sdk/canton/community/drivers/reference/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/reference/store/DbReferenceBlockOrderingStore.scala b/sdk/canton/community/drivers/reference/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/reference/store/DbReferenceBlockOrderingStore.scala index debe17f7144c..327d18400324 100644 --- a/sdk/canton/community/drivers/reference/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/reference/store/DbReferenceBlockOrderingStore.scala +++ b/sdk/canton/community/drivers/reference/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/reference/store/DbReferenceBlockOrderingStore.scala @@ -13,7 +13,7 @@ import com.digitalasset.canton.domain.sequencing.sequencer.reference.store.DbRef import com.digitalasset.canton.domain.sequencing.sequencer.reference.store.ReferenceBlockOrderingStore.TimestampedBlock import com.digitalasset.canton.domain.sequencing.sequencer.reference.store.v1 as proto import com.digitalasset.canton.logging.{NamedLoggerFactory, TracedLogger} -import com.digitalasset.canton.resource.DbStorage.Profile.{H2, Oracle, Postgres} +import com.digitalasset.canton.resource.DbStorage.Profile.{H2, Postgres} import 
com.digitalasset.canton.resource.{DbStorage, DbStore} import com.digitalasset.canton.serialization.ProtoConverter import com.digitalasset.canton.store.db.DbDeserializationException @@ -86,8 +86,6 @@ class DbReferenceBlockOrderingStore( sqlu"""merge into blocks (id, request, uuid) values ($blockHeight, $tracedRequest, $uuid) """ - case _: Oracle => - sys.error("reference sequencer does not support oracle database") }, s"insert block with height $blockHeight", ) @@ -118,8 +116,6 @@ class DbReferenceBlockOrderingStore( insert (id, request, uuid) values (vals.new_id, $tracedRequest, $uuid) """ - case _: Oracle => - sys.error("reference sequencer does not support oracle database") }).transactionally // serializable isolation level will avoid too much retrying due to key collisions .withTransactionIsolation(TransactionIsolation.Serializable), @@ -208,7 +204,7 @@ class DbReferenceBlockOrderingStore( val blockTimestamp = CantonTimestamp.ofEpochMicro(tracedRequest.value.microsecondsSinceEpoch) TimestampedBlock( - BlockFormat.Block(height, orderedRequests), + BlockFormat.Block(height, orderedRequests, tickTopology = false), blockTimestamp, lastTopologyTimestamp, ) diff --git a/sdk/canton/community/drivers/reference/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/reference/store/ReferenceBlockOrderingStore.scala b/sdk/canton/community/drivers/reference/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/reference/store/ReferenceBlockOrderingStore.scala index afac79cc8557..4471d6efe5bc 100644 --- a/sdk/canton/community/drivers/reference/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/reference/store/ReferenceBlockOrderingStore.scala +++ b/sdk/canton/community/drivers/reference/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/reference/store/ReferenceBlockOrderingStore.scala @@ -149,7 +149,7 @@ class InMemoryReferenceSequencerDriverStore extends ReferenceBlockOrderingStore requestsWithTimestampsAndLastTopologyTimestamps.zip(LazyList.from(initial.toInt)).map { case ((blockTimestamp, tracedRequests, lastTopologyTimestamp), blockHeight) => TimestampedBlock( - BlockFormat.Block(blockHeight.toLong, tracedRequests), + BlockFormat.Block(blockHeight.toLong, tracedRequests, tickTopology = false), CantonTimestamp.ofEpochMicro(blockTimestamp), lastTopologyTimestamp, ) diff --git a/sdk/canton/community/drivers/reference/src/test/scala/com/digitalasset/canton/domain/sequencing/sequencer/reference/store/ReferenceBlockOrderingStoreTest.scala b/sdk/canton/community/drivers/reference/src/test/scala/com/digitalasset/canton/domain/sequencing/sequencer/reference/store/ReferenceBlockOrderingStoreTest.scala index 6c9d4423669d..efa585480c6d 100644 --- a/sdk/canton/community/drivers/reference/src/test/scala/com/digitalasset/canton/domain/sequencing/sequencer/reference/store/ReferenceBlockOrderingStoreTest.scala +++ b/sdk/canton/community/drivers/reference/src/test/scala/com/digitalasset/canton/domain/sequencing/sequencer/reference/store/ReferenceBlockOrderingStoreTest.scala @@ -37,7 +37,7 @@ trait ReferenceBlockOrderingStoreTest extends AsyncWordSpec with BaseTest { private def block(height: Long, tracedEvent: Traced[BlockFormat.OrderedRequest]) = TimestampedBlock( - BlockFormat.Block(height, Seq(tracedEvent)), + BlockFormat.Block(height, Seq(tracedEvent), tickTopology = false), CantonTimestamp.ofEpochMicro(tracedEvent.value.microsecondsSinceEpoch), SignedTopologyTransaction.InitialTopologySequencingTime, ) diff --git 
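A note on the recurring change in the hunks above: this patch threads a new tickTopology flag through RawLedgerBlock and BlockFormat.Block, and the chunking test earlier in the patch shows a block with the flag set producing a TopologyTickChunk between its event chunks and the end-of-block marker; the reference-driver hunks here always pass tickTopology = false, so that driver's behaviour is unchanged. A minimal sketch of the flag's effect, using simplified stand-in types rather than the Canton definitions:

final case class RawLedgerBlock(
    blockHeight: Long,
    events: Seq[String], // simplified; the real type carries traced ledger events
    tickTopology: Boolean, // the flag introduced by this patch
)

sealed trait Chunk
final case class NextChunk(height: Long, index: Int, events: Seq[String]) extends Chunk
case object TopologyTickChunk extends Chunk
final case class EndOfBlock(height: Long) extends Chunk

// Mirrors the shape asserted in the chunking test above: event chunks first,
// then an optional topology tick, then the end-of-block marker.
def chunkBlock(block: RawLedgerBlock): Seq[Chunk] = {
  val eventChunks =
    if (block.events.isEmpty) Seq.empty[Chunk]
    else Seq[Chunk](NextChunk(block.blockHeight, 0, block.events))
  val tick = if (block.tickTopology) Seq[Chunk](TopologyTickChunk) else Seq.empty[Chunk]
  (eventChunks ++ tick) :+ EndOfBlock(block.blockHeight)
}

With events present and tickTopology = true, this yields Seq(NextChunk(h, 0, events), TopologyTickChunk, EndOfBlock(h)), matching the expectation in the test above.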
a/sdk/canton/community/integration-testing/src/main/scala/com/digitalasset/canton/integration/HasCycleUtils.scala b/sdk/canton/community/integration-testing/src/main/scala/com/digitalasset/canton/integration/HasCycleUtils.scala index e50e765eac0a..92d614a2aee4 100644 --- a/sdk/canton/community/integration-testing/src/main/scala/com/digitalasset/canton/integration/HasCycleUtils.scala +++ b/sdk/canton/community/integration-testing/src/main/scala/com/digitalasset/canton/integration/HasCycleUtils.scala @@ -70,6 +70,5 @@ trait HasCycleUtils[E <: Environment, TCE <: TestConsoleEnvironment[E]] { val cycle = new M.Cycle(id, partyId.toProtoPrimitive).create.commands.loneElement participant.ledger_api.javaapi.commands .submit(Seq(partyId), Seq(cycle), commandId = commandId) - } } diff --git a/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/index/InMemoryStateUpdater.scala b/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/index/InMemoryStateUpdater.scala index aeb9ee74c685..1842987f66a4 100644 --- a/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/index/InMemoryStateUpdater.scala +++ b/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/index/InMemoryStateUpdater.scala @@ -148,7 +148,7 @@ private[platform] object InMemoryStateUpdaterFlow { } case Update.CommandRejected(recordTime, _, _, domainId, _, _) => Some((domainId, recordTime)) - case _: Update.SequencerIndexMoved => None + case sim: Update.SequencerIndexMoved => Some((sim.domainId, sim.recordTime)) case _: Update.CommitRepair => None } diff --git a/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/UpdateToDbDto.scala b/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/UpdateToDbDto.scala index 2659e6f50913..3ba59383f78e 100644 --- a/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/UpdateToDbDto.scala +++ b/sdk/canton/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/backend/UpdateToDbDto.scala @@ -25,6 +25,7 @@ import com.digitalasset.daml.lf.ledger.EventId import io.grpc.Status object UpdateToDbDto { + import Update.* def apply( participantId: Ref.ParticipantId, @@ -33,389 +34,54 @@ object UpdateToDbDto { metrics: LedgerApiServerMetrics, )(implicit mc: MetricsContext): Offset => Traced[Update] => Iterator[DbDto] = { offset => tracedUpdate => - import Update.* val serializedTraceContext = SerializableTraceContext(tracedUpdate.traceContext).toDamlProto.toByteArray tracedUpdate.value match { case u: CommandRejected => - withExtraMetricLabels( - IndexerMetrics.Labels.grpcCode -> Status - .fromCodeValue(u.reasonTemplate.code) - .getCode - .name(), - IndexerMetrics.Labels.applicationId -> u.completionInfo.applicationId, - ) { implicit mc: MetricsContext => - incrementCounterForEvent( - metrics.indexer, - IndexerMetrics.Labels.eventType.transaction, - IndexerMetrics.Labels.status.rejected, - ) - } - Iterator( - commandCompletion( - offset = offset, - recordTime = u.recordTime, - transactionId = None, - completionInfo = u.completionInfo, - domainId = u.domainId.toProtoPrimitive, - requestIndex = u.domainIndex.flatMap(_.requestIndex), - serializedTraceContext = serializedTraceContext, - isTransaction = - true, // please note from usage point of view (deduplication) rejections are 
always used both for transactions and reassignments at the moment. - ).copy( - rejection_status_code = Some(u.reasonTemplate.code), - rejection_status_message = Some(u.reasonTemplate.message), - rejection_status_details = - Some(StatusDetails.of(u.reasonTemplate.status.details).toByteArray), - ) + commandRejectedToDbDto( + metrics = metrics, + offset = offset, + serializedTraceContext = serializedTraceContext, + commandRejected = u, ) case _: Init => Iterator() case u: PartyAddedToParticipant => - incrementCounterForEvent( - metrics.indexer, - IndexerMetrics.Labels.eventType.partyAllocation, - IndexerMetrics.Labels.status.accepted, - ) - Iterator( - DbDto.PartyEntry( - ledger_offset = offset.toHexString, - recorded_at = u.recordTime.micros, - submission_id = u.submissionId, - party = Some(u.party), - display_name = Option(u.displayName), - typ = JdbcLedgerDao.acceptType, - rejection_reason = None, - is_local = Some(u.participantId == participantId), - ) + partyAddedToParticipantToDbDto( + metrics = metrics, + participantId = participantId, + offset = offset, + partyAddedToParticipant = u, ) case u: PartyAllocationRejected => - incrementCounterForEvent( - metrics.indexer, - IndexerMetrics.Labels.eventType.partyAllocation, - IndexerMetrics.Labels.status.rejected, - ) - Iterator( - DbDto.PartyEntry( - ledger_offset = offset.toHexString, - recorded_at = u.recordTime.micros, - submission_id = Some(u.submissionId), - party = None, - display_name = None, - typ = JdbcLedgerDao.rejectType, - rejection_reason = Some(u.rejectionReason), - is_local = None, - ) + partyAllocationRejectedToDbDto( + metrics = metrics, + offset = offset, + partyAllocationRejected = u, ) case u: TransactionAccepted => - withOptionalMetricLabels( - IndexerMetrics.Labels.applicationId -> u.completionInfoO.map(_.applicationId) - ) { implicit mc: MetricsContext => - incrementCounterForEvent( - metrics.indexer, - IndexerMetrics.Labels.eventType.transaction, - IndexerMetrics.Labels.status.accepted, - ) - } - val blinding = u.blindingInfo - val preorderTraversal = - TransactionTraversalUtils.preorderTraversalForIngestion(u.transaction.transaction) - - val domainId = u.domainId.toProtoPrimitive - val transactionMeta = DbDto.TransactionMeta( - transaction_id = u.transactionId, - event_offset = offset.toHexString, - publication_time = 0, // this is filled later - record_time = u.recordTime.micros, - domain_id = domainId, - event_sequential_id_first = 0, // this is filled later - event_sequential_id_last = 0, // this is filled later + transactionAcceptedToDbDto( + translation = translation, + compressionStrategy = compressionStrategy, + metrics = metrics, + offset = offset, + serializedTraceContext = serializedTraceContext, + transactionAccepted = u, ) - val events: Iterator[DbDto] = preorderTraversal.iterator - .flatMap { - case (nodeId, create: Create) => - val eventId = EventId(u.transactionId, nodeId) - val templateId = create.templateId.toString - val stakeholders = create.stakeholders.map(_.toString) - val (createArgument, createKeyValue) = translation.serialize(create) - val informees = blinding.disclosure.getOrElse(nodeId, Set.empty).map(_.toString) - val nonStakeholderInformees = informees.diff(stakeholders) - Iterator( - DbDto.EventCreate( - event_offset = offset.toHexString, - transaction_id = u.transactionId, - ledger_effective_time = u.transactionMeta.ledgerEffectiveTime.micros, - command_id = u.completionInfoO.map(_.commandId), - workflow_id = u.transactionMeta.workflowId, - application_id = 
u.completionInfoO.map(_.applicationId), - submitters = u.completionInfoO.map(_.actAs.toSet), - node_index = nodeId.index, - event_id = eventId.toLedgerString, - contract_id = create.coid.coid, - template_id = templateId, - package_name = create.packageName, - package_version = create.packageVersion.map(_.toString()), - flat_event_witnesses = stakeholders, - tree_event_witnesses = informees, - create_argument = - compressionStrategy.createArgumentCompression.compress(createArgument), - create_signatories = create.signatories.map(_.toString), - create_observers = create.stakeholders.diff(create.signatories).map(_.toString), - create_key_value = createKeyValue - .map(compressionStrategy.createKeyValueCompression.compress), - create_key_maintainers = create.keyOpt.map(_.maintainers.map(_.toString)), - create_key_hash = create.keyOpt.map(_.globalKey.hash.bytes.toHexString), - create_argument_compression = compressionStrategy.createArgumentCompression.id, - create_key_value_compression = - compressionStrategy.createKeyValueCompression.id.filter(_ => - createKeyValue.isDefined - ), - event_sequential_id = 0, // this is filled later - driver_metadata = - // Allow None as the original participant might be running - // with a version predating the introduction of contract driver metadata - u.contractMetadata.get(create.coid).map(_.toByteArray), - domain_id = domainId, - trace_context = serializedTraceContext, - record_time = u.recordTime.micros, - ) - ) ++ stakeholders.iterator.map( - DbDto.IdFilterCreateStakeholder( - event_sequential_id = 0, // this is filled later - template_id = templateId, - _, - ) - ) ++ nonStakeholderInformees.iterator.map( - DbDto.IdFilterCreateNonStakeholderInformee( - event_sequential_id = 0, // this is filled later - _, - ) - ) - - case (nodeId, exercise: Exercise) => - val eventId = EventId(u.transactionId, nodeId) - val (exerciseArgument, exerciseResult, createKeyValue) = - translation.serialize(eventId, exercise) - val stakeholders = exercise.stakeholders.map(_.toString) - val informees = blinding.disclosure.getOrElse(nodeId, Set.empty).map(_.toString) - val flatWitnesses = if (exercise.consuming) stakeholders else Set.empty[String] - val nonStakeholderInformees = informees.diff(stakeholders) - val templateId = exercise.templateId.toString - Iterator( - DbDto.EventExercise( - consuming = exercise.consuming, - event_offset = offset.toHexString, - transaction_id = u.transactionId, - ledger_effective_time = u.transactionMeta.ledgerEffectiveTime.micros, - command_id = u.completionInfoO.map(_.commandId), - workflow_id = u.transactionMeta.workflowId, - application_id = u.completionInfoO.map(_.applicationId), - submitters = u.completionInfoO.map(_.actAs.toSet), - node_index = nodeId.index, - event_id = EventId(u.transactionId, nodeId).toLedgerString, - contract_id = exercise.targetCoid.coid, - template_id = templateId, - package_name = exercise.packageName, - flat_event_witnesses = flatWitnesses, - tree_event_witnesses = informees, - create_key_value = createKeyValue - .map(compressionStrategy.createKeyValueCompression.compress), - exercise_choice = exercise.qualifiedChoiceName.toString, - exercise_argument = - compressionStrategy.exerciseArgumentCompression.compress(exerciseArgument), - exercise_result = exerciseResult - .map(compressionStrategy.exerciseResultCompression.compress), - exercise_actors = exercise.actingParties.map(_.toString), - exercise_child_event_ids = exercise.children.iterator - .map(EventId(u.transactionId, _).toLedgerString.toString) - .toVector, - 
create_key_value_compression = compressionStrategy.createKeyValueCompression.id, - exercise_argument_compression = - compressionStrategy.exerciseArgumentCompression.id, - exercise_result_compression = compressionStrategy.exerciseResultCompression.id, - event_sequential_id = 0, // this is filled later - domain_id = domainId, - trace_context = serializedTraceContext, - record_time = u.recordTime.micros, - ) - ) ++ { - if (exercise.consuming) { - stakeholders.iterator.map(stakeholder => - DbDto.IdFilterConsumingStakeholder( - event_sequential_id = 0, // this is filled later - template_id = templateId, - party_id = stakeholder, - ) - ) ++ nonStakeholderInformees.iterator.map(stakeholder => - DbDto.IdFilterConsumingNonStakeholderInformee( - event_sequential_id = 0, // this is filled later - party_id = stakeholder, - ) - ) - } else { - informees.iterator.map(informee => - DbDto.IdFilterNonConsumingInformee( - event_sequential_id = 0, // this is filled later - party_id = informee, - ) - ) - } - } - case _ => - Iterator.empty // It is okay to collect: blinding info is already there, we are free at hand to filter out the fetch and lookup nodes here already - } - - val completions = - u.completionInfoO.iterator.map( - commandCompletion( - offset = offset, - recordTime = u.recordTime, - transactionId = Some(u.transactionId), - _, - domainId = domainId, - requestIndex = u.domainIndex.flatMap(_.requestIndex), - serializedTraceContext = serializedTraceContext, - isTransaction = true, - ) - ) - - // TransactionMeta DTO must come last in this sequence - // because in a later stage the preceding events - // will be assigned consecutive event sequential ids - // and transaction meta is assigned sequential ids of its first and last event - events ++ completions ++ Seq(transactionMeta) case u: ReassignmentAccepted => - withOptionalMetricLabels( - IndexerMetrics.Labels.applicationId -> u.optCompletionInfo.map(_.applicationId) - ) { implicit mc: MetricsContext => - incrementCounterForEvent( - metrics.indexer, - IndexerMetrics.Labels.eventType.reassignment, - IndexerMetrics.Labels.status.accepted, - ) - } - val events = u.reassignment match { - case unassign: Reassignment.Unassign => - val flatEventWitnesses = unassign.stakeholders.map(_.toString) - val templateId = unassign.templateId.toString - Iterator( - DbDto.EventUnassign( - event_offset = offset.toHexString, - update_id = u.updateId, - command_id = u.optCompletionInfo.map(_.commandId), - workflow_id = u.workflowId, - submitter = u.reassignmentInfo.submitter, - contract_id = unassign.contractId.coid, - template_id = templateId, - package_name = unassign.packageName, - flat_event_witnesses = flatEventWitnesses.toSet, - event_sequential_id = 0L, // this is filled later - source_domain_id = u.reassignmentInfo.sourceDomain.unwrap.toProtoPrimitive, - target_domain_id = u.reassignmentInfo.targetDomain.unwrap.toProtoPrimitive, - unassign_id = u.reassignmentInfo.unassignId.toMicros.toString, - reassignment_counter = u.reassignmentInfo.reassignmentCounter, - assignment_exclusivity = unassign.assignmentExclusivity.map(_.micros), - trace_context = serializedTraceContext, - record_time = u.recordTime.micros, - ) - ) ++ flatEventWitnesses.map( - DbDto.IdFilterUnassignStakeholder( - 0L, // this is filled later - templateId, - _, - ) - ) - case assign: Reassignment.Assign => - val templateId = assign.createNode.templateId.toString - val flatEventWitnesses = - assign.createNode.stakeholders.map(_.toString) - val (createArgument, createKeyValue) = 
translation.serialize(assign.createNode) - Iterator( - DbDto.EventAssign( - event_offset = offset.toHexString, - update_id = u.updateId, - command_id = u.optCompletionInfo.map(_.commandId), - workflow_id = u.workflowId, - submitter = u.reassignmentInfo.submitter, - contract_id = assign.createNode.coid.coid, - template_id = templateId, - package_name = assign.createNode.packageName, - package_version = assign.createNode.packageVersion.map(_.toString()), - flat_event_witnesses = flatEventWitnesses, - create_argument = createArgument, - create_signatories = assign.createNode.signatories.map(_.toString), - create_observers = assign.createNode.stakeholders - .diff(assign.createNode.signatories) - .map(_.toString), - create_key_value = createKeyValue - .map(compressionStrategy.createKeyValueCompression.compress), - create_key_maintainers = - assign.createNode.keyOpt.map(_.maintainers.map(_.toString)), - create_key_hash = - assign.createNode.keyOpt.map(_.globalKey.hash.bytes.toHexString), - create_argument_compression = compressionStrategy.createArgumentCompression.id, - create_key_value_compression = - compressionStrategy.createKeyValueCompression.id.filter(_ => - createKeyValue.isDefined - ), - event_sequential_id = 0L, // this is filled later - ledger_effective_time = assign.ledgerEffectiveTime.micros, - driver_metadata = assign.contractMetadata.toByteArray, - source_domain_id = u.reassignmentInfo.sourceDomain.unwrap.toProtoPrimitive, - target_domain_id = u.reassignmentInfo.targetDomain.unwrap.toProtoPrimitive, - unassign_id = u.reassignmentInfo.unassignId.toMicros.toString, - reassignment_counter = u.reassignmentInfo.reassignmentCounter, - trace_context = serializedTraceContext, - record_time = u.recordTime.micros, - ) - ) ++ flatEventWitnesses.map( - DbDto.IdFilterAssignStakeholder( - 0L, // this is filled later - templateId, - _, - ) - ) - } - - val domainId = u.reassignment match { - case _: Reassignment.Unassign => - u.reassignmentInfo.sourceDomain.unwrap.toProtoPrimitive - case _: Reassignment.Assign => - u.reassignmentInfo.targetDomain.unwrap.toProtoPrimitive - } - val completions = u.optCompletionInfo.iterator.map( - commandCompletion( - offset = offset, - recordTime = u.recordTime, - transactionId = Some(u.updateId), - _, - domainId = domainId, - requestIndex = u.domainIndex.flatMap(_.requestIndex), - serializedTraceContext = serializedTraceContext, - isTransaction = false, - ) - ) - - val transactionMeta = DbDto.TransactionMeta( - transaction_id = u.updateId, - event_offset = offset.toHexString, - publication_time = 0, // this is filled later - record_time = u.recordTime.micros, - domain_id = domainId, - event_sequential_id_first = 0, // this is filled later - event_sequential_id_last = 0, // this is filled later + reassignmentAcceptedToDbDto( + translation = translation, + compressionStrategy = compressionStrategy, + metrics = metrics, + offset = offset, + serializedTraceContext = serializedTraceContext, + reassignmentAccepted = u, ) - // TransactionMeta DTO must come last in this sequence - // because in a later stage the preceding events - // will be assigned consecutive event sequential ids - // and transaction meta is assigned sequential ids of its first and last event - events ++ completions ++ Seq(transactionMeta) - case u: SequencerIndexMoved => // nothing to persist, this is only a synthetic DbDto to facilitate updating the StringInterning Iterator(DbDto.SequencerIndexMoved(u.domainId.toProtoPrimitive)) @@ -425,6 +91,494 @@ object UpdateToDbDto { } } + private def 
commandRejectedToDbDto( + metrics: LedgerApiServerMetrics, + offset: Offset, + serializedTraceContext: Array[Byte], + commandRejected: CommandRejected, + )(implicit mc: MetricsContext): Iterator[DbDto] = { + withExtraMetricLabels( + IndexerMetrics.Labels.grpcCode -> Status + .fromCodeValue(commandRejected.reasonTemplate.code) + .getCode + .name(), + IndexerMetrics.Labels.applicationId -> commandRejected.completionInfo.applicationId, + ) { implicit mc: MetricsContext => + incrementCounterForEvent( + metrics.indexer, + IndexerMetrics.Labels.eventType.transaction, + IndexerMetrics.Labels.status.rejected, + ) + } + Iterator( + commandCompletion( + offset = offset, + recordTime = commandRejected.recordTime, + transactionId = None, + completionInfo = commandRejected.completionInfo, + domainId = commandRejected.domainId.toProtoPrimitive, + requestIndex = commandRejected.domainIndex.flatMap(_.requestIndex), + serializedTraceContext = serializedTraceContext, + isTransaction = + true, // please note from usage point of view (deduplication) rejections are always used both for transactions and reassignments at the moment. + ).copy( + rejection_status_code = Some(commandRejected.reasonTemplate.code), + rejection_status_message = Some(commandRejected.reasonTemplate.message), + rejection_status_details = + Some(StatusDetails.of(commandRejected.reasonTemplate.status.details).toByteArray), + ) + ) + } + + private def partyAddedToParticipantToDbDto( + metrics: LedgerApiServerMetrics, + participantId: Ref.ParticipantId, + offset: Offset, + partyAddedToParticipant: PartyAddedToParticipant, + )(implicit mc: MetricsContext): Iterator[DbDto] = { + incrementCounterForEvent( + metrics.indexer, + IndexerMetrics.Labels.eventType.partyAllocation, + IndexerMetrics.Labels.status.accepted, + ) + Iterator( + DbDto.PartyEntry( + ledger_offset = offset.toHexString, + recorded_at = partyAddedToParticipant.recordTime.micros, + submission_id = partyAddedToParticipant.submissionId, + party = Some(partyAddedToParticipant.party), + display_name = Option(partyAddedToParticipant.displayName), + typ = JdbcLedgerDao.acceptType, + rejection_reason = None, + is_local = Some(partyAddedToParticipant.participantId == participantId), + ) + ) + } + + private def partyAllocationRejectedToDbDto( + metrics: LedgerApiServerMetrics, + offset: Offset, + partyAllocationRejected: PartyAllocationRejected, + )(implicit mc: MetricsContext): Iterator[DbDto] = { + incrementCounterForEvent( + metrics.indexer, + IndexerMetrics.Labels.eventType.partyAllocation, + IndexerMetrics.Labels.status.rejected, + ) + Iterator( + DbDto.PartyEntry( + ledger_offset = offset.toHexString, + recorded_at = partyAllocationRejected.recordTime.micros, + submission_id = Some(partyAllocationRejected.submissionId), + party = None, + display_name = None, + typ = JdbcLedgerDao.rejectType, + rejection_reason = Some(partyAllocationRejected.rejectionReason), + is_local = None, + ) + ) + } + + private def transactionAcceptedToDbDto( + translation: LfValueSerialization, + compressionStrategy: CompressionStrategy, + metrics: LedgerApiServerMetrics, + offset: Offset, + serializedTraceContext: Array[Byte], + transactionAccepted: TransactionAccepted, + )(implicit mc: MetricsContext): Iterator[DbDto] = { + withOptionalMetricLabels( + IndexerMetrics.Labels.applicationId -> transactionAccepted.completionInfoO.map( + _.applicationId + ) + ) { implicit mc: MetricsContext => + incrementCounterForEvent( + metrics.indexer, + IndexerMetrics.Labels.eventType.transaction, + 
IndexerMetrics.Labels.status.accepted, + ) + } + + val transactionMeta = DbDto.TransactionMeta( + transaction_id = transactionAccepted.transactionId, + event_offset = offset.toHexString, + publication_time = 0, // this is filled later + record_time = transactionAccepted.recordTime.micros, + domain_id = transactionAccepted.domainId.toProtoPrimitive, + event_sequential_id_first = 0, // this is filled later + event_sequential_id_last = 0, // this is filled later + ) + + val events: Iterator[DbDto] = TransactionTraversalUtils + .preorderTraversalForIngestion( + transactionAccepted.transaction.transaction + ) + .iterator + .flatMap { + case (nodeId, create: Create) => + createNodeToDbDto( + compressionStrategy = compressionStrategy, + translation = translation, + offset = offset, + serializedTraceContext = serializedTraceContext, + transactionAccepted = transactionAccepted, + nodeId = nodeId, + create = create, + ) + + case (nodeId, exercise: Exercise) => + exerciseNodeToDbDto( + compressionStrategy = compressionStrategy, + translation = translation, + offset = offset, + serializedTraceContext = serializedTraceContext, + transactionAccepted = transactionAccepted, + nodeId = nodeId, + exercise = exercise, + ) + + case _ => + Iterator.empty // It is okay to collect: blinding info is already there, we are free at hand to filter out the fetch and lookup nodes here already + } + + val completions = + transactionAccepted.completionInfoO.iterator.map(completionInfo => + commandCompletion( + offset = offset, + recordTime = transactionAccepted.recordTime, + transactionId = Some(transactionAccepted.transactionId), + completionInfo = completionInfo, + domainId = transactionAccepted.domainId.toProtoPrimitive, + requestIndex = transactionAccepted.domainIndex.flatMap(_.requestIndex), + serializedTraceContext = serializedTraceContext, + isTransaction = true, + ) + ) + + // TransactionMeta DTO must come last in this sequence + // because in a later stage the preceding events + // will be assigned consecutive event sequential ids + // and transaction meta is assigned sequential ids of its first and last event + events ++ completions ++ Seq(transactionMeta) + } + + private def createNodeToDbDto( + compressionStrategy: CompressionStrategy, + translation: LfValueSerialization, + offset: Offset, + serializedTraceContext: Array[Byte], + transactionAccepted: TransactionAccepted, + nodeId: NodeId, + create: Create, + ): Iterator[DbDto] = { + val eventId = EventId(transactionAccepted.transactionId, nodeId) + val templateId = create.templateId.toString + val stakeholders = create.stakeholders.map(_.toString) + val (createArgument, createKeyValue) = translation.serialize(create) + val informees = + transactionAccepted.blindingInfo.disclosure.getOrElse(nodeId, Set.empty).map(_.toString) + val nonStakeholderInformees = informees.diff(stakeholders) + Iterator( + DbDto.EventCreate( + event_offset = offset.toHexString, + transaction_id = transactionAccepted.transactionId, + ledger_effective_time = transactionAccepted.transactionMeta.ledgerEffectiveTime.micros, + command_id = transactionAccepted.completionInfoO.map(_.commandId), + workflow_id = transactionAccepted.transactionMeta.workflowId, + application_id = transactionAccepted.completionInfoO.map(_.applicationId), + submitters = transactionAccepted.completionInfoO.map(_.actAs.toSet), + node_index = nodeId.index, + event_id = eventId.toLedgerString, + contract_id = create.coid.coid, + template_id = templateId, + package_name = create.packageName, + package_version = 
create.packageVersion.map(_.toString()), + flat_event_witnesses = stakeholders, + tree_event_witnesses = informees, + create_argument = compressionStrategy.createArgumentCompression.compress(createArgument), + create_signatories = create.signatories.map(_.toString), + create_observers = create.stakeholders.diff(create.signatories).map(_.toString), + create_key_value = createKeyValue + .map(compressionStrategy.createKeyValueCompression.compress), + create_key_maintainers = create.keyOpt.map(_.maintainers.map(_.toString)), + create_key_hash = create.keyOpt.map(_.globalKey.hash.bytes.toHexString), + create_argument_compression = compressionStrategy.createArgumentCompression.id, + create_key_value_compression = + compressionStrategy.createKeyValueCompression.id.filter(_ => createKeyValue.isDefined), + event_sequential_id = 0, // this is filled later + driver_metadata = + // Allow None as the original participant might be running + // with a version predating the introduction of contract driver metadata + transactionAccepted.contractMetadata.get(create.coid).map(_.toByteArray), + domain_id = transactionAccepted.domainId.toProtoPrimitive, + trace_context = serializedTraceContext, + record_time = transactionAccepted.recordTime.micros, + ) + ) ++ stakeholders.iterator.map( + DbDto.IdFilterCreateStakeholder( + event_sequential_id = 0, // this is filled later + template_id = templateId, + _, + ) + ) ++ nonStakeholderInformees.iterator.map( + DbDto.IdFilterCreateNonStakeholderInformee( + event_sequential_id = 0, // this is filled later + _, + ) + ) + } + + private def exerciseNodeToDbDto( + compressionStrategy: CompressionStrategy, + translation: LfValueSerialization, + offset: Offset, + serializedTraceContext: Array[Byte], + transactionAccepted: TransactionAccepted, + nodeId: NodeId, + exercise: Exercise, + ): Iterator[DbDto] = { + val eventId = EventId(transactionAccepted.transactionId, nodeId) + val (exerciseArgument, exerciseResult, createKeyValue) = + translation.serialize(eventId, exercise) + val stakeholders = exercise.stakeholders.map(_.toString) + val informees = + transactionAccepted.blindingInfo.disclosure.getOrElse(nodeId, Set.empty).map(_.toString) + val flatWitnesses = if (exercise.consuming) stakeholders else Set.empty[String] + val nonStakeholderInformees = informees.diff(stakeholders) + val templateId = exercise.templateId.toString + Iterator( + DbDto.EventExercise( + consuming = exercise.consuming, + event_offset = offset.toHexString, + transaction_id = transactionAccepted.transactionId, + ledger_effective_time = transactionAccepted.transactionMeta.ledgerEffectiveTime.micros, + command_id = transactionAccepted.completionInfoO.map(_.commandId), + workflow_id = transactionAccepted.transactionMeta.workflowId, + application_id = transactionAccepted.completionInfoO.map(_.applicationId), + submitters = transactionAccepted.completionInfoO.map(_.actAs.toSet), + node_index = nodeId.index, + event_id = EventId(transactionAccepted.transactionId, nodeId).toLedgerString, + contract_id = exercise.targetCoid.coid, + template_id = templateId, + package_name = exercise.packageName, + flat_event_witnesses = flatWitnesses, + tree_event_witnesses = informees, + create_key_value = createKeyValue + .map(compressionStrategy.createKeyValueCompression.compress), + exercise_choice = exercise.qualifiedChoiceName.toString, + exercise_argument = + compressionStrategy.exerciseArgumentCompression.compress(exerciseArgument), + exercise_result = exerciseResult + 
.map(compressionStrategy.exerciseResultCompression.compress), + exercise_actors = exercise.actingParties.map(_.toString), + exercise_child_event_ids = exercise.children.iterator + .map(EventId(transactionAccepted.transactionId, _).toLedgerString.toString) + .toVector, + create_key_value_compression = compressionStrategy.createKeyValueCompression.id, + exercise_argument_compression = compressionStrategy.exerciseArgumentCompression.id, + exercise_result_compression = compressionStrategy.exerciseResultCompression.id, + event_sequential_id = 0, // this is filled later + domain_id = transactionAccepted.domainId.toProtoPrimitive, + trace_context = serializedTraceContext, + record_time = transactionAccepted.recordTime.micros, + ) + ) ++ { + if (exercise.consuming) { + stakeholders.iterator.map(stakeholder => + DbDto.IdFilterConsumingStakeholder( + event_sequential_id = 0, // this is filled later + template_id = templateId, + party_id = stakeholder, + ) + ) ++ nonStakeholderInformees.iterator.map(stakeholder => + DbDto.IdFilterConsumingNonStakeholderInformee( + event_sequential_id = 0, // this is filled later + party_id = stakeholder, + ) + ) + } else { + informees.iterator.map(informee => + DbDto.IdFilterNonConsumingInformee( + event_sequential_id = 0, // this is filled later + party_id = informee, + ) + ) + } + } + } + + private def reassignmentAcceptedToDbDto( + translation: LfValueSerialization, + compressionStrategy: CompressionStrategy, + metrics: LedgerApiServerMetrics, + offset: Offset, + serializedTraceContext: Array[Byte], + reassignmentAccepted: ReassignmentAccepted, + )(implicit mc: MetricsContext): Iterator[DbDto] = { + withOptionalMetricLabels( + IndexerMetrics.Labels.applicationId -> reassignmentAccepted.optCompletionInfo.map( + _.applicationId + ) + ) { implicit mc: MetricsContext => + incrementCounterForEvent( + metrics.indexer, + IndexerMetrics.Labels.eventType.reassignment, + IndexerMetrics.Labels.status.accepted, + ) + } + + val events = reassignmentAccepted.reassignment match { + case unassign: Reassignment.Unassign => + unassignToDbDto( + offset = offset, + serializedTraceContext = serializedTraceContext, + reassignmentAccepted = reassignmentAccepted, + unassign = unassign, + ) + + case assign: Reassignment.Assign => + assignToDbDto( + translation = translation, + compressionStrategy = compressionStrategy, + offset = offset, + serializedTraceContext = serializedTraceContext, + reassignmentAccepted = reassignmentAccepted, + assign = assign, + ) + } + + val domainId = reassignmentAccepted.reassignment match { + case _: Reassignment.Unassign => + reassignmentAccepted.reassignmentInfo.sourceDomain.unwrap.toProtoPrimitive + case _: Reassignment.Assign => + reassignmentAccepted.reassignmentInfo.targetDomain.unwrap.toProtoPrimitive + } + val completions = reassignmentAccepted.optCompletionInfo.iterator.map( + commandCompletion( + offset = offset, + recordTime = reassignmentAccepted.recordTime, + transactionId = Some(reassignmentAccepted.updateId), + _, + domainId = domainId, + requestIndex = reassignmentAccepted.domainIndex.flatMap(_.requestIndex), + serializedTraceContext = serializedTraceContext, + isTransaction = false, + ) + ) + + val transactionMeta = DbDto.TransactionMeta( + transaction_id = reassignmentAccepted.updateId, + event_offset = offset.toHexString, + publication_time = 0, // this is filled later + record_time = reassignmentAccepted.recordTime.micros, + domain_id = domainId, + event_sequential_id_first = 0, // this is filled later + event_sequential_id_last = 0, // 
this is filled later + ) + + // TransactionMeta DTO must come last in this sequence + // because in a later stage the preceding events + // will be assigned consecutive event sequential ids + // and transaction meta is assigned sequential ids of its first and last event + events ++ completions ++ Seq(transactionMeta) + } + + private def unassignToDbDto( + offset: Offset, + serializedTraceContext: Array[Byte], + reassignmentAccepted: ReassignmentAccepted, + unassign: Reassignment.Unassign, + ): Iterator[DbDto] = { + val flatEventWitnesses = unassign.stakeholders.map(_.toString) + val templateId = unassign.templateId.toString + Iterator( + DbDto.EventUnassign( + event_offset = offset.toHexString, + update_id = reassignmentAccepted.updateId, + command_id = reassignmentAccepted.optCompletionInfo.map(_.commandId), + workflow_id = reassignmentAccepted.workflowId, + submitter = reassignmentAccepted.reassignmentInfo.submitter, + contract_id = unassign.contractId.coid, + template_id = templateId, + package_name = unassign.packageName, + flat_event_witnesses = flatEventWitnesses.toSet, + event_sequential_id = 0L, // this is filled later + source_domain_id = + reassignmentAccepted.reassignmentInfo.sourceDomain.unwrap.toProtoPrimitive, + target_domain_id = + reassignmentAccepted.reassignmentInfo.targetDomain.unwrap.toProtoPrimitive, + unassign_id = reassignmentAccepted.reassignmentInfo.unassignId.toMicros.toString, + reassignment_counter = reassignmentAccepted.reassignmentInfo.reassignmentCounter, + assignment_exclusivity = unassign.assignmentExclusivity.map(_.micros), + trace_context = serializedTraceContext, + record_time = reassignmentAccepted.recordTime.micros, + ) + ) ++ flatEventWitnesses.map( + DbDto.IdFilterUnassignStakeholder( + 0L, // this is filled later + templateId, + _, + ) + ) + } + + private def assignToDbDto( + translation: LfValueSerialization, + compressionStrategy: CompressionStrategy, + offset: Offset, + serializedTraceContext: Array[Byte], + reassignmentAccepted: ReassignmentAccepted, + assign: Reassignment.Assign, + ): Iterator[DbDto] = { + val templateId = assign.createNode.templateId.toString + val flatEventWitnesses = + assign.createNode.stakeholders.map(_.toString) + val (createArgument, createKeyValue) = translation.serialize(assign.createNode) + Iterator( + DbDto.EventAssign( + event_offset = offset.toHexString, + update_id = reassignmentAccepted.updateId, + command_id = reassignmentAccepted.optCompletionInfo.map(_.commandId), + workflow_id = reassignmentAccepted.workflowId, + submitter = reassignmentAccepted.reassignmentInfo.submitter, + contract_id = assign.createNode.coid.coid, + template_id = templateId, + package_name = assign.createNode.packageName, + package_version = assign.createNode.packageVersion.map(_.toString()), + flat_event_witnesses = flatEventWitnesses, + create_argument = createArgument, + create_signatories = assign.createNode.signatories.map(_.toString), + create_observers = assign.createNode.stakeholders + .diff(assign.createNode.signatories) + .map(_.toString), + create_key_value = createKeyValue + .map(compressionStrategy.createKeyValueCompression.compress), + create_key_maintainers = assign.createNode.keyOpt.map(_.maintainers.map(_.toString)), + create_key_hash = assign.createNode.keyOpt.map(_.globalKey.hash.bytes.toHexString), + create_argument_compression = compressionStrategy.createArgumentCompression.id, + create_key_value_compression = + compressionStrategy.createKeyValueCompression.id.filter(_ => createKeyValue.isDefined), + 
event_sequential_id = 0L, // this is filled later + ledger_effective_time = assign.ledgerEffectiveTime.micros, + driver_metadata = assign.contractMetadata.toByteArray, + source_domain_id = + reassignmentAccepted.reassignmentInfo.sourceDomain.unwrap.toProtoPrimitive, + target_domain_id = + reassignmentAccepted.reassignmentInfo.targetDomain.unwrap.toProtoPrimitive, + unassign_id = reassignmentAccepted.reassignmentInfo.unassignId.toMicros.toString, + reassignment_counter = reassignmentAccepted.reassignmentInfo.reassignmentCounter, + trace_context = serializedTraceContext, + record_time = reassignmentAccepted.recordTime.micros, + ) + ) ++ flatEventWitnesses.map( + DbDto.IdFilterAssignStakeholder( + 0L, // this is filled later + templateId, + _, + ) + ) + } + private def incrementCounterForEvent( metrics: IndexerMetrics, eventType: String, @@ -438,6 +592,7 @@ object UpdateToDbDto { ) { implicit mc => metrics.eventsMeter.mark() } + private def commandCompletion( offset: Offset, recordTime: Time.Timestamp, diff --git a/sdk/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/index/InMemoryStateUpdaterSpec.scala b/sdk/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/index/InMemoryStateUpdaterSpec.scala index af16863410ff..fe943667b7d9 100644 --- a/sdk/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/index/InMemoryStateUpdaterSpec.scala +++ b/sdk/canton/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/index/InMemoryStateUpdaterSpec.scala @@ -16,6 +16,7 @@ import com.digitalasset.canton.ledger.participant.state.{ Reassignment, ReassignmentInfo, RequestIndex, + SequencerIndex, TransactionMeta, Update, } @@ -68,8 +69,8 @@ import org.scalatest.matchers.should.Matchers import java.util.concurrent.ConcurrentLinkedQueue import scala.collection.mutable.ArrayBuffer -import scala.concurrent.ExecutionContext import scala.concurrent.duration.FiniteDuration +import scala.concurrent.{ExecutionContext, Future} import scala.util.chaining.* class InMemoryStateUpdaterSpec @@ -287,6 +288,62 @@ class InMemoryStateUpdaterSpec } + "updateOffsetCheckpointCacheFlowWithTickingSource" should "update the domain time for all the Update types that contain one" in new Scope { + implicit val ec: ExecutionContext = executorService + + private val updatesSeq: Seq[Update] = Seq( + transactionAccepted(1, domainId1), + assignmentAccepted(2, source = domainId2, target = domainId1), + unassignmentAccepted(3, source = domainId1, target = domainId2), + commandRejected(4, domainId1), + sequencerIndexMoved(5, domainId1), + ) + + private val offsets = (1L to updatesSeq.length.toLong).map(Offset.fromLong) + private val updatesWithOffsets = offsets.zip(updatesSeq.map(Traced.empty)) + + // tick after each update to have one checkpoint after every update + // the None values denote the ticks arrived that are used to update the offset checkpoint cache + private val input = + updatesWithOffsets.flatMap(elem => Seq(Some(elem), None)) + + private val (expectedOutput, output, checkpoints) = + runUpdateOffsetCheckpointCacheFlow( + input + ).futureValue + + private val offsetCheckpointsExpected = + Seq( + // offset -> Map[domain, time] + 1 -> Map( + domainId1 -> 1 + ), + 2 -> Map( + domainId1 -> 2 + ), + 3 -> Map( + domainId1 -> 3 + ), + 4 -> Map( + domainId1 -> 4 + ), + 5 -> Map( + domainId1 -> 5 + ), + ).map { case (offset, domainTimesRaw) => + OffsetCheckpoint( + offset = Offset.fromLong(offset.toLong), + 
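// Each tick produces one checkpoint, and domainTimes carries the latest record
// time observed per domain up to that offset, so domainId1 advances through 1..5
// here. The final step depends on SequencerIndexMoved now contributing its
// (domainId, recordTime) pair, per the InMemoryStateUpdater change earlier in
// this patch.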
domainTimes = domainTimesRaw.map { case (d, t) => + d -> Timestamp(t.toLong) + }, + ) + } + + output shouldBe expectedOutput + checkpoints shouldBe offsetCheckpointsExpected + + } + } object InMemoryStateUpdaterSpec { @@ -658,21 +715,7 @@ object InMemoryStateUpdaterSpec { ) private val update1 = offset(1L) -> Traced( - Update.TransactionAccepted( - completionInfoO = None, - transactionMeta = someTransactionMeta, - transaction = CommittedTransaction(TransactionBuilder.Empty), - transactionId = txId1, - recordTime = Timestamp.Epoch, - hostedWitnesses = Nil, - contractMetadata = Map.empty, - domainId = domainId1, - Some( - DomainIndex.of( - RequestIndex(RequestCounter(1), Some(SequencerCounter(1)), CantonTimestamp.MinValue) - ) - ), - ) + transactionAccepted(t = 0L, domainId = domainId1) ) private val rawMetadataChangedUpdate = offset(2L) -> Update.Init( Timestamp.Epoch @@ -696,81 +739,14 @@ object InMemoryStateUpdaterSpec { ) ) private val update4 = offset(4L) -> Traced[Update]( - Update.CommandRejected( - recordTime = Time.Timestamp.assertFromLong(1337L), - completionInfo = CompletionInfo( - actAs = List.empty, - applicationId = Ref.ApplicationId.assertFromString("some-app-id"), - commandId = Ref.CommandId.assertFromString("cmdId"), - optDeduplicationPeriod = None, - submissionId = None, - None, - ), - reasonTemplate = FinalReason(new Status()), - domainId = DomainId.tryFromString("da::default"), - Some( - DomainIndex.of( - RequestIndex(RequestCounter(1), Some(SequencerCounter(1)), CantonTimestamp.MinValue) - ) - ), - ) + commandRejected(t = 1337L, domainId = DomainId.tryFromString("da::default")) ) private val update7 = offset(7L) -> Traced[Update]( - Update.ReassignmentAccepted( - optCompletionInfo = None, - workflowId = Some(workflowId), - updateId = txId3, - recordTime = Timestamp.Epoch, - reassignmentInfo = ReassignmentInfo( - sourceDomain = SourceDomainId(domainId1), - targetDomain = TargetDomainId(domainId2), - submitter = Option(party1), - reassignmentCounter = 15L, - hostedStakeholders = party2 :: Nil, - unassignId = CantonTimestamp.assertFromLong(155555L), - isReassigningParticipant = true, - ), - reassignment = Reassignment.Assign( - ledgerEffectiveTime = Timestamp.assertFromLong(12222), - createNode = someCreateNode, - contractMetadata = someContractMetadataBytes, - ), - Some( - DomainIndex.of( - RequestIndex(RequestCounter(1), Some(SequencerCounter(1)), CantonTimestamp.MinValue) - ) - ), - ) + assignmentAccepted(t = 0, source = domainId1, target = domainId2) ) private val update8 = offset(8L) -> Traced[Update]( - Update.ReassignmentAccepted( - optCompletionInfo = None, - workflowId = Some(workflowId), - updateId = txId4, - recordTime = Timestamp.Epoch, - reassignmentInfo = ReassignmentInfo( - sourceDomain = SourceDomainId(domainId2), - targetDomain = TargetDomainId(domainId1), - submitter = Option(party2), - reassignmentCounter = 15L, - hostedStakeholders = party1 :: Nil, - unassignId = CantonTimestamp.assertFromLong(1555551L), - isReassigningParticipant = true, - ), - reassignment = Reassignment.Unassign( - contractId = someCreateNode.coid, - templateId = templateId2, - packageName = packageName, - stakeholders = List(party2), - assignmentExclusivity = Some(Timestamp.assertFromLong(123456L)), - ), - Some( - DomainIndex.of( - RequestIndex(RequestCounter(1), Some(SequencerCounter(1)), CantonTimestamp.MinValue) - ) - ), - ) + unassignmentAccepted(t = 0, source = domainId2, target = domainId1) ) private val anotherMetadataChangedUpdate = @@ -823,55 +799,46 @@ object 
InMemoryStateUpdaterSpec { val updatesSeq: Seq[Option[Traced[Update.TransactionAccepted]]] = recordTimesAndTicks.zip(domainIds).map { - case (None, _) => None - case (_, None) => None case (Some(t), Some(domain)) => Some( - Traced.empty( - Update.TransactionAccepted( - completionInfoO = None, - transactionMeta = someTransactionMeta, - transaction = CommittedTransaction(TransactionBuilder.Empty), - transactionId = txId1, - recordTime = Timestamp(t), - hostedWitnesses = Nil, - contractMetadata = Map.empty, - domainId = domain, - domainIndex = Some( - DomainIndex.of( - RequestIndex( - RequestCounter(1), - Some(SequencerCounter(1)), - CantonTimestamp.MinValue, - ) - ) - ), - ) - ) + Traced.empty(transactionAccepted(t, domain)) ) + case _ => None } offsets.zip(updatesSeq).map { - case (None, _) => None - case (_, None) => None case (Some(offset), Some(tracedUpdate)) => Some((offset, tracedUpdate)) + case _ => None } } + // this function gets a sequence of offset, update pairs as Some values + // and ticks as Nones + // runs the updateOffsetCheckpointCacheFlowWithTickingSource + // and provides as output: + // - 1. the expected output + // - 2. the actual output + // - 3. the checkpoints updates in the offset checkpoint cache def runUpdateOffsetCheckpointCacheFlow( - inputSeq: Seq[Option[(Offset, Traced[Update.TransactionAccepted])]] - )(implicit materializer: Materializer, ec: ExecutionContext) = { + inputSeq: Seq[Option[(Offset, Traced[Update])]] + )(implicit materializer: Materializer, ec: ExecutionContext): Future[ + ( + Seq[Vector[(Offset, Traced[Update])]], + Seq[Vector[(Offset, Traced[Update])]], + Seq[OffsetCheckpoint], + ) + ] = { val elementsQueue = - new ConcurrentLinkedQueue[Option[(Offset, Traced[Update.TransactionAccepted])]] + new ConcurrentLinkedQueue[Option[(Offset, Traced[Update])]] inputSeq.foreach(elementsQueue.add) - val flattenedSeq: Seq[(Vector[(Offset, Traced[Update])], Long, CantonTimestamp)] = - inputSeq.flatten.map(Vector(_)).map((_, 1L, CantonTimestamp.MinValue)) + val flattenedSeq: Seq[Vector[(Offset, Traced[Update])]] = + inputSeq.flatten.map(Vector(_)) val bufferSize = 100 val (sourceQueueSomes, sourceSomes) = Source - .queue[(Vector[(Offset, Traced[Update])], Long, CantonTimestamp)](bufferSize) + .queue[Vector[(Offset, Traced[Update])]](bufferSize) .preMaterialize() val (sourceQueueNones, sourceNones) = Source .queue[Option[Nothing]](bufferSize) @@ -881,7 +848,7 @@ object InMemoryStateUpdaterSpec { Option(elementsQueue.poll()) match { // send element case Some(Some(pair)) => - sourceQueueSomes.offer((Vector(pair), 1L, CantonTimestamp.MinValue)) + sourceQueueSomes.offer(Vector(pair)) // send tick case Some(None) => sourceQueueNones.offer(None) @@ -894,6 +861,7 @@ object InMemoryStateUpdaterSpec { var checkpoints: Seq[OffsetCheckpoint] = Seq.empty val output = sourceSomes + .map((_, 1L, CantonTimestamp.MinValue)) .via( InMemoryStateUpdaterFlow .updateOffsetCheckpointCacheFlowWithTickingSource( @@ -904,10 +872,126 @@ object InMemoryStateUpdaterSpec { tick = sourceNones, ) ) + .map(_._1) .alsoTo(Sink.foreach(_ => offerNext())) .runWith(Sink.seq) output.map(o => (flattenedSeq, o, checkpoints)) } + + private def transactionAccepted(t: Long, domainId: DomainId): Update.TransactionAccepted = + Update.TransactionAccepted( + completionInfoO = None, + transactionMeta = someTransactionMeta, + transaction = CommittedTransaction(TransactionBuilder.Empty), + transactionId = txId1, + recordTime = Timestamp(t), + hostedWitnesses = Nil, + contractMetadata = Map.empty, + domainId = 
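// The t parameter fixes recordTime above, and the domainId argument below picks
// the domain whose time the checkpoint cache should advance; the sibling helpers
// that follow do the same for reassignments, rejections, and sequencer-index
// moves.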
domainId, + domainIndex = Some( + DomainIndex.of( + RequestIndex( + RequestCounter(1), + Some(SequencerCounter(1)), + CantonTimestamp.MinValue, + ) + ) + ), + ) + + private def assignmentAccepted( + t: Long, + source: DomainId, + target: DomainId, + ): Update.ReassignmentAccepted = + Update.ReassignmentAccepted( + optCompletionInfo = None, + workflowId = Some(workflowId), + updateId = txId3, + recordTime = Timestamp(t), + reassignmentInfo = ReassignmentInfo( + sourceDomain = SourceDomainId(source), + targetDomain = TargetDomainId(target), + submitter = Option(party1), + reassignmentCounter = 15L, + hostedStakeholders = party2 :: Nil, + unassignId = CantonTimestamp.assertFromLong(155555L), + isReassigningParticipant = true, + ), + reassignment = Reassignment.Assign( + ledgerEffectiveTime = Timestamp.assertFromLong(12222), + createNode = someCreateNode, + contractMetadata = someContractMetadataBytes, + ), + Some( + DomainIndex.of( + RequestIndex(RequestCounter(1), Some(SequencerCounter(1)), CantonTimestamp.MinValue) + ) + ), + ) + + private def unassignmentAccepted( + t: Long, + source: DomainId, + target: DomainId, + ): Update.ReassignmentAccepted = + Update.ReassignmentAccepted( + optCompletionInfo = None, + workflowId = Some(workflowId), + updateId = txId4, + recordTime = Timestamp(t), + reassignmentInfo = ReassignmentInfo( + sourceDomain = SourceDomainId(source), + targetDomain = TargetDomainId(target), + submitter = Option(party2), + reassignmentCounter = 15L, + hostedStakeholders = party1 :: Nil, + unassignId = CantonTimestamp.assertFromLong(1555551L), + isReassigningParticipant = true, + ), + reassignment = Reassignment.Unassign( + contractId = someCreateNode.coid, + templateId = templateId2, + packageName = packageName, + stakeholders = List(party2), + assignmentExclusivity = Some(Timestamp.assertFromLong(123456L)), + ), + Some( + DomainIndex.of( + RequestIndex(RequestCounter(1), Some(SequencerCounter(1)), CantonTimestamp.MinValue) + ) + ), + ) + + private def commandRejected(t: Long, domainId: DomainId): Update.CommandRejected = + Update.CommandRejected( + recordTime = Time.Timestamp.assertFromLong(t), + completionInfo = CompletionInfo( + actAs = List.empty, + applicationId = Ref.ApplicationId.assertFromString("some-app-id"), + commandId = Ref.CommandId.assertFromString("cmdId"), + optDeduplicationPeriod = None, + submissionId = None, + None, + ), + reasonTemplate = FinalReason(new Status()), + domainId = domainId, + Some( + DomainIndex.of( + RequestIndex(RequestCounter(1), Some(SequencerCounter(1)), CantonTimestamp.MinValue) + ) + ), + ) + + private def sequencerIndexMoved(t: Long, domainId: DomainId): Update.SequencerIndexMoved = + Update.SequencerIndexMoved( + domainId = domainId, + sequencerIndex = SequencerIndex( + counter = SequencerCounter(1), + timestamp = CantonTimestamp.assertFromLong(t), + ), + requestCounterO = None, + ) } diff --git a/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/carbonv1/daml.yaml b/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/carbonv1/daml.yaml index c9758efe8b3d..d009f38c5721 100644 --- a/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/carbonv1/daml.yaml +++ b/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/carbonv1/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 3.2.0-snapshot.20240927.13322.0.v0ccfc472 +sdk-version: 3.2.0-snapshot.20241001.13325.0.vdeefd01c build-options: - --enable-interfaces=yes name: carbonv1-tests diff --git 
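Stepping back from the spec changes above: after this patch every Update variant that carries a domain time, SequencerIndexMoved included, contributes a (domainId, recordTime) pair, and the ticking source folds those pairs into the offset checkpoint cache. A minimal sketch of one fold step, with simplified stand-in types rather than the Canton ones:

// Simplified stand-ins for illustration only.
final case class OffsetCheckpoint(offset: Long, domainTimes: Map[String, Long])

// Fold the (domain, recordTime) pairs seen since the previous tick into the
// prior checkpoint; updates arrive in order, so the last write per domain wins.
def advance(
    previous: OffsetCheckpoint,
    offset: Long,
    observed: Seq[(String, Long)],
): OffsetCheckpoint =
  OffsetCheckpoint(
    offset = offset,
    domainTimes = observed.foldLeft(previous.domainTimes) {
      case (acc, (domain, recordTime)) => acc.updated(domain, recordTime)
    },
  )

In the test above one tick follows each update, so successive checkpoints differ by exactly one domain entry.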
a/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/carbonv2/daml.yaml b/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/carbonv2/daml.yaml index c12830f0404b..856bdce45ed6 100644 --- a/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/carbonv2/daml.yaml +++ b/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/carbonv2/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 3.2.0-snapshot.20240927.13322.0.v0ccfc472 +sdk-version: 3.2.0-snapshot.20241001.13325.0.vdeefd01c build-options: - --enable-interfaces=yes name: carbonv2-tests diff --git a/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/experimental/daml.yaml b/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/experimental/daml.yaml index 4c0630128975..0b490eaa7014 100644 --- a/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/experimental/daml.yaml +++ b/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/experimental/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 3.2.0-snapshot.20240927.13322.0.v0ccfc472 +sdk-version: 3.2.0-snapshot.20241001.13325.0.vdeefd01c name: experimental-tests source: . version: 3.1.0 diff --git a/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/model/daml.yaml b/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/model/daml.yaml index 7692652bb51c..6225eae1bdc1 100644 --- a/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/model/daml.yaml +++ b/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/model/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 3.2.0-snapshot.20240927.13322.0.v0ccfc472 +sdk-version: 3.2.0-snapshot.20241001.13325.0.vdeefd01c build-options: - --enable-interfaces=yes name: model-tests diff --git a/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/ongoing_stream_package_upload/daml.yaml b/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/ongoing_stream_package_upload/daml.yaml index ad0a4e9603ee..0521b4870ed9 100644 --- a/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/ongoing_stream_package_upload/daml.yaml +++ b/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/ongoing_stream_package_upload/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 3.2.0-snapshot.20240927.13322.0.v0ccfc472 +sdk-version: 3.2.0-snapshot.20241001.13325.0.vdeefd01c name: ongoing-stream-package-upload-tests source: . version: 3.1.0 diff --git a/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/package_management/daml.yaml b/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/package_management/daml.yaml index aa70771a291f..f12ec417d8ea 100644 --- a/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/package_management/daml.yaml +++ b/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/package_management/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 3.2.0-snapshot.20240927.13322.0.v0ccfc472 +sdk-version: 3.2.0-snapshot.20241001.13325.0.vdeefd01c name: package-management-tests source: . 
version: 3.1.0 diff --git a/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/semantic/daml.yaml b/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/semantic/daml.yaml index edcb06d3dc7f..944962124ab5 100644 --- a/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/semantic/daml.yaml +++ b/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/semantic/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 3.2.0-snapshot.20240927.13322.0.v0ccfc472 +sdk-version: 3.2.0-snapshot.20241001.13325.0.vdeefd01c build-options: - --enable-interfaces=yes name: semantic-tests diff --git a/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/upgrade/1.0.0/daml.yaml b/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/upgrade/1.0.0/daml.yaml index 55cf46840abe..c4bf9ed5618e 100644 --- a/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/upgrade/1.0.0/daml.yaml +++ b/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/upgrade/1.0.0/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 3.2.0-snapshot.20240927.13322.0.v0ccfc472 +sdk-version: 3.2.0-snapshot.20241001.13325.0.vdeefd01c name: upgrade-tests source: . version: 1.0.0 diff --git a/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/upgrade/2.0.0/daml.yaml b/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/upgrade/2.0.0/daml.yaml index 329f6e25995a..af5b9366cfa8 100644 --- a/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/upgrade/2.0.0/daml.yaml +++ b/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/upgrade/2.0.0/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 3.2.0-snapshot.20240927.13322.0.v0ccfc472 +sdk-version: 3.2.0-snapshot.20241001.13325.0.vdeefd01c name: upgrade-tests source: . version: 2.0.0 diff --git a/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/upgrade/3.0.0/daml.yaml b/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/upgrade/3.0.0/daml.yaml index 2ec7cfd8ee93..142d62e860de 100644 --- a/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/upgrade/3.0.0/daml.yaml +++ b/sdk/canton/community/ledger/ledger-common-dars/src/main/daml/upgrade/3.0.0/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 3.2.0-snapshot.20240927.13322.0.v0ccfc472 +sdk-version: 3.2.0-snapshot.20241001.13325.0.vdeefd01c name: upgrade-tests source: . 
version: 3.0.0 diff --git a/sdk/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/EndpointsCompanion.scala b/sdk/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/EndpointsCompanion.scala index 22bbc42d4ed5..01123fb05ebd 100644 --- a/sdk/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/EndpointsCompanion.scala +++ b/sdk/canton/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/EndpointsCompanion.scala @@ -178,16 +178,13 @@ object EndpointsCompanion extends NoTracing { def notFound( logger: TracedLogger )(implicit lc: LoggingContextOf[InstanceUUID]): Route = (ctx: RequestContext) => - ctx.request match { - case HttpRequest(method, uri, _, _, _) => - extendWithRequestIdLogCtx(implicit lc => - Future.successful( - Complete( - httpResponseError(NotFound(s"${method: HttpMethod}, uri: ${uri: Uri}"), logger) - ) - ) + extendWithRequestIdLogCtx(implicit lc => + Future.successful( + Complete( + httpResponseError(NotFound(s"${ctx.request.method}, uri: ${ctx.request.uri}"), logger) ) - } + ) + ) def httpResponseError( error: Error, diff --git a/sdk/canton/community/ledger/ledger-json-api/src/test/daml/damldefinitionsservice/dep/daml.yaml b/sdk/canton/community/ledger/ledger-json-api/src/test/daml/damldefinitionsservice/dep/daml.yaml index 36ff436390e0..dc4097cad9da 100644 --- a/sdk/canton/community/ledger/ledger-json-api/src/test/daml/damldefinitionsservice/dep/daml.yaml +++ b/sdk/canton/community/ledger/ledger-json-api/src/test/daml/damldefinitionsservice/dep/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 3.2.0-snapshot.20240927.13322.0.v0ccfc472 +sdk-version: 3.2.0-snapshot.20241001.13325.0.vdeefd01c build-options: - --target=2.dev - --enable-interfaces=yes diff --git a/sdk/canton/community/ledger/ledger-json-api/src/test/daml/damldefinitionsservice/main/daml.yaml b/sdk/canton/community/ledger/ledger-json-api/src/test/daml/damldefinitionsservice/main/daml.yaml index 3c06fef4ca83..af67388fdf57 100644 --- a/sdk/canton/community/ledger/ledger-json-api/src/test/daml/damldefinitionsservice/main/daml.yaml +++ b/sdk/canton/community/ledger/ledger-json-api/src/test/daml/damldefinitionsservice/main/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 3.2.0-snapshot.20240927.13322.0.v0ccfc472 +sdk-version: 3.2.0-snapshot.20241001.13325.0.vdeefd01c build-options: - --target=2.dev - --enable-interfaces=yes diff --git a/sdk/canton/community/ledger/ledger-json-api/src/test/daml/v2_1/daml.yaml b/sdk/canton/community/ledger/ledger-json-api/src/test/daml/v2_1/daml.yaml index a6035d1f7469..de72178ca1ee 100644 --- a/sdk/canton/community/ledger/ledger-json-api/src/test/daml/v2_1/daml.yaml +++ b/sdk/canton/community/ledger/ledger-json-api/src/test/daml/v2_1/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 3.2.0-snapshot.20240927.13322.0.v0ccfc472 +sdk-version: 3.2.0-snapshot.20241001.13325.0.vdeefd01c build-options: - --target=2.1 name: JsonEncodingTest diff --git a/sdk/canton/community/ledger/ledger-json-api/src/test/daml/v2_dev/daml.yaml b/sdk/canton/community/ledger/ledger-json-api/src/test/daml/v2_dev/daml.yaml index 9168b85f6008..6363b6599d4b 100644 --- a/sdk/canton/community/ledger/ledger-json-api/src/test/daml/v2_dev/daml.yaml +++ b/sdk/canton/community/ledger/ledger-json-api/src/test/daml/v2_dev/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 3.2.0-snapshot.20240927.13322.0.v0ccfc472 +sdk-version: 3.2.0-snapshot.20241001.13325.0.vdeefd01c build-options: - --target=2.dev name: JsonEncodingTestDev diff --git 
a/sdk/canton/community/ledger/ledger-json-api/src/test/scala/com/digitalasset/canton/daml/lf/value/json/ApiCodecCompressedSpec.scala b/sdk/canton/community/ledger/ledger-json-api/src/test/scala/com/digitalasset/canton/daml/lf/value/json/ApiCodecCompressedSpec.scala index 5ea0d2e0cc36..3e5b86e66bc5 100644 --- a/sdk/canton/community/ledger/ledger-json-api/src/test/scala/com/digitalasset/canton/daml/lf/value/json/ApiCodecCompressedSpec.scala +++ b/sdk/canton/community/ledger/ledger-json-api/src/test/scala/com/digitalasset/canton/daml/lf/value/json/ApiCodecCompressedSpec.scala @@ -5,7 +5,11 @@ package com.digitalasset.canton.daml.lf.value.json import com.digitalasset.daml.lf.value.Value.ContractId import com.digitalasset.daml.lf.data.{ImmArray, Numeric, Ref, SortedLookupList, Time} -import com.digitalasset.daml.lf.value.test.TypedValueGenerators.{genAddend, genTypeAndValue, ValueAddend as VA} +import com.digitalasset.daml.lf.value.test.TypedValueGenerators.{ + genAddend, + genTypeAndValue, + ValueAddend as VA, +} import com.digitalasset.daml.lf.value.test.ValueGenerators.coidGen import ApiCodecCompressed.{apiValueToJsValue, jsValueToApiValue} import com.digitalasset.canton.daml.lf.value.json.NavigatorModelAliases as model @@ -115,6 +119,7 @@ abstract class ApiCodecCompressedSpec fRecord = simpleRecordT, ), ) + @nowarn("msg=dubious usage of method asInstanceOf with unit value") val complexRecordV: complexRecordT.Inj = HRecord( fText = "foo", @@ -413,7 +418,7 @@ class ApiCodecCompressedSpecStable extends ApiCodecCompressedSpec { } } - import com.digitalasset.daml.lf.value.{Value as LfValue} + import com.digitalasset.daml.lf.value.Value as LfValue import ApiCodecCompressed.JsonImplicits.* val packageId: Ref.PackageId = mustBeOne( @@ -534,11 +539,11 @@ class ApiCodecCompressedSpecStable extends ApiCodecCompressedSpec { class ApiCodecCompressedSpecDev extends ApiCodecCompressedSpec { override def darPath: String = "JsonEncodingTestDev.dar" - import com.digitalasset.daml.lf.value.{Value as LfValue} + import com.digitalasset.daml.lf.value.Value as LfValue "API compressed JSON codec" when { "dealing with Contract Key" should { - import com.digitalasset.daml.lf.typesig.PackageSignature.TypeDecl.{Template as TDTemplate} + import com.digitalasset.daml.lf.typesig.PackageSignature.TypeDecl.Template as TDTemplate "decode type Key = Party from JSON" in { val templateDef: TDTemplate = mustBeOne( diff --git a/sdk/canton/community/lib/wartremover/src/main/scala/com/digitalasset/canton/SlickString.scala b/sdk/canton/community/lib/wartremover/src/main/scala/com/digitalasset/canton/SlickString.scala deleted file mode 100644 index 3706bf54f281..000000000000 --- a/sdk/canton/community/lib/wartremover/src/main/scala/com/digitalasset/canton/SlickString.scala +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright (c) 2024 Digital Asset (Switzerland) GmbH and/or its affiliates. -// Proprietary code. All rights reserved. - -package com.digitalasset.canton - -import org.wartremover.{WartTraverser, WartUniverse} -import slick.jdbc.PositionedParameters - -import scala.reflect.NameTransformer - -/** The DB may truncate strings of unbounded length. Therefore, this wart flags the following: - *
 * <ul> - *   <li>Calls to [[slick.jdbc.PositionedParameters.setString]]</li> - *   <li>Calls to [[slick.jdbc.PositionedParameters.setStringOption]]</li> - *   <li>References to [[slick.jdbc.SetParameter.SetString]]</li> - *   <li>References to [[slick.jdbc.SetParameter.SetStringOption]]</li> - * </ul>
- * This includes references generated by the string interpolators `sql` and `sqlu` - */ -object SlickString extends WartTraverser { - - val message: String = - "The DB may truncate strings of unbounded length. Use a LengthLimitedString instead." - - def apply(u: WartUniverse): u.Traverser = { - import u.universe.* - - val positionedParameterSymbol = typeOf[PositionedParameters] - val positionedParameterTypeSymbol = positionedParameterSymbol.typeSymbol - require(positionedParameterTypeSymbol != NoSymbol) - - val setStringMethodName: TermName = TermName("setString") - val setStringMethod = positionedParameterSymbol.member(setStringMethodName) - require(setStringMethod != NoSymbol) - - val setStringOptionMethodName: TermName = TermName("setStringOption") - val setStringOptionMethod = positionedParameterSymbol.member(setStringOptionMethodName) - require(setStringOptionMethod != NoSymbol) - - val setParameterMethodName: TermName = TermName(NameTransformer.encode(">>")) - val setParameterMethod = positionedParameterSymbol.member(setParameterMethodName) - require(setParameterMethod != NoSymbol) - - val setStringObject = rootMirror.staticModule("slick.jdbc.SetParameter.SetString") - val setStringOptionObject = rootMirror.staticModule("slick.jdbc.SetParameter.SetStringOption") - - val predefSymbol = typeOf[Predef.type] - val implicitlyMethodName: TermName = TermName("implicitly") - val implicitlyMethod = predefSymbol.member(implicitlyMethodName) - require(implicitlyMethod != NoSymbol) - - new Traverser { - override def traverse(tree: Tree): Unit = - tree match { - // Ignore trees marked by SuppressWarnings - case t if hasWartAnnotation(u)(t) => - // References to SetParameter.SetString - case t if t.symbol == setStringObject => - error(u)(tree.pos, message) - super.traverse(tree) - // References to SetParameter.SetStringOption - case t if t.symbol == setStringOptionObject => - error(u)(tree.pos, message) - super.traverse(tree) - // PositionedParameter.setString and PositionedParameter.setStringOption - case Apply(Select(receiver, methodName), _) - if receiver.tpe.typeSymbol == positionedParameterTypeSymbol && - (methodName == setStringMethodName || methodName == setStringOptionMethodName) => - error(u)(tree.pos, message) - super.traverse(tree) - case _ => - super.traverse(tree) - } - } - } -} diff --git a/sdk/canton/community/lib/wartremover/src/test/scala/com/digitalasset/canton/SlickStringTest.scala b/sdk/canton/community/lib/wartremover/src/test/scala/com/digitalasset/canton/SlickStringTest.scala deleted file mode 100644 index fa193ab7e348..000000000000 --- a/sdk/canton/community/lib/wartremover/src/test/scala/com/digitalasset/canton/SlickStringTest.scala +++ /dev/null @@ -1,91 +0,0 @@ -// Copyright (c) 2024 Digital Asset (Switzerland) GmbH and/or its affiliates. -// Proprietary code. All rights reserved. - -package com.digitalasset.canton - -import org.scalatest.Assertion -import org.scalatest.matchers.should.Matchers -import org.scalatest.wordspec.AnyWordSpec -import org.wartremover.test.WartTestTraverser -import slick.jdbc.PositionedParameters -import slick.jdbc.canton.ActionBasedSQLInterpolation.Implicits.* - -class SlickStringTest extends AnyWordSpec with Matchers { - - def assertIsError(result: WartTestTraverser.Result): Assertion = { - result.errors.length should be >= 1 - result.errors.foreach(_ should include(SlickString.message)) - succeed - } - - "SlickString" should { - "detect explicit setString" in { - val result = WartTestTraverser(SlickString) { - val pp = (??? 
: PositionedParameters) - pp.setString("foo") - } - assertIsError(result) - } - - "detect explicit setStringOption" in { - val result = WartTestTraverser(SlickString) { - val pp = (??? : PositionedParameters) // fo - pp.setStringOption(None) - } - assertIsError(result) - } - - "detect implicit setString" in { - val result = WartTestTraverser(SlickString) { - val pp = (??? : PositionedParameters) - pp >> "bar" - } - assertIsError(result) - } - - "detect implicit setStringOption" in { - val result = WartTestTraverser(SlickString) { - val pp = (??? : PositionedParameters) - pp >> Option("bar") - } - assertIsError(result) - } - - "detect interpolated setString" in { - val sqlResult = WartTestTraverser(SlickString) { - val _ = sql"${"some string"}" - } - assertIsError(sqlResult) - - val sqluResult = WartTestTraverser(SlickString) { - val _ = sqlu"${"some string"}" - } - assertIsError(sqluResult) - } - - "detect interpolated setStringOption" in { - val sqlResult = WartTestTraverser(SlickString) { - val _ = sql"${Option("some string")}" - } - assertIsError(sqlResult) - - val sqluResult = WartTestTraverser(SlickString) { - val _ = sqlu"${Option("some string")}" - } - assertIsError(sqluResult) - } - - "allow references to unrelated SetString objects" in { - val result = WartTestTraverser(SlickString) { - val _ = SetParameter.SetString - } - result.errors shouldBe List.empty - } - } - - object SetParameter { - object SetString extends slick.jdbc.SetParameter[String] { - override def apply(v1: String, v2: PositionedParameters): Unit = ??? - } - } -} diff --git a/sdk/canton/community/participant/src/main/daml/daml.yaml b/sdk/canton/community/participant/src/main/daml/daml.yaml index 54dda27bb7f4..83b6b8db8600 100644 --- a/sdk/canton/community/participant/src/main/daml/daml.yaml +++ b/sdk/canton/community/participant/src/main/daml/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 3.2.0-snapshot.20240927.13322.0.v0ccfc472 +sdk-version: 3.2.0-snapshot.20241001.13325.0.vdeefd01c build-options: - --target=2.1 name: AdminWorkflows diff --git a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/ParticipantNode.scala b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/ParticipantNode.scala index dd868ab9930a..8d82844394b3 100644 --- a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/ParticipantNode.scala +++ b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/ParticipantNode.scala @@ -567,7 +567,6 @@ class ParticipantNodeBootstrap( domainRegistry = new GrpcDomainRegistry( participantId, syncDomainPersistentStateManager, - persistentState.map(_.settingsStore), topologyDispatcher, syncCrypto, config.crypto, @@ -986,7 +985,7 @@ object ParticipantNodeBootstrap { private def createReplicationServiceFactory( arguments: Arguments - )(storage: Storage): ServerServiceDefinition = + ): ServerServiceDefinition = StaticGrpcServices .notSupportedByCommunity( EnterpriseParticipantReplicationServiceGrpc.SERVICE, @@ -1057,7 +1056,7 @@ object ParticipantNodeBootstrap { createEngine(arguments), CantonSyncService.DefaultFactory, createResourceService(arguments), - createReplicationServiceFactory(arguments), + _ => createReplicationServiceFactory(arguments), ledgerApiServerFactory = ledgerApiServerFactory, setInitialized = _ => (), ) diff --git a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/grpc/GrpcInspectionService.scala 
b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/grpc/GrpcInspectionService.scala index 5d431a57d58c..ce2e18e822a9 100644 --- a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/grpc/GrpcInspectionService.scala +++ b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/grpc/GrpcInspectionService.scala @@ -6,23 +6,10 @@ package com.digitalasset.canton.participant.admin.grpc import cats.data.EitherT import cats.implicits.{toBifunctorOps, toTraverseOps} import cats.syntax.either.* -import cats.syntax.parallel.* import com.daml.error.{ErrorCategory, ErrorCode, Explanation, Resolution} import com.daml.nonempty.NonEmpty import com.digitalasset.canton.admin.participant.v30.InspectionServiceGrpc.InspectionService -import com.digitalasset.canton.admin.participant.v30.{ - CountInFlight, - DomainTimeRange, - GetConfigForSlowCounterParticipants, - GetIntervalsBehindForCounterParticipants, - InspectCommitmentContracts, - LookupOffsetByTime, - LookupReceivedAcsCommitments, - LookupSentAcsCommitments, - OpenCommitment, - SetConfigForSlowCounterParticipants, - TimeRange, -} +import com.digitalasset.canton.admin.participant.v30.* import com.digitalasset.canton.concurrent.FutureSupervisor import com.digitalasset.canton.data.{CantonTimestamp, CantonTimestampSecond} import com.digitalasset.canton.error.CantonError @@ -47,7 +34,6 @@ import com.digitalasset.canton.store.{IndexedDomain, IndexedStringStore} import com.digitalasset.canton.topology.client.IdentityProvidingServiceClient import com.digitalasset.canton.topology.{DomainId, ParticipantId} import com.digitalasset.canton.tracing.{TraceContext, TraceContextGrpc} -import com.digitalasset.canton.util.FutureInstances.* import com.digitalasset.canton.util.{EitherTUtil, GrpcStreamingUtils} import io.grpc.stub.StreamObserver diff --git a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/inspection/SyncStateInspection.scala b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/inspection/SyncStateInspection.scala index af655b010acf..05db297ccba5 100644 --- a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/inspection/SyncStateInspection.scala +++ b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/inspection/SyncStateInspection.scala @@ -5,8 +5,6 @@ package com.digitalasset.canton.participant.admin.inspection import cats.Eval import cats.data.EitherT -import cats.syntax.foldable.* -import cats.syntax.parallel.* import cats.syntax.traverse.* import com.daml.nameof.NameOf.functionFullName import com.daml.nonempty.NonEmpty @@ -431,8 +429,8 @@ final class SyncStateInspection( val searchResult = domainPeriods.map { dp => for { domain <- syncDomainPersistentStateManager - .aliasForDomainId(dp.domain.domainId) - .toRight(s"No domain alias found for ${dp.domain.domainId}") + .aliasForDomainId(dp.indexedDomain.domainId) + .toRight(s"No domain alias found for ${dp.indexedDomain.domainId}") persistentState = getPersistentState(domain) result = for { @@ -463,7 +461,7 @@ final class SyncStateInspection( } } } yield SentAcsCommitment - .compare(dp.domain.domainId, computed, received, outstanding, verbose) + .compare(dp.indexedDomain.domainId, computed, received, outstanding, verbose) .filter(cmt => states.isEmpty || states.contains(cmt.state)) } yield result } @@ -487,8 +485,8 @@ final class 
SyncStateInspection( val searchResult = domainPeriods.map { dp => for { domain <- syncDomainPersistentStateManager - .aliasForDomainId(dp.domain.domainId) - .toRight(s"No domain alias found for ${dp.domain.domainId}") + .aliasForDomainId(dp.indexedDomain.domainId) + .toRight(s"No domain alias found for ${dp.indexedDomain.domainId}") persistentState = getPersistentState(domain) result = for { @@ -511,13 +509,13 @@ final class SyncStateInspection( .peekThrough(dp.toInclusive) // peekThrough takes an upper bound parameter .collect(iter => iter.filter(cmt => - cmt.period.fromExclusive >= dp.fromExclusive && cmt.domainId == dp.domain.domainId && (counterParticipants.isEmpty || + cmt.period.fromExclusive >= dp.fromExclusive && cmt.domainId == dp.indexedDomain.domainId && (counterParticipants.isEmpty || counterParticipants .contains(cmt.sender)) ) ) } yield ReceivedAcsCommitment - .compare(dp.domain.domainId, received, computed, buffered, outstanding, verbose) + .compare(dp.indexedDomain.domainId, received, computed, buffered, outstanding, verbose) .filter(cmt => states.isEmpty || states.contains(cmt.state)) } yield result } @@ -581,7 +579,7 @@ final class SyncStateInspection( getPersistentState(domain) .map(state => participantNodePersistentState.value.ledgerApiStore - .domainIndex(state.domainId.domainId) + .domainIndex(state.indexedDomain.domainId) ) .toRight(s"Not connected to $domain") @@ -627,7 +625,7 @@ final class SyncStateInspection( getPersistentState(domain) .toRight(s"Unknown domain $domain") ) - domainId = state.domainId.domainId + domainId = state.indexedDomain.domainId unsequencedSubmissions <- EitherT.right[String]( participantNodePersistentState.value.inFlightSubmissionStore .lookupUnsequencedUptoUnordered(domainId, CantonTimestamp.now()) @@ -649,7 +647,7 @@ final class SyncStateInspection( timeouts.inspection.await(functionFullName)( participantNodePersistentState.value.ledgerApiStore .onlyForTestingNumberOfAcceptedTransactionsFor( - domainPersistentState.domainId.domainId + domainPersistentState.indexedDomain.domainId ) ) ) diff --git a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/repair/RepairService.scala b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/repair/RepairService.scala index 6ea88aef9780..5f2f336f2015 100644 --- a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/repair/RepairService.scala +++ b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/repair/RepairService.scala @@ -40,14 +40,14 @@ import com.digitalasset.canton.logging.{ } import com.digitalasset.canton.participant.ParticipantNodeParameters import com.digitalasset.canton.participant.admin.PackageDependencyResolver -import com.digitalasset.canton.participant.admin.repair.RepairService.ContractToAdd +import com.digitalasset.canton.participant.admin.repair.RepairService.{ContractToAdd, DomainLookup} import com.digitalasset.canton.participant.domain.DomainAliasManager import com.digitalasset.canton.participant.event.RecordTime import com.digitalasset.canton.participant.ledger.api.LedgerApiIndexer import com.digitalasset.canton.participant.protocol.EngineController.EngineAbortStatus import com.digitalasset.canton.participant.protocol.RequestJournal.RequestState import com.digitalasset.canton.participant.store.* -import com.digitalasset.canton.participant.sync.SyncDomainPersistentStateManager +import 
com.digitalasset.canton.participant.topology.TopologyComponentFactory import com.digitalasset.canton.participant.util.DAMLe.ContractWithMetadata import com.digitalasset.canton.participant.util.{DAMLe, TimeOfChange} import com.digitalasset.canton.protocol.SerializableContract.LedgerCreateTime @@ -96,13 +96,10 @@ final class RepairService( packageDependencyResolver: PackageDependencyResolver, damle: DAMLe, ledgerApiIndexer: Eval[LedgerApiIndexer], - val syncDomainPersistentStateManager: SyncDomainPersistentStateManager, aliasManager: DomainAliasManager, parameters: ParticipantNodeParameters, threadsAvailableForWriting: PositiveInt, - // TODO(i18695): attempt to unify these two for simplicity - isConnected: DomainId => Boolean, - isConnectedToAnyDomain: () => Boolean, + val domainLookup: DomainLookup, @VisibleForTesting private[canton] val executionQueue: SimpleExecutionQueue, protected val loggerFactory: NamedLoggerFactory, @@ -124,7 +121,7 @@ final class RepairService( ) private def domainNotConnected(domainId: DomainId): EitherT[Future, String, Unit] = EitherT.cond( - !isConnected(domainId), + !domainLookup.isConnected(domainId), (), s"Participant is still connected to domain $domainId", ) @@ -278,7 +275,7 @@ final class RepairService( ) ) - topologyFactory <- syncDomainPersistentStateManager + topologyFactory <- domainLookup .topologyFactoryFor(domainId) .toRight(s"No topology factory for domain $domainAlias") .toEitherT[Future] @@ -327,7 +324,7 @@ final class RepairService( if (contracts.isEmpty) { Either.right(logger.info("No contracts to add specified")) } else { - lockAndAwaitDomainAlias( + runConsecutiveAndAwaitDomainAlias( "repair.add", domainId => withRepairIndexer { repairIndexer => @@ -456,7 +453,7 @@ final class RepairService( logger.info( s"Purging ${contractIds.length} contracts from $domain with ignoreAlreadyPurged=$ignoreAlreadyPurged" ) - lockAndAwaitDomainAlias( + runConsecutiveAndAwaitDomainAlias( "repair.purge", domainId => withRepairIndexer { repairIndexer => @@ -538,7 +535,7 @@ final class RepairService( logger.info( s"Change assignation request for ${contractIds.length} contracts from $sourceDomain to $targetDomain with skipInactive=$skipInactive" ) - lockAndAwaitDomainPair( + runConsecutiveAndAwaitDomainPair( "repair.change_assignation", (sourceDomainId, targetDomainId) => { for { @@ -630,7 +627,7 @@ final class RepairService( traceContext: TraceContext ): EitherT[Future, String, Unit] = { logger.info(s"Ignoring sequenced events from $fromInclusive to $toInclusive (force = $force).") - lock( + runConsecutive( "repair.skip_messages", for { _ <- domainNotConnected(domain) @@ -731,7 +728,7 @@ final class RepairService( logger.info( s"Unignoring sequenced events from $fromInclusive to $toInclusive (force = $force)." 
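// Sketch (not part of this patch): the lock -> runConsecutive renames in this file
// clarify that the old name suggested mutual exclusion via locking, while the actual
// semantics are "submit to a serial queue and run tasks one after another". Canton's
// real implementation is SimpleExecutionQueue; the ConsecutiveQueue below is a
// hypothetical, self-contained stand-in that only illustrates the ordering guarantee
// (no task starts before the previously submitted one has completed).
import java.util.concurrent.atomic.AtomicReference
import scala.concurrent.{ExecutionContext, Future, Promise}

final class ConsecutiveQueue(implicit ec: ExecutionContext) {
  // Tail of the chain: completes when the most recently submitted task is done.
  private val tail = new AtomicReference[Future[Any]](Future.successful(()))

  def run[A](task: () => Future[A]): Future[A] = {
    val done = Promise[A]()
    // Atomically append to the chain, then start once the predecessor finishes.
    // Future(task()).flatten also captures exceptions thrown by task() itself.
    tail.getAndSet(done.future).onComplete(_ => done.completeWith(Future(task()).flatten))
    done.future
  }
}
// Usage: queue.run(() => skipMessages(...)) and queue.run(() => unskipMessages(...))
// never interleave, matching "repair commands run consecutively".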
) - lock( + runConsecutive( "repair.unskip_messages", for { _ <- domainNotConnected(domain) @@ -1324,8 +1321,8 @@ final class RepairService( traceContext: TraceContext ): Either[String, SyncDomainPersistentState] = for { - dp <- syncDomainPersistentStateManager - .get(domainId) + dp <- domainLookup + .persistentStateFor(domainId) .toRight(log(s"Could not find $domainDescription")) _ <- Either.cond( !dp.isMemory, @@ -1336,7 +1333,7 @@ final class RepairService( ) } yield dp - private def lockAndAwait[B]( + private def runConsecutiveAndAwait[B]( description: String, code: => EitherT[Future, String, B], )(implicit @@ -1346,12 +1343,11 @@ final class RepairService( // repair commands can take an unbounded amount of time parameters.processingTimeouts.unbounded.await(description)( - lock(description, code).value + runConsecutive(description, code).value ) } - // TODO(i18695): attempt to rename lock to something more suitable - private def lock[B]( + private def runConsecutive[B]( description: String, code: => EitherT[Future, String, B], )(implicit @@ -1366,7 +1362,7 @@ final class RepairService( ) } - private def lockAndAwaitDomainAlias[B]( + private def runConsecutiveAndAwaitDomainAlias[B]( description: String, code: DomainId => EitherT[Future, String, B], domainAlias: DomainAlias, @@ -1377,13 +1373,13 @@ final class RepairService( aliasManager.domainIdForAlias(domainAlias).toRight(s"Could not find $domainAlias") ) - lockAndAwait( + runConsecutiveAndAwait( description, domainId.flatMap(code), ) } - private def lockAndAwaitDomainPair[B]( + private def runConsecutiveAndAwaitDomainPair[B]( description: String, code: (DomainId, DomainId) => EitherT[Future, String, B], domainAliases: (DomainAlias, DomainAlias), @@ -1394,7 +1390,7 @@ final class RepairService( aliasToDomainId(domainAliases._1), aliasToDomainId(domainAliases._2), ).tupled - lockAndAwait[B]( + runConsecutiveAndAwait[B]( description, domainIds.flatMap(Function.tupled(code)), ) @@ -1411,7 +1407,7 @@ final class RepairService( private def withRepairIndexer(code: FutureQueue[Traced[Update]] => EitherT[Future, String, Unit])( implicit traceContext: TraceContext ): EitherT[Future, String, Unit] = - if (isConnectedToAnyDomain()) { + if (domainLookup.isConnectedToAnyDomain) { EitherT.leftT[Future, Unit]( "There are still domains connected. Please disconnect all domains." 
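// Sketch (not part of this patch): the domainLookup dependency used above bundles the
// two previously separate callbacks (isConnected, isConnectedToAnyDomain) with the
// persistent-state and topology lookups; the actual trait is added at the bottom of
// RepairService later in this file. Generic Id/State/Topo parameters stand in for
// Canton's DomainId, SyncDomainPersistentState and TopologyComponentFactory; this is
// an illustration of the shape, not the production wiring.
trait DomainLookupSketch[Id, State, Topo] {
  def isConnected(id: Id): Boolean
  def isConnectedToAnyDomain: Boolean
  def persistentStateFor(id: Id): Option[State]
  def topologyFactoryFor(id: Id): Option[Topo]
}

final class MapBackedDomainLookup[Id, State, Topo](
    connected: () => Set[Id], // live view of currently connected domains
    states: Id => Option[State], // e.g. backed by the persistent state manager
    topologies: Id => Option[Topo],
) extends DomainLookupSketch[Id, State, Topo] {
  override def isConnected(id: Id): Boolean = connected().contains(id)
  override def isConnectedToAnyDomain: Boolean = connected().nonEmpty
  override def persistentStateFor(id: Id): Option[State] = states(id)
  override def topologyFactoryFor(id: Id): Option[Topo] = topologies(id)
}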
) @@ -1528,4 +1524,14 @@ object RepairService { .map(DriverContractMetadata(_).toLfBytes(protocolVersion)) .getOrElse(Bytes.Empty) } + + trait DomainLookup { + def isConnected(domainId: DomainId): Boolean + + def isConnectedToAnyDomain: Boolean + + def persistentStateFor(domainId: DomainId): Option[SyncDomainPersistentState] + + def topologyFactoryFor(domainId: DomainId): Option[TopologyComponentFactory] + } } diff --git a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/config/LocalParticipantConfig.scala b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/config/LocalParticipantConfig.scala index 53a9ddb8c49b..b80eb751fd2c 100644 --- a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/config/LocalParticipantConfig.scala +++ b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/config/LocalParticipantConfig.scala @@ -9,7 +9,6 @@ import com.daml.tls.{TlsConfiguration, TlsVersion} import com.digitalasset.canton.config import com.digitalasset.canton.config.RequireTypes.* import com.digitalasset.canton.config.* -import com.digitalasset.canton.discard.Implicits.DiscardOps import com.digitalasset.canton.http.JsonApiConfig import com.digitalasset.canton.networking.grpc.CantonServerBuilder import com.digitalasset.canton.participant.admin.AdminWorkflowConfig @@ -238,55 +237,6 @@ object LedgerApiServerConfig { minFreeHeapSpaceBytes = 0, ) - /** the following case class match will help us detect any additional configuration options added. - * If the below match fails because there are more config options, add them to our "LedgerApiServerConfig". - */ - private def _completenessCheck( - managementServiceTimeout: config.NonNegativeFiniteDuration, - tlsConfiguration: Option[TlsConfiguration], - ): Unit = { - - def fromClientAuth(clientAuth: ClientAuth): ServerAuthRequirementConfig = { - import ServerAuthRequirementConfig.* - clientAuth match { - case ClientAuth.REQUIRE => - None // not passing "require" as we need adminClientCerts in this case which are not available here - case ClientAuth.OPTIONAL => Optional - case ClientAuth.NONE => None - } - } - - val tlsConfig = tlsConfiguration match { - case Some( - TlsConfiguration( - true, - Some(keyCertChainFile), - Some(keyFile), - trustCertCollectionFile, - authRequirement, - enableCertRevocationChecking, - optTlsVersion, - ) - ) => - Some( - TlsServerConfig( - certChainFile = ExistingFile.tryCreate(keyCertChainFile), - privateKeyFile = ExistingFile.tryCreate(keyFile), - trustCollectionFile = trustCertCollectionFile.map(x => ExistingFile.tryCreate(x)), - clientAuth = fromClientAuth(authRequirement), - minimumServerProtocolVersion = optTlsVersion.map(_.version), - enableCertRevocationChecking = enableCertRevocationChecking, - ) - ) - case _ => None - } - - LedgerApiServerConfig( - tls = tlsConfig, - managementServiceTimeout = managementServiceTimeout, - ).discard - } - def ledgerApiServerTlsConfigFromCantonServerConfig( tlsCantonConfig: TlsServerConfig ): TlsConfiguration = diff --git a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/domain/DomainRegistryHelpers.scala b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/domain/DomainRegistryHelpers.scala index 7df206a6b2bf..9c6ebf49afb1 100644 --- a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/domain/DomainRegistryHelpers.scala +++ 
b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/domain/DomainRegistryHelpers.scala @@ -104,10 +104,7 @@ trait DomainRegistryHelpers extends FlagCloseable with NamedLogging { this: HasF EitherTUtil.unit.mapK(FutureUnlessShutdown.outcomeK) } else { topologyDispatcher - .trustDomain( - domainId, - sequencerAggregatedInfo.staticDomainParameters, - ) + .trustDomain(domainId) .leftMap( DomainRegistryError.ConfigurationErrors.CanNotIssueDomainTrustCertificate.Error(_) ) diff --git a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/domain/grpc/GrpcDomainRegistry.scala b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/domain/grpc/GrpcDomainRegistry.scala index 9eab97912977..32fa2a252e14 100644 --- a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/domain/grpc/GrpcDomainRegistry.scala +++ b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/domain/grpc/GrpcDomainRegistry.scala @@ -3,7 +3,6 @@ package com.digitalasset.canton.participant.domain.grpc -import cats.Eval import cats.instances.future.* import cats.syntax.either.* import com.daml.grpc.adapter.ExecutionSequencerFactory @@ -17,10 +16,7 @@ import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} import com.digitalasset.canton.participant.ParticipantNodeParameters import com.digitalasset.canton.participant.domain.* import com.digitalasset.canton.participant.metrics.SyncDomainMetrics -import com.digitalasset.canton.participant.store.{ - ParticipantSettingsLookup, - SyncDomainPersistentState, -} +import com.digitalasset.canton.participant.store.SyncDomainPersistentState import com.digitalasset.canton.participant.sync.SyncDomainPersistentStateManager import com.digitalasset.canton.participant.topology.{ LedgerServerPartyNotifier, @@ -56,7 +52,6 @@ import scala.concurrent.ExecutionContextExecutor class GrpcDomainRegistry( val participantId: ParticipantId, syncDomainPersistentStateManager: SyncDomainPersistentStateManager, - participantSettings: Eval[ParticipantSettingsLookup], topologyDispatcher: ParticipantTopologyDispatcher, cryptoApiProvider: SyncCryptoApiProvider, cryptoConfig: CryptoConfig, diff --git a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/event/RecordOrderPublisher.scala b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/event/RecordOrderPublisher.scala index 0f16243b2a66..09077d45135f 100644 --- a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/event/RecordOrderPublisher.scala +++ b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/event/RecordOrderPublisher.scala @@ -189,30 +189,27 @@ class RecordOrderPublisher( Future.unit } - // TODO(i18695) this new tracecontext is disturbing as completely disjoint from the event-processing it relates to def scheduleAcsChangePublication( recordSequencerCounter: SequencerCounter, timestamp: CantonTimestamp, requestCounter: RequestCounter, commitSet: CommitSet, - ): Unit = TraceContext.withNewTraceContext { implicit traceContext => + )(implicit traceContext: TraceContext): Unit = taskScheduler.scheduleTask( AcsChangePublicationTask(recordSequencerCounter, timestamp)(Some((requestCounter, commitSet))) ) - } /** Schedules an empty acs change publication task to be published to the `acsChangeListener`. 
*/ def scheduleEmptyAcsChangePublication( sequencerCounter: SequencerCounter, timestamp: CantonTimestamp, - ): Unit = TraceContext.withNewTraceContext { implicit traceContext => + )(implicit traceContext: TraceContext): Unit = if (sequencerCounter >= initSc) { taskScheduler.scheduleTask( AcsChangePublicationTask(sequencerCounter, timestamp)(None) ) } - } private sealed trait PublicationTask extends TaskScheduler.TimedTask @@ -407,7 +404,13 @@ class RecordOrderPublisher( requestCounterCommitSetPairO.map(_._1.unwrap).getOrElse(RecordTime.lowestTiebreaker), ) val waitForLastEventPersisted = lastPublishedPersisted(sequencerCounter, timestamp) - acsChangeListener.get.foreach(_.publish(recordTime, acsChange, waitForLastEventPersisted)) + // The trace context is deliberately generated here instead of continuing the previous one + // to unlink the asynchronous acs commitment processing from message processing trace. + TraceContext.withNewTraceContext { implicit traceContext => + acsChangeListener.get.foreach( + _.publish(recordTime, acsChange, waitForLastEventPersisted) + ) + } } FutureUnlessShutdown.outcomeF(acsChangePublish) } diff --git a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/ledger/api/LedgerApiJdbcUrl.scala b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/ledger/api/LedgerApiJdbcUrl.scala index 21329e176934..d9a43447fdd9 100644 --- a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/ledger/api/LedgerApiJdbcUrl.scala +++ b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/ledger/api/LedgerApiJdbcUrl.scala @@ -28,7 +28,6 @@ object LedgerApiJdbcUrl { private val userKey = "user" private val passwordKey = "password" private val databaseNameKey = "databaseName" - private val currentSchema = "currentSchema" private val nonParametersProperties = Set( serverNameKey, @@ -174,10 +173,6 @@ object LedgerApiJdbcUrl { def isDefined(key: String): Boolean = options.keySet.find(_.equalsIgnoreCase(key)).fold(false)(_ => true) - def replace(key: String, value: String): UrlBuilder = - // remove any existing values with this key and add the provided - copy(options = options.filterNot(_._1.equalsIgnoreCase(key)) + (key -> value)) - def addIfMissing(key: String, defaultValue: => Option[String]): UrlBuilder = if (isDefined(key)) this else diff --git a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/ledger/api/client/ValueRemapper.scala b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/ledger/api/client/ValueRemapper.scala deleted file mode 100644 index fc5a1a6bef55..000000000000 --- a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/ledger/api/client/ValueRemapper.scala +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright (c) 2024 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.participant.ledger.api.client - -import com.daml.ledger.api.v2.event.CreatedEvent -import com.daml.ledger.api.v2.value - -/** Utilities for modifying ledger api values, e.g. to make them suitable for importing into canton: - * - * Contract ids: Importing a set of contracts often requires awareness of contract dependencies and references to - * ensure that when contract ids are modified that references are updates to reflect such modifications. 
- * - * Party ids: Contracts from non-canton daml ledgers are incompatible with canton party ids that contain additional - * information (fingerprint suffix). Remapping party ids enables updating embedded party id types to - * conform to the canton party id format. - */ -object ValueRemapper { - - /** Helper for CreatedEvents. - */ - def convertEvent(remapContractId: String => String, mapPartyId: String => String)( - createdEvent: CreatedEvent - ): CreatedEvent = - createdEvent.copy( - contractId = remapContractId(createdEvent.contractId), - signatories = createdEvent.signatories.map(mapPartyId), - observers = createdEvent.observers.map(mapPartyId), - witnessParties = createdEvent.witnessParties.map(mapPartyId), - createArguments = - createdEvent.createArguments.map(remapRecord(remapContractId, mapPartyId, _)), - contractKey = createdEvent.contractKey.map(remapValue(remapContractId, mapPartyId)), - ) - - /** Helper specifically useful for CreatedEvents that contain arguments as a record. - */ - def remapRecord( - remapContractId: String => String, - remapParty: String => String, - record: value.Record, - ): value.Record = - record match { - case value.Record(id, fields) => - val remappedFields = fields.map { case value.RecordField(label, v) => - value.RecordField(label, v.map(remapValue(remapContractId, remapParty))) - } - value.Record(id, remappedFields) - } - - /** Helper for arbitrary ledger api values. - */ - def remapValue(remapContractId: String => String, remapParty: String => String)( - v: value.Value - ): value.Value = - value.Value(v.sum match { - case value.Value.Sum.ContractId(cid) => - value.Value.Sum.ContractId(remapContractId(cid)) - case value.Value.Sum.Party(party) => - value.Value.Sum.Party(remapParty(party)) - case value.Value.Sum.Record(record) => - value.Value.Sum.Record(remapRecord(remapContractId, remapParty, record)) - case value.Value.Sum.List(value.List(seq)) => - value.Value.Sum.List(value.List(seq.map(remapValue(remapContractId, remapParty)))) - case value.Value.Sum.TextMap(value.TextMap(entries)) => - value.Value.Sum.TextMap(value.TextMap(entries.map { case value.TextMap.Entry(k, v) => - value.TextMap.Entry(k, v.map(remapValue(remapContractId, remapParty))) - })) - case value.Value.Sum.GenMap(value.GenMap(entries)) => - value.Value.Sum.GenMap(value.GenMap(entries.map { case value.GenMap.Entry(k, v) => - value.GenMap.Entry(k, v.map(remapValue(remapContractId, remapParty))) - })) - case value.Value.Sum.Variant(value.Variant(id, constructor, v)) => - value.Value.Sum.Variant( - value.Variant(id, constructor, v.map(remapValue(remapContractId, remapParty))) - ) - case value.Value.Sum.Optional(value.Optional(v)) => - value.Value.Sum.Optional(value.Optional(v.map(remapValue(remapContractId, remapParty)))) - case v => v - }) - -} diff --git a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/package.scala b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/package.scala deleted file mode 100644 index dd98cc0ce634..000000000000 --- a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/package.scala +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright (c) 2024 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton - -import com.digitalasset.canton.data.Offset -import com.digitalasset.daml.lf.data.Time - -package object participant { - - // Sync event and offset used by participant state ReadService api - type LedgerSyncOffset = Offset - val LedgerSyncOffset: Offset.type = data.Offset - - // Ledger record time is "single-dimensional" - type LedgerSyncRecordTime = Time.Timestamp - val LedgerSyncRecordTime: Time.Timestamp.type = Time.Timestamp -} diff --git a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/MessageDispatcher.scala b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/MessageDispatcher.scala index 4aef99edd106..0832b349c2d3 100644 --- a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/MessageDispatcher.scala +++ b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/MessageDispatcher.scala @@ -581,7 +581,7 @@ trait MessageDispatcher { this: NamedLogging => events: Seq[RawProtocolEvent] )(implicit traceContext: TraceContext): FutureUnlessShutdown[ProcessingResult] = { val receipts = events.mapFilter { - case Deliver(counter, timestamp, _domainId, messageIdO, batch, _, _) => + case Deliver(counter, timestamp, _domainId, messageIdO, _batch, _, _) => // The event was submitted by the current participant iff the message ID is set. messageIdO.map(_ -> SequencedSubmission(counter, timestamp)) case DeliverError( diff --git a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/ProtocolProcessor.scala b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/ProtocolProcessor.scala index 0858ef62bda9..b73462cf2adb 100644 --- a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/ProtocolProcessor.scala +++ b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/ProtocolProcessor.scala @@ -617,7 +617,7 @@ abstract class ProtocolProcessor[ sc: SequencerCounter, batch: steps.RequestBatch, )(implicit traceContext: TraceContext): HandlerResult = { - val RequestAndRootHashMessage(viewMessages, rootHashMessage, mediatorId, _isReceipt) = batch + val RequestAndRootHashMessage(_viewMessages, _rootHashMessage, _mediatorId, _isReceipt) = batch val requestId = RequestId(ts) if (precedesCleanReplay(requestId)) { @@ -1651,7 +1651,7 @@ abstract class ProtocolProcessor[ show"${steps.requestKind.unquoted} request at $requestId: Received event at $resultTimestamp for request that is invalid" ) Right(default) - case err => Left(processorError) + case _err => Left(processorError) } } diff --git a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/conflictdetection/LockableState.scala b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/conflictdetection/LockableState.scala index 133b9d918f6d..833dfb78c4e7 100644 --- a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/conflictdetection/LockableState.scala +++ b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/conflictdetection/LockableState.scala @@ -116,8 +116,6 @@ private[conflictdetection] object LockableState { type PendingWriteCounter = counters.PendingWriteCounter val PendingWriteCounter: 
counters.PendingWriteCounter.type = counters.PendingWriteCounter - - private[this] def uintToString(i: Int): String = (i.toLong & 0xffffffffL).toString } private[conflictdetection] final case class ImmutableLockableState[Status <: PrettyPrinting]( diff --git a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/conflictdetection/LockableStates.scala b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/conflictdetection/LockableStates.scala index f86666a79d87..560514251bfd 100644 --- a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/conflictdetection/LockableStates.scala +++ b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/conflictdetection/LockableStates.scala @@ -560,7 +560,7 @@ private[conflictdetection] class LockableStates[ } def assertVersionedStateIsLatestIfNoPendingWrites(): Unit = { - val withoutPendingWrites = states.filterNot { case (id, state) => state.hasPendingWrites } + val withoutPendingWrites = states.filterNot { case (_id, state) => state.hasPendingWrites } // Await on the store Futures to make sure that there's no context switch in the conflict detection thread // This ensures that invariant checking runs atomically. val storeSnapshot = diff --git a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/reassignment/ReassignmentProcessingSteps.scala b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/reassignment/ReassignmentProcessingSteps.scala index 2944c79766be..8ac878ca7233 100644 --- a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/reassignment/ReassignmentProcessingSteps.scala +++ b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/reassignment/ReassignmentProcessingSteps.scala @@ -569,7 +569,7 @@ object ReassignmentProcessingSteps { submitterLf: LfPartyId, hash: RootHash, ) extends ReassignmentProcessorError { - private def kind = reassignmentId.map(id => "assign").getOrElse("unassign") + private def kind = reassignmentId.map(_id => "assign").getOrElse("unassign") override def message: String = s"Cannot $kind $reassignmentId: duplicatehash" } diff --git a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/submission/ChangeId.scala b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/submission/ChangeId.scala index 15ee0a339bd9..7d4470f11373 100644 --- a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/submission/ChangeId.scala +++ b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/submission/ChangeId.scala @@ -28,7 +28,6 @@ object ChangeIdHash { ) } - @SuppressWarnings(Array("com.digitalasset.canton.SlickString")) // LfHash is at most 64 chars implicit val setParameterChangeId: SetParameter[ChangeIdHash] = (changeIdHash, pp) => pp.setString(changeIdHash.hash.toHexString) } diff --git a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/submission/CommandDeduplicator.scala b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/submission/CommandDeduplicator.scala index 37ab6b412b5d..e6fbe6731e23 100644 --- 
a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/submission/CommandDeduplicator.scala +++ b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/submission/CommandDeduplicator.scala @@ -7,9 +7,10 @@ import cats.Eval import cats.data.EitherT import cats.syntax.either.* import com.digitalasset.canton.LedgerSubmissionId -import com.digitalasset.canton.data.{CantonTimestamp, DeduplicationPeriod} +import com.digitalasset.canton.data.{CantonTimestamp, DeduplicationPeriod, Offset} import com.digitalasset.canton.ledger.participant.state.ChangeId import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} +import com.digitalasset.canton.participant.GlobalOffset import com.digitalasset.canton.participant.protocol.submission.CommandDeduplicator.{ AlreadyExists, DeduplicationFailed, @@ -19,7 +20,6 @@ import com.digitalasset.canton.participant.protocol.submission.CommandDeduplicat import com.digitalasset.canton.participant.store.CommandDeduplicationStore.OffsetAndPublicationTime import com.digitalasset.canton.participant.store.* import com.digitalasset.canton.participant.sync.UpstreamOffsetConvert -import com.digitalasset.canton.participant.{GlobalOffset, LedgerSyncOffset} import com.digitalasset.canton.platform.indexer.parallel.PostPublishData import com.digitalasset.canton.time.Clock import com.digitalasset.canton.tracing.TraceContext @@ -199,13 +199,13 @@ class CommandDeduplicatorImpl( } def dedupOffset( - offset: LedgerSyncOffset + offset: Offset ): EitherT[Future, DeduplicationFailed, GlobalOffset] = { def checkAgainstPruning( dedupOffset: GlobalOffset ): EitherT[Future, DeduplicationFailed, GlobalOffset] = EitherTUtil.leftSubflatMap(store.value.latestPruning().toLeft(unprunedDedupOffset)) { - case OffsetAndPublicationTime(prunedOffset, prunedPublicationTime) => + case OffsetAndPublicationTime(prunedOffset, _prunedPublicationTime) => Either.cond( prunedOffset <= dedupOffset, prunedOffset, diff --git a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/submission/InFlightSubmission.scala b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/submission/InFlightSubmission.scala index 4c51fedc6106..741a2afdf1d1 100644 --- a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/submission/InFlightSubmission.scala +++ b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/submission/InFlightSubmission.scala @@ -6,10 +6,7 @@ package com.digitalasset.canton.participant.protocol.submission import cats.Functor import com.digitalasset.canton.data.CantonTimestamp import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} -import com.digitalasset.canton.participant.store.InFlightSubmissionStore.{ - InFlightByMessageId, - InFlightBySequencingInfo, -} +import com.digitalasset.canton.participant.store.InFlightSubmissionStore.InFlightByMessageId import com.digitalasset.canton.protocol.RootHash import com.digitalasset.canton.sequencing.protocol.MessageId import com.digitalasset.canton.store.db.DbSerializationException @@ -57,11 +54,6 @@ final case class InFlightSubmission[+SequencingInfo <: SubmissionSequencingInfo] /** Whether the submission's sequencing has been observed */ def isSequenced: Boolean = sequencingInfo.isSequenced - def mapSequencingInfo[B <: SubmissionSequencingInfo]( - f: SequencingInfo => B - ): InFlightSubmission[B] = - 
setSequencingInfo(f(sequencingInfo)) - def traverseSequencingInfo[F[_], B <: SubmissionSequencingInfo](f: SequencingInfo => F[B])( implicit F: Functor[F] ): F[InFlightSubmission[B]] = @@ -91,12 +83,8 @@ final case class InFlightSubmission[+SequencingInfo <: SubmissionSequencingInfo] ) def referenceByMessageId: InFlightByMessageId = InFlightByMessageId(submissionDomain, messageId) - - def referenceBySequencingInfo(implicit - ev: SequencingInfo <:< SequencedSubmission - ): InFlightBySequencingInfo = - InFlightBySequencingInfo(submissionDomain, ev(sequencingInfo)) } + object InFlightSubmission { implicit def getResultInFlightSubmission[SequencingInfo <: SubmissionSequencingInfo: GetResult]( implicit getResultTraceContext: GetResult[SerializableTraceContext] diff --git a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/submission/InFlightSubmissionTracker.scala b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/submission/InFlightSubmissionTracker.scala index b08c18a0e856..60d586f809f9 100644 --- a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/submission/InFlightSubmissionTracker.scala +++ b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/submission/InFlightSubmissionTracker.scala @@ -281,7 +281,7 @@ class InFlightSubmissionTracker( timelyRejects <- FutureUnlessShutdown .outcomeF(store.value.lookupUnsequencedUptoUnordered(domainId, upToInclusive)) events = timelyRejects.map(timelyRejectionEventFor) - skippedE <- participantEventPublisher.publishDomainRelatedEvents(events) + _skippedE <- participantEventPublisher.publishDomainRelatedEvents(events) } yield () private[this] def timelyRejectionEventFor( diff --git a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/validation/RecipientsValidator.scala b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/validation/RecipientsValidator.scala index e83a987deb3f..0ec8b131abf8 100644 --- a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/validation/RecipientsValidator.scala +++ b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/validation/RecipientsValidator.scala @@ -406,7 +406,7 @@ class RecipientsValidator[I]( informeeParticipantsOfPositionAndParty(viewPosition) val informeeRecipients = informeeParticipantsOfParty.toList - .flatMap { case (party, participants) => + .flatMap { case (_party, participants) => participants.map(MemberRecipient) } .toSet[Recipient] diff --git a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/pruning/AcsCommitmentProcessor.scala b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/pruning/AcsCommitmentProcessor.scala index 122a16abaeb1..70403fa8bf1e 100644 --- a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/pruning/AcsCommitmentProcessor.scala +++ b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/pruning/AcsCommitmentProcessor.scala @@ -8,7 +8,6 @@ import cats.syntax.contravariantSemigroupal.* import cats.syntax.foldable.* import cats.syntax.functor.* import cats.syntax.parallel.* -import cats.syntax.traverse.* import cats.syntax.validated.* import com.daml.error.* import com.daml.metrics.api.MetricsContext @@ -234,11 +233,12 @@ class 
AcsCommitmentProcessor private ( /* The sequencer timestamp for which we are ready to process remote commitments. Continuously updated as new local commitments are computed. All received remote commitments with the timestamp lower than this one will either have been processed or queued. - Note that since access to this variable isn't synchronized, we don't guarantee that every remote commitment will - be processed once this moves. However, such commitments will not be lost, as they will be put in the persistent - buffer and get picked up by `processBuffered` eventually. + Note that we don't guarantee that every remote commitment will be processed once this moves. However, such + commitments will not be lost, as they will be put in the persistent buffer and get picked up by `processBuffered` + eventually. */ - @volatile private var readyForRemote: Option[CantonTimestampSecond] = None + private val readyForRemote: AtomicReference[Option[CantonTimestampSecond]] = + new AtomicReference[Option[CantonTimestampSecond]](None) /* End of the last period until which we have processed, sent and persisted all local and remote commitments. It's accessed only through chained futures, such that all accesses are synchronized */ @@ -725,10 +725,15 @@ class AcsCommitmentProcessor private ( // This is a replay of an already processed ACS change, ignore FutureUnlessShutdown.unit } else { - // Serialize the access to the DB only after having obtained the reconciliation intervals and topology snapshot. - // During crash recovery, the topology client may only be able to serve the intervals and snapshots - // for re-published ACS changes after some messages have been processed, - // which may include ACS commitments that go through the same queue. + // During crash recovery, it should be that only in tests could we have the situation where we replay + // ACS changes while the ledger end lags behind the replayed change timestamp. In normal processing, + // we publish ACS changes only after the ledger end has moved, which should mean that all topology events + // for a given timestamp have been processed before processing the ACS change for the same timestamp. + // + // In that former case, when the ledger end lags behind, the topology client may only be able to serve + // the intervals and snapshots for re-published ACS changes after some messages have been processed, + // which may include ACS commitments that go through the same queue. Therefore, we serialize the access to + // the DB only after having obtained the reconciliation intervals and topology snapshot. 
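// Sketch (not part of this patch): the readyForRemote change above replaces a
// @volatile var, whose unsynchronized read-modify-write allowed a lost update,
// with an AtomicReference whose update is monotone, so the watermark can only move
// forward under concurrent calls. Watermark below is a hypothetical, self-contained
// illustration of that pattern, not Canton code.
import java.util.concurrent.atomic.AtomicReference

final class Watermark[T](implicit ord: Ordering[T]) {
  private val ref = new AtomicReference[Option[T]](None)

  /** Advance to `ts` unless a larger value is already set; returns the value in effect. */
  def advance(ts: T): T =
    ref
      .updateAndGet {
        case Some(old) if ord.gt(old, ts) => Some(old) // out-of-order caller loses
        case _ => Some(ts)
      }
      .getOrElse(ts) // unreachable fallback: the updated value is always Some

  def current: Option[T] = ref.get()
}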
dbQueue.executeUS( Policy.noisyInfiniteRetryUS( performPublish(acsSnapshot, reconciliationIntervals, periodEndO), @@ -974,10 +979,9 @@ class AcsCommitmentProcessor private ( )(implicit traceContext: TraceContext): FutureUnlessShutdown[Unit] = dbQueue .execute( - // Make sure that the ready-for-remote check is atomic with buffering the commitment { - val readyToCheck = readyForRemote.exists(_ >= commitment.period.toInclusive) - + // Make sure that the ready-for-remote check is atomic with buffering the commitment + val readyToCheck = readyForRemote.get().exists(_ >= commitment.period.toInclusive) if (readyToCheck) { // Do not sequentialize the checking Future.successful(checkMatchAndMarkSafe(List(commitment))) @@ -990,14 +994,24 @@ class AcsCommitmentProcessor private ( ) .flatMap(FutureUnlessShutdown.outcomeF) - private def indicateReadyForRemote(timestamp: CantonTimestampSecond): Unit = { - readyForRemote.foreach(oldTs => - assert( - oldTs <= timestamp, - s"out of order timestamps in the commitment processor: $oldTs and $timestamp", + private def indicateReadyForRemote(timestamp: CantonTimestampSecond)(implicit + traceContext: TraceContext + ): Unit = { + val updated = readyForRemote.updateAndGet { oldTs => + oldTs.fold(Some(timestamp))(oldReadyForRemote => + if (oldReadyForRemote > timestamp) Some(oldReadyForRemote) else Some(timestamp) ) - ) - readyForRemote = Some(timestamp) + } + + updated match { + case Some(newTimestamp) => + if (newTimestamp != timestamp) { + logger.debug( + s"out of order timestamps when updating ready for remote in the commitment processor: $newTimestamp and $timestamp. Ready-for-remote remains at $newTimestamp" + ) + } + case None => + } } private def processBuffered( @@ -1340,7 +1354,7 @@ class AcsCommitmentProcessor private ( })(_.value))(_.value) val delayMillis = if (maxDelayMillis > 0) rand.nextInt(maxDelayMillis) else 0 - def sendUnlessClosing()(ts: CantonTimestamp) = { + def sendUnlessClosing() = { implicit val metricsContext: MetricsContext = MetricsContext("type" -> "send-commitment") performUnlessClosingUSF(functionFullName) { def message = s"Failed to send commitment message batch for period $period" @@ -1381,7 +1395,7 @@ class AcsCommitmentProcessor private ( .logOnFailureUnlessShutdown( clock .scheduleAfter( - sendUnlessClosing(), + _ => sendUnlessClosing(), java.time.Duration.ofMillis(delayMillis.toLong), ), s"Failed to schedule sending commitment message batch for period $period at time ${clock.now @@ -1608,12 +1622,12 @@ object AcsCommitmentProcessor extends HasLoggerName { exitOnFatalFailures, maxCommitmentSendDelayMillis, ) - // TODO(#21502) We trigger the processing of the buffered commitments, but we do not wait for it to complete here, - // because, if processing buffered required topology updates that go through the same queue, we'd create a deadlock. - // It should be that only in tests could we have the situation where we replay ACS changes while the ledger end - // lags behind the replayed change timestamp. In normal processing, we publish ACS changes only after the ledger - // end has moved, which should mean that all topology events for a given timestamp have been processed before - // processing the ACS change for the same timestamp. We should validate this behavior. + // We trigger the processing of the buffered commitments, but we do not wait for it to complete here, + // because, if processing buffered required topology updates that go through the same queue, we'd create a deadlock. 
+ // Only tests should be able to produce the situation where we replay ACS changes while the ledger end + // lags behind the replayed change timestamp. In normal processing, we publish ACS changes only after the ledger + // end has moved, which should mean that all topology events for a given timestamp have been processed before + // processing the ACS change for the same timestamp. _ = processor.processBufferedAtInit(endOfLastProcessedPeriod) _ = loggingContext.info( s"Initialized the ACS commitment processor DB queue and started processing buffered commitments until $endOfLastProcessedPeriod" diff --git a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/pruning/PruningProcessor.scala b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/pruning/PruningProcessor.scala index 499adf540110..fc65f3f90786 100644 --- a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/pruning/PruningProcessor.scala +++ b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/pruning/PruningProcessor.scala @@ -510,7 +510,7 @@ class PruningProcessor( val PruningCutoffs.DomainOffset(state, lastTimestamp, lastRequestCounter) = domainOffset logger.info( - show"Pruning ${state.domainId.item} up to $lastTimestamp and request counter $lastRequestCounter" + show"Pruning ${state.indexedDomain.domainId} up to $lastTimestamp and request counter $lastRequestCounter" ) logger.debug("Pruning contract store...") diff --git a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/ActiveContractStore.scala b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/ActiveContractStore.scala index 622dd27250b8..1d3a3915987d 100644 --- a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/ActiveContractStore.scala +++ b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/ActiveContractStore.scala @@ -5,7 +5,7 @@ package com.digitalasset.canton.participant.store import cats.syntax.foldable.* import cats.syntax.parallel.* -import com.digitalasset.canton.config.CantonRequireTypes.{LengthLimitedString, String100, String36} +import com.digitalasset.canton.config.CantonRequireTypes.{LengthLimitedString, String36} import com.digitalasset.canton.data.CantonTimestamp import com.digitalasset.canton.logging.ErrorLoggingContext import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} @@ -330,25 +330,13 @@ object ActiveContractStore { type ContractState = StateChange[Status] val ContractState: StateChange.type = StateChange - sealed trait ChangeType { - def name: String - - // lazy val so that `kind` is initialized first in the subclasses - final lazy val toDbPrimitive: String100 = - // The Oracle DB schema allows up to 100 chars; Postgres, H2 map this to an enum - String100.tryCreate(name) - } + sealed abstract class ChangeType(val name: String) object ChangeType { - case object Activation extends ChangeType { - override val name = "activation" - } - - case object Deactivation extends ChangeType { - override val name = "deactivation" - } + case object Activation extends ChangeType("activation") + case object Deactivation extends ChangeType("deactivation") - implicit val setParameterChangeType: SetParameter[ChangeType] = (v, pp) => pp >> v.toDbPrimitive + implicit val setParameterChangeType: SetParameter[ChangeType] = (v, pp) => pp >> v.name implicit
val getResultChangeType: GetResult[ChangeType] = GetResult(r => r.nextString() match { case ChangeType.Activation.name => ChangeType.Activation diff --git a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/DamlLfSerializers.scala b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/DamlLfSerializers.scala deleted file mode 100644 index b8b3da69de07..000000000000 --- a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/DamlLfSerializers.scala +++ /dev/null @@ -1,110 +0,0 @@ -// Copyright (c) 2024 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.participant.store - -import com.digitalasset.canton.protocol -import com.digitalasset.canton.protocol.{ - LfActionNode, - LfContractInst, - LfNodeId, - LfVersionedTransaction, -} -import com.digitalasset.daml.lf.CantonOnly -import com.digitalasset.daml.lf.transaction.{ - Node, - TransactionCoder, - TransactionOuterClass, - Versioned, -} -import com.digitalasset.daml.lf.value.Value.ContractInstance -import com.digitalasset.daml.lf.value.ValueCoder.{DecodeError, EncodeError} -import com.google.protobuf.ByteString - -/** Serialization and deserialization utilities for transactions and contracts. - * Only intended for use within database storage. - * Should not be used for hashing as no attempt is made to keep the serialization deterministic. - * Errors are returned as an Either but it is expected callers will eventually throw a [[com.digitalasset.canton.store.db.DbSerializationException]] or [[com.digitalasset.canton.store.db.DbDeserializationException]]. - * Currently throws [[com.google.protobuf.InvalidProtocolBufferException]] if the `parseFrom` operations fail to read the provided bytes. 
- */ -private[store] object DamlLfSerializers { - - def serializeTransaction( - versionedTransaction: LfVersionedTransaction - ): Either[EncodeError, ByteString] = - TransactionCoder - .encodeTransaction(tx = versionedTransaction) - .map(_.toByteString) - - def deserializeTransaction(bytes: ByteString): Either[DecodeError, LfVersionedTransaction] = - TransactionCoder - .decodeTransaction( - protoTx = TransactionOuterClass.Transaction.parseFrom(bytes) - ) - - def serializeContract( - contract: LfContractInst - ): Either[EncodeError, ByteString] = - TransactionCoder - .encodeContractInstance(coinst = contract) - .map(_.toByteString) - - def deserializeContract( - bytes: ByteString - ): Either[DecodeError, Versioned[ContractInstance]] = - TransactionCoder.decodeContractInstance( - protoCoinst = TransactionOuterClass.ContractInstance.parseFrom(bytes) - ) - - private def deserializeNode( - proto: TransactionOuterClass.Node - ): Either[DecodeError, protocol.LfNode] = - for { - version <- TransactionCoder.decodeVersion(proto.getVersion) - idAndNode <- CantonOnly.decodeVersionedNode( - version, - proto, - ) - (_, node) = idAndNode - } yield node - - def deserializeCreateNode( - proto: TransactionOuterClass.Node - ): Either[DecodeError, protocol.LfNodeCreate] = - for { - node <- deserializeNode(proto) - createNode <- node match { - case create: Node.Create => Right(create) - case _ => - Left( - DecodeError(s"Failed to deserialize create node: wrong node type `${node.nodeType}`") - ) - } - } yield createNode - - def deserializeExerciseNode( - proto: TransactionOuterClass.Node - ): Either[DecodeError, protocol.LfNodeExercises] = - for { - node <- deserializeNode(proto) - exerciseNode <- node match { - case exercise: Node.Exercise => Right(exercise) - case _ => - Left( - DecodeError(s"Failed to deserialize exercise node: wrong node type `${node.nodeType}`") - ) - } - } yield exerciseNode - - def serializeNode( - node: LfActionNode - ): Either[EncodeError, ByteString] = - CantonOnly - .encodeNode( - node.version, - LfNodeId(0), - node, - ) - .map(_.toByteString) - -} diff --git a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/DamlPackageStore.scala b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/DamlPackageStore.scala index a18246b8bf09..c600d208e644 100644 --- a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/DamlPackageStore.scala +++ b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/DamlPackageStore.scala @@ -114,7 +114,6 @@ object DamlPackageStore { new InMemoryDamlPackageStore(loggerFactory) case pool: DbStorage => new DbDamlPackageStore( - parameterConfig.batchingConfig.maxItemsInSqlClause, pool, parameterConfig.processingTimeouts, futureSupervisor, diff --git a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/DomainConnectionConfigStore.scala b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/DomainConnectionConfigStore.scala index e78837f69cd8..740a08f85474 100644 --- a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/DomainConnectionConfigStore.scala +++ b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/DomainConnectionConfigStore.scala @@ -78,7 +78,6 @@ object DomainConnectionConfigStore { def isActive: Boolean } - 
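The ChangeType simplification above and the Status mapping just below follow the same recipe: an ADT whose constructor carries its database name, plus a Slick SetParameter/GetResult pair over that name. A consolidated, self-contained sketch of the recipe, assuming only Slick on the classpath; the Flavour ADT and its names are invented for illustration:

import slick.jdbc.{GetResult, SetParameter}

sealed abstract class Flavour(val name: String)
object Flavour {
  case object Activation extends Flavour("activation")
  case object Deactivation extends Flavour("deactivation")

  // Write side: bind the plain name string (the column is an enum on Postgres/H2).
  implicit val setFlavour: SetParameter[Flavour] = (v, pp) => pp >> v.name

  // Read side: total over the known names; fail loudly on anything else rather
  // than silently picking a default.
  implicit val getFlavour: GetResult[Flavour] = GetResult(_.nextString() match {
    case Activation.name => Activation
    case Deactivation.name => Deactivation
    case other => throw new IllegalArgumentException(s"Unexpected flavour: $other")
  })
}

Carrying the name as a constructor parameter, as the diff now does, also removes the initialization-order wrinkle that forced the old version's lazy val.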
@SuppressWarnings(Array("com.digitalasset.canton.SlickString")) implicit val setParameterStatus: SetParameter[Status] = (f, pp) => pp >> f.dbType.toString implicit val getResultStatus: GetResult[Status] = GetResult { r => diff --git a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/InFlightSubmissionStore.scala b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/InFlightSubmissionStore.scala index 910d9b4b50a0..d81acd0ddb12 100644 --- a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/InFlightSubmissionStore.scala +++ b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/InFlightSubmissionStore.scala @@ -4,7 +4,6 @@ package com.digitalasset.canton.participant.store import cats.data.{EitherT, OptionT} -import com.digitalasset.canton.config.RequireTypes.PositiveNumeric import com.digitalasset.canton.config.{BatchAggregatorConfig, ProcessingTimeout} import com.digitalasset.canton.data.CantonTimestamp import com.digitalasset.canton.lifecycle.FutureUnlessShutdown @@ -187,7 +186,6 @@ trait InFlightSubmissionStore extends AutoCloseable { object InFlightSubmissionStore { def apply( storage: Storage, - maxItemsInSqlInClause: PositiveNumeric[Int], registerBatchAggregatorConfig: BatchAggregatorConfig, releaseProtocolVersion: ReleaseProtocolVersion, timeouts: ProcessingTimeout, @@ -199,7 +197,6 @@ object InFlightSubmissionStore { case jdbc: DbStorage => new DbInFlightSubmissionStore( jdbc, - maxItemsInSqlInClause, registerBatchAggregatorConfig, releaseProtocolVersion, timeouts, diff --git a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/ParticipantNodePersistentState.scala b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/ParticipantNodePersistentState.scala index a65520fc95c2..ee9d2e715bbe 100644 --- a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/ParticipantNodePersistentState.scala +++ b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/ParticipantNodePersistentState.scala @@ -85,7 +85,6 @@ object ParticipantNodePersistentState extends HasLoggerName { ) val inFlightSubmissionStore = InFlightSubmissionStore( storage, - batching.maxItemsInSqlClause, batching.aggregator, releaseProtocolVersion, timeouts, diff --git a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/ReassignmentStore.scala b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/ReassignmentStore.scala index b38020956247..197e8ffc5720 100644 --- a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/ReassignmentStore.scala +++ b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/ReassignmentStore.scala @@ -93,7 +93,7 @@ trait ReassignmentStore extends ReassignmentLookup { * * @param timeOfCompletion Provides the request counter and activeness time of the committed assignment request. 
*/ - def completeReasignment(reassignmentId: ReassignmentId, timeOfCompletion: TimeOfChange)(implicit + def completeReassignment(reassignmentId: ReassignmentId, timeOfCompletion: TimeOfChange)(implicit traceContext: TraceContext ): CheckedT[Future, Nothing, ReassignmentStoreError, Unit] @@ -105,10 +105,10 @@ trait ReassignmentStore extends ReassignmentLookup { ): Future[Unit] /** Removes all completions of reassignments that have been triggered by requests with at least the given counter. - * This method must not be called concurrently with [[completeReasignment]], but may be called concurrently with + * This method must not be called concurrently with [[completeReassignment]], but may be called concurrently with * [[addReassignment]] and [[addUnassignmentResult]]. * - * Therefore, this method need not be linearizable w.r.t. [[completeReasignment]]. + * Therefore, this method need not be linearizable w.r.t. [[completeReassignment]]. * For example, if two requests `rc1` and `rc2` complete two reassignments while [[deleteCompletionsSince]] is running for * some `rc <= rc1, rc2`, then there are no guarantees which of the completions of `rc1` and `rc2` remain. */ diff --git a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/SyncDomainEphemeralState.scala b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/SyncDomainEphemeralState.scala index d82c45a39c92..7087fcba381c 100644 --- a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/SyncDomainEphemeralState.scala +++ b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/SyncDomainEphemeralState.scala @@ -120,14 +120,14 @@ class SyncDomainEphemeralState( val timelyRejectNotifier: TimelyRejectNotifier = TimelyRejectNotifier( participantNodeEphemeralState, - persistentState.domainId.item, - Some(startingPoints.processing.prenextTimestamp), + persistentState.indexedDomain.domainId, + startingPoints.processing.prenextTimestamp, loggerFactory, ) val recordOrderPublisher: RecordOrderPublisher = new RecordOrderPublisher( - persistentState.domainId.item, + persistentState.indexedDomain.domainId, startingPoints.processing.nextSequencerCounter, startingPoints.processing.prenextTimestamp, ledgerApiIndexer, diff --git a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/SyncDomainEphemeralStateFactory.scala b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/SyncDomainEphemeralStateFactory.scala index b50baa4fc3d7..543b4be6df0b 100644 --- a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/SyncDomainEphemeralStateFactory.scala +++ b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/SyncDomainEphemeralStateFactory.scala @@ -62,13 +62,15 @@ class SyncDomainEphemeralStateFactoryImpl( closeContext: CloseContext, ): Future[SyncDomainEphemeralState] = for { - _ <- ledgerApiIndexer.value.ensureNoProcessingForDomain(persistentState.domainId.domainId) + _ <- ledgerApiIndexer.value.ensureNoProcessingForDomain( + persistentState.indexedDomain.domainId + ) startingPoints <- SyncDomainEphemeralStateFactory.startingPoints( persistentState.requestJournalStore, persistentState.sequencedEventStore, tc => ledgerApiIndexer.value.ledgerApiStore.value - .domainIndex(persistentState.domainId.domainId)(tc), + .domainIndex(persistentState.indexedDomain.domainId)(tc), ) _ <-
SyncDomainEphemeralStateFactory.cleanupPersistentState(persistentState, startingPoints) } yield { diff --git a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/SyncDomainPersistentState.scala b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/SyncDomainPersistentState.scala index c9357f054c1f..2fd1939deb6a 100644 --- a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/SyncDomainPersistentState.scala +++ b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/SyncDomainPersistentState.scala @@ -29,7 +29,7 @@ trait SyncDomainPersistentState extends NamedLogging with AutoCloseable { /** The crypto operations used on the domain */ def pureCryptoApi: CryptoPureApi - def domainId: IndexedDomain + def indexedDomain: IndexedDomain def staticDomainParameters: StaticDomainParameters def enableAdditionalConsistencyChecks: Boolean def contractStore: ContractStore diff --git a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbAcsCommitmentStore.scala b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbAcsCommitmentStore.scala index 8dadc146ab22..b2b216c9d4fd 100644 --- a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbAcsCommitmentStore.scala +++ b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbAcsCommitmentStore.scala @@ -37,7 +37,7 @@ import com.digitalasset.canton.resource.DbStorage.Implicits.BuilderChain.{ mergeBuildersIntoChain, toSQLActionBuilderChain, } -import com.digitalasset.canton.resource.DbStorage.{DbAction, Profile, SQLActionBuilderChain} +import com.digitalasset.canton.resource.DbStorage.{DbAction, SQLActionBuilderChain} import com.digitalasset.canton.resource.{DbStorage, DbStore} import com.digitalasset.canton.serialization.DeterministicEncoding import com.digitalasset.canton.store.IndexedDomain @@ -55,7 +55,7 @@ import scala.concurrent.{ExecutionContext, Future} class DbAcsCommitmentStore( override protected val storage: DbStorage, - override val domainId: IndexedDomain, + override val indexedDomain: IndexedDomain, protocolVersion: ProtocolVersion, override protected val timeouts: ProcessingTimeout, futureSupervisor: FutureSupervisor, @@ -106,7 +106,7 @@ class DbAcsCommitmentStore( ): Future[Iterable[(CommitmentPeriod, AcsCommitment.CommitmentType)]] = { val query = sql""" select from_exclusive, to_inclusive, commitment from par_computed_acs_commitments - where domain_id = $domainId + where domain_idx = $indexedDomain and counter_participant = $counterParticipant and from_exclusive < ${period.toInclusive} and to_inclusive > ${period.fromExclusive} @@ -127,7 +127,7 @@ class DbAcsCommitmentStore( def setData(pp: PositionedParameters)(item: CommitmentData): Unit = { val CommitmentData(counterParticipant, period, commitment) = item - pp >> domainId + pp >> indexedDomain pp >> counterParticipant pp >> period.fromExclusive pp >> period.toInclusive @@ -138,36 +138,23 @@ class DbAcsCommitmentStore( case _: DbStorage.Profile.H2 => """merge into par_computed_acs_commitments cs using ( - select cast(? as int) domain_id, cast(? as varchar(300)) counter_participant, cast(? as bigint) from_exclusive, + select cast(? as int) domain_idx, cast(? as varchar(300)) counter_participant, cast(? as bigint) from_exclusive, cast(? as bigint) to_inclusive, cast(? 
as binary large object) commitment from dual) - excluded on (cs.domain_id = excluded.domain_id and cs.counter_participant = excluded.counter_participant and + excluded on (cs.domain_idx = excluded.domain_idx and cs.counter_participant = excluded.counter_participant and cs.from_exclusive = excluded.from_exclusive and cs.to_inclusive = excluded.to_inclusive) when matched and cs.commitment = excluded.commitment then update set cs.commitment = excluded.commitment when not matched then - insert (domain_id, counter_participant, from_exclusive, to_inclusive, commitment) - values (excluded.domain_id, excluded.counter_participant, excluded.from_exclusive, excluded.to_inclusive, excluded.commitment) + insert (domain_idx, counter_participant, from_exclusive, to_inclusive, commitment) + values (excluded.domain_idx, excluded.counter_participant, excluded.from_exclusive, excluded.to_inclusive, excluded.commitment) """ case _: DbStorage.Profile.Postgres => - """insert into par_computed_acs_commitments(domain_id, counter_participant, from_exclusive, to_inclusive, commitment) + """insert into par_computed_acs_commitments(domain_idx, counter_participant, from_exclusive, to_inclusive, commitment) values (?, ?, ?, ?, ?) - on conflict (domain_id, counter_participant, from_exclusive, to_inclusive) do + on conflict (domain_idx, counter_participant, from_exclusive, to_inclusive) do update set commitment = excluded.commitment where par_computed_acs_commitments.commitment = excluded.commitment """ - case _: DbStorage.Profile.Oracle => - """merge into par_computed_acs_commitments cs - using ( - select ? domain_id, ? counter_participant, ? from_exclusive, ? to_inclusive, ? commitment from dual) - excluded on (cs.domain_id = excluded.domain_id and cs.counter_participant = excluded.counter_participant and - cs.from_exclusive = excluded.from_exclusive and cs.to_inclusive = excluded.to_inclusive) - when matched then - update set cs.commitment = excluded.commitment - where dbms_lob.compare(cs.commitment, excluded.commitment) = 0 - when not matched then - insert (domain_id, counter_participant, from_exclusive, to_inclusive, commitment) - values (excluded.domain_id, excluded.counter_participant, excluded.from_exclusive, excluded.to_inclusive, excluded.commitment) - """ } val bulkUpsert = DbStorage.bulkOperation(query, items.toList, storage.profile)(setData) @@ -178,7 +165,7 @@ class DbAcsCommitmentStore( // Underreporting of the affected rows should not matter here as the query is idempotent and updates the row even if the same values had been there before ErrorUtil.requireState( rowCount != 0, - s"Commitment for domain $domainId, counterparticipant $counterParticipant and period $period already computed with a different value; refusing to insert $commitment", + s"Commitment for domain $indexedDomain, counterparticipant $counterParticipant and period $period already computed with a different value; refusing to insert $commitment", ) } } @@ -196,32 +183,12 @@ class DbAcsCommitmentStore( // Slick doesn't support bulk insertions by default, so we have to stitch our own val insertOutstanding = - storage.profile match { - case _: DbStorage.Profile.Oracle => - (sql"""merge into par_outstanding_acs_commitments - using (with updates as (""" ++ - counterParticipants.toList - .map(p => - sql"select $domainId did, ${period.fromExclusive} periodFrom, ${period.toInclusive} periodTo, $p counter_participant, ${CommitmentPeriodState.Outstanding} matching_state from dual" - ) - .intercalate(sql" union all ") ++ - sql""") select * from 
updates) U on ( - U.did = par_outstanding_acs_commitments.domain_id and - U.periodFrom = par_outstanding_acs_commitments.from_exclusive and - U.periodTo = par_outstanding_acs_commitments.to_inclusive and - U.counter_participant = par_outstanding_acs_commitments.counter_participant) - when not matched then - insert (domain_id, from_exclusive, to_inclusive, counter_participant, matching_state) - values (U.did, U.periodFrom, U.periodTo, U.counter_participant, U.matching_state)""").asUpdate - case _ => - (sql"""insert into par_outstanding_acs_commitments (domain_id, from_exclusive, to_inclusive, counter_participant, matching_state) values """ ++ - counterParticipants.toList - .map(p => - sql"($domainId, ${period.fromExclusive}, ${period.toInclusive}, $p,${CommitmentPeriodState.Outstanding})" - ) - .intercalate(sql", ") ++ sql" on conflict do nothing").asUpdate - - } + (sql"""insert into par_outstanding_acs_commitments (domain_idx, from_exclusive, to_inclusive, counter_participant, matching_state) values """ ++ + counterParticipants.toList + .map(p => + sql"($indexedDomain, ${period.fromExclusive}, ${period.toInclusive}, $p,${CommitmentPeriodState.Outstanding})" + ) + .intercalate(sql", ") ++ sql" on conflict do nothing").asUpdate storage.update_(insertOutstanding, operationName = "commitments: storeOutstanding") } @@ -233,24 +200,10 @@ class DbAcsCommitmentStore( val timestamp = period.toInclusive val upsertQuery = storage.profile match { case _: DbStorage.Profile.H2 => - sqlu"""merge into par_last_computed_acs_commitments(domain_id, ts) values ($domainId, $timestamp)""" + sqlu"""merge into par_last_computed_acs_commitments(domain_idx, ts) values ($indexedDomain, $timestamp)""" case _: DbStorage.Profile.Postgres => - sqlu"""insert into par_last_computed_acs_commitments(domain_id, ts) values ($domainId, $timestamp) - on conflict (domain_id) do update set ts = $timestamp""" - case _: DbStorage.Profile.Oracle => - sqlu"""merge into par_last_computed_acs_commitments lcac - using ( - select - $domainId domain_id, - $timestamp ts - from dual - ) parameters - on (lcac.domain_id = parameters.domain_id) - when matched then - update set lcac.ts = parameters.ts - when not matched then - insert (domain_id, ts) values (parameters.domain_id, parameters.ts) - """ + sqlu"""insert into par_last_computed_acs_commitments(domain_idx, ts) values ($indexedDomain, $timestamp) + on conflict (domain_idx) do update set ts = $timestamp""" } storage.update_(upsertQuery, operationName = "commitments: markComputedAndSent") @@ -275,7 +228,7 @@ class DbAcsCommitmentStore( import DbStorage.Implicits.BuilderChain.* val query = sql"""select from_exclusive, to_inclusive, counter_participant, matching_state - from par_outstanding_acs_commitments where domain_id = $domainId and to_inclusive >= $start and from_exclusive < $end + from par_outstanding_acs_commitments where domain_idx = $indexedDomain and to_inclusive >= $start and from_exclusive < $end and ($includeMatchedPeriods or matching_state != ${CommitmentPeriodState.Matched}) """ ++ participantFilter @@ -300,26 +253,17 @@ class DbAcsCommitmentStore( case _: DbStorage.Profile.H2 => sqlu"""merge into par_received_acs_commitments using dual - on domain_id = $domainId and sender = $sender and from_exclusive = $from and to_inclusive = $to and signed_commitment = $serialized + on domain_idx = $indexedDomain and sender = $sender and from_exclusive = $from and to_inclusive = $to and signed_commitment = $serialized when not matched then - insert (domain_id, sender, from_exclusive, 
to_inclusive, signed_commitment) - values ($domainId, $sender, $from, $to, $serialized) + insert (domain_idx, sender, from_exclusive, to_inclusive, signed_commitment) + values ($indexedDomain, $sender, $from, $to, $serialized) """ case _: DbStorage.Profile.Postgres => - sqlu"""insert into par_received_acs_commitments(domain_id, sender, from_exclusive, to_inclusive, signed_commitment) - select $domainId, $sender, $from, $to, $serialized - where not exists( - select * from par_received_acs_commitments - where domain_id = $domainId and sender = $sender and from_exclusive = $from and to_inclusive = $to and signed_commitment = $serialized) - """ - case _: DbStorage.Profile.Oracle => - sqlu"""insert into par_received_acs_commitments(domain_id, sender, from_exclusive, to_inclusive, signed_commitment) - select $domainId, $sender, $from, $to, $serialized - from dual + sqlu"""insert into par_received_acs_commitments(domain_idx, sender, from_exclusive, to_inclusive, signed_commitment) + select $indexedDomain, $sender, $from, $to, $serialized where not exists( select * from par_received_acs_commitments - where domain_id = $domainId and sender = $sender and from_exclusive = $from and to_inclusive = $to - and dbms_lob.compare(signed_commitment, $serialized) = 0) + where domain_idx = $indexedDomain and sender = $sender and from_exclusive = $from and to_inclusive = $to and signed_commitment = $serialized) """ } storage.update_( @@ -350,21 +294,9 @@ class DbAcsCommitmentStore( true } - val insertQuery = storage.profile match { - case Profile.H2(_) | Profile.Postgres(_) => - """insert into par_outstanding_acs_commitments (domain_id, from_exclusive, to_inclusive, counter_participant, matching_state) - values (?, ?, ?, ?, ?) on conflict do nothing""" - - case Profile.Oracle(_) => - """merge /*+ INDEX ( par_outstanding_acs_commitments ( counter_participant, domain_id, from_exclusive, to_inclusive, matching_state ) ) */ - |into par_outstanding_acs_commitments t - |using (select ? domain_id, ? from_exclusive, ? to_inclusive, ? counter_participant, ? matching_state from dual) input - |on (t.counter_participant = input.counter_participant and t.domain_id = input.domain_id and - | t.from_exclusive = input.from_exclusive and t.to_inclusive = input.to_inclusive) - |when not matched then - | insert (domain_id, from_exclusive, to_inclusive, counter_participant,matching_state) - | values (input.domain_id, input.from_exclusive, input.to_inclusive, input.counter_participant, input.matching_state)""".stripMargin - } + val insertQuery = + """insert into par_outstanding_acs_commitments (domain_idx, from_exclusive, to_inclusive, counter_participant, matching_state) + values (?, ?, ?, ?, ?) 
on conflict do nothing""" val stateUpdateFilter: SQLActionBuilderChain = if (matchingState == CommitmentPeriodState.Matched) @@ -383,14 +315,14 @@ class DbAcsCommitmentStore( for { overlappingIntervals <- (sql"""select from_exclusive, to_inclusive, matching_state from par_outstanding_acs_commitments - where domain_id = $domainId and counter_participant = $counterParticipant + where domain_idx = $indexedDomain and counter_participant = $counterParticipant and from_exclusive < ${period.toInclusive} and to_inclusive > ${period.fromExclusive} and """ ++ stateUpdateFilter).toActionBuilder .as[(CantonTimestampSecond, CantonTimestampSecond, CommitmentPeriodState)] _ <- (sql"""delete from par_outstanding_acs_commitments - where domain_id = $domainId and counter_participant = $counterParticipant + where domain_idx = $indexedDomain and counter_participant = $counterParticipant and from_exclusive < ${period.toInclusive} and to_inclusive > ${period.fromExclusive} and """ ++ stateUpdateFilter).toActionBuilder.asUpdate @@ -412,7 +344,7 @@ class DbAcsCommitmentStore( _ <- DbStorage.bulkOperation_(insertQuery, newPeriods, storage.profile) { pp => newPeriod => val (period, state) = newPeriod - pp >> domainId + pp >> indexedDomain pp >> period.fromExclusive pp >> period.toInclusive pp >> counterParticipant @@ -456,11 +388,11 @@ class DbAcsCommitmentStore( lastPruning: Option[CantonTimestamp], )(implicit traceContext: TraceContext): Future[Int] = { val query1 = - sqlu"delete from par_received_acs_commitments where domain_id=$domainId and to_inclusive < $before" + sqlu"delete from par_received_acs_commitments where domain_idx=$indexedDomain and to_inclusive < $before" val query2 = - sqlu"delete from par_computed_acs_commitments where domain_id=$domainId and to_inclusive < $before" + sqlu"delete from par_computed_acs_commitments where domain_idx=$indexedDomain and to_inclusive < $before" val query3 = - sqlu"delete from par_outstanding_acs_commitments where domain_id=$domainId and matching_state = ${CommitmentPeriodState.Matched} and to_inclusive < $before" + sqlu"delete from par_outstanding_acs_commitments where domain_idx=$indexedDomain and matching_state = ${CommitmentPeriodState.Matched} and to_inclusive < $before" storage .queryAndUpdate( query1.zip(query2.zip(query3)), @@ -473,7 +405,7 @@ class DbAcsCommitmentStore( traceContext: TraceContext ): Future[Option[CantonTimestampSecond]] = storage.query( - sql"select ts from par_last_computed_acs_commitments where domain_id = $domainId" + sql"select ts from par_last_computed_acs_commitments where domain_idx = $indexedDomain" .as[CantonTimestampSecond] .headOption, functionFullName, @@ -487,7 +419,7 @@ class DbAcsCommitmentStore( adjustedTsOpt = computed.map(_.forgetRefinement.min(beforeOrAt)) outstandingOpt <- adjustedTsOpt.traverse { ts => storage.query( - sql"select from_exclusive, to_inclusive from par_outstanding_acs_commitments where domain_id=$domainId and from_exclusive < $ts and matching_state != ${CommitmentPeriodState.Matched}" + sql"select from_exclusive, to_inclusive from par_outstanding_acs_commitments where domain_idx=$indexedDomain and from_exclusive < $ts and matching_state != ${CommitmentPeriodState.Matched}" .as[(CantonTimestamp, CantonTimestamp)] .withTransactionIsolation(Serializable), operationName = "commitments: compute no outstanding", @@ -520,7 +452,7 @@ class DbAcsCommitmentStore( val query = sql"""select from_exclusive, to_inclusive, counter_participant, commitment from par_computed_acs_commitments - where domain_id = $domainId and 
to_inclusive >= $start and from_exclusive < $end""" + where domain_idx = $indexedDomain and to_inclusive >= $start and from_exclusive < $end""" ++ participantFilter storage.query( @@ -547,17 +479,23 @@ class DbAcsCommitmentStore( val query = sql"""select signed_commitment from par_received_acs_commitments - where domain_id = $domainId and to_inclusive >= $start and from_exclusive < $end""" ++ participantFilter + where domain_idx = $indexedDomain and to_inclusive >= $start and from_exclusive < $end""" ++ participantFilter storage.query(query.as[SignedProtocolMessage[AcsCommitment]], functionFullName) } override val runningCommitments: DbIncrementalCommitmentStore = - new DbIncrementalCommitmentStore(storage, domainId, protocolVersion, timeouts, loggerFactory) + new DbIncrementalCommitmentStore( + storage, + indexedDomain, + protocolVersion, + timeouts, + loggerFactory, + ) override val queue: DbCommitmentQueue = - new DbCommitmentQueue(storage, domainId, protocolVersion, timeouts, loggerFactory) + new DbCommitmentQueue(storage, indexedDomain, protocolVersion, timeouts, loggerFactory) override def onClosed(): Unit = Lifecycle.close( @@ -569,7 +507,7 @@ class DbAcsCommitmentStore( class DbIncrementalCommitmentStore( override protected val storage: DbStorage, - domainId: IndexedDomain, + indexedDomain: IndexedDomain, protocolVersion: ProtocolVersion, override protected val timeouts: ProcessingTimeout, protected val loggerFactory: NamedLoggerFactory, @@ -591,11 +529,11 @@ class DbIncrementalCommitmentStore( res <- storage.query( (for { tsWithTieBreaker <- - sql"""select ts, tie_breaker from par_commitment_snapshot_time where domain_id = $domainId""" + sql"""select ts, tie_breaker from par_commitment_snapshot_time where domain_idx = $indexedDomain""" .as[(CantonTimestamp, Long)] .headOption snapshot <- - sql"""select stakeholders, commitment from par_commitment_snapshot where domain_id = $domainId""" + sql"""select stakeholders, commitment from par_commitment_snapshot where domain_idx = $indexedDomain""" .as[(StoredParties, AcsCommitment.CommitmentType)] } yield (tsWithTieBreaker, snapshot)).transactionally .withTransactionIsolation(Serializable), @@ -614,7 +552,7 @@ class DbIncrementalCommitmentStore( override def watermark(implicit traceContext: TraceContext): Future[RecordTime] = { val query = - sql"""select ts, tie_breaker from par_commitment_snapshot_time where domain_id=$domainId""" + sql"""select ts, tie_breaker from par_commitment_snapshot_time where domain_idx=$indexedDomain""" .as[(CantonTimestamp, Long)] .headOption storage @@ -637,9 +575,9 @@ class DbIncrementalCommitmentStore( .toLengthLimitedHexString def deleteCommitments(stakeholders: List[SortedSet[LfPartyId]]): DbAction.All[Unit] = { val deleteStatement = - "delete from par_commitment_snapshot where domain_id = ? and stakeholders_hash = ?" + "delete from par_commitment_snapshot where domain_idx = ? and stakeholders_hash = ?" 
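Throughout this file the diff collapses three per-profile statements to two, pairing an H2 merge with a Postgres insert ... on conflict upsert (see markComputedAndSent above and the snapshot statements just below). A reduced sketch of the Postgres side of the watermark upsert, with plain Longs standing in for Canton's timestamp and tie-breaker types; this illustrates the statement shape, not the actual store code:

import slick.jdbc.PostgresProfile.api._

// One watermark row per domain, keyed by domain_idx; the upsert keeps the
// statement idempotent, so a retry after a crash simply overwrites the row.
def upsertWatermark(domainIdx: Int, tsMicros: Long, tieBreaker: Long): DBIO[Int] =
  sqlu"""insert into par_commitment_snapshot_time(domain_idx, ts, tie_breaker)
         values ($domainIdx, $tsMicros, $tieBreaker)
         on conflict (domain_idx) do update set ts = $tsMicros, tie_breaker = $tieBreaker"""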
DbStorage.bulkOperation_(deleteStatement, stakeholders, storage.profile) { pp => stkhs => - pp >> domainId + pp >> indexedDomain pp >> partySetHash(stkhs) } } @@ -651,7 +589,7 @@ class DbIncrementalCommitmentStore( pp: PositionedParameters ): ((SortedSet[LfPartyId], AcsCommitment.CommitmentType)) => Unit = { case (stkhs, commitment) => - pp >> domainId + pp >> indexedDomain pp >> partySetHash(stkhs) pp >> StoredParties(stkhs) pp >> commitment @@ -659,29 +597,12 @@ class DbIncrementalCommitmentStore( val statement = storage.profile match { case _: DbStorage.Profile.H2 => - """merge into par_commitment_snapshot (domain_id, stakeholders_hash, stakeholders, commitment) + """merge into par_commitment_snapshot (domain_idx, stakeholders_hash, stakeholders, commitment) values (?, ?, ?, ?)""" case _: DbStorage.Profile.Postgres => - """insert into par_commitment_snapshot (domain_id, stakeholders_hash, stakeholders, commitment) - values (?, ?, ?, ?) on conflict (domain_id, stakeholders_hash) do update set commitment = excluded.commitment""" - - case _: DbStorage.Profile.Oracle => - """merge into par_commitment_snapshot cs - using ( - select - ? domain_id, - ? stakeholders_hash, - ? stakeholders, - ? commitment - from dual - ) excluded - on (cs.domain_id = excluded.domain_id and cs.stakeholders_hash = excluded.stakeholders_hash) - when matched then - update set cs.commitment = excluded.commitment - when not matched then - insert (domain_id, stakeholders_hash, stakeholders, commitment) - values (excluded.domain_id, excluded.stakeholders_hash, excluded.stakeholders, excluded.commitment)""" + """insert into par_commitment_snapshot (domain_idx, stakeholders_hash, stakeholders, commitment) + values (?, ?, ?, ?) on conflict (domain_idx, stakeholders_hash) do update set commitment = excluded.commitment""" } DbStorage.bulkOperation_( statement, @@ -693,19 +614,10 @@ class DbIncrementalCommitmentStore( def insertRt(rt: RecordTime): DbAction.WriteOnly[Int] = storage.profile match { case _: DbStorage.Profile.H2 => - sqlu"""merge into par_commitment_snapshot_time (domain_id, ts, tie_breaker) values ($domainId, ${rt.timestamp}, ${rt.tieBreaker})""" + sqlu"""merge into par_commitment_snapshot_time (domain_idx, ts, tie_breaker) values ($indexedDomain, ${rt.timestamp}, ${rt.tieBreaker})""" case _: DbStorage.Profile.Postgres => - sqlu"""insert into par_commitment_snapshot_time(domain_id, ts, tie_breaker) values ($domainId, ${rt.timestamp}, ${rt.tieBreaker}) - on conflict (domain_id) do update set ts = ${rt.timestamp}, tie_breaker = ${rt.tieBreaker}""" - case _: DbStorage.Profile.Oracle => - sqlu"""merge into par_commitment_snapshot_time cst - using dual - on (cst.domain_id = $domainId) - when matched then - update set ts = ${rt.timestamp}, tie_breaker = ${rt.tieBreaker} - when not matched then - insert (domain_id, ts, tie_breaker) values ($domainId, ${rt.timestamp}, ${rt.tieBreaker}) - """ + sqlu"""insert into par_commitment_snapshot_time(domain_idx, ts, tie_breaker) values ($indexedDomain, ${rt.timestamp}, ${rt.tieBreaker}) + on conflict (domain_idx) do update set ts = ${rt.timestamp}, tie_breaker = ${rt.tieBreaker}""" } val updateList = updates.toList @@ -722,7 +634,7 @@ class DbIncrementalCommitmentStore( class DbCommitmentQueue( override protected val storage: DbStorage, - domainId: IndexedDomain, + indexedDomain: IndexedDomain, protocolVersion: ProtocolVersion, override protected val timeouts: ProcessingTimeout, override protected val loggerFactory: NamedLoggerFactory, @@ -734,25 +646,18 @@ class 
DbCommitmentQueue( import storage.api.* private implicit val acsCommitmentReader: GetResult[AcsCommitment] = - AcsCommitment.getAcsCommitmentResultReader(domainId.item, protocolVersion) + AcsCommitment.getAcsCommitmentResultReader(indexedDomain.domainId, protocolVersion) override def enqueue( commitment: AcsCommitment )(implicit traceContext: TraceContext): Future[Unit] = { val commitmentDbHash = Hash.digest(HashPurpose.AcsCommitmentDb, commitment.commitment, HashAlgorithm.Sha256) - val insertAction = storage.profile match { - case _: DbStorage.Profile.Oracle => - sqlu"""insert - /*+ IGNORE_ROW_ON_DUPKEY_INDEX ( PAR_COMMITMENT_QUEUE ( commitment_hash, domain_id, sender, counter_participant, from_exclusive, to_inclusive ) ) */ - into par_commitment_queue(domain_id, sender, counter_participant, from_exclusive, to_inclusive, commitment, commitment_hash) - values($domainId, ${commitment.sender}, ${commitment.counterParticipant}, ${commitment.period.fromExclusive}, ${commitment.period.toInclusive}, ${commitment.commitment}, $commitmentDbHash)""" - case _ => - sqlu"""insert - into par_commitment_queue(domain_id, sender, counter_participant, from_exclusive, to_inclusive, commitment, commitment_hash) - values($domainId, ${commitment.sender}, ${commitment.counterParticipant}, ${commitment.period.fromExclusive}, ${commitment.period.toInclusive}, ${commitment.commitment}, $commitmentDbHash) - on conflict do nothing""" - } + val insertAction = + sqlu"""insert + into par_commitment_queue(domain_idx, sender, counter_participant, from_exclusive, to_inclusive, commitment, commitment_hash) + values($indexedDomain, ${commitment.sender}, ${commitment.counterParticipant}, ${commitment.period.fromExclusive}, ${commitment.period.toInclusive}, ${commitment.commitment}, $commitmentDbHash) + on conflict do nothing""" { storage.update_(insertAction, operationName = "enqueue commitment") @@ -770,7 +675,7 @@ class DbCommitmentQueue( .query( sql"""select sender, counter_participant, from_exclusive, to_inclusive, commitment from par_commitment_queue - where domain_id = $domainId and to_inclusive <= $timestamp""" + where domain_idx = $indexedDomain and to_inclusive <= $timestamp""" .as[AcsCommitment], operationName = NameOf.qualifiedNameOfCurrentFunc, ) @@ -787,7 +692,7 @@ class DbCommitmentQueue( .query( sql"""select sender, counter_participant, from_exclusive, to_inclusive, commitment from par_commitment_queue - where domain_id = $domainId and to_inclusive >= $timestamp""" + where domain_idx = $indexedDomain and to_inclusive >= $timestamp""" .as[AcsCommitment], operationName = NameOf.qualifiedNameOfCurrentFunc, ) @@ -802,7 +707,7 @@ class DbCommitmentQueue( .query( sql"""select sender, counter_participant, from_exclusive, to_inclusive, commitment from par_commitment_queue - where domain_id = $domainId and sender = $counterParticipant + where domain_idx = $indexedDomain and sender = $counterParticipant and to_inclusive > ${period.fromExclusive} and from_exclusive < ${period.toInclusive} """ .as[AcsCommitment], @@ -814,7 +719,7 @@ class DbCommitmentQueue( timestamp: CantonTimestamp )(implicit traceContext: TraceContext): Future[Unit] = storage.update_( - sqlu"delete from par_commitment_queue where domain_id = $domainId and to_inclusive <= $timestamp", + sqlu"delete from par_commitment_queue where domain_idx = $indexedDomain and to_inclusive <= $timestamp", operationName = "delete queued commitments", ) } diff --git 
a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbActiveContractStore.scala b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbActiveContractStore.scala index bda0c904d6aa..8ca9e3110869 100644 --- a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbActiveContractStore.scala +++ b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbActiveContractStore.scala @@ -12,22 +12,15 @@ import cats.syntax.parallel.* import cats.syntax.traverse.* import com.daml.nameof.NameOf.functionFullName import com.daml.nonempty.NonEmpty -import com.digitalasset.canton.config.CantonRequireTypes.{LengthLimitedString, String100} +import com.digitalasset.canton.config.CantonRequireTypes.LengthLimitedString import com.digitalasset.canton.config.ProcessingTimeout -import com.digitalasset.canton.config.RequireTypes.PositiveNumeric import com.digitalasset.canton.data.CantonTimestamp import com.digitalasset.canton.logging.NamedLoggerFactory import com.digitalasset.canton.participant.store.ActiveContractSnapshot.ActiveContractIdsChange import com.digitalasset.canton.participant.store.ActiveContractStore.ActivenessChangeDetail.* +import com.digitalasset.canton.participant.store.* import com.digitalasset.canton.participant.store.data.ActiveContractsData import com.digitalasset.canton.participant.store.db.DbActiveContractStore.* -import com.digitalasset.canton.participant.store.{ - ActivationsDeactivationsConsistencyCheck, - ActiveContractStore, - ContractChange, - ContractStore, - StateChangeType, -} import com.digitalasset.canton.participant.util.TimeOfChange import com.digitalasset.canton.protocol.ContractIdSyntax.* import com.digitalasset.canton.protocol.{ @@ -42,16 +35,14 @@ import com.digitalasset.canton.resource.DbStorage.Implicits.BuilderChain.{ } import com.digitalasset.canton.resource.DbStorage.* import com.digitalasset.canton.resource.{DbStorage, DbStore} -import com.digitalasset.canton.store.db.{DbDeserializationException, DbPrunableByTimeDomain} +import com.digitalasset.canton.store.db.DbPrunableByTimeDomain import com.digitalasset.canton.store.{IndexedDomain, IndexedStringStore, PrunableByTimeParameters} import com.digitalasset.canton.tracing.TraceContext import com.digitalasset.canton.util.FutureInstances.* -import com.digitalasset.canton.util.{Checked, CheckedT, ErrorUtil, IterableUtil, MonadUtil} -import com.digitalasset.canton.version.ProtocolVersion +import com.digitalasset.canton.util.* import com.digitalasset.canton.{ReassignmentCounter, RequestCounter} import com.digitalasset.daml.lf.data.Ref.PackageId import slick.jdbc.* -import slick.jdbc.canton.SQLActionBuilder import scala.Ordered.orderingToOrdered import scala.annotation.nowarn @@ -62,21 +53,19 @@ import scala.concurrent.{ExecutionContext, Future} /** Active contracts journal * * This database table has the following indexes to support scaling query performance: - * - CREATE index idx_par_active_contracts_dirty_request_reset ON par_active_contracts (domain_id, request_counter) + * - create index idx_par_active_contracts_dirty_request_reset on par_active_contracts (domain_idx, request_counter) * used on startup of the SyncDomain to delete all inflight validation requests. - * - CREATE index idx_par_active_contracts_contract_id ON par_active_contracts (contract_id) - * used in conflict detection for point wise lookup of the contract status. 
- * - CREATE index idx_par_active_contracts_ts_domain_id ON par_active_contracts (ts, domain_id) + * - create index idx_par_active_contracts_contract_id on par_active_contracts (contract_id) + * used in conflict detection for point-wise lookup of the contract status. + * - create index idx_par_active_contracts_ts_domain_idx on par_active_contracts (ts, domain_idx) * used on startup by the SyncDomain to replay ACS changes to the ACS commitment processor. */ class DbActiveContractStore( override protected val storage: DbStorage, - protected[this] override val domainId: IndexedDomain, + protected[this] override val indexedDomain: IndexedDomain, enableAdditionalConsistencyChecks: Boolean, - maxContractIdSqlInListSize: PositiveNumeric[Int], batchingParametersConfig: PrunableByTimeParameters, val indexedStringStore: IndexedStringStore, - protocolVersion: ProtocolVersion, override protected val timeouts: ProcessingTimeout, override protected val loggerFactory: NamedLoggerFactory, )(implicit val ec: ExecutionContext) @@ -284,7 +273,7 @@ class DbActiveContractStore( contractIds: Iterable[LfContractId] )(implicit traceContext: TraceContext): Future[Map[LfContractId, ContractState]] = storage.profile match { - case _: DbStorage.Profile.H2 | _: DbStorage.Profile.Oracle => + case _: DbStorage.Profile.H2 => // With H2, it is faster to do lookup contracts individually than to use a range query contractIds .to(LazyList) @@ -301,29 +290,23 @@ class DbActiveContractStore( case Some(contractIdsNel) => import DbStorage.Implicits.BuilderChain.* - val queries = - DbStorage - .toInClauses_("contract_id", contractIdsNel, maxContractIdSqlInListSize) - .map { inClause => - val query = - sql""" + val query = + (sql""" with ordered_changes(contract_id, operation, reassignment_counter, remote_domain_idx, ts, request_counter, row_num) as ( select contract_id, operation, reassignment_counter, remote_domain_idx, ts, request_counter, - ROW_NUMBER() OVER (partition by domain_id, contract_id order by ts desc, request_counter desc, change asc) + ROW_NUMBER() OVER (partition by domain_idx, contract_id order by ts desc, request_counter desc, change asc) from par_active_contracts - where domain_id = $domainId and """ ++ inClause ++ - sql""" + where domain_idx = $indexedDomain and """ ++ DbStorage + .toInClause("contract_id", contractIdsNel) ++ + sql""" ) select contract_id, operation, reassignment_counter, remote_domain_idx, ts, request_counter from ordered_changes where row_num = 1; - """ - - query.as[(LfContractId, StoredActiveContract)] - } + """).as[(LfContractId, StoredActiveContract)] storage - .sequentialQueryAndCombine(queries, functionFullName) + .query(query, functionFullName) .flatMap(_.toList.parTraverse { case (id, contract) => contract.toContractState.map(cs => (id, cs)) }) @@ -340,22 +323,15 @@ class DbActiveContractStore( // As we can directly query daml_contracts from the database import DbStorage.Implicits.* - import DbStorage.Implicits.BuilderChain.* // TODO(i9480): Integrate with performance tests to check that we can remove packages when there are many contracts. 
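The ordered_changes CTE above, which reappears in packageUsage just below, is this store's standard way to read "the latest state per contract": rank all changes newest-first inside a window, keep row_num = 1. A stripped-down sketch of that query shape, issued through plain Slick SQL; the table and column names mirror the diff, the primitive result types are simplifications:

import slick.jdbc.PostgresProfile.api._

// Rank every change per (domain, contract), newest first; row 1 is the
// current state. "change asc" gives a deterministic tie-break at equal
// (ts, request_counter), mirroring the ordering in the queries above.
def latestStatePerContract(domainIdx: Int): DBIO[Vector[(String, String)]] =
  sql"""with ordered_changes(contract_id, operation, row_num) as (
          select contract_id, operation,
                 row_number() over (partition by domain_idx, contract_id
                                    order by ts desc, request_counter desc, change asc)
          from par_active_contracts
          where domain_idx = $domainIdx)
        select contract_id, operation from ordered_changes where row_num = 1"""
    .as[(String, String)]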
- val limitStatement: SQLActionBuilder = storage.profile match { - case _: DbStorage.Profile.Oracle => - sql"""fetch first 1 rows only""" - case _ => sql"limit 1" - } - val query = (sql""" with ordered_changes(contract_id, package_id, change, ts, request_counter, remote_domain_idx, row_num) as ( select par_active_contracts.contract_id, par_contracts.package_id, change, ts, par_active_contracts.request_counter, remote_domain_idx, ROW_NUMBER() OVER ( - partition by par_active_contracts.domain_id, par_active_contracts.contract_id + partition by par_active_contracts.domain_idx, par_active_contracts.contract_id order by ts desc, par_active_contracts.request_counter desc, @@ -363,15 +339,16 @@ class DbActiveContractStore( ) from par_active_contracts join par_contracts on par_active_contracts.contract_id = par_contracts.contract_id - and par_active_contracts.domain_id = par_contracts.domain_id - where par_active_contracts.domain_id = $domainId + and par_active_contracts.domain_idx = par_contracts.domain_idx + where par_active_contracts.domain_idx = $indexedDomain and par_contracts.package_id = $pkg ) select contract_id, package_id from ordered_changes where row_num = 1 and change = 'activation' - """ ++ limitStatement).as[(LfContractId)] + limit 1 + """).as[(LfContractId)] val queryResult = storage.query(query, functionFullName) queryResult.map(_.headOption) @@ -488,30 +465,21 @@ class DbActiveContractStore( (sql""" select distinct(contract_id), #${p.attribute}, reassignment_counter from par_active_contracts AC - where not exists(select * from par_active_contracts AC2 where domain_id = $domainId and AC.contract_id = AC2.contract_id + where not exists(select * from par_active_contracts AC2 where domain_idx = $indexedDomain and AC.contract_id = AC2.contract_id and AC2.#${p.attribute} <= ${p.bound} and ((AC.ts, AC.request_counter) < (AC2.ts, AC2.request_counter) or (AC.ts = AC2.ts and AC.request_counter = AC2.request_counter and AC2.change = ${ChangeType.Deactivation}))) - and AC.#${p.attribute} <= ${p.bound} and domain_id = $domainId""" ++ + and AC.#${p.attribute} <= ${p.bound} and domain_idx = $indexedDomain""" ++ idsO.fold(sql"")(ids => sql" and AC.contract_id in " ++ ids) ++ ordering) .as[(LfContractId, T, ReassignmentCounter)] case _: DbStorage.Profile.Postgres => (sql""" select distinct(contract_id), AC3.#${p.attribute}, AC3.reassignment_counter from par_active_contracts AC1 join lateral - (select #${p.attribute}, change, reassignment_counter from par_active_contracts AC2 where domain_id = $domainId + (select #${p.attribute}, change, reassignment_counter from par_active_contracts AC2 where domain_idx = $indexedDomain and AC2.contract_id = AC1.contract_id and #${p.attribute} <= ${p.bound} order by ts desc, request_counter desc, change asc #${storage .limit(1)}) as AC3 on true - where AC1.domain_id = $domainId and AC3.change = CAST(${ChangeType.Activation} as change_type)""" ++ - idsO.fold(sql"")(ids => sql" and AC1.contract_id in " ++ ids) ++ ordering) - .as[(LfContractId, T, ReassignmentCounter)] - case _: DbStorage.Profile.Oracle => - (sql"""select distinct(contract_id), AC3.#${p.attribute}, AC3.reassignment_counter from par_active_contracts AC1, lateral - (select #${p.attribute}, change, reassignment_counter from par_active_contracts AC2 where domain_id = $domainId - and AC2.contract_id = AC1.contract_id and #${p.attribute} <= ${p.bound} - order by ts desc, request_counter desc, change desc - fetch first 1 row only) AC3 - where AC1.domain_id = $domainId and AC3.change = 'activation'""" ++ + 
where AC1.domain_idx = $indexedDomain and AC3.change = CAST(${ChangeType.Activation} as change_type)""" ++ idsO.fold(sql"")(ids => sql" and AC1.contract_id in " ++ ids) ++ ordering) .as[(LfContractId, T, ReassignmentCounter)] } @@ -538,14 +506,14 @@ class DbActiveContractStore( with deactivation_counter(contract_id, request_counter) as ( select contract_id, max(request_counter) from par_active_contracts - where domain_id = $domainId + where domain_idx = $indexedDomain and change = cast('deactivation' as change_type) and ts <= $beforeAndIncluding group by contract_id ) select ac.contract_id, ac.ts, ac.request_counter, ac.change from deactivation_counter dc - join par_active_contracts ac on ac.domain_id = $domainId and ac.contract_id = dc.contract_id + join par_active_contracts ac on ac.domain_idx = $indexedDomain and ac.contract_id = dc.contract_id where ac.request_counter <= dc.request_counter""" .as[(LfContractId, CantonTimestamp, RequestCounter, ChangeType)], s"$functionFullName: Fetch ACS entries to be pruned", @@ -556,13 +524,13 @@ class DbActiveContractStore( if (acsEntriesToPrune.isEmpty) Future.successful(0) else { val deleteStatement = - s"delete from par_active_contracts where domain_id = ? and contract_id = ? and ts = ?" + s"delete from par_active_contracts where domain_idx = ? and contract_id = ? and ts = ?" + " and request_counter = ? and change = CAST(? as change_type);" storage.queryAndUpdate( DbStorage .bulkOperation(deleteStatement, acsEntriesToPrune, storage.profile) { pp => { case (contractId, ts, rc, change) => - pp >> domainId + pp >> indexedDomain pp >> contractId pp >> ts pp >> rc @@ -582,42 +550,22 @@ class DbActiveContractStore( with deactivation_counter(contract_id, request_counter) as ( select contract_id, max(request_counter) from par_active_contracts - where domain_id = $domainId + where domain_idx = $indexedDomain and change = ${ChangeType.Deactivation} and ts <= $beforeAndIncluding group by contract_id ) delete from par_active_contracts - where (domain_id, contract_id, ts, request_counter, change) in ( - select ac.domain_id, ac.contract_id, ac.ts, ac.request_counter, ac.change + where (domain_idx, contract_id, ts, request_counter, change) in ( + select ac.domain_idx, ac.contract_id, ac.ts, ac.request_counter, ac.change from deactivation_counter dc - join par_active_contracts ac on ac.domain_id = $domainId and ac.contract_id = dc.contract_id + join par_active_contracts ac on ac.domain_idx = $indexedDomain and ac.contract_id = dc.contract_id where ac.request_counter <= dc.request_counter ); """, functionFullName, ) ) - case _: DbStorage.Profile.Oracle => - performUnlessClosingF("ACS.doPrune")( - storage.queryAndUpdate( - sqlu"""delete from par_active_contracts where rowid in ( - with deactivation_counter(contract_id, request_counter) as ( - select contract_id, max(request_counter) - from par_active_contracts - where domain_id = $domainId - and change = 'deactivation' - and ts <= $beforeAndIncluding - group by contract_id - ) - select ac.rowid - from deactivation_counter dc - join par_active_contracts ac on ac.domain_id = $domainId and ac.contract_id = dc.contract_id - where ac.request_counter <= dc.request_counter - )""", - functionFullName, - ) - ) } } yield nrPruned).onShutdown(0) @@ -673,7 +621,7 @@ class DbActiveContractStore( def deleteSince(criterion: RequestCounter)(implicit traceContext: TraceContext): Future[Unit] = { val query = - sqlu"delete from par_active_contracts where domain_id = $domainId and request_counter >= $criterion" + sqlu"delete from 
par_active_contracts where domain_idx = $indexedDomain and request_counter >= $criterion" storage .update(query, functionFullName) .map(count => logger.debug(s"DeleteSince on $criterion removed at least $count ACS entries")) @@ -687,16 +635,12 @@ class DbActiveContractStore( s"Provided timestamps are in the wrong order: $fromExclusive and $toInclusive", ) val changeQuery = { - val changeOrder = storage.profile match { - case _: DbStorage.Profile.Oracle => "asc" - case _ => "desc" - } sql"""select ts, request_counter, contract_id, operation, reassignment_counter, remote_domain_idx - from par_active_contracts where domain_id = $domainId and + from par_active_contracts where domain_idx = $indexedDomain and ((ts = ${fromExclusive.timestamp} and request_counter > ${fromExclusive.rc}) or ts > ${fromExclusive.timestamp}) and ((ts = ${toInclusive.timestamp} and request_counter <= ${toInclusive.rc}) or ts <= ${toInclusive.timestamp}) - order by ts asc, request_counter asc, change #$changeOrder""" + order by ts asc, request_counter asc, change desc""" }.as[(TimeOfChange, LfContractId, ActivenessChangeDetail)] for { @@ -731,25 +675,22 @@ class DbActiveContractStore( .map { case ((rc, _), _) => rc.unwrap } .maxOption .getOrElse(RequestCounter.Genesis.unwrap) - val archivalCidsWithoutReassignmentCountersQueries = DbStorage - .toInClauses_("contract_id", cids, maxContractIdSqlInListSize)( - absCoidSetParameter - ) + val inClause = DbStorage + .toInClause("contract_id", cids)(absCoidSetParameter) + val archivalCidsWithoutReassignmentCountersQueries = // Note that the sql query does not filter entries with ts <= toExclusive.timestamp, // but it also includes the entries between (`fromExclusive`, `toInclusive`]. // This is an implementation choice purely to reuse code: we pass the query result into the // function `reassignmentCounterForArchivals` and obtain the reassignment counters for (rc, cid) pairs. // One could have a more restrictive query and compute the reassignment counters in some other way. 
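Above, the chunked toInClauses_ / sequentialQueryAndCombine pipeline collapses into a single toInClause query; presumably the chunking existed for Oracle, whose 1000-element IN-list limit no longer applies now that the Oracle profile is gone. The underlying idea is one bound placeholder per id instead of splicing values into the SQL string. A plain-JDBC sketch of that idea; the method name, result shape, and selected columns are simplified for illustration:

import java.sql.Connection

// One statement, one "?" per id. The caller must guarantee a non-empty id
// list, which Canton encodes at the type level with NonEmpty.
def selectOperations(conn: Connection, domainIdx: Int, ids: Seq[String]): Vector[(String, String)] = {
  require(ids.nonEmpty, "SQL does not allow an empty IN ()")
  val placeholders = Seq.fill(ids.size)("?").mkString(", ")
  val stmt = conn.prepareStatement(
    s"select contract_id, operation from par_active_contracts where domain_idx = ? and contract_id in ($placeholders)"
  )
  try {
    stmt.setInt(1, domainIdx)
    ids.zipWithIndex.foreach { case (id, i) => stmt.setString(i + 2, id) }
    val rs = stmt.executeQuery()
    val out = Vector.newBuilder[(String, String)]
    while (rs.next()) out += ((rs.getString(1), rs.getString(2)))
    out.result()
  } finally stmt.close()
}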
- .map { inClause => - (sql"""select ts, request_counter, contract_id, operation, reassignment_counter, remote_domain_idx - from par_active_contracts where domain_id = $domainId + (sql"""select ts, request_counter, contract_id, operation, reassignment_counter, remote_domain_idx + from par_active_contracts where domain_idx = $indexedDomain and (request_counter <= $maximumRc) and (ts <= ${toInclusive.timestamp}) and """ ++ inClause ++ sql""" order by ts asc, request_counter asc""") - .as[(TimeOfChange, LfContractId, ActivenessChangeDetail)] - } + .as[(TimeOfChange, LfContractId, ActivenessChangeDetail)] val resultArchivalReassignmentCounters = storage - .sequentialQueryAndCombine( + .query( archivalCidsWithoutReassignmentCountersQueries, "ACS: get data to compute the reassignment counters for archived contracts", ) @@ -856,15 +797,11 @@ class DbActiveContractStore( toc: TimeOfChange, )(implicit traceContext: TraceContext): CheckedT[Future, AcsError, AcsWarning, Unit] = { - val query = storage.profile match { - case _: DbStorage.Profile.Oracle => - throw new IllegalArgumentException("Implement for oracle") - case _ => - // change desc allows to have activations first - sql"""select operation, reassignment_counter, remote_domain_idx, ts, request_counter from par_active_contracts - where domain_id = $domainId and contract_id = $contractId + val query = + // change desc allows to have activations first + sql"""select operation, reassignment_counter, remote_domain_idx, ts, request_counter from par_active_contracts + where domain_idx = $indexedDomain and contract_id = $contractId order by ts asc, request_counter asc, change desc""" - } val changesF: Future[Vector[StoredActiveContract]] = storage.query(query.as[StoredActiveContract], functionFullName) @@ -936,18 +873,8 @@ class DbActiveContractStore( operationName: LengthLimitedString, )(implicit traceContext: TraceContext): CheckedT[Future, AcsError, AcsWarning, Unit] = { val insertQuery = storage.profile match { - case _: DbStorage.Profile.Oracle => - """merge /*+ INDEX ( par_active_contracts ( contract_id, ts, request_counter, change, domain_id, reassignment_counter ) ) */ - |into par_active_contracts - |using (select ? contract_id, ? ts, ? request_counter, ? change, ? domain_id from dual) input - |on (par_active_contracts.contract_id = input.contract_id and par_active_contracts.ts = input.ts and - | par_active_contracts.request_counter = input.request_counter and par_active_contracts.change = input.change and - | par_active_contracts.domain_id = input.domain_id) - |when not matched then - | insert (contract_id, ts, request_counter, change, domain_id, operation, reassignment_counter, remote_domain_idx) - | values (input.contract_id, input.ts, input.request_counter, input.change, input.domain_id, ?, ?, ?)""".stripMargin case _: DbStorage.Profile.H2 | _: DbStorage.Profile.Postgres => - """insert into par_active_contracts(contract_id, ts, request_counter, change, domain_id, operation, reassignment_counter, remote_domain_idx) + """insert into par_active_contracts(contract_id, ts, request_counter, change, domain_idx, operation, reassignment_counter, remote_domain_idx) values (?, ?, ?, CAST(? as change_type), ?, CAST(? as operation_type), ?, ?) 
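The recurring change in this hunk, from `toInClauses_` with `maxContractIdSqlInListSize` plus `sequentialQueryAndCombine` to a single `toInClause` plus one `query`, trades a sequence of chunked round trips for one statement with an arbitrarily long parameter list. A stand-in sketch of the clause construction (not the Canton helper itself):

    object InClauseSketch {
      // Builds "field in (?, ?, ..., ?)" with one placeholder per value; the
      // values are bound positionally by the caller, and exactly one query runs.
      def toInClause(field: String, values: Seq[String]): (String, Seq[String]) =
        (values.map(_ => "?").mkString(s"$field in (", ", ", ")"), values)
    }
    // InClauseSketch.toInClause("contract_id", Seq("c1", "c2"))._1
    //   == "contract_id in (?, ?)"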
on conflict do nothing""" } @@ -958,7 +885,7 @@ class DbActiveContractStore( pp >> toc.timestamp pp >> toc.rc pp >> operationType.changeType - pp >> domainId + pp >> indexedDomain pp >> operationType } @@ -986,39 +913,24 @@ class DbActiveContractStore( val (idsToCheck, rcsToCheck, tssToCheck) = unzip(toCheck) - val contractIdsNotInsertedInClauses = - DbStorage.toInClauses_("contract_id", idsToCheck, maxContractIdSqlInListSize) - - val rcsInClauses = - DbStorage.toInClauses_("request_counter", rcsToCheck, maxContractIdSqlInListSize) - - val tssInClauses = - DbStorage.toInClauses_("ts", tssToCheck, maxContractIdSqlInListSize) - - def query( - cidsInClause: SQLActionBuilderChain, - rcsInClause: SQLActionBuilderChain, - tssInClause: SQLActionBuilderChain, - ) = storage.profile match { - case _: DbStorage.Profile.Oracle => - sql"select contract_id, operation, reassignment_counter, remote_domain_idx, ts, request_counter from par_active_contracts where domain_id = $domainId and " ++ cidsInClause ++ - sql" and " ++ tssInClause ++ sql" and " ++ rcsInClause ++ sql" and change = $change" - case _ => - sql"select contract_id, operation, reassignment_counter, remote_domain_idx, ts, request_counter from par_active_contracts where domain_id = $domainId and " ++ cidsInClause ++ - sql" and " ++ tssInClause ++ sql" and " ++ rcsInClause ++ sql" and change = CAST($change as change_type)" - } + val cidsInClause = + DbStorage.toInClause("contract_id", idsToCheck) - val queries = - contractIdsNotInsertedInClauses.zip(rcsInClauses).zip(tssInClauses).map { - case ((cidsInClause, rcsInClause), tssInClause) => - query(cidsInClause, rcsInClause, tssInClause) - .as[(LfContractId, ActivenessChangeDetail, TimeOfChange)] - } + val rcsInClause = + DbStorage.toInClause("request_counter", rcsToCheck) + + val tssInClause = + DbStorage.toInClause("ts", tssToCheck) + + val query = + (sql"select contract_id, operation, reassignment_counter, remote_domain_idx, ts, request_counter from par_active_contracts where domain_idx = $indexedDomain and " ++ cidsInClause ++ + sql" and " ++ tssInClause ++ sql" and " ++ rcsInClause ++ sql" and change = CAST($change as change_type)") + .as[(LfContractId, ActivenessChangeDetail, TimeOfChange)] val isActivation = change == ChangeType.Activation val warningsF = storage - .sequentialQueryAndCombine(queries, functionFullName) + .query(query, functionFullName) .map(_.toList.mapFilter { case (cid, previousOperationType, toc) => val newOperationType = contractChanges.getOrElse((cid, toc), previousOperationType) @@ -1052,8 +964,6 @@ class DbActiveContractStore( CheckedT.result(storage.queryAndUpdate(insertAll, functionFullName)).flatMap { (_: Unit) => if (enableAdditionalConsistencyChecks) { // Check all contracts whether they have been inserted or are already there - // We don't analyze the update count - // so that we can use the fast IGNORE_ROW_ON_DUPKEY_INDEX directive in Oracle NonEmpty .from(contractChanges.keySet.toSeq) .map(checkIdempotence) @@ -1062,16 +972,6 @@ class DbActiveContractStore( } } - private def fetchLatestCreation( - contractId: LfContractId - ): DbAction.ReadOnly[Option[StoredActiveContract]] = - fetchContractStateQuery(contractId, Some(ActivenessChangeDetail.create)) - - private def fetchEarliestArchival( - contractId: LfContractId - ): DbAction.ReadOnly[Option[StoredActiveContract]] = - fetchContractStateQuery(contractId, Some(ActivenessChangeDetail.archive), descending = false) - private def fetchEarliestContractStateAfter( contractId: LfContractId, toc: TimeOfChange, @@ 
-1100,14 +1000,9 @@ class DbActiveContractStore( val baseQuery = sql"""select operation, reassignment_counter, remote_domain_idx, ts, request_counter from par_active_contracts - where domain_id = $domainId and contract_id = $contractId""" + where domain_idx = $indexedDomain and contract_id = $contractId""" val opFilterQuery = - storage.profile match { - case _: DbStorage.Profile.Oracle => - operationFilter.fold(sql" ")(o => sql" and operation = $o") - case _ => - operationFilter.fold(sql" ")(o => sql" and operation = CAST($o as operation_type)") - } + operationFilter.fold(sql" ")(o => sql" and operation = CAST($o as operation_type)") val tocFilterQuery = tocFilter.fold(sql" ") { toc => if (descending) sql" and (ts < ${toc.timestamp} or (ts = ${toc.timestamp} and request_counter < ${toc.rc}))" @@ -1115,14 +1010,9 @@ class DbActiveContractStore( sql" and (ts > ${toc.timestamp} or (ts = ${toc.timestamp} and request_counter > ${toc.rc}))" } val (normal_order, reversed_order) = if (descending) ("desc", "asc") else ("asc", "desc") - val orderQuery = storage.profile match { - case _: DbStorage.Profile.Oracle => - sql" order by ts #$normal_order, request_counter #$normal_order, change #$normal_order #${storage - .limit(1)}" - case _ => - sql" order by ts #$normal_order, request_counter #$normal_order, change #$reversed_order #${storage - .limit(1)}" - } + val orderQuery = + sql" order by ts #$normal_order, request_counter #$normal_order, change #$reversed_order #${storage + .limit(1)}" val query = baseQuery ++ opFilterQuery ++ tocFilterQuery ++ orderQuery query.as[StoredActiveContract].headOption } @@ -1152,32 +1042,4 @@ private object DbActiveContractStore { val setParameter: SetParameter[RequestCounter] = implicitly[SetParameter[RequestCounter]] } } - - sealed trait ChangeType { - def name: String - - // lazy val so that `kind` is initialized first in the subclasses - final lazy val toDbPrimitive: String100 = - // The Oracle DB schema allows up to 100 chars; Postgres, H2 map this to an enum - String100.tryCreate(name) - } - - object ChangeType { - case object Activation extends ChangeType { - override val name = "activation" - } - - case object Deactivation extends ChangeType { - override val name = "deactivation" - } - - implicit val setParameterChangeType: SetParameter[ChangeType] = (v, pp) => pp >> v.toDbPrimitive - implicit val getResultChangeType: GetResult[ChangeType] = GetResult(r => - r.nextString() match { - case ChangeType.Activation.name => ChangeType.Activation - case ChangeType.Deactivation.name => ChangeType.Deactivation - case unknown => throw new DbDeserializationException(s"Unknown change type [$unknown]") - } - ) - } } diff --git a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbCommandDeduplicationStore.scala b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbCommandDeduplicationStore.scala index 46b06244ef6c..548d8a54ebaa 100644 --- a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbCommandDeduplicationStore.scala +++ b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbCommandDeduplicationStore.scala @@ -95,53 +95,6 @@ class DbCommandDeduplicationStore( trace_context_acceptance = (case when ? 
= '1' then excluded.trace_context_acceptance else par_command_deduplication.trace_context_acceptance end) where par_command_deduplication.offset_definite_answer < excluded.offset_definite_answer """ - case _: DbStorage.Profile.Oracle => - """ - merge into par_command_deduplication using - (select - ? as change_id_hash, - ? as application_id, - ? as command_id, - ? as act_as, - ? as offset_definite_answer, - ? as publication_time_definite_answer, - ? as submission_id_definite_answer, - ? as trace_context_definite_answer, - ? as offset_acceptance, - ? as publication_time_acceptance, - cast(? as nvarchar2(300)) as submission_id_acceptance, - to_blob(?) as trace_context_acceptance - from dual) excluded - on (par_command_deduplication.change_id_hash = excluded.change_id_hash) - when matched then - update set - offset_definite_answer = excluded.offset_definite_answer, - publication_time_definite_answer = excluded.publication_time_definite_answer, - submission_id_definite_answer = excluded.submission_id_definite_answer, - trace_context_definite_answer = excluded.trace_context_definite_answer, - offset_acceptance = (case when ? = '1' then excluded.offset_acceptance else par_command_deduplication.offset_acceptance end), - publication_time_acceptance = (case when ? = '1' then excluded.publication_time_acceptance else par_command_deduplication.publication_time_acceptance end), - submission_id_acceptance = (case when ? = '1' then excluded.submission_id_acceptance else par_command_deduplication.submission_id_acceptance end), - trace_context_acceptance = (case when ? = '1' then excluded.trace_context_acceptance else par_command_deduplication.trace_context_acceptance end) - where par_command_deduplication.offset_definite_answer < excluded.offset_definite_answer - when not matched then - insert ( - change_id_hash, - application_id, command_id, act_as, - offset_definite_answer, publication_time_definite_answer, - submission_id_definite_answer, trace_context_definite_answer, - offset_acceptance, publication_time_acceptance, - submission_id_acceptance, trace_context_acceptance - ) - values ( - excluded.change_id_hash, - excluded.application_id, excluded.command_id, excluded.act_as, - excluded.offset_definite_answer, excluded.publication_time_definite_answer, - excluded.submission_id_definite_answer, excluded.trace_context_definite_answer, - excluded.offset_acceptance, excluded.publication_time_acceptance, - excluded.submission_id_acceptance, excluded.trace_context_acceptance - ) - """ case _: DbStorage.Profile.H2 => """ merge into par_command_deduplication using @@ -210,7 +163,6 @@ class DbCommandDeduplicationStore( pp >> acceptance.flatMap(_.serializableSubmissionId) pp >> acceptance.map(accept => SerializableTraceContext(accept.traceContext)) - @SuppressWarnings(Array("com.digitalasset.canton.SlickString")) def setAcceptFlag(): Unit = { val acceptedFlag = if (accepted) "1" else "0" pp >> acceptedFlag @@ -347,7 +299,7 @@ class DbCommandDeduplicationStore( pruning_offset = (case when par_command_deduplication_pruning.pruning_offset < excluded.pruning_offset then excluded.pruning_offset else par_command_deduplication_pruning.pruning_offset end), publication_time = (case when par_command_deduplication_pruning.publication_time < excluded.publication_time then excluded.publication_time else par_command_deduplication_pruning.publication_time end) """ - case _: DbStorage.Profile.Oracle | _: DbStorage.Profile.H2 => + case _: DbStorage.Profile.H2 => sqlu""" merge into par_command_deduplication_pruning using dual on 
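Both the Postgres and the H2 branch above only overwrite an existing deduplication row when the incoming definite answer carries a strictly larger offset, and only promote the acceptance columns when the answer is flagged as accepted. The same rule as a pure function over an illustrative entry type:

    object DedupMergeSketch {
      final case class DedupEntry(offsetDefiniteAnswer: Long, offsetAcceptance: Option[Long])

      // Mirrors the guarded `update set ... where offset_definite_answer < excluded...`:
      // stale writes leave the row untouched; acceptance data survives rejections.
      def merge(existing: DedupEntry, incoming: DedupEntry, accepted: Boolean): DedupEntry =
        if (existing.offsetDefiniteAnswer < incoming.offsetDefiniteAnswer)
          DedupEntry(
            incoming.offsetDefiniteAnswer,
            if (accepted) incoming.offsetAcceptance else existing.offsetAcceptance,
          )
        else existing
    }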
(client = 0) diff --git a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbContractStore.scala b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbContractStore.scala index 5772496771b7..0c62389b29ae 100644 --- a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbContractStore.scala +++ b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbContractStore.scala @@ -9,7 +9,6 @@ import cats.syntax.traverse.* import com.daml.nameof.NameOf.functionFullName import com.daml.nonempty.NonEmpty import com.digitalasset.canton.config.CantonRequireTypes.String2066 -import com.digitalasset.canton.config.RequireTypes.PositiveInt import com.digitalasset.canton.config.{BatchAggregatorConfig, CacheConfig, ProcessingTimeout} import com.digitalasset.canton.crypto.Salt import com.digitalasset.canton.data.CantonTimestamp @@ -27,7 +26,7 @@ import com.digitalasset.canton.tracing.{TraceContext, Traced} import com.digitalasset.canton.util.EitherUtil.RichEitherIterable import com.digitalasset.canton.util.FutureInstances.* import com.digitalasset.canton.util.Thereafter.syntax.* -import com.digitalasset.canton.util.{BatchAggregator, ErrorUtil, MonadUtil} +import com.digitalasset.canton.util.{BatchAggregator, ErrorUtil} import com.digitalasset.canton.version.ProtocolVersion import com.digitalasset.canton.{LfPartyId, RequestCounter, checked} import com.github.blemale.scaffeine.AsyncCache @@ -39,9 +38,8 @@ import scala.util.{Failure, Success, Try} class DbContractStore( override protected val storage: DbStorage, - domainIdIndexed: IndexedDomain, + indexedDomain: IndexedDomain, protocolVersion: ProtocolVersion, - maxContractIdSqlInListSize: PositiveInt, cacheConfig: CacheConfig, dbQueryBatcherConfig: BatchAggregatorConfig, insertBatchAggregatorConfig: BatchAggregatorConfig, @@ -56,7 +54,6 @@ class DbContractStore( import storage.converters.* private val profile = storage.profile - private val domainId = domainIdIndexed.index override protected[store] def logger: TracedLogger = super.logger @@ -118,34 +115,31 @@ class DbContractStore( sql"""select contract_id, instance, metadata, ledger_create_time, request_counter, creating_transaction_id, contract_salt from par_contracts""" - private def lookupQueries( + private def lookupQuery( ids: NonEmpty[Seq[LfContractId]] - ): immutable.Iterable[DbAction.ReadOnly[Seq[Option[StoredContract]]]] = { + ): DbAction.ReadOnly[Seq[Option[StoredContract]]] = { import DbStorage.Implicits.BuilderChain.* - DbStorage.toInClauses("contract_id", ids, maxContractIdSqlInListSize).map { - case (idGroup, inClause) => - (contractsBaseQuery ++ sql" where domain_id = $domainId and " ++ inClause) - .as[StoredContract] - .map { storedContracts => - val foundContracts = - storedContracts - .map(storedContract => (storedContract.contractId, storedContract)) - .toMap - idGroup.map(foundContracts.get) - } - } + val inClause = DbStorage.toInClause("contract_id", ids) + (contractsBaseQuery ++ sql" where domain_idx = $indexedDomain and " ++ inClause) + .as[StoredContract] + .map { storedContracts => + val foundContracts = storedContracts + .map(storedContract => (storedContract.contractId, storedContract)) + .toMap + ids.map(foundContracts.get) + } } - private def bulkLookupQueries( + private def bulkLookupQuery( ids: NonEmpty[Seq[LfContractId]] - ): immutable.Iterable[DbAction.ReadOnly[immutable.Iterable[StoredContract]]] = - 
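The rewritten `lookupQuery` above returns one `Option` per requested id, in request order, even though the database returns found rows in arbitrary order: the result set is keyed by contract id and the request list is mapped over that map. The alignment step in isolation, with generic illustrative types:

    object LookupAlignSketch {
      // Pads and reorders a query result to match the request: one Option per
      // requested key, None where no row was found.
      def alignToRequest[K, V](requested: Seq[K], found: Seq[V])(key: V => K): Seq[Option[V]] = {
        val byKey = found.map(v => key(v) -> v).toMap
        requested.map(byKey.get)
      }
    }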
DbStorage.toInClauses_("contract_id", ids, maxContractIdSqlInListSize).map { inClause => - import DbStorage.Implicits.BuilderChain.* - val query = - contractsBaseQuery ++ sql" where domain_id = $domainId and " ++ inClause - query.as[StoredContract] - } + ): DbAction.ReadOnly[immutable.Iterable[StoredContract]] = { + val inClause = DbStorage.toInClause("contract_id", ids) + import DbStorage.Implicits.BuilderChain.* + val query = + contractsBaseQuery ++ sql" where domain_idx = $indexedDomain and " ++ inClause + query.as[StoredContract] + } def lookup( id: LfContractId @@ -159,7 +153,8 @@ class DbContractStore( .from(ids) .map(ids => EitherT(lookupManyUncachedInternal(ids).map(ids.toList.zip(_).traverse { - case (id, contract) => contract.toRight(id) + case (id, contract) => + contract.toRight(id) })) ) .getOrElse(EitherT.rightT(List.empty)) @@ -167,10 +162,7 @@ class DbContractStore( private def lookupManyUncachedInternal( ids: NonEmpty[Seq[LfContractId]] )(implicit traceContext: TraceContext) = - storage.sequentialQueryAndCombine(lookupQueries(ids), functionFullName)( - traceContext, - closeContext, - ) + storage.query(lookupQuery(ids), functionFullName) override def find( filterId: Option[String], @@ -183,7 +175,6 @@ class DbContractStore( // If filter is set returns a conjunctive (`and` prepended) constraint on attribute `name`. // Otherwise empty sql action. - @SuppressWarnings(Array("com.digitalasset.canton.SlickString")) def createConjunctiveFilter(name: String, filter: Option[String]): SQLActionBuilderChain = filter .map { f => @@ -196,7 +187,7 @@ class DbContractStore( .getOrElse(sql" ") val where = sql" where " - val domainConstraint = sql" domain_id = $domainId " + val domainConstraint = sql" domain_idx = $indexedDomain " val pkgFilter = createConjunctiveFilter("package_id", filterPackage) val templateFilter = createConjunctiveFilter("template_id", filterTemplate) val coidFilter = createConjunctiveFilter("contract_id", filterId) @@ -267,7 +258,7 @@ class DbContractStore( val packageId = template.packageId val templateId = checked(String2066.tryCreate(template.qualifiedName.toString)) - pp >> domainId + pp >> indexedDomain pp >> contractId pp >> metadata pp >> ledgerCreateTime.ts @@ -281,16 +272,14 @@ class DbContractStore( // As we assume that the contract data has previously been authenticated against the contract id, // we only update those fields that are not covered by the authentication. - // Note that the instance payload cannot be updated under Oracle as updating a previously set field is problematic for Oracle when it exceeds 32KB - // (https://support.oracle.com/knowledge/Middleware/2773919_1.html). val query = profile match { case _: DbStorage.Profile.Postgres => """insert into par_contracts as c ( - domain_id, contract_id, metadata, + domain_idx, contract_id, metadata, ledger_create_time, request_counter, creating_transaction_id, package_id, template_id, contract_salt, instance) values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?) - on conflict(domain_id, contract_id) do update + on conflict(domain_idx, contract_id) do update set request_counter = excluded.request_counter, creating_transaction_id = excluded.creating_transaction_id @@ -298,7 +287,7 @@ class DbContractStore( (c.creating_transaction_id is not null and excluded.creating_transaction_id is not null and c.request_counter < excluded.request_counter)""" case _: DbStorage.Profile.H2 => """merge into par_contracts c - using (select cast(? as integer) domain_id, + using (select cast(? as integer) domain_idx, cast(? 
as varchar(300)) contract_id, cast(? as binary large object) metadata, cast(? as varchar(300)) ledger_create_time, @@ -309,7 +298,7 @@ class DbContractStore( cast(? as binary large object) contract_salt, cast(? as binary large object) instance from dual) as input - on (c.domain_id = input.domain_id and c.contract_id = input.contract_id) + on (c.domain_idx = input.domain_idx and c.contract_id = input.contract_id) when matched and ( (c.creating_transaction_id is null and (input.creating_transaction_id is not null or c.request_counter < input.request_counter)) or (c.creating_transaction_id is not null and input.creating_transaction_id is not null and c.request_counter < input.request_counter) @@ -318,34 +307,9 @@ class DbContractStore( request_counter = input.request_counter, creating_transaction_id = input.creating_transaction_id when not matched then - insert (domain_id, contract_id, instance, metadata, ledger_create_time, - request_counter, creating_transaction_id, package_id, template_id, contract_salt) - values (input.domain_id, input.contract_id, input.instance, input.metadata, input.ledger_create_time, - input.request_counter, input.creating_transaction_id, input.package_id, input.template_id, input.contract_salt)""" - case _: DbStorage.Profile.Oracle => - """merge into par_contracts c - using (select ? domain_id, - ? contract_id, - ? metadata, - ? ledger_create_time, - ? request_counter, - ? creating_transaction_id, - ? package_id, - ? template_id, - ? contract_salt - from dual) input - on (c.domain_id = input.domain_id and c.contract_id = input.contract_id) - when matched then - update set - request_counter = input.request_counter, - creating_transaction_id = input.creating_transaction_id - where - (c.creating_transaction_id is null and (input.creating_transaction_id is not null or c.request_counter < input.request_counter)) or - (c.creating_transaction_id is not null and input.creating_transaction_id is not null and c.request_counter < input.request_counter) - when not matched then - insert (domain_id, contract_id, instance, metadata, ledger_create_time, + insert (domain_idx, contract_id, instance, metadata, ledger_create_time, request_counter, creating_transaction_id, package_id, template_id, contract_salt) - values (input.domain_id, input.contract_id, ?, input.metadata, input.ledger_create_time, + values (input.domain_idx, input.contract_id, input.instance, input.metadata, input.ledger_create_time, input.request_counter, input.creating_transaction_id, input.package_id, input.template_id, input.contract_salt)""" } DbStorage.bulkOperation(query, items.map(_.value), profile)(setParams) @@ -365,8 +329,8 @@ class DbContractStore( ErrorUtil.internalErrorTry(new IllegalStateException(message)) override protected type CheckData = StoredContract - // the primary key for the contracts table is (domainId, contractId) - // since the store is handling insertion, deletion and lookup for a specific domainId, the identifier is + // the primary key for the contracts table is: domain (index), contractId + // since the store is handling insertion, deletion and lookup for a specific domain, the identifier is // sufficient to be the contractId override protected type ItemIdentifier = LfContractId override protected def itemIdentifier(item: StoredContract): ItemIdentifier = item.contractId @@ -374,8 +338,8 @@ class DbContractStore( override protected def checkQuery(itemsToCheck: NonEmpty[Seq[ItemIdentifier]])(implicit batchTraceContext: TraceContext - ): 
immutable.Iterable[DbAction.ReadOnly[immutable.Iterable[CheckData]]] = - bulkLookupQueries(itemsToCheck) + ): DbAction.ReadOnly[immutable.Iterable[CheckData]] = + bulkLookupQuery(itemsToCheck) override protected def analyzeFoundData( item: StoredContract, @@ -439,7 +403,7 @@ class DbContractStore( .flatMap { _ => EitherT.right[UnknownContract]( storage.update_( - sqlu"delete from par_contracts where contract_id = $id and domain_id = $domainId", + sqlu"delete from par_contracts where contract_id = $id and domain_idx = $indexedDomain", functionFullName, ) ) @@ -451,37 +415,25 @@ class DbContractStore( contractIds: Iterable[LfContractId] )(implicit traceContext: TraceContext): Future[Unit] = { import DbStorage.Implicits.BuilderChain.* - MonadUtil - .batchedSequentialTraverse_( - parallelism = PositiveInt.two * storage.threadsAvailableForWriting, - chunkSize = maxContractIdSqlInListSize, - )(contractIds.toSeq) { cids => - val inClause = sql"contract_id in (" ++ - cids - .map(value => sql"$value") - .intercalate(sql", ") ++ sql")" - storage.update_( - (sql"""delete from par_contracts where domain_id = $domainId and """ ++ inClause).asUpdate, - functionFullName, - ) - } - .thereafter(_ => cache.synchronous().invalidateAll(contractIds)) + NonEmpty.from(contractIds.toSeq) match { + case None => Future.unit + case Some(cids) => + val inClause = DbStorage.toInClause("contract_id", cids) + storage + .update_( + (sql"""delete from par_contracts where domain_idx = $indexedDomain and """ ++ inClause).asUpdate, + functionFullName, + ) + .thereafter(_ => cache.synchronous().invalidateAll(contractIds)) + } } override def deleteDivulged( upTo: RequestCounter )(implicit traceContext: TraceContext): Future[Unit] = { - val query = profile match { - case _: DbStorage.Profile.Postgres | _: DbStorage.Profile.H2 => - sqlu"""delete from par_contracts - where domain_id = $domainId and request_counter <= $upTo and creating_transaction_id is null""" - case _: DbStorage.Profile.Oracle => - // Here we use exactly the same expression as in idx_contracts_request_counter - // to make sure the index is used. 
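`deleteIgnoringUnknown` above now short-circuits on an empty id set and otherwise issues a single IN-clause delete, invalidating the contract cache afterwards; the old `MonadUtil.batchedSequentialTraverse_` chunking is gone. The control flow, with stand-ins for the storage and cache calls (neither name is the Canton API):

    import scala.concurrent.{ExecutionContext, Future}

    object DeleteSketch {
      // `runDelete` stands in for storage.update_ over the IN clause,
      // `invalidate` for the scaffeine cache invalidation.
      def deleteIgnoringUnknown(ids: Seq[String])(
          runDelete: Seq[String] => Future[Unit],
          invalidate: Seq[String] => Unit,
      )(implicit ec: ExecutionContext): Future[Unit] =
        if (ids.isEmpty) Future.unit
        else runDelete(ids).andThen { case _ => invalidate(ids) }
    }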
- sqlu"""delete from par_contracts - where (case when creating_transaction_id is null then domain_id end) = $domainId and - (case when creating_transaction_id is null then request_counter end) <= $upTo""" - } + val query = + sqlu"""delete from par_contracts + where domain_idx = $indexedDomain and request_counter <= $upTo and creating_transaction_id is null""" storage.update_(query, functionFullName) } diff --git a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbDamlPackageStore.scala b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbDamlPackageStore.scala index b861f10ec0ba..1f9dfdcb871e 100644 --- a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbDamlPackageStore.scala +++ b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbDamlPackageStore.scala @@ -14,7 +14,6 @@ import com.digitalasset.canton.config.CantonRequireTypes.LengthLimitedString.{ } import com.digitalasset.canton.config.CantonRequireTypes.{LengthLimitedString, String255} import com.digitalasset.canton.config.ProcessingTimeout -import com.digitalasset.canton.config.RequireTypes.PositiveNumeric import com.digitalasset.canton.crypto.Hash import com.digitalasset.canton.data.CantonTimestamp import com.digitalasset.canton.lifecycle.{FutureUnlessShutdown, Lifecycle} @@ -35,7 +34,6 @@ import com.digitalasset.daml.lf.data.Ref.PackageId import scala.concurrent.{ExecutionContext, Future} class DbDamlPackageStore( - maxContractIdSqlInListSize: PositiveNumeric[Int], override protected val storage: DbStorage, override protected val timeouts: ProcessingTimeout, futureSupervisor: FutureSupervisor, @@ -91,23 +89,6 @@ class DbDamlPackageStore( | when matched and ? then | update set | source_description = excluded.source_description""".stripMargin - case _: DbStorage.Profile.Oracle => - """merge /*+ INDEX ( par_daml_packages (package_id) ) */ - | into par_daml_packages - | using ( - | select - | ? as package_id, - | ? as source_description - | from dual - | ) excluded - | on (par_daml_packages.package_id = excluded.package_id) - | when not matched then - | insert (package_id, data, source_description, uploaded_at, package_size) - | values (excluded.package_id, ?, excluded.source_description, ?, ?) - | when matched then - | update set - | source_description = excluded.source_description - | where ? = 1""".stripMargin // Strangely (or not), it looks like Oracle does not have a Boolean type... case _: DbStorage.Profile.Postgres => """insert | into par_daml_packages (package_id, source_description, data, uploaded_at, package_size) @@ -129,26 +110,10 @@ class DbDamlPackageStore( } val insertToDarPackages = { - val sql = storage.profile match { - case _: DbStorage.Profile.Oracle => - """merge /*+ INDEX ( dar_packages (dar_hash_hex package_id) ) */ - | into par_dar_packages - | using ( - | select - | ? as dar_hash_hex, - | ? package_id - | from dual - | ) excluded - | on (dar_packages.dar_hash_hex = excluded.dar_hash_hex and dar_packages.package_id = excluded.package_id) - | when not matched then - | insert (dar_hash_hex, package_id) - | values (excluded.dar_hash_hex, excluded.package_id)""".stripMargin - case _ => - """insert into par_dar_packages (dar_hash_hex, package_id) + val sql = """insert into par_dar_packages (dar_hash_hex, package_id) | values (?, ?) 
| on conflict do | nothing""".stripMargin - } DbStorage.bulkOperation_(sql, pkgs, storage.profile) { pp => pkg => pp >> (dar.hash.toLengthLimitedHexString: LengthLimitedString) @@ -251,29 +216,21 @@ class DbDamlPackageStore( val limitClause = limit.map(l => sql"#${storage.limit(l)}").getOrElse(sql"") - val queryActions = DbStorage - .toInClauses_( - field = "package_id", - values = nonEmptyPackages, - maxContractIdSqlInListSize, - ) - .map { inStatement => - (sql""" - select package_id - from par_dar_packages remove_candidates - where - """ ++ inStatement ++ - sql""" - and not exists ( - select package_id - from par_dar_packages other_dars - where - remove_candidates.package_id = other_dars.package_id - and dar_hash_hex != ${darHash.toLengthLimitedHexString} - )""" ++ limitClause).as[LfPackageId] - } + val query = { + val inClause = DbStorage.toInClause(field = "package_id", values = nonEmptyPackages) + (sql"""select package_id + from par_dar_packages remove_candidates + where """ ++ inClause ++ + sql""" and not exists ( + select package_id + from par_dar_packages other_dars + where + remove_candidates.package_id = other_dars.package_id + and dar_hash_hex != ${darHash.toLengthLimitedHexString} + )""" ++ limitClause).as[LfPackageId] + } - storage.sequentialQueryAndCombine(queryActions, functionFullName).map(_.toSeq) + storage.query(query, functionFullName).map(_.toSeq) } override def anyPackagePreventsDarRemoval(packages: Seq[PackageId], removeDar: DarDescriptor)( @@ -327,12 +284,6 @@ class DbDamlPackageStore( case _: DbStorage.Profile.Postgres => sqlu"""insert into par_dars (hash_hex, hash, data, name) values (${dar.hash.toLengthLimitedHexString},${dar.hash}, ${dar.data}, ${dar.name}) on conflict (hash_hex) do nothing""" - case _: DbStorage.Profile.Oracle => - sqlu"""insert - /*+ IGNORE_ROW_ON_DUPKEY_INDEX ( par_dars ( hash_hex ) ) */ - into par_dars (hash_hex, hash, data, name) - values (${dar.hash.toLengthLimitedHexString}, ${dar.hash}, ${dar.data}, ${dar.name}) - """ } override def removeDar( diff --git a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbDomainConnectionConfigStore.scala b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbDomainConnectionConfigStore.scala index 8fe1898825fa..6389154326d2 100644 --- a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbDomainConnectionConfigStore.scala +++ b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbDomainConnectionConfigStore.scala @@ -97,18 +97,11 @@ class DbDomainConnectionConfigStore private[store] ( val domainAlias = config.domain - val insertAction: DbAction.WriteOnly[Int] = storage.profile match { - case _: DbStorage.Profile.Oracle => - sqlu"""insert - /*+ IGNORE_ROW_ON_DUPKEY_INDEX ( PAR_DOMAIN_CONNECTION_CONFIGS ( domain_alias ) ) */ - into par_domain_connection_configs(domain_alias, config, status) - values ($domainAlias, $config, $status)""" - case _ => - sqlu"""insert - into par_domain_connection_configs(domain_alias, config, status) - values ($domainAlias, $config, $status) - on conflict do nothing""" - } + val insertAction: DbAction.WriteOnly[Int] = + sqlu"""insert + into par_domain_connection_configs(domain_alias, config, status) + values ($domainAlias, $config, $status) + on conflict do nothing""" for { nrRows <- EitherT.right(storage.update(insertAction, functionFullName)) diff --git 
a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbDomainParameterStore.scala b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbDomainParameterStore.scala index 5a229368bc21..8b5a63e8dc3c 100644 --- a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbDomainParameterStore.scala +++ b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbDomainParameterStore.scala @@ -35,17 +35,10 @@ class DbDomainParameterStore( )(implicit traceContext: TraceContext): Future[Unit] = { // We do not check equality of the parameters on the serialized format in the DB query because serialization may // be different even though the parameters are the same - val query = storage.profile match { - case _: DbStorage.Profile.Oracle => - sqlu"""insert - /*+ IGNORE_ROW_ON_DUPKEY_INDEX ( par_static_domain_parameters ( domain_id ) ) */ - into par_static_domain_parameters(domain_id, params) - values ($domainId, $newParameters)""" - case _ => - sqlu"""insert into par_static_domain_parameters(domain_id, params) - values ($domainId, $newParameters) - on conflict do nothing""" - } + val query = + sqlu"""insert into par_static_domain_parameters(domain_id, params) + values ($domainId, $newParameters) + on conflict do nothing""" storage.update(query, functionFullName).flatMap { rowCount => if (rowCount == 1) Future.unit diff --git a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbInFlightSubmissionStore.scala b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbInFlightSubmissionStore.scala index f79a68b3a606..d54d44e85c41 100644 --- a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbInFlightSubmissionStore.scala +++ b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbInFlightSubmissionStore.scala @@ -7,7 +7,6 @@ import cats.data.{EitherT, OptionT} import cats.syntax.alternative.* import com.daml.nameof.NameOf.functionFullName import com.daml.nonempty.NonEmpty -import com.digitalasset.canton.config.RequireTypes.PositiveNumeric import com.digitalasset.canton.config.{BatchAggregatorConfig, ProcessingTimeout} import com.digitalasset.canton.data.CantonTimestamp import com.digitalasset.canton.discard.Implicits.DiscardOps @@ -33,8 +32,8 @@ import com.digitalasset.canton.topology.DomainId import com.digitalasset.canton.tracing.{SerializableTraceContext, TraceContext, Traced} import com.digitalasset.canton.util.ShowUtil.* import com.digitalasset.canton.util.TryUtil.ForFailedOps +import com.digitalasset.canton.util.* import com.digitalasset.canton.util.retry.NoExceptionRetryPolicy -import com.digitalasset.canton.util.{BatchAggregator, ErrorUtil, OptionUtil, SingleUseCell, retry} import com.digitalasset.canton.version.ReleaseProtocolVersion import slick.jdbc.{PositionedParameters, SetParameter} @@ -45,7 +44,6 @@ import scala.util.{Failure, Success, Try} class DbInFlightSubmissionStore( override protected val storage: DbStorage, - maxItemsInSqlInClause: PositiveNumeric[Int], registerBatchAggregatorConfig: BatchAggregatorConfig, releaseProtocolVersion: ReleaseProtocolVersion, override protected val timeouts: ProcessingTimeout, @@ -71,8 +69,8 @@ class DbInFlightSubmissionStore( )(implicit traceContext: TraceContext): Future[Seq[InFlightSubmission[UnsequencedSubmission]]] = { val query = 
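The `setParameters` hunk above relies on `on conflict do nothing` reporting zero affected rows when parameters are already stored, so the caller can distinguish a fresh insert from an idempotent retry via the row count. A sketch of that insert-once pattern; `insertIfAbsent`, `readBack`, and the failure branch are illustrative assumptions, since the hunk is cut off before the non-inserted case:

    import scala.concurrent.{ExecutionContext, Future}

    object SetOnceSketch {
      def setOnce[A](value: A)(
          insertIfAbsent: A => Future[Int], // returns the affected row count
          readBack: () => Future[Option[A]],
      )(implicit ec: ExecutionContext): Future[Unit] =
        insertIfAbsent(value).flatMap {
          case 1 => Future.unit // freshly inserted
          case _ =>
            readBack().flatMap {
              case Some(existing) if existing == value => Future.unit // idempotent retry
              case other =>
                Future.failed(new IllegalStateException(s"parameters already set: $other"))
            }
        }
    }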
sql""" - select change_id_hash, submission_id, submission_domain, message_id, root_hash_hex, sequencing_timeout, tracking_data, trace_context - from par_in_flight_submission where submission_domain = $domainId and sequencing_timeout <= $observedSequencingTime + select change_id_hash, submission_id, submission_domain_id, message_id, root_hash_hex, sequencing_timeout, tracking_data, trace_context + from par_in_flight_submission where submission_domain_id = $domainId and sequencing_timeout <= $observedSequencingTime """.as[InFlightSubmission[UnsequencedSubmission]] storage.query(query, "lookup unsequenced in-flight submission") } @@ -83,8 +81,8 @@ class DbInFlightSubmissionStore( )(implicit traceContext: TraceContext): Future[Seq[InFlightSubmission[SequencedSubmission]]] = { val query = sql""" - select change_id_hash, submission_id, submission_domain, message_id, root_hash_hex, sequencer_counter, sequencing_time, trace_context - from par_in_flight_submission where submission_domain = $domainId and sequencing_time <= $sequencingTimeInclusive + select change_id_hash, submission_id, submission_domain_id, message_id, root_hash_hex, sequencer_counter, sequencing_time, trace_context + from par_in_flight_submission where submission_domain_id = $domainId and sequencing_time <= $sequencingTimeInclusive """.as[InFlightSubmission[SequencedSubmission]] storage.query(query, "lookup sequenced in-flight submission") } @@ -94,8 +92,8 @@ class DbInFlightSubmissionStore( ): Future[Option[InFlightSubmission[SubmissionSequencingInfo]]] = { val query = sql""" - select change_id_hash, submission_id, submission_domain, message_id, root_hash_hex, sequencing_timeout, sequencer_counter, sequencing_time, tracking_data, trace_context - from par_in_flight_submission where submission_domain = $domainId and message_id = $messageId + select change_id_hash, submission_id, submission_domain_id, message_id, root_hash_hex, sequencing_timeout, sequencer_counter, sequencing_time, tracking_data, trace_context + from par_in_flight_submission where submission_domain_id = $domainId and message_id = $messageId #${storage.limit(1)} """.as[InFlightSubmission[SubmissionSequencingInfo]].headOption storage.query(query, "lookup in-flight submission by message id") @@ -107,7 +105,7 @@ class DbInFlightSubmissionStore( val query = sql""" select min(sequencing_time), min(sequencing_timeout) - from par_in_flight_submission where submission_domain = $domainId + from par_in_flight_submission where submission_domain_id = $domainId """.as[(Option[CantonTimestamp], Option[CantonTimestamp])].headOption storage .query(query, "lookup earliest in-flight submission") @@ -135,7 +133,6 @@ class DbInFlightSubmissionStore( val processor = new DbInFlightSubmissionStore.RegisterProcessor( storage, - maxItemsInSqlInClause, releaseProtocolVersion, logger, ) @@ -149,7 +146,7 @@ class DbInFlightSubmissionStore( val updateQuery = sqlu"""update par_in_flight_submission set root_hash_hex = $rootHash - where submission_domain = ${submission.submissionDomain} and change_id_hash = ${submission.changeIdHash} + where submission_domain_id = ${submission.submissionDomain} and change_id_hash = ${submission.changeIdHash} and sequencing_timeout is not null and root_hash_hex is null """ @@ -163,7 +160,7 @@ class DbInFlightSubmissionStore( val updateQuery = """update par_in_flight_submission set sequencing_timeout = null, tracking_data = null, sequencer_counter = ?, sequencing_time = ? - where submission_domain = ? and message_id = ? 
and sequencing_timeout is not null + where submission_domain_id = ? and message_id = ? and sequencing_timeout is not null """ val batchUpdate = DbStorage.bulkOperation_(updateQuery, submissions.toSeq, storage.profile) { pp => submission => @@ -245,7 +242,7 @@ class DbInFlightSubmissionStore( val (byId, bySequencing) = submissions.toList.map(_.toEither).separate val byIdQuery = - "delete from par_in_flight_submission where submission_domain = ? and message_id = ?" + "delete from par_in_flight_submission where submission_domain_id = ? and message_id = ?" val batchById = DbStorage.bulkOperation_(byIdQuery, byId, storage.profile) { pp => submission => val InFlightByMessageId(domainId, messageId) = submission pp >> domainId @@ -253,7 +250,7 @@ class DbInFlightSubmissionStore( } val bySequencingQuery = - "delete from par_in_flight_submission where submission_domain = ? and sequencing_time = ? and sequencer_counter = ?" + "delete from par_in_flight_submission where submission_domain_id = ? and sequencing_time = ? and sequencer_counter = ?" val batchBySequencing = DbStorage.bulkOperation_(bySequencingQuery, bySequencing, storage.profile) { pp => submission => @@ -284,7 +281,7 @@ class DbInFlightSubmissionStore( sqlu""" update par_in_flight_submission set sequencing_timeout = ${newSequencingInfo.timeout}, tracking_data = ${newSequencingInfo.trackingData} - where change_id_hash = $changeIdHash and submission_domain = $submissionDomain and message_id = $messageId + where change_id_hash = $changeIdHash and submission_domain_id = $submissionDomain and message_id = $messageId and sequencing_timeout >= ${newSequencingInfo.timeout} """ // No need for synchronous commit here because this method is called only from the submission phase @@ -343,7 +340,7 @@ class DbInFlightSubmissionStore( changeIdHash: ChangeIdHash ): DbAction.ReadTransactional[Option[InFlightSubmission[SubmissionSequencingInfo]]] = sql""" - select change_id_hash, submission_id, submission_domain, message_id, root_hash_hex, sequencing_timeout, sequencer_counter, sequencing_time, tracking_data, trace_context + select change_id_hash, submission_id, submission_domain_id, message_id, root_hash_hex, sequencing_timeout, sequencer_counter, sequencing_time, tracking_data, trace_context from par_in_flight_submission where change_id_hash = $changeIdHash """.as[InFlightSubmission[SubmissionSequencingInfo]].headOption } @@ -352,7 +349,6 @@ object DbInFlightSubmissionStore { class RegisterProcessor( override protected val storage: DbStorage, - maxItemsInSqlInClause: PositiveNumeric[Int], releaseProtocolVersion: ReleaseProtocolVersion, override val logger: TracedLogger, )( @@ -449,43 +445,17 @@ object DbInFlightSubmissionStore { )(implicit batchTraceContext: TraceContext ): DBIOAction[Array[Int], NoStream, Effect.All] = { - val insertQuery = storage.profile match { - case _: DbStorage.Profile.H2 | _: DbStorage.Profile.Postgres => - """insert into par_in_flight_submission( - change_id_hash, submission_id, - submission_domain, message_id, root_hash_hex, - sequencing_timeout, sequencer_counter, sequencing_time, tracking_data, - trace_context) - values (?, ?, - ?, ?, ?, - ?, NULL, NULL, ?, - ?) - on conflict do nothing""" - case _: DbStorage.Profile.Oracle => - """merge into par_in_flight_submission - using ( - select - ? change_id_hash, ? submission_id, - ? submission_domain, ? message_id, ? root_hash_hex, - ? sequencing_timeout, ? tracking_data, - ? 
trace_context - from dual - ) to_insert - on (in_flight_submission.change_id_hash = to_insert.change_id_hash) - when not matched then - insert ( - change_id_hash, submission_id, - submission_domain, message_id, root_hash_hex, - sequencing_timeout, sequencer_counter, sequencing_time, tracking_data, - trace_context - ) values ( - to_insert.change_id_hash, to_insert.submission_id, - to_insert.submission_domain, to_insert.message_id, to_insert.root_hash_hex, - to_insert.sequencing_timeout, NULL, NULL, to_insert.tracking_data, - to_insert.trace_context - ) - """ - } + val insertQuery = + """insert into par_in_flight_submission( + change_id_hash, submission_id, + submission_domain_id, message_id, root_hash_hex, + sequencing_timeout, sequencer_counter, sequencing_time, tracking_data, + trace_context) + values (?, ?, + ?, ?, ?, + ?, NULL, NULL, ?, + ?) + on conflict do nothing""" implicit val loggingContext: ErrorLoggingContext = ErrorLoggingContext.fromTracedLogger(logger) val bulkQuery = DbStorage.bulkOperation( @@ -527,16 +497,16 @@ object DbInFlightSubmissionStore { /** A list of queries for the items that we want to check for */ override protected def checkQuery(submissionsToCheck: NonEmpty[Seq[ChangeIdHash]])(implicit batchTraceContext: TraceContext - ): immutable.Iterable[ReadOnly[immutable.Iterable[CheckData]]] = - DbStorage.toInClauses_("change_id_hash", submissionsToCheck, maxItemsInSqlInClause).map { - inClause => - import DbStorage.Implicits.BuilderChain.* - val query = sql""" - select change_id_hash, submission_id, submission_domain, message_id, root_hash_hex, sequencing_timeout, sequencer_counter, sequencing_time, tracking_data, trace_context - from par_in_flight_submission where """ ++ inClause - query.as[InFlightSubmission[SubmissionSequencingInfo]] - } - + ): ReadOnly[immutable.Iterable[CheckData]] = { + import DbStorage.Implicits.BuilderChain.* + val query = sql""" + select change_id_hash, submission_id, submission_domain_id, message_id, root_hash_hex, sequencing_timeout, sequencer_counter, sequencing_time, tracking_data, trace_context + from par_in_flight_submission where """ ++ DbStorage.toInClause( + "change_id_hash", + submissionsToCheck, + ) + query.as[InFlightSubmission[SubmissionSequencingInfo]] + } override protected def analyzeFoundData( submission: InFlightSubmission[UnsequencedSubmission], foundData: Option[CheckData], diff --git a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbParticipantPruningSchedulerStore.scala b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbParticipantPruningSchedulerStore.scala index 75a1d8c56289..28e871b17323 100644 --- a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbParticipantPruningSchedulerStore.scala +++ b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbParticipantPruningSchedulerStore.scala @@ -50,23 +50,6 @@ final class DbParticipantPruningSchedulerStore( sqlu"""merge into par_pruning_schedules (lock, cron, max_duration, retention, prune_internally_only) values ($singleRowLockValue, ${schedule.cron}, ${schedule.maxDuration}, ${schedule.retention}, ${participantSchedule.pruneInternallyOnly}) """ - case _: Profile.Oracle => - sqlu"""merge into par_pruning_schedules pps - using ( - select ${schedule.cron} cron, - ${schedule.maxDuration} max_duration, - ${schedule.retention} retention, - ${participantSchedule.pruneInternallyOnly} prune_internally_only 
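The `RegisterProcessor` changes above keep registration idempotent: a bulk `insert ... on conflict do nothing`, then one IN-clause `checkQuery` over the change id hashes to see which submissions already existed. A sketch of the read-back classification over illustrative types (the exact analysis lives in `analyzeFoundData`; this only shows the shape):

    object RegisterCheckSketch {
      final case class Row(changeIdHash: String, payload: String)

      // After the conflict-ignoring insert, decide per submission whether the
      // stored row is the one we attempted to write (fresh insert or identical
      // retry) or a conflicting pre-existing registration.
      def classify(requested: Seq[Row], stored: Seq[Row]): Map[String, Boolean] = {
        val byHash = stored.map(r => r.changeIdHash -> r).toMap
        requested.map(r => r.changeIdHash -> byHash.get(r.changeIdHash).contains(r)).toMap
      }
    }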
- from dual - ) excluded - on (pps."LOCK" = 'X') - when matched then - update set pps.cron = excluded.cron, max_duration = excluded.max_duration, - retention = excluded.retention, prune_internally_only = excluded.prune_internally_only - when not matched then - insert (cron, max_duration, retention, prune_internally_only) - values (excluded.cron, excluded.max_duration, excluded.retention, excluded.prune_internally_only) - """ }, functionFullName, ) diff --git a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbParticipantPruningStore.scala b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbParticipantPruningStore.scala index 5d14fccd8068..21dfcd00e167 100644 --- a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbParticipantPruningStore.scala +++ b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbParticipantPruningStore.scala @@ -10,7 +10,7 @@ import com.digitalasset.canton.logging.NamedLoggerFactory import com.digitalasset.canton.participant.GlobalOffset import com.digitalasset.canton.participant.store.ParticipantPruningStore import com.digitalasset.canton.participant.store.ParticipantPruningStore.ParticipantPruningStatus -import com.digitalasset.canton.resource.DbStorage.Profile.{H2, Oracle, Postgres} +import com.digitalasset.canton.resource.DbStorage.Profile.{H2, Postgres} import com.digitalasset.canton.resource.{DbStorage, DbStore} import com.digitalasset.canton.tracing.TraceContext import slick.jdbc.GetResult @@ -45,14 +45,6 @@ class DbParticipantPruningStore( when not matched then insert (name, started_up_to_inclusive, completed_up_to_inclusive) values ($name, $upToInclusive, null)""" - case _: Oracle => - sqlu"""merge into par_pruning_operation using dual on (name = $name) - when matched then - update set started_up_to_inclusive = $upToInclusive - where started_up_to_inclusive is null or started_up_to_inclusive < $upToInclusive - when not matched then - insert (name, started_up_to_inclusive, completed_up_to_inclusive) - values ($name, $upToInclusive, null)""" } storage.update_(upsertQuery, functionFullName) diff --git a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbParticipantSettingsStore.scala b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbParticipantSettingsStore.scala index bde11d3c2029..99a43cc1aaee 100644 --- a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbParticipantSettingsStore.scala +++ b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbParticipantSettingsStore.scala @@ -79,18 +79,10 @@ class DbParticipantSettingsStore( maxSubmissionRate, maxSubmissionBurstFactor, ) = ResourceLimits.default - val query = storage.profile match { - case _: DbStorage.Profile.Postgres | _: DbStorage.Profile.H2 => - sqlu"""insert into par_settings(client, max_infight_validation_requests, max_submission_rate, max_submission_burst_factor) - values($client, $maxInflightValidationRequests, $maxSubmissionRate, $maxSubmissionBurstFactor) - on conflict do nothing""" - - case _: DbStorage.Profile.Oracle => - sqlu"""merge into par_settings using dual on (1 = 1) - when not matched then - insert(client, max_infight_validation_requests, max_submission_rate, max_submission_burst_factor) - values($client, $maxInflightValidationRequests, 
$maxSubmissionRate, $maxSubmissionBurstFactor)""" - } + val query = + sqlu"""insert into par_settings(client, max_infight_validation_requests, max_submission_rate, max_submission_burst_factor) + values($client, $maxInflightValidationRequests, $maxSubmissionRate, $maxSubmissionBurstFactor) + on conflict do nothing""" storage.update_(query, functionFullName) case _ => Future.unit @@ -114,7 +106,7 @@ class DbParticipantSettingsStore( sqlu"""insert into par_settings(max_infight_validation_requests, max_submission_rate, max_submission_burst_factor, client) values($maxInflightValidationRequests, $maxSubmissionRate, $maxSubmissionBurstFactor, $client) on conflict(client) do update set max_infight_validation_requests = $maxInflightValidationRequests, max_submission_rate = $maxSubmissionRate, max_submission_burst_factor = $maxSubmissionBurstFactor""" - case _: DbStorage.Profile.Oracle | _: DbStorage.Profile.H2 => + case _: DbStorage.Profile.H2 => sqlu"""merge into par_settings using dual on (1 = 1) when matched then update set max_infight_validation_requests = $maxInflightValidationRequests, max_submission_rate = $maxSubmissionRate, max_submission_burst_factor = $maxSubmissionBurstFactor @@ -144,12 +136,6 @@ class DbParticipantSettingsStore( update set #$columnName = $newValue when not matched then insert (#$columnName, client) values ($newValue, $client)""" - case _: DbStorage.Profile.Oracle => - sqlu"""merge into par_settings using dual on (1 = 1) - when matched then - update set #$columnName = $newValue where #$columnName is null - when not matched then - insert (#$columnName, client) values ($newValue, $client)""" } runQueryAndRefreshCache(query, functionFullName) } diff --git a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbReassignmentStore.scala b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbReassignmentStore.scala index a3101919f104..5f0fe4ec41e8 100644 --- a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbReassignmentStore.scala +++ b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbReassignmentStore.scala @@ -25,7 +25,10 @@ import com.digitalasset.canton.participant.protocol.reassignment.{ } import com.digitalasset.canton.participant.store.ReassignmentStore import com.digitalasset.canton.participant.store.ReassignmentStore.* -import com.digitalasset.canton.participant.store.db.DbReassignmentStore.RawDeliveredUnassignmentResult +import com.digitalasset.canton.participant.store.db.DbReassignmentStore.{ + DbReassignmentId, + RawDeliveredUnassignmentResult, +} import com.digitalasset.canton.participant.util.TimeOfChange import com.digitalasset.canton.protocol.messages.* import com.digitalasset.canton.protocol.{ @@ -36,28 +39,32 @@ import com.digitalasset.canton.protocol.{ TargetDomainId, TransactionId, } -import com.digitalasset.canton.resource.DbStorage.{DbAction, Profile} +import com.digitalasset.canton.resource.DbStorage.DbAction import com.digitalasset.canton.resource.{DbStorage, DbStore} import com.digitalasset.canton.sequencing.protocol.{NoOpeningErrors, SequencedEvent, SignedContent} import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult import com.digitalasset.canton.store.db.DbDeserializationException +import com.digitalasset.canton.store.{IndexedDomain, IndexedStringStore} import com.digitalasset.canton.topology.DomainId import com.digitalasset.canton.tracing.TraceContext 
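The constructor change below replaces the raw `TargetDomainId` with an `IndexedDomain` plus an `IndexedStringStore`, so reassignment rows are keyed by compact integer domain indexes (`target_domain_idx`, `source_domain_idx`) rather than stringified domain ids; the `indexedDomainF` helpers that follow resolve both directions and fail loudly on an unknown index. A stand-in sketch of that indirection (not the Canton `IndexedStringStore` API):

    import scala.concurrent.Future

    object DomainIndexSketch {
      final class DomainIndex(assigned: Map[String, Int]) {
        private val reverse: Map[Int, String] = assigned.map(_.swap)

        def indexOf(domainId: String): Future[Int] =
          assigned.get(domainId) match {
            case Some(idx) => Future.successful(idx)
            case None => Future.failed(new RuntimeException(s"no index assigned to $domainId"))
          }

        // Mirrors the lookup-by-index helper below: an index that resolves to
        // no domain id indicates a corrupt store, so the future fails.
        def domainOf(idx: Int): Future[String] =
          reverse.get(idx) match {
            case Some(id) => Future.successful(id)
            case None =>
              Future.failed(new RuntimeException(s"Unable to find domain ID for index $idx"))
          }
      }
    }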
import com.digitalasset.canton.util.{Checked, CheckedT, ErrorUtil, MonadUtil, SimpleExecutionQueue} import com.digitalasset.canton.version.ProtocolVersion import com.digitalasset.canton.version.Reassignment.{SourceProtocolVersion, TargetProtocolVersion} import com.digitalasset.canton.{LfPartyId, RequestCounter} +import com.google.common.collect.HashBiMap import com.google.protobuf.ByteString import slick.jdbc.TransactionIsolation.Serializable import slick.jdbc.canton.SQLActionBuilder import slick.jdbc.{GetResult, PositionedParameters, SetParameter} import scala.concurrent.{ExecutionContext, Future} +import scala.language.implicitConversions import scala.util.control.NonFatal class DbReassignmentStore( override protected val storage: DbStorage, - domain: TargetDomainId, + indexedTargetDomain: IndexedDomain, // TODO(#21325) - Use target wrapper to enforce an indexed target domain + indexedStringStore: IndexedStringStore, targetDomainProtocolVersion: TargetProtocolVersion, cryptoApi: CryptoPureApi, futureSupervisor: FutureSupervisor, @@ -72,6 +79,44 @@ class DbReassignmentStore( import storage.api.* import storage.converters.* + private implicit def forgetRefinement(id: ReassignmentDomainId): DomainId = id.unwrap + + private def indexedDomainF(domainId: DomainId): Future[IndexedDomain] = + IndexedDomain.indexed(indexedStringStore)(domainId) + + private def indexedDomainET[E](domainId: DomainId): EitherT[Future, E, IndexedDomain] = + EitherT.right[E](indexedDomainF(domainId)) + + private def indexedDomainETUS( + domainId: DomainId + )(implicit + traceContext: TraceContext + ): EitherT[FutureUnlessShutdown, ReassignmentStoreError, IndexedDomain] = + EitherT.right[ReassignmentStoreError]( + performUnlessClosingF("index-for-source-domain")( + indexedDomainF(domainId) + ) + ) + + private def indexedDomainF( + idx: Int, + attributeName: String, + )(implicit + traceContext: TraceContext + ): Future[IndexedDomain] = + IndexedDomain + .fromDbIndexOT(s"par_reassignments attribute $attributeName", indexedStringStore)(idx) + .value + .flatMap { + case Some(sourceDomainId) => Future.successful(sourceDomainId) + case None => + Future.failed( + new RuntimeException( + s"Unable to find domain ID for domain with index $idx" + ) + ) + } + private def getResultFullUnassignmentTree( sourceDomainProtocolVersion: SourceProtocolVersion ): GetResult[FullUnassignmentTree] = @@ -169,22 +214,18 @@ class DbReassignmentStore( ): EitherT[FutureUnlessShutdown, ReassignmentStoreError, Unit] = { ErrorUtil.requireArgument( - reassignmentData.targetDomain == domain, - s"Domain ${domain.unwrap}: Reassignment store cannot store reassignment for domain ${reassignmentData.targetDomain.unwrap}", + reassignmentData.targetDomain == TargetDomainId(indexedTargetDomain.domainId), + s"Domain ${indexedTargetDomain.domainId}: Reassignment store cannot store reassignment for domain ${reassignmentData.targetDomain.unwrap}", ) - - val reassignmentId: ReassignmentId = reassignmentData.reassignmentId - val newEntry = ReassignmentEntry(reassignmentData, None) - - import DbStorage.Implicits.* - val insert: DBIO[Int] = sqlu""" - insert into par_reassignments(target_domain, origin_domain, unassignment_timestamp, unassignment_request_counter, + def insert(dbReassignmentId: DbReassignmentId): DBIO[Int] = + sqlu""" + insert into par_reassignments(target_domain_idx, source_domain_idx, unassignment_timestamp, unassignment_request_counter, unassignment_request, unassignment_decision_time, contract, creating_transaction_id, unassignment_result, 
submitter_lf, source_protocol_version, unassignment_global_offset, assignment_global_offset) values ( - $domain, - ${reassignmentId.sourceDomain}, - ${reassignmentId.unassignmentTs}, + $indexedTargetDomain, + ${dbReassignmentId.indexedSourceDomain}, + ${dbReassignmentId.unassignmentTs}, ${reassignmentData.unassignmentRequestCounter}, ${reassignmentData.unassignmentRequest}, ${reassignmentData.unassignmentDecisionTime}, @@ -199,10 +240,10 @@ class DbReassignmentStore( """ def insertExisting( - existingEntry: ReassignmentEntry + existingEntry: ReassignmentEntry, + id: DbReassignmentId, ): Checked[ReassignmentStoreError, ReassignmentAlreadyCompleted, Option[DBIO[Int]]] = { def update(entry: ReassignmentEntry): DBIO[Int] = { - val id = entry.reassignmentData.reassignmentId val data = entry.reassignmentData sqlu""" update par_reassignments @@ -213,37 +254,52 @@ class DbReassignmentStore( source_protocol_version=${data.sourceProtocolVersion}, unassignment_global_offset=${data.unassignmentGlobalOffset}, assignment_global_offset=${data.assignmentGlobalOffset} where - target_domain=$domain and origin_domain=${id.sourceDomain} and unassignment_timestamp=${data.unassignmentTs} + target_domain_idx=$indexedTargetDomain and source_domain_idx=${id.indexedSourceDomain} and unassignment_timestamp=${data.unassignmentTs} """ } + + val newEntry = ReassignmentEntry(reassignmentData, None) existingEntry.mergeWith(newEntry).map(entry => Some(update(entry))) } - insertDependentDeprecated( - entryExists(reassignmentId), - insertExisting, - insert, - dbError => throw dbError, - ) - .map(_ => ()) - .toEitherT + val reassignmentId = reassignmentData.reassignmentId + + for { + indexedSourceDomain <- indexedDomainETUS(reassignmentId.sourceDomain) + dbReassignmentId = DbReassignmentId(indexedSourceDomain, reassignmentId.unassignmentTs) + _ <- insertDependentDeprecated( + dbReassignmentId, + entryExists, + insertExisting, + insert, + dbError => throw dbError, + ) + .map(_ => ()) + .toEitherT + } yield () } override def lookup(reassignmentId: ReassignmentId)(implicit traceContext: TraceContext - ): EitherT[Future, ReassignmentStore.ReassignmentLookupError, ReassignmentData] = - EitherT(storage.query(entryExists(reassignmentId), functionFullName).map { - case None => Left(UnknownReassignmentId(reassignmentId)) - case Some(ReassignmentEntry(_, Some(timeOfCompletion))) => - Left(ReassignmentCompleted(reassignmentId, timeOfCompletion)) - case Some(reassignmentEntry) => Right(reassignmentEntry.reassignmentData) - }) + ): EitherT[Future, ReassignmentStore.ReassignmentLookupError, ReassignmentData] = for { + indexedSourceDomain <- indexedDomainET(reassignmentId.sourceDomain) + dbReassignmentId = DbReassignmentId(indexedSourceDomain, reassignmentId.unassignmentTs) + res <- EitherT( + storage.query(entryExists(dbReassignmentId), functionFullName).map { + case None => Left(UnknownReassignmentId(reassignmentId)) + case Some(ReassignmentEntry(_, Some(timeOfCompletion))) => + Left(ReassignmentCompleted(reassignmentId, timeOfCompletion)) + case Some(reassignmentEntry) => Right(reassignmentEntry.reassignmentData) + } + ) + } yield res - private def entryExists(id: ReassignmentId): DbAction.ReadOnly[Option[ReassignmentEntry]] = sql""" + private def entryExists(id: DbReassignmentId): DbAction.ReadOnly[Option[ReassignmentEntry]] = + sql""" select source_protocol_version, unassignment_timestamp, unassignment_request_counter, unassignment_request, unassignment_decision_time, contract, creating_transaction_id, unassignment_result, 
unassignment_global_offset, assignment_global_offset, time_of_completion_request_counter, time_of_completion_timestamp - from par_reassignments where target_domain=$domain and origin_domain=${id.sourceDomain} and unassignment_timestamp=${id.unassignmentTs} + from par_reassignments where target_domain_idx=$indexedTargetDomain and source_domain_idx=${id.indexedSourceDomain} and unassignment_timestamp=${id.unassignmentTs} """.as[ReassignmentEntry].headOption override def addUnassignmentResult( @@ -251,41 +307,54 @@ class DbReassignmentStore( )(implicit traceContext: TraceContext ): EitherT[FutureUnlessShutdown, ReassignmentStoreError, Unit] = { - val reassignmentId = unassignmentResult.reassignmentId - - val existsRaw: DbAction.ReadOnly[Option[Option[RawDeliveredUnassignmentResult]]] = sql""" + def exists(id: DbReassignmentId) = { + val existsRaw: DbAction.ReadOnly[Option[Option[RawDeliveredUnassignmentResult]]] = sql""" select unassignment_result, source_protocol_version from par_reassignments where - target_domain=$domain and origin_domain=${reassignmentId.sourceDomain} and unassignment_timestamp=${reassignmentId.unassignmentTs} + target_domain_idx=$indexedTargetDomain and source_domain_idx=${id.indexedSourceDomain} and unassignment_timestamp=${id.unassignmentTs} """.as[Option[RawDeliveredUnassignmentResult]].headOption - val exists = existsRaw.map(_.map(_.map(_.tryCreateDeliveredUnassignmentResul(cryptoApi)))) + existsRaw.map(_.map(_.map(_.tryCreateDeliveredUnassignmentResul(cryptoApi)))) + } - def update(previousResult: Option[DeliveredUnassignmentResult]) = + def update(previousResult: Option[DeliveredUnassignmentResult], id: DbReassignmentId) = previousResult .fold[Checked[ReassignmentStoreError, Nothing, Option[DBIO[Int]]]]( Checked.result(Some(sqlu""" update par_reassignments set unassignment_result=$unassignmentResult - where target_domain=$domain and origin_domain=${reassignmentId.sourceDomain} and unassignment_timestamp=${reassignmentId.unassignmentTs} + where target_domain_idx=$indexedTargetDomain and source_domain_idx=${id.indexedSourceDomain} and unassignment_timestamp=${id.unassignmentTs} """)) )(previous => if (previous == unassignmentResult) Checked.result(None) else Checked.abort( - UnassignmentResultAlreadyExists(reassignmentId, previous, unassignmentResult) + UnassignmentResultAlreadyExists( + unassignmentResult.reassignmentId, + previous, + unassignmentResult, + ) ) ) - updateDependentDeprecated( - exists, - update, - Checked.abort(UnknownReassignmentId(reassignmentId)), - dbError => throw dbError, - ) - .map(_ => ()) - .toEitherT + for { + indexedSourceDomain <- indexedDomainETUS(unassignmentResult.reassignmentId.sourceDomain) + dbReassignmentId = DbReassignmentId( + indexedSourceDomain, + unassignmentResult.reassignmentId.unassignmentTs, + ) + _ <- + updateDependentDeprecated( + dbReassignmentId, + exists, + update, + _ => Checked.abort(UnknownReassignmentId(unassignmentResult.reassignmentId)), + dbError => throw dbError, + ) + .map(_ => ()) + .toEitherT + } yield () } def addReassignmentsOffsets(offsets: Map[ReassignmentId, ReassignmentGlobalOffset])(implicit @@ -309,35 +378,54 @@ class DbReassignmentStore( ): EitherT[FutureUnlessShutdown, ReassignmentStoreError, Unit] = { import DbStorage.Implicits.BuilderChain.* - val reassignmentIdsFilter = offsets - .map { case (reassignmentId, _) => - sql"(origin_domain=${reassignmentId.sourceDomain} and unassignment_timestamp=${reassignmentId.unassignmentTs})" - } - .forgetNE - .intercalate(sql" or ") - .toActionBuilder + def 
select(ids: Seq[(Int, CantonTimestamp)]) = { + val reassignmentIdsFilter = ids + .map { case (sourceDomainIndex, unassignmentTs) => + sql"(source_domain_idx=$sourceDomainIndex and unassignment_timestamp=$unassignmentTs)" + } + .intercalate(sql" or ") + .toActionBuilder - val select = - sql"""select origin_domain, unassignment_timestamp, unassignment_global_offset, assignment_global_offset + val query = + sql"""select source_domain_idx, unassignment_timestamp, unassignment_global_offset, assignment_global_offset from par_reassignments where - target_domain=$domain and (""" ++ reassignmentIdsFilter ++ sql")" + target_domain_idx=$indexedTargetDomain and (""" ++ reassignmentIdsFilter ++ sql")" + + storage.query( + query.as[(Int, CantonTimestamp, Option[GlobalOffset], Option[GlobalOffset])], + functionFullName, + ) + } val updateQuery = """update par_reassignments set unassignment_global_offset = ?, assignment_global_offset = ? - where target_domain = ? and origin_domain = ? and unassignment_timestamp = ? + where target_domain_idx = ? and source_domain_idx = ? and unassignment_timestamp = ? """ + val deduplicatedSourceDomains = offsets.map(_._1.sourceDomain).toSet + val sourceDomainToIndexBiMap: HashBiMap[SourceDomainId, Int] = + HashBiMap.create[SourceDomainId, Int]() + lazy val task = for { - res <- EitherT.right( - storage.query( - select.as[(ReassignmentId, Option[GlobalOffset], Option[GlobalOffset])], - functionFullName, - ) + _ <- MonadUtil.sequentialTraverse(deduplicatedSourceDomains.forgetNE.toSeq)(sd => + indexedDomainET(sd).map { indexedDomain => + sourceDomainToIndexBiMap.put(sd, indexedDomain.index) + } ) - retrievedItems = res.map { case (reassignmentId, out, in) => - reassignmentId -> (out, in) + selectData = offsets.map { case (reassignmentId, _) => + (sourceDomainToIndexBiMap.get(reassignmentId.sourceDomain), reassignmentId.unassignmentTs) + } + selected <- EitherT.right(select(selectData)) + retrievedItems = selected.map { + case (sourceDomainIndex, unassignmentTs, unassignmentOffset, assignmentOffset) => + val sourceDomainId = sourceDomainToIndexBiMap.inverse().get(sourceDomainIndex) + + ReassignmentId( + sourceDomainId, + unassignmentTs, + ) -> (unassignmentOffset, assignmentOffset, sourceDomainIndex) }.toMap mergedGlobalOffsets <- EitherT.fromEither[Future](offsets.forgetNE.traverse { @@ -345,29 +433,30 @@ class DbReassignmentStore( retrievedItems .get(reassignmentId) .toRight(UnknownReassignmentId(reassignmentId)) - .map { case (offsetOutO, offsetInO) => - ReassignmentGlobalOffset + .map { case (offsetOutO, offsetInO, sourceDomainIndex) => + sourceDomainIndex -> ReassignmentGlobalOffset .create(offsetOutO, offsetInO) .valueOr(err => throw new DbDeserializationException(err)) } - .flatMap( - _.fold[Either[String, ReassignmentGlobalOffset]](Right(newOffsets))( - _.merge(newOffsets) - ) + .flatMap { case (sourceDomainIndex, globalOffsetO) => + globalOffsetO + .fold[Either[String, ReassignmentGlobalOffset]](Right(newOffsets))( + _.merge(newOffsets) + ) .leftMap(ReassignmentGlobalOffsetsMerge(reassignmentId, _)) - .map((reassignmentId, _)) - ) + .map((sourceDomainIndex, reassignmentId.unassignmentTs, _)) + } }) batchUpdate = DbStorage.bulkOperation_(updateQuery, mergedGlobalOffsets, storage.profile) { pp => mergedGlobalOffsetWithId => - val (reassignmentId, mergedGlobalOffset) = mergedGlobalOffsetWithId + val (sourceDomainIndex, unassignmentTs, mergedGlobalOffset) = mergedGlobalOffsetWithId pp >> mergedGlobalOffset.unassignment pp >> mergedGlobalOffset.assignment - pp >> 
domain.unwrap - pp >> reassignmentId.sourceDomain.unwrap - pp >> reassignmentId.unassignmentTs + pp >> indexedTargetDomain + pp >> sourceDomainIndex + pp >> unassignmentTs } _ <- EitherT.right[ReassignmentStoreError]( @@ -378,47 +467,55 @@ class DbReassignmentStore( sequentialQueue.executeE(task, "addReassignmentsOffsets") } - override def completeReasignment(reassignmentId: ReassignmentId, timeOfCompletion: TimeOfChange)( + override def completeReassignment(reassignmentId: ReassignmentId, timeOfCompletion: TimeOfChange)( implicit traceContext: TraceContext ): CheckedT[Future, Nothing, ReassignmentStoreError, Unit] = { - - val updateSameOrUnset = sqlu""" + def updateSameOrUnset(indexedSourceDomain: IndexedDomain) = + sqlu""" update par_reassignments set time_of_completion_request_counter=${timeOfCompletion.rc}, time_of_completion_timestamp=${timeOfCompletion.timestamp} where - target_domain=$domain and origin_domain=${reassignmentId.sourceDomain} and unassignment_timestamp=${reassignmentId.unassignmentTs} + target_domain_idx=$indexedTargetDomain and source_domain_idx=$indexedSourceDomain and unassignment_timestamp=${reassignmentId.unassignmentTs} and (time_of_completion_request_counter is NULL or (time_of_completion_request_counter = ${timeOfCompletion.rc} and time_of_completion_timestamp = ${timeOfCompletion.timestamp})) """ - val doneE: EitherT[Future, ReassignmentStoreError, Unit] = - EitherT(storage.update(updateSameOrUnset, functionFullName).map { changed => - if (changed > 0) { - if (changed != 1) - logger.error( - s"Reassignment completion query changed $changed lines. It should only change 1." - ) - Right(()) - } else { - if (changed != 0) - logger.error( - s"Reassignment completion query changed $changed lines -- this should not be negative." + val doneE: EitherT[Future, ReassignmentStoreError, Unit] = for { + indexedSourceDomain <- indexedDomainET(reassignmentId.sourceDomain) + _ <- EitherT( + storage.update(updateSameOrUnset(indexedSourceDomain), functionFullName).map { changed => + if (changed > 0) { + if (changed != 1) + logger.error( + s"Reassignment completion query changed $changed lines. It should only change 1." + ) + Right(()) + } else { + if (changed != 0) + logger.error( + s"Reassignment completion query changed $changed lines -- this should not be negative." 
+ ) + Left( + ReassignmentAlreadyCompleted(reassignmentId, timeOfCompletion): ReassignmentStoreError ) - Left(ReassignmentAlreadyCompleted(reassignmentId, timeOfCompletion)) + } } - }) + ) + } yield () CheckedT.fromEitherTNonabort((), doneE) } override def deleteReassignment( reassignmentId: ReassignmentId - )(implicit traceContext: TraceContext): Future[Unit] = - storage.update_( + )(implicit traceContext: TraceContext): Future[Unit] = for { + indexedSourceDomain <- indexedDomainF(reassignmentId.sourceDomain) + _ <- storage.update_( sqlu"""delete from par_reassignments - where target_domain=$domain and origin_domain=${reassignmentId.sourceDomain} and unassignment_timestamp=${reassignmentId.unassignmentTs}""", + where target_domain_idx=$indexedTargetDomain and source_domain_idx=$indexedSourceDomain and unassignment_timestamp=${reassignmentId.unassignmentTs}""", functionFullName, ) + } yield () override def deleteCompletionsSince( criterionInclusive: RequestCounter @@ -426,18 +523,17 @@ class DbReassignmentStore( val query = sqlu""" update par_reassignments set time_of_completion_request_counter=null, time_of_completion_timestamp=null - where target_domain=$domain and time_of_completion_request_counter >= $criterionInclusive + where target_domain_idx=$indexedTargetDomain and time_of_completion_request_counter >= $criterionInclusive """ storage.update_(query, functionFullName) } - private def findPendingBase(domainId: ReassignmentDomainId = domain, onlyNotFinished: Boolean) = { + private def findPendingBase( + onlyNotFinished: Boolean + ) = { import DbStorage.Implicits.BuilderChain.* - val domainFilter = domainId match { - case SourceDomainId(domainId) => sql"origin_domain=$domainId" - case TargetDomainId(domainId) => sql"target_domain=$domainId" - } + val domainFilter = sql"target_domain_idx=$indexedTargetDomain" val notFinishedFilter = if (onlyNotFinished) sql" and time_of_completion_request_counter is null and time_of_completion_timestamp is null" @@ -458,44 +554,50 @@ class DbReassignmentStore( filterTimestamp: Option[CantonTimestamp], filterSubmitter: Option[LfPartyId], limit: Int, - )(implicit traceContext: TraceContext): Future[Seq[ReassignmentData]] = - storage.query( - { - import DbStorage.Implicits.BuilderChain.* - import DbStorage.Implicits.* + )(implicit traceContext: TraceContext): Future[Seq[ReassignmentData]] = { + import DbStorage.Implicits.BuilderChain.* - val sourceFilter = filterSource.fold(sql"")(domain => sql" and origin_domain=$domain") - val timestampFilter = - filterTimestamp.fold(sql"")(ts => sql" and unassignment_timestamp=$ts") - val submitterFilter = - filterSubmitter.fold(sql"")(submitter => sql" and submitter_lf=$submitter") - val limitSql = storage.limitSql(limit) + val timestampFilter = + filterTimestamp.fold(sql"")(ts => sql" and unassignment_timestamp=$ts") + val submitterFilter = + filterSubmitter.fold(sql"")(submitter => sql" and submitter_lf=$submitter") + val limitSql = storage.limitSql(limit) + for { + indexedSourceDomainO <- filterSource.fold(Future.successful(None: Option[IndexedDomain]))( + sd => indexedDomainF(sd).map(Some(_)) + ) + sourceFilter = + indexedSourceDomainO.fold(sql"")(indexedSourceDomain => + sql" and source_domain_idx=$indexedSourceDomain" + ) + res <- storage.query( (findPendingBase(onlyNotFinished = true ) ++ sourceFilter ++ timestampFilter ++ submitterFilter ++ limitSql) - .as[ReassignmentData] - }, - functionFullName, - ) + .as[ReassignmentData], + functionFullName, + ) + } yield res + } override def findAfter( requestAfter: 
Option[(CantonTimestamp, SourceDomainId)], limit: Int, - )(implicit traceContext: TraceContext): Future[Seq[ReassignmentData]] = - storage.query( + )(implicit traceContext: TraceContext): Future[Seq[ReassignmentData]] = for { + queryData <- requestAfter.fold( + Future.successful(None: Option[(CantonTimestamp, IndexedDomain)]) + ) { case (ts, sd) => + indexedDomainF(sd).map(indexedDomain => Some((ts, indexedDomain))) + } + res <- storage.query( { import DbStorage.Implicits.BuilderChain.* val timestampFilter = - requestAfter.fold(sql"") { case (requestTimestamp, sourceDomain) => - storage.profile match { - case Profile.Oracle(_) => - sql" and (unassignment_timestamp > $requestTimestamp or (unassignment_timestamp = $requestTimestamp and origin_domain > $sourceDomain))" - case _ => - sql" and (unassignment_timestamp, origin_domain) > ($requestTimestamp, $sourceDomain) " - } + queryData.fold(sql"") { case (requestTimestamp, indexedDomain) => + sql" and (unassignment_timestamp, source_domain_idx) > ($requestTimestamp, $indexedDomain) " } - val order = sql" order by unassignment_timestamp, origin_domain " + val order = sql" order by unassignment_timestamp, source_domain_idx " val limitSql = storage.limitSql(limit) (findPendingBase(onlyNotFinished = true) ++ timestampFilter ++ order ++ limitSql) @@ -503,13 +605,17 @@ class DbReassignmentStore( }, functionFullName, ) + } yield res private def findIncomplete( sourceDomain: Option[SourceDomainId], validAt: GlobalOffset, start: Long, - )(implicit traceContext: TraceContext): Future[Seq[ReassignmentData]] = - storage + )(implicit traceContext: TraceContext): Future[Seq[ReassignmentData]] = for { + indexedSourceDomainO <- sourceDomain.fold(Future.successful(None: Option[IndexedDomain]))(sd => + indexedDomainF(sd).map(Some(_)) + ) + res <- storage .query( { import DbStorage.Implicits.BuilderChain.* @@ -521,7 +627,9 @@ class DbReassignmentStore( val incomplete = sql" and (" ++ outCompleted ++ sql" or " ++ inCompleted ++ sql")" val sourceDomainFilter = - sourceDomain.fold(sql"")(sourceDomain => sql" and origin_domain=$sourceDomain") + indexedSourceDomainO.fold(sql"")(indexedSourceDomain => + sql" and source_domain_idx=$indexedSourceDomain" + ) val limitSql = storage.limitSql(numberOfItems = DbReassignmentStore.dbQueryLimit, skipItems = start) @@ -532,6 +640,7 @@ class DbReassignmentStore( }, functionFullName, ) + } yield res /* We cannot do the stakeholders filtering in the DB, so we may need to query the @@ -593,67 +702,87 @@ class DbReassignmentStore( override def findEarliestIncomplete()(implicit traceContext: TraceContext - ): Future[Option[(GlobalOffset, ReassignmentId, TargetDomainId)]] = { - val result = storage - .query( - { - val maxCompletedOffset: SQLActionBuilder = - sql"""select min(coalesce(assignment_global_offset,${GlobalOffset.MaxValue})), + ): Future[Option[(GlobalOffset, ReassignmentId, TargetDomainId)]] = + for { + queryResult <- storage + .query( + { + val maxCompletedOffset: SQLActionBuilder = + sql"""select min(coalesce(assignment_global_offset,${GlobalOffset.MaxValue})), min(coalesce(unassignment_global_offset,${GlobalOffset.MaxValue})), - origin_domain, unassignment_timestamp + source_domain_idx, unassignment_timestamp from par_reassignments - where target_domain=$domain and (unassignment_global_offset is null or assignment_global_offset is null) - group by origin_domain, unassignment_timestamp + where target_domain_idx=$indexedTargetDomain and (unassignment_global_offset is null or assignment_global_offset is null) + group by 
source_domain_idx, unassignment_timestamp """ - maxCompletedOffset - .as[(Option[GlobalOffset], Option[GlobalOffset], DomainId, CantonTimestamp)] - }, - functionFullName, - ) - - result - .map( - _.toList - .map { case (in, out, source, ts) => - ((in.toList ++ out.toList).minOption, ReassignmentId(SourceDomainId(source), ts)) - } - .foldLeft( - ( - GlobalOffset.MaxValue, - ReassignmentId(SourceDomainId(domain.unwrap), CantonTimestamp.MaxValue), + maxCompletedOffset + .as[(Option[GlobalOffset], Option[GlobalOffset], Int, CantonTimestamp)] + }, + functionFullName, + ) + resultWithSourceDomainId <- MonadUtil.sequentialTraverse(queryResult.toList) { + case (assignmentOffset, unassignmentOffset, domainSourceIndex, unassignmentTs) => + indexedDomainF(domainSourceIndex, "source_domain_idx") + .map(indexedDomain => + ( + assignmentOffset, + unassignmentOffset, + SourceDomainId(indexedDomain.domainId), + unassignmentTs, + ) ) - )((acc: (GlobalOffset, ReassignmentId), n) => - n match { - case (Some(o), tid) => if (acc._1 > o) (o, tid) else acc - case (None, _) => acc - } - ) match { - case (offset, reassignmentId) => - if (offset == GlobalOffset.MaxValue) None else Some((offset, reassignmentId, domain)) + + } + res = resultWithSourceDomainId + .map { case (in, out, sourceDomainId, unassignmentTs) => + ( + (in.toList ++ out.toList).minOption, + ReassignmentId(sourceDomainId, unassignmentTs), + ) } - ) - } + .foldLeft( + ( + GlobalOffset.MaxValue, + ReassignmentId( + SourceDomainId(indexedTargetDomain.domainId), + CantonTimestamp.MaxValue, + ), + ) + )((acc: (GlobalOffset, ReassignmentId), n) => + n match { + case (Some(o), tid) => if (acc._1 > o) (o, tid) else acc + case (None, _) => acc + } + ) match { + case (offset, reassignmentId) => + if (offset == GlobalOffset.MaxValue) None + else Some((offset, reassignmentId, TargetDomainId(indexedTargetDomain.domainId))) + } + } yield res private def insertDependentDeprecated[E, W, A, R]( - exists: DBIO[Option[A]], - insertExisting: A => Checked[E, W, Option[DBIO[R]]], - insertFresh: DBIO[R], + dbReassignmentId: DbReassignmentId, + exists: DbReassignmentId => DBIO[Option[A]], + insertExisting: (A, DbReassignmentId) => Checked[E, W, Option[DBIO[R]]], + insertFresh: DbReassignmentId => DBIO[R], errorHandler: Throwable => E, operationName: String = "insertDependentDeprecated", )(implicit traceContext: TraceContext): CheckedT[FutureUnlessShutdown, E, W, Option[R]] = updateDependentDeprecated( + dbReassignmentId, exists, insertExisting, - Checked.result(Some(insertFresh)), + dbReassignmentId => Checked.result(Some(insertFresh(dbReassignmentId))), errorHandler, operationName, ) private def updateDependentDeprecated[E, W, A, R]( - exists: DBIO[Option[A]], - insertExisting: A => Checked[E, W, Option[DBIO[R]]], - insertNonExisting: Checked[E, W, Option[DBIO[R]]], + dbReassignmentId: DbReassignmentId, + exists: DbReassignmentId => DBIO[Option[A]], + insertExisting: (A, DbReassignmentId) => Checked[E, W, Option[DBIO[R]]], + insertNonExisting: DbReassignmentId => Checked[E, W, Option[DBIO[R]]], errorHandler: Throwable => E, operationName: String = "updateDependentDeprecated", )(implicit traceContext: TraceContext): CheckedT[FutureUnlessShutdown, E, W, Option[R]] = { @@ -661,12 +790,14 @@ class DbReassignmentStore( import storage.api.{DBIO as _, *} val readAndInsert = - exists + exists(dbReassignmentId) .flatMap(existing => - existing.fold(insertNonExisting)(insertExisting(_)).traverse { - case None => DBIO.successful(None): DBIO[Option[R]] - case Some(action) => 
action.map(Some(_)): DBIO[Option[R]] - } + existing + .fold(insertNonExisting(dbReassignmentId))(insertExisting(_, dbReassignmentId)) + .traverse { + case None => DBIO.successful(None): DBIO[Option[R]] + case Some(action) => action.map(Some(_)): DBIO[Option[R]] + } ) val compoundAction = readAndInsert.transactionally.withTransactionIsolation(Serializable) @@ -679,6 +810,12 @@ class DbReassignmentStore( } object DbReassignmentStore { + + private final case class DbReassignmentId( + indexedSourceDomain: IndexedDomain, + unassignmentTs: CantonTimestamp, + ) + // We tend to use 1000 to limit queries private val dbQueryLimit = 1000 diff --git a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbRequestJournalStore.scala b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbRequestJournalStore.scala index 185143871aac..51909e8ebf15 100644 --- a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbRequestJournalStore.scala +++ b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbRequestJournalStore.scala @@ -6,7 +6,7 @@ package com.digitalasset.canton.participant.store.db import cats.data.{EitherT, OptionT} import com.daml.nameof.NameOf.functionFullName import com.daml.nonempty.NonEmpty -import com.digitalasset.canton.config.RequireTypes.{NonNegativeInt, PositiveNumeric} +import com.digitalasset.canton.config.RequireTypes.NonNegativeInt import com.digitalasset.canton.config.{BatchAggregatorConfig, ProcessingTimeout} import com.digitalasset.canton.data.CantonTimestamp import com.digitalasset.canton.lifecycle.{CloseContext, Lifecycle} @@ -34,9 +34,8 @@ import scala.concurrent.{ExecutionContext, Future} import scala.util.{Success, Try} class DbRequestJournalStore( - domainId: IndexedDomain, + indexedDomain: IndexedDomain, override protected val storage: DbStorage, - maxItemsInSqlInClause: PositiveNumeric[Int], insertBatchAggregatorConfig: BatchAggregatorConfig, replaceBatchAggregatorConfig: BatchAggregatorConfig, override protected val timeouts: ProcessingTimeout, @@ -50,7 +49,7 @@ class DbRequestJournalStore( private[store] override val cleanPreheadStore: CursorPreheadStore[RequestCounterDiscriminator] = new DbCursorPreheadStore[RequestCounterDiscriminator]( - domainId, + indexedDomain, storage, cursorTable = "par_head_clean_counters", timeouts, @@ -96,7 +95,7 @@ class DbRequestJournalStore( ): DBIOAction[Array[Int], NoStream, Effect.All] = { def setData(pp: PositionedParameters)(item: RequestData): Unit = { val RequestData(rc, state, requestTimestamp, commitTime, repairContext) = item - pp >> domainId + pp >> indexedDomain pp >> rc pp >> state pp >> requestTimestamp @@ -104,26 +103,12 @@ class DbRequestJournalStore( pp >> repairContext } - storage.profile match { - case _: Profile.Postgres | _: Profile.H2 => - val query = """insert into - par_journal_requests(domain_id, request_counter, request_state_index, request_timestamp, commit_time, repair_context) - values (?, ?, ?, ?, ?, ?) - on conflict do nothing""" - DbStorage.bulkOperation(query, items.map(_.value).toList, storage.profile)(setData) - - case _: Profile.Oracle => - val query = - """merge /*+ INDEX (journal_requests pk_journal_requests) */ - |into par_journal_requests - |using (select ? domain_id, ? 
request_counter from dual) input - |on (par_journal_requests.request_counter = input.request_counter and - | par_journal_requests.domain_id = input.domain_id) - |when not matched then - | insert (domain_id, request_counter, request_state_index, request_timestamp, commit_time, repair_context) - | values (input.domain_id, input.request_counter, ?, ?, ?, ?)""".stripMargin - DbStorage.bulkOperation(query, items.map(_.value).toList, storage.profile)(setData) - } + val query = + """insert into par_journal_requests(domain_idx, request_counter, request_state_index, request_timestamp, commit_time, repair_context) + values (?, ?, ?, ?, ?, ?) + on conflict do nothing""" + DbStorage.bulkOperation(query, items.map(_.value).toList, storage.profile)(setData) + } private val success: Try[Unit] = Success(()) @@ -136,7 +121,7 @@ class DbRequestJournalStore( override protected def checkQuery(itemsToCheck: NonEmpty[Seq[ItemIdentifier]])(implicit batchTraceContext: TraceContext - ): immutable.Iterable[ReadOnly[immutable.Iterable[CheckData]]] = + ): ReadOnly[immutable.Iterable[CheckData]] = bulkQueryDbio(itemsToCheck) override protected def analyzeFoundData(item: RequestData, foundData: Option[RequestData])( @@ -168,21 +153,24 @@ class DbRequestJournalStore( )(implicit traceContext: TraceContext): OptionT[Future, RequestData] = { val query = sql"""select request_counter, request_state_index, request_timestamp, commit_time, repair_context - from par_journal_requests where request_counter = $rc and domain_id = $domainId""" + from par_journal_requests where request_counter = $rc and domain_idx = $indexedDomain""" .as[RequestData] OptionT(storage.query(query.headOption, functionFullName)) } private def bulkQueryDbio( rcs: NonEmpty[Seq[RequestCounter]] - ): immutable.Iterable[DbAction.ReadOnly[immutable.Iterable[RequestData]]] = - DbStorage.toInClauses_("request_counter", rcs, maxItemsInSqlInClause).map { inClause => - import DbStorage.Implicits.BuilderChain.* - val query = - sql"""select request_counter, request_state_index, request_timestamp, commit_time, repair_context - from par_journal_requests where domain_id = $domainId and """ ++ inClause - query.as[RequestData] - } + ): DbAction.ReadOnly[immutable.Iterable[RequestData]] = { + import DbStorage.Implicits.BuilderChain.* + val query = + sql"""select request_counter, request_state_index, request_timestamp, commit_time, repair_context + from par_journal_requests where domain_idx = $indexedDomain and """ ++ DbStorage + .toInClause( + "request_counter", + rcs, + ) + query.as[RequestData] + } override def firstRequestWithCommitTimeAfter(commitTimeExclusive: CantonTimestamp)(implicit traceContext: TraceContext @@ -198,7 +186,7 @@ class DbRequestJournalStore( with committed_after(request_counter) as ( select request_counter from par_journal_requests - where domain_id = $domainId and commit_time > $commitTimeExclusive) + where domain_idx = $indexedDomain and commit_time > $commitTimeExclusive) select min(request_counter) from committed_after; """.as[Option[RequestCounter]].headOption.map(_.flatten), functionFullName + ".committed_after", @@ -209,17 +197,17 @@ class DbRequestJournalStore( sql""" select request_counter, request_state_index, request_timestamp, commit_time, repair_context from par_journal_requests - where domain_id = $domainId and request_counter = $rc + where domain_idx = $indexedDomain and request_counter = $rc """.as[RequestData].headOption, functionFullName, ) ) } yield requestData - case _: Profile.Oracle | _: Profile.H2 => + case _: Profile.H2 => 
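[Review note] The DbRequestJournalStore hunks above likewise drop the Oracle merge branch in favor of a plain `on conflict do nothing` bulk insert, and `bulkQueryDbio` switches from `toInClauses_`-style chunking (bounded by the removed `maxItemsInSqlInClause` parameter) to a single `toInClause` lookup. A minimal sketch of such an IN-clause query in plain Slick, with a hypothetical table and columns rather than Canton's `DbStorage` helpers:

import slick.jdbc.PostgresProfile.api.*

// One query for all request counters instead of N chunked ones. Splicing the
// literal list with #$ is safe here only because the values are numeric;
// assumes `rcs` is non-empty, mirroring the NonEmpty[Seq[...]] caller above.
def requestsByCounter(domainIdx: Int, rcs: Seq[Long]) = {
  val inClause = rcs.mkString("(", ", ", ")")
  sql"""select request_counter, request_state_index, request_timestamp
        from journal_requests
        where domain_idx = $domainIdx and request_counter in #$inClause"""
    .as[(Long, Int, java.sql.Timestamp)]
}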
storage.query( sql""" select request_counter, request_state_index, request_timestamp, commit_time, repair_context - from par_journal_requests where domain_id = $domainId and commit_time > $commitTimeExclusive + from par_journal_requests where domain_idx = $indexedDomain and commit_time > $commitTimeExclusive order by request_counter #${storage.limit(1)} """.as[RequestData].headOption, functionFullName, @@ -267,15 +255,15 @@ class DbRequestJournalStore( batchTraceContext: TraceContext ): DBIOAction[Array[Int], NoStream, Effect.All] = { val updateQuery = - """update /*+ INDEX (journal_requests (request_counter, domain_id)) */ par_journal_requests + """update /*+ INDEX (journal_requests (request_counter, domain_idx)) */ par_journal_requests set request_state_index = ?, commit_time = coalesce (?, commit_time) - where domain_id = ? and request_counter = ? and request_timestamp = ?""" + where domain_idx = ? and request_counter = ? and request_timestamp = ?""" DbStorage.bulkOperation(updateQuery, items.map(_.value).toList, storage.profile) { pp => item => val ReplaceRequest(rc, requestTimestamp, newState, commitTime) = item pp >> newState pp >> commitTime - pp >> domainId + pp >> indexedDomain pp >> rc pp >> requestTimestamp } @@ -292,7 +280,7 @@ class DbRequestJournalStore( override protected def checkQuery(itemsToCheck: NonEmpty[Seq[RequestCounter]])(implicit batchTraceContext: TraceContext - ): immutable.Iterable[ReadOnly[immutable.Iterable[RequestData]]] = bulkQueryDbio(itemsToCheck) + ): ReadOnly[immutable.Iterable[RequestData]] = bulkQueryDbio(itemsToCheck) override protected def analyzeFoundData(item: ReplaceRequest, foundData: Option[RequestData])( implicit traceContext: TraceContext @@ -333,7 +321,7 @@ class DbRequestJournalStore( )(implicit traceContext: TraceContext): Future[Unit] = storage.update_( sqlu""" - delete from par_journal_requests where request_timestamp <= $beforeInclusive and domain_id = $domainId + delete from par_journal_requests where request_timestamp <= $beforeInclusive and domain_idx = $indexedDomain """, functionFullName, ) @@ -348,7 +336,7 @@ class DbRequestJournalStore( val endFilter = end.fold(sql"")(ts => sql" and request_timestamp <= $ts") (sql""" select 1 - from par_journal_requests where domain_id = $domainId and request_timestamp >= $start + from par_journal_requests where domain_idx = $indexedDomain and request_timestamp >= $start """ ++ endFilter).as[Int] }, functionFullName, @@ -360,7 +348,7 @@ class DbRequestJournalStore( )(implicit traceContext: TraceContext): Future[Unit] = { val statement = sqlu""" - delete from par_journal_requests where domain_id = $domainId and request_counter >= $fromInclusive + delete from par_journal_requests where domain_idx = $indexedDomain and request_counter >= $fromInclusive """ storage.update_(statement, functionFullName) } @@ -371,7 +359,7 @@ class DbRequestJournalStore( val statement = sql""" select request_counter, request_state_index, request_timestamp, commit_time, repair_context - from par_journal_requests where domain_id = $domainId and request_counter >= $fromInclusive and repair_context is not null + from par_journal_requests where domain_idx = $indexedDomain and request_counter >= $fromInclusive and repair_context is not null order by request_counter """.as[RequestData] storage.query(statement, functionFullName) @@ -381,7 +369,7 @@ class DbRequestJournalStore( val statement = sql""" select count(*) - from par_journal_requests where domain_id = $domainId and commit_time is null + from par_journal_requests where 
domain_idx = $indexedDomain and commit_time is null """.as[Int].head storage.query(statement, functionFullName).map(NonNegativeInt.tryCreate) } @@ -395,8 +383,8 @@ class DbRequestJournalStore( sql""" select request_counter from par_journal_requests - where domain_id = $domainId and request_timestamp <= $requestTimestamp - order by (domain_id, request_timestamp) desc + where domain_idx = $indexedDomain and request_timestamp <= $requestTimestamp + order by (domain_idx, request_timestamp) desc #${storage.limit(1)} """.as[RequestCounter].headOption, functionFullName, diff --git a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbSubmissionTrackerStore.scala b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbSubmissionTrackerStore.scala index 233168827501..66af663a3950 100644 --- a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbSubmissionTrackerStore.scala +++ b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbSubmissionTrackerStore.scala @@ -19,7 +19,7 @@ import scala.concurrent.{ExecutionContext, Future} class DbSubmissionTrackerStore( override protected val storage: DbStorage, - override val domainId: IndexedDomain, + override val indexedDomain: IndexedDomain, batchingParametersConfig: PrunableByTimeParameters, override protected val timeouts: ProcessingTimeout, override protected val loggerFactory: NamedLoggerFactory, @@ -43,47 +43,19 @@ class DbSubmissionTrackerStore( val dbRequestId = requestId.unwrap - val insertQuery = storage.profile match { - case _: DbStorage.Profile.H2 | _: DbStorage.Profile.Postgres => - sqlu"""insert into par_fresh_submitted_transaction( - domain_id, + val insertQuery = + sqlu"""insert into par_fresh_submitted_transaction( + domain_idx, root_hash_hex, request_id, max_sequencing_time) - values ($domainId, $rootHash, $dbRequestId, $maxSequencingTime) - on conflict do nothing""" - - case _: DbStorage.Profile.Oracle => - sqlu"""merge into par_fresh_submitted_transaction - using ( - select - $domainId domain_id, - $rootHash root_hash_hex, - $dbRequestId request_id, - $maxSequencingTime max_sequencing_time - from dual - ) to_insert - on (fresh_submitted_transaction.domain_id = to_insert.domain_id - and par_fresh_submitted_transaction.root_hash_hex = to_insert.root_hash_hex) - when not matched then - insert ( - domain_id, - root_hash_hex, - request_id, - max_sequencing_time - ) values ( - to_insert.domain_id, - to_insert.root_hash_hex, - to_insert.request_id, - to_insert.max_sequencing_time - ) - """ - } + values ($indexedDomain, $rootHash, $dbRequestId, $maxSequencingTime) + on conflict do nothing""" val selectQuery = sql"""select count(*) from par_fresh_submitted_transaction - where domain_id=$domainId and root_hash_hex=$rootHash and request_id=$dbRequestId""" + where domain_idx=$indexedDomain and root_hash_hex=$rootHash and request_id=$dbRequestId""" .as[Int] .headOption @@ -104,7 +76,7 @@ class DbSubmissionTrackerStore( )(implicit traceContext: TraceContext): Future[Int] = { val deleteQuery = sqlu"""delete from par_fresh_submitted_transaction - where domain_id = $domainId and max_sequencing_time <= $beforeAndIncluding""" + where domain_idx = $indexedDomain and max_sequencing_time <= $beforeAndIncluding""" storage.queryAndUpdate(deleteQuery, "prune par_fresh_submitted_transaction") } @@ -130,7 +102,7 @@ class DbSubmissionTrackerStore( )(implicit traceContext: TraceContext): Future[Unit] 
= { val deleteQuery = sqlu"""delete from par_fresh_submitted_transaction - where domain_id = $domainId and request_id >= $including""" + where domain_idx = $indexedDomain and request_id >= $including""" storage.update_(deleteQuery, "cleanup par_fresh_submitted_transaction") } diff --git a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbSyncDomainPersistentState.scala b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbSyncDomainPersistentState.scala index 7b2b4960fe18..3ac88a8e7a5c 100644 --- a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbSyncDomainPersistentState.scala +++ b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbSyncDomainPersistentState.scala @@ -15,7 +15,7 @@ import com.digitalasset.canton.participant.admin.PackageDependencyResolver import com.digitalasset.canton.participant.ledger.api.LedgerApiStore import com.digitalasset.canton.participant.store.{AcsInspection, SyncDomainPersistentState} import com.digitalasset.canton.participant.topology.ParticipantTopologyValidation -import com.digitalasset.canton.protocol.{StaticDomainParameters, TargetDomainId} +import com.digitalasset.canton.protocol.StaticDomainParameters import com.digitalasset.canton.resource.DbStorage import com.digitalasset.canton.store.db.DbSequencedEventStore import com.digitalasset.canton.store.memory.InMemorySendTrackerStore @@ -38,7 +38,7 @@ import scala.concurrent.ExecutionContext class DbSyncDomainPersistentState( participantId: ParticipantId, - override val domainId: IndexedDomain, + override val indexedDomain: IndexedDomain, val staticDomainParameters: StaticDomainParameters, clock: Clock, storage: DbStorage, @@ -66,9 +66,8 @@ class DbSyncDomainPersistentState( val contractStore: DbContractStore = new DbContractStore( storage, - domainId, + indexedDomain, staticDomainParameters.protocolVersion, - batching.maxItemsInSqlClause, caching.contractStore, dbQueryBatcherConfig = batching.aggregator, insertBatchAggregatorConfig = batching.aggregator, @@ -77,7 +76,8 @@ class DbSyncDomainPersistentState( ) val reassignmentStore: DbReassignmentStore = new DbReassignmentStore( storage, - TargetDomainId(domainId.item), + indexedDomain, + indexedStringStore, TargetProtocolVersion(staticDomainParameters.protocolVersion), pureCryptoApi, futureSupervisor, @@ -88,26 +88,23 @@ class DbSyncDomainPersistentState( val activeContractStore: DbActiveContractStore = new DbActiveContractStore( storage, - domainId, + indexedDomain, enableAdditionalConsistencyChecks, - batching.maxItemsInSqlClause, parameters.stores.journalPruning.toInternal, indexedStringStore, - staticDomainParameters.protocolVersion, timeouts, loggerFactory, ) val sequencedEventStore = new DbSequencedEventStore( storage, - domainId, + indexedDomain, staticDomainParameters.protocolVersion, timeouts, loggerFactory, ) val requestJournalStore: DbRequestJournalStore = new DbRequestJournalStore( - domainId, + indexedDomain, storage, - batching.maxItemsInSqlClause, insertBatchAggregatorConfig = batching.aggregator, replaceBatchAggregatorConfig = batching.aggregator, timeouts, @@ -115,7 +112,7 @@ class DbSyncDomainPersistentState( ) val acsCommitmentStore = new DbAcsCommitmentStore( storage, - domainId, + indexedDomain, staticDomainParameters.protocolVersion, timeouts, futureSupervisor, @@ -124,14 +121,14 @@ class DbSyncDomainPersistentState( ) val parameterStore: DbDomainParameterStore 
= - new DbDomainParameterStore(domainId.item, storage, timeouts, loggerFactory) + new DbDomainParameterStore(indexedDomain.domainId, storage, timeouts, loggerFactory) // TODO(i5660): Use the db-based send tracker store val sendTrackerStore = new InMemorySendTrackerStore() val submissionTrackerStore = new DbSubmissionTrackerStore( storage, - domainId, + indexedDomain, parameters.stores.journalPruning.toInternal, timeouts, loggerFactory, @@ -140,7 +137,7 @@ class DbSyncDomainPersistentState( override val topologyStore = new DbTopologyStore( storage, - DomainStore(domainId.item), + DomainStore(indexedDomain.domainId), timeouts, loggerFactory, ) @@ -171,7 +168,7 @@ class DbSyncDomainPersistentState( currentlyVettedPackages, nextPackageIds, packageDependencyResolver, - acsInspections = () => Map(domainId.domainId -> acsInspection), + acsInspections = () => Map(indexedDomain.domainId -> acsInspection), forceFlags, ) @@ -184,7 +181,7 @@ class DbSyncDomainPersistentState( checkCannotDisablePartyWithActiveContracts( partyId, forceFlags, - acsInspections = () => Map(domainId.domainId -> acsInspection), + acsInspections = () => Map(indexedDomain.domainId -> acsInspection), ) } @@ -206,5 +203,5 @@ class DbSyncDomainPersistentState( override def isMemory: Boolean = false override def acsInspection: AcsInspection = - new AcsInspection(domainId.domainId, activeContractStore, contractStore, ledgerApiStore) + new AcsInspection(indexedDomain.domainId, activeContractStore, contractStore, ledgerApiStore) } diff --git a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/memory/InMemoryActiveContractStore.scala b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/memory/InMemoryActiveContractStore.scala index daa95356a0dd..fdecd33443cf 100644 --- a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/memory/InMemoryActiveContractStore.scala +++ b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/memory/InMemoryActiveContractStore.scala @@ -44,7 +44,6 @@ import com.digitalasset.canton.store.memory.InMemoryPrunableByTime import com.digitalasset.canton.tracing.TraceContext import com.digitalasset.canton.util.FutureInstances.* import com.digitalasset.canton.util.* -import com.digitalasset.canton.version.ProtocolVersion import com.digitalasset.canton.{ReassignmentCounter, RequestCounter} import com.digitalasset.daml.lf.data.Ref.PackageId @@ -60,7 +59,6 @@ import scala.concurrent.{ExecutionContext, Future} /** Implements an [[ActiveContractStore!]] in memory. 
*/ class InMemoryActiveContractStore( val indexedStringStore: IndexedStringStore, - protocolVersion: ProtocolVersion, override val loggerFactory: NamedLoggerFactory, )(implicit val ec: ExecutionContext) extends ActiveContractStore diff --git a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/memory/InMemoryInFlightSubmissionStore.scala b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/memory/InMemoryInFlightSubmissionStore.scala index c7d2ed902e5e..3477ea6bfc64 100644 --- a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/memory/InMemoryInFlightSubmissionStore.scala +++ b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/memory/InMemoryInFlightSubmissionStore.scala @@ -205,7 +205,7 @@ class InMemoryInFlightSubmissionStore(override protected val loggerFactory: Name ): Future[Option[InFlightSubmission[SubmissionSequencingInfo]]] = Future.successful { inFlight.collectFirst { - case (changeIdHash, inFlight) + case (_changeIdHash, inFlight) if inFlight.submissionDomain == domainId && inFlight.messageId == messageId => inFlight } diff --git a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/memory/InMemoryReassignmentStore.scala b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/memory/InMemoryReassignmentStore.scala index d5c742817dc8..ac8db139d54e 100644 --- a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/memory/InMemoryReassignmentStore.scala +++ b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/memory/InMemoryReassignmentStore.scala @@ -110,7 +110,7 @@ class InMemoryReassignmentStore( editReassignmentEntry(reassignmentId, _.addUnassignmentGlobalOffset(newGlobalOffset)) } - override def completeReasignment(reassignmentId: ReassignmentId, timeOfCompletion: TimeOfChange)( + override def completeReassignment(reassignmentId: ReassignmentId, timeOfCompletion: TimeOfChange)( implicit traceContext: TraceContext ): CheckedT[Future, Nothing, ReassignmentStoreError, Unit] = CheckedT(Future.successful { diff --git a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/memory/InMemorySyncDomainPersistentState.scala b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/memory/InMemorySyncDomainPersistentState.scala index 693294fdef9f..ef1281aef1d1 100644 --- a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/memory/InMemorySyncDomainPersistentState.scala +++ b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/memory/InMemorySyncDomainPersistentState.scala @@ -37,7 +37,7 @@ class InMemorySyncDomainPersistentState( participantId: ParticipantId, clock: Clock, crypto: Crypto, - override val domainId: IndexedDomain, + override val indexedDomain: IndexedDomain, val staticDomainParameters: StaticDomainParameters, override val enableAdditionalConsistencyChecks: Boolean, indexedStringStore: IndexedStringStore, @@ -56,11 +56,10 @@ class InMemorySyncDomainPersistentState( val activeContractStore = new InMemoryActiveContractStore( indexedStringStore, - staticDomainParameters.protocolVersion, loggerFactory, ) val reassignmentStore = - new InMemoryReassignmentStore(TargetDomainId(domainId.item), loggerFactory) + new 
InMemoryReassignmentStore(TargetDomainId(indexedDomain.domainId), loggerFactory) val sequencedEventStore = new InMemorySequencedEventStore(loggerFactory) val requestJournalStore = new InMemoryRequestJournalStore(loggerFactory) val acsCommitmentStore = @@ -71,7 +70,7 @@ class InMemorySyncDomainPersistentState( override val topologyStore = new InMemoryTopologyStore( - DomainStore(domainId.item), + DomainStore(indexedDomain.domainId), loggerFactory, timeouts, ) @@ -101,7 +100,7 @@ class InMemorySyncDomainPersistentState( currentlyVettedPackages, nextPackageIds, packageDependencyResolver, - acsInspections = () => Map(domainId.domainId -> acsInspection), + acsInspections = () => Map(indexedDomain.domainId -> acsInspection), forceFlags, ) override def checkCannotDisablePartyWithActiveContracts( @@ -113,7 +112,7 @@ class InMemorySyncDomainPersistentState( checkCannotDisablePartyWithActiveContracts( partyId, forceFlags, - acsInspections = () => Map(domainId.domainId -> acsInspection), + acsInspections = () => Map(indexedDomain.domainId -> acsInspection), ) } @@ -122,5 +121,5 @@ class InMemorySyncDomainPersistentState( override def close(): Unit = () override def acsInspection: AcsInspection = - new AcsInspection(domainId.domainId, activeContractStore, contractStore, ledgerApiStore) + new AcsInspection(indexedDomain.domainId, activeContractStore, contractStore, ledgerApiStore) } diff --git a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/memory/ReassignmentCache.scala b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/memory/ReassignmentCache.scala index 43b8d73461fb..bd49495975f7 100644 --- a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/memory/ReassignmentCache.scala +++ b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/store/memory/ReassignmentCache.scala @@ -65,7 +65,7 @@ class ReassignmentCache( PendingReassignmentCompletion(timeOfCompletion)(), ) match { case None => - reassignmentStore.completeReasignment(reassignmentId, timeOfCompletion).value.map { + reassignmentStore.completeReassignment(reassignmentId, timeOfCompletion).value.map { result => logger .trace( @@ -114,7 +114,7 @@ class ReassignmentCache( _ = logger.trace( s"Request ${timeOfCompletion.rc}: Overwriting the reassignment completion of the later request ${previousTimeOfCompletion.rc}" ) - result <- reassignmentStore.completeReasignment(reassignmentId, timeOfCompletion).value + result <- reassignmentStore.completeReassignment(reassignmentId, timeOfCompletion).value } yield result } } diff --git a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/sync/CantonSyncService.scala b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/sync/CantonSyncService.scala index 0dae6a010bb9..424d7e89a273 100644 --- a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/sync/CantonSyncService.scala +++ b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/sync/CantonSyncService.scala @@ -19,6 +19,7 @@ import com.digitalasset.canton.config.{ProcessingTimeout, TestingConfigInternal} import com.digitalasset.canton.crypto.{CryptoPureApi, SyncCryptoApiProvider} import com.digitalasset.canton.data.{ CantonTimestamp, + Offset, ProcessedDisclosedContract, ReassignmentSubmitterMetadata, } @@ -43,6 +44,7 @@ import com.digitalasset.canton.participant.admin.inspection.{ 
SyncStateInspection, } import com.digitalasset.canton.participant.admin.repair.RepairService +import com.digitalasset.canton.participant.admin.repair.RepairService.DomainLookup import com.digitalasset.canton.participant.domain.* import com.digitalasset.canton.participant.ledger.api.LedgerApiIndexer import com.digitalasset.canton.participant.metrics.ParticipantMetrics @@ -326,12 +328,22 @@ class CantonSyncService( packageService.value.packageDependencyResolver, repairServiceDAMLe, ledgerApiIndexer.asEval(TraceContext.empty), - syncDomainPersistentStateManager, aliasManager, parameters, Storage.threadsAvailableForWriting(storage), - connectedDomainsLookup.isConnected, - () => connectedDomainsMap.nonEmpty, + new DomainLookup { + override def isConnected(domainId: DomainId): Boolean = + connectedDomainsLookup.isConnected(domainId) + + override def isConnectedToAnyDomain: Boolean = + connectedDomainsMap.nonEmpty + + override def persistentStateFor(domainId: DomainId): Option[SyncDomainPersistentState] = + syncDomainPersistentStateManager.get(domainId) + + override def topologyFactoryFor(domainId: DomainId): Option[TopologyComponentFactory] = + syncDomainPersistentStateManager.topologyFactoryFor(domainId) + }, // Share the sync service queue with the repair service, so that repair operations cannot run concurrently with // domain connections. connectQueue, @@ -426,7 +438,7 @@ class CantonSyncService( ) override def prune( - pruneUpToInclusive: LedgerSyncOffset, + pruneUpToInclusive: Offset, submissionId: LedgerSubmissionId, _pruneAllDivulgedContracts: Boolean, // Canton always prunes divulged contracts ignoring this flag ): CompletionStage[PruningResult] = @@ -443,7 +455,7 @@ class CantonSyncService( }).asJava def pruneInternally( - pruneUpToInclusive: LedgerSyncOffset + pruneUpToInclusive: Offset )(implicit traceContext: TraceContext): EitherT[FutureUnlessShutdown, CantonError, Unit] = (for { pruneUpToMultiDomainGlobalOffset <- EitherT @@ -626,7 +638,7 @@ class CantonSyncService( override def validateDar(dar: ByteString, darName: String)(implicit traceContext: TraceContext ): Future[SubmissionResult] = - withSpan("CantonSyncService.validateDar") { implicit traceContext => span => + withSpan("CantonSyncService.validateDar") { implicit traceContext => _span => if (!isActive()) { logger.debug(s"Rejecting DAR validation request on passive replica.") Future.successful(SyncServiceError.Synchronous.PassiveNode) @@ -1740,9 +1752,9 @@ class CantonSyncService( .map(_.flatten) override def incompleteReassignmentOffsets( - validAt: LedgerSyncOffset, + validAt: Offset, stakeholders: Set[LfPartyId], - )(implicit traceContext: TraceContext): Future[Vector[LedgerSyncOffset]] = + )(implicit traceContext: TraceContext): Future[Vector[Offset]] = UpstreamOffsetConvert .toGlobalOffset(validAt) .fold( diff --git a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/sync/SyncDomain.scala b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/sync/SyncDomain.scala index 3f5589b8e976..4775818b378f 100644 --- a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/sync/SyncDomain.scala +++ b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/sync/SyncDomain.scala @@ -793,14 +793,14 @@ class SyncDomain( // Wait to see a timestamp >= now from the domain -- when we see such a timestamp, it means that the participant // has "caught up" on messages from the domain (and so should have seen all the 
assignments) // TODO(i9009): This assumes the participant and domain clocks are synchronized, which may not be the case - waitForReplay <- FutureUnlessShutdown.outcomeF( + _waitForReplay <- FutureUnlessShutdown.outcomeF( timeTracker .awaitTick(clock.now) .map(_.void) .getOrElse(Future.unit) ) - params <- performUnlessClosingF(functionFullName)( + _params <- performUnlessClosingF(functionFullName)( topologyClient.currentSnapshotApproximation.findDynamicDomainParametersOrDefault( staticDomainParameters.protocolVersion ) diff --git a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/sync/SyncDomainMigration.scala b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/sync/SyncDomainMigration.scala index 7d9e8153fdc9..aa515ca38795 100644 --- a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/sync/SyncDomainMigration.scala +++ b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/sync/SyncDomainMigration.scala @@ -277,8 +277,8 @@ class SyncDomainMigration( s"About to prune deactivated sync domain $domainId sequenced event store'" ) - repair.syncDomainPersistentStateManager - .get(domainId) + repair.domainLookup + .persistentStateFor(domainId) .fold(Future.unit)(_.sequencedEventStore.delete(SequencerCounter.Genesis)) } diff --git a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/sync/SyncDomainPersistentStateManager.scala b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/sync/SyncDomainPersistentStateManager.scala index 8afcf133797a..e47ea2422199 100644 --- a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/sync/SyncDomainPersistentStateManager.scala +++ b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/sync/SyncDomainPersistentStateManager.scala @@ -135,11 +135,11 @@ class SyncDomainPersistentStateManager( } private def createPersistentState( - domainId: IndexedDomain, + indexedDomain: IndexedDomain, staticDomainParameters: StaticDomainParameters, ): SyncDomainPersistentState = - get(domainId.item) - .getOrElse(mkPersistentState(domainId, staticDomainParameters)) + get(indexedDomain.domainId) + .getOrElse(mkPersistentState(indexedDomain, staticDomainParameters)) private def checkAndUpdateDomainParameters( alias: DomainAlias, @@ -170,14 +170,14 @@ class SyncDomainPersistentStateManager( TrieMap[DomainId, SyncDomainPersistentState]() private def put(state: SyncDomainPersistentState): Unit = { - val domainId = state.domainId - val previous = domainStates.putIfAbsent(domainId.item, state) + val domainId = state.indexedDomain.domainId + val previous = domainStates.putIfAbsent(domainId, state) if (previous.isDefined) throw new IllegalArgumentException(s"domain state already exists for $domainId") } private def putIfAbsent(state: SyncDomainPersistentState): Unit = - domainStates.putIfAbsent(state.domainId.item, state).discard + domainStates.putIfAbsent(state.indexedDomain.domainId, state).discard def get(domainId: DomainId): Option[SyncDomainPersistentState] = domainStates.get(domainId) diff --git a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/sync/TimelyRejectNotifier.scala b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/sync/TimelyRejectNotifier.scala index f2ed9b3cdb6d..133593be8051 100644 --- 
a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/sync/TimelyRejectNotifier.scala +++ b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/sync/TimelyRejectNotifier.scala @@ -4,7 +4,6 @@ package com.digitalasset.canton.participant.sync import cats.Monad -import cats.syntax.either.* import com.digitalasset.canton.data.CantonTimestamp import com.digitalasset.canton.discard.Implicits.DiscardOps import com.digitalasset.canton.lifecycle.UnlessShutdown.{AbortedDueToShutdown, Outcome} @@ -24,7 +23,7 @@ import scala.concurrent.{ExecutionContext, Future} class TimelyRejectNotifier( rejecter: TimelyRejectNotifier.TimelyRejecter, - initialUpperBound: Option[CantonTimestamp], + initialUpperBound: CantonTimestamp, override protected val loggerFactory: NamedLoggerFactory, )(implicit ec: ExecutionContext) extends NamedLogging { @@ -34,9 +33,8 @@ class TimelyRejectNotifier( /** A non-strict upper bound on the timestamps with which the `rejecter` has been notified. * Also stores the internal state of the notification state machine so that they can be updated atomically. */ - private val upperBoundOnNotification - : AtomicReference[(Option[CantonTimestamp], UnlessShutdown[State])] = - new AtomicReference[(Option[CantonTimestamp], UnlessShutdown[State])]( + private val upperBoundOnNotification: AtomicReference[(CantonTimestamp, UnlessShutdown[State])] = + new AtomicReference[(CantonTimestamp, UnlessShutdown[State])]( (initialUpperBound, Outcome(Idle)) ) @@ -82,19 +80,19 @@ class TimelyRejectNotifier( ): Boolean = { // First advance the upper bound, then notify, to make sure that the upper bound really is an upper bound. val (oldBound, oldState) = upperBoundOnNotification.getAndUpdate { case (oldBound, oldState) => - val newBound = if (increaseBound) Some(oldBound.fold(bound)(_ max bound)) else oldBound - val shouldNotify = oldBound.forall(_ < bound) == increaseBound + val newBound = if (increaseBound) oldBound max bound else oldBound + val shouldNotify = (oldBound < bound) == increaseBound val newState = oldState.map { case Idle => if (shouldNotify) Running else Idle case Running => if (shouldNotify) Pending(traceContext) else Running case pending @ Pending(_) => // Update the trace context only if we increase the bound - if (increaseBound && oldBound.forall(_ < bound)) Pending(traceContext) + if (increaseBound && oldBound < bound) Pending(traceContext) else pending } newBound -> newState } - val shouldNotify = oldBound.forall(_ < bound) == increaseBound + val shouldNotify = (oldBound < bound) == increaseBound oldState match { case Outcome(Idle) if shouldNotify => val notifiedF = @@ -151,9 +149,9 @@ class TimelyRejectNotifier( case Outcome(Running) => Right(()) case Outcome(Pending(newTraceContext)) => if (notificationOutcome.isOutcome) { - bound.toLeft(()).leftMap { newBound => - val boundIncreased = newBound > theBound - LoopState(newBound, boundIncreased, newTraceContext) + Left { + val boundIncreased = bound > theBound + LoopState(bound, boundIncreased, newTraceContext) } } else Right(()) case _ => @@ -168,7 +166,7 @@ object TimelyRejectNotifier { def apply( participantNodeEphemeralState: ParticipantNodeEphemeralState, domainId: DomainId, - initialUpperBound: Option[CantonTimestamp], + initialUpperBound: CantonTimestamp, loggerFactory: NamedLoggerFactory, )(implicit ec: ExecutionContext): TimelyRejectNotifier = { class InFlightSubmissionTimelyRejecter extends TimelyRejecter { diff --git 
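
Making the initial bound mandatory turns every `oldBound.forall(_ < bound)` in this file into a direct comparison on the timestamp total order. A compact model of the atomic update step, with `Long` standing in for `CantonTimestamp` (a sketch only: the real reference additionally carries the Idle/Running/Pending state machine so that bound and state change atomically):

import java.util.concurrent.atomic.AtomicReference

final class UpperBoundSketch(initialUpperBound: Long) {
  private val upperBound = new AtomicReference[Long](initialUpperBound)

  /** Advances the bound when requested and reports whether to notify. */
  def update(bound: Long, increaseBound: Boolean): Boolean = {
    val oldBound = upperBound.getAndUpdate { old =>
      if (increaseBound) old max bound else old
    }
    // Without the Option, `oldBound.forall(_ < bound)` collapses to `oldBound < bound`.
    (oldBound < bound) == increaseBound
  }
}
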
a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/sync/UpstreamOffsetConvert.scala b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/sync/UpstreamOffsetConvert.scala index 2d22b2bff0fd..0dbf1189a5f1 100644 --- a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/sync/UpstreamOffsetConvert.scala +++ b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/sync/UpstreamOffsetConvert.scala @@ -4,7 +4,8 @@ package com.digitalasset.canton.participant.sync import cats.syntax.either.* -import com.digitalasset.canton.participant.{GlobalOffset, LedgerSyncOffset} +import com.digitalasset.canton.data.Offset +import com.digitalasset.canton.participant.GlobalOffset import com.digitalasset.daml.lf.data.{Bytes as LfBytes, Ref} import com.google.protobuf.ByteString @@ -18,10 +19,10 @@ object UpstreamOffsetConvert { private val versionUpstreamOffsetsAsLong: Byte = 0 private val longBasedByteLength: Int = 9 // One byte for the version plus 8 bytes for Long - def fromGlobalOffset(offset: GlobalOffset): LedgerSyncOffset = + def fromGlobalOffset(offset: GlobalOffset): Offset = fromGlobalOffset(offset.toLong) - def fromGlobalOffset(i: Long) = LedgerSyncOffset( + def fromGlobalOffset(i: Long) = Offset( LfBytes.fromByteString( ByteString.copyFrom( ByteBuffer @@ -33,10 +34,10 @@ object UpstreamOffsetConvert { ) ) - def toGlobalOffset(offset: LedgerSyncOffset): Either[String, GlobalOffset] = { + def toGlobalOffset(offset: Offset): Either[String, GlobalOffset] = { val bytes = offset.bytes.toByteArray if (bytes.lengthCompare(longBasedByteLength) != 0) { - if (offset == LedgerSyncOffset.beforeBegin) { + if (offset == Offset.beforeBegin) { Left(s"Invalid canton offset: before ledger begin is not allowed") } else { Left(s"Invalid canton offset length: expected $longBasedByteLength, actual ${bytes.length}") @@ -55,9 +56,9 @@ object UpstreamOffsetConvert { def toStringOffset(offset: GlobalOffset): String = fromGlobalOffset(offset).toHexString - def tryToLedgerSyncOffset(offset: String): LedgerSyncOffset = + def tryToLedgerSyncOffset(offset: String): Offset = toLedgerSyncOffset(offset).valueOr(err => throw new IllegalArgumentException(err)) - def toLedgerSyncOffset(offset: String): Either[String, LedgerSyncOffset] = - Ref.HexString.fromString(offset).map(LedgerSyncOffset.fromHexString) + def toLedgerSyncOffset(offset: String): Either[String, Offset] = + Ref.HexString.fromString(offset).map(Offset.fromHexString) } diff --git a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/topology/PackageOps.scala b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/topology/PackageOps.scala index 9b08ec40d242..ef5eb794f0e9 100644 --- a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/topology/PackageOps.scala +++ b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/topology/PackageOps.scala @@ -91,7 +91,7 @@ class PackageOpsImpl( .packageUsage(packageId, state.contractStore) .map(opt => opt.fold[Either[PackageInUse, Unit]](Right(()))(contractId => - Left(new PackageInUse(packageId, contractId, state.domainId.domainId)) + Left(new PackageInUse(packageId, contractId, state.indexedDomain.domainId)) ) ) ) diff --git a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/topology/ParticipantTopologyDispatcher.scala 
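
The renamed conversions above keep the existing wire format: one version byte (`versionUpstreamOffsetsAsLong = 0`) followed by the offset as an 8-byte `Long` in `ByteBuffer`'s default big-endian order, 9 bytes in total. A dependency-free sketch of that round trip, using plain byte arrays in place of `LfBytes`/`ByteString`:

import java.nio.ByteBuffer

object OffsetCodecSketch {
  private val version: Byte = 0 // versionUpstreamOffsetsAsLong in the real code
  private val encodedLength = 9 // one version byte plus 8 bytes for the Long

  def encode(offset: Long): Array[Byte] =
    ByteBuffer.allocate(encodedLength).put(version).putLong(offset).array()

  def decode(bytes: Array[Byte]): Either[String, Long] =
    if (bytes.length != encodedLength)
      Left(s"Invalid offset length: expected $encodedLength, actual ${bytes.length}")
    else {
      val buf = ByteBuffer.wrap(bytes)
      buf.get() // skip the version byte
      Right(buf.getLong())
    }
}

For example, `decode(encode(42L))` yields `Right(42L)`, while a shorter or longer array is rejected, matching the length check in `toGlobalOffset`.
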
b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/topology/ParticipantTopologyDispatcher.scala index 729302f64da0..36949c29990c 100644 --- a/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/topology/ParticipantTopologyDispatcher.scala +++ b/sdk/canton/community/participant/src/main/scala/com/digitalasset/canton/participant/topology/ParticipantTopologyDispatcher.scala @@ -23,7 +23,6 @@ import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} import com.digitalasset.canton.participant.domain.DomainRegistryError import com.digitalasset.canton.participant.store.SyncDomainPersistentState import com.digitalasset.canton.participant.sync.SyncDomainPersistentStateManager -import com.digitalasset.canton.protocol.StaticDomainParameters import com.digitalasset.canton.sequencing.client.SequencerClient import com.digitalasset.canton.time.Clock import com.digitalasset.canton.topology.* @@ -147,7 +146,7 @@ class ParticipantTopologyDispatcher( } }) - def trustDomain(domainId: DomainId, parameters: StaticDomainParameters)(implicit + def trustDomain(domainId: DomainId)(implicit traceContext: TraceContext ): EitherT[FutureUnlessShutdown, String, Unit] = { def alreadyTrustedInStore( diff --git a/sdk/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/admin/PackageOpsTest.scala b/sdk/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/admin/PackageOpsTest.scala index b66fad10d6b5..9400f3968fd3 100644 --- a/sdk/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/admin/PackageOpsTest.scala +++ b/sdk/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/admin/PackageOpsTest.scala @@ -95,7 +95,7 @@ trait PackageOpsTestBase extends AsyncWordSpec with BaseTest with ArgumentMatche when(activeContractStore.packageUsage(eqTo(pkgId1), eqTo(contractStore))(anyTraceContext)) .thenReturn(Future.successful(Some(contractId))) val indexedDomain = IndexedDomain.tryCreate(domainId1, 1) - when(syncDomainPersistentState.domainId).thenReturn(indexedDomain) + when(syncDomainPersistentState.indexedDomain).thenReturn(indexedDomain) packageOps.checkPackageUnused(pkgId1).leftOrFail("active contract with package id").map { err => diff --git a/sdk/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/admin/inspection/AcsInspectionTest.scala b/sdk/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/admin/inspection/AcsInspectionTest.scala index 3b43a84f56b3..cc2c36acca68 100644 --- a/sdk/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/admin/inspection/AcsInspectionTest.scala +++ b/sdk/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/admin/inspection/AcsInspectionTest.scala @@ -225,7 +225,7 @@ object AcsInspectionTest extends MockitoSugar with ArgumentMatchersSugar { when(state.contractStore).thenAnswer(cs) when(state.activeContractStore).thenAnswer(acs) when(state.requestJournalStore).thenAnswer(rjs) - when(state.domainId).thenAnswer(IndexedDomain.tryCreate(FakeDomainId, 1)) + when(state.indexedDomain).thenAnswer(IndexedDomain.tryCreate(FakeDomainId, 1)) when(state.acsInspection).thenAnswer(acsInspection) state diff --git a/sdk/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/MessageDispatcherTest.scala 
b/sdk/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/MessageDispatcherTest.scala index c801dcbdd653..0e2289b49a39 100644 --- a/sdk/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/MessageDispatcherTest.scala +++ b/sdk/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/MessageDispatcherTest.scala @@ -564,7 +564,7 @@ trait MessageDispatcherTest { sut.recordOrderPublisher.scheduleEmptyAcsChangePublication( any[SequencerCounter], any[CantonTimestamp], - ) + )(any[TraceContext]) ) .thenAnswer { checkTickTopologyProcessor(sut, sc, ts).discard @@ -575,7 +575,9 @@ trait MessageDispatcherTest { } handle(sut, deliver) { - verify(sut.recordOrderPublisher).scheduleEmptyAcsChangePublication(isEq(sc), isEq(ts)) + verify(sut.recordOrderPublisher).scheduleEmptyAcsChangePublication(isEq(sc), isEq(ts))( + any[TraceContext] + ) checkTicks(sut, sc, ts) }.futureValue } diff --git a/sdk/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/conflictdetection/ConflictDetectionHelpers.scala b/sdk/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/conflictdetection/ConflictDetectionHelpers.scala index a7199b58bf38..6fc3e795c537 100644 --- a/sdk/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/conflictdetection/ConflictDetectionHelpers.scala +++ b/sdk/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/conflictdetection/ConflictDetectionHelpers.scala @@ -49,7 +49,7 @@ private[protocol] trait ConflictDetectionHelpers { private lazy val indexedStringStore = new InMemoryIndexedStringStore(minIndex = 1, maxIndex = 2) def mkEmptyAcs(): ActiveContractStore = - new InMemoryActiveContractStore(indexedStringStore, testedProtocolVersion, loggerFactory)( + new InMemoryActiveContractStore(indexedStringStore, loggerFactory)( parallelExecutionContext ) @@ -62,8 +62,10 @@ private[protocol] trait ConflictDetectionHelpers { def mkReassignmentCache( loggerFactory: NamedLoggerFactory, - store: ReassignmentStore = - new InMemoryReassignmentStore(ReassignmentStoreTest.targetDomain, loggerFactory), + store: ReassignmentStore = new InMemoryReassignmentStore( + ReassignmentStoreTest.targetDomainId, + loggerFactory, + ), )( entries: (ReassignmentId, MediatorGroupRecipient)* )(implicit traceContext: TraceContext): Future[ReassignmentCache] = @@ -73,7 +75,7 @@ private[protocol] trait ConflictDetectionHelpers { reassignmentData <- ReassignmentStoreTest.mkReassignmentDataForDomain( reassignmentId, sourceMediator, - targetDomainId = ReassignmentStoreTest.targetDomain, + targetDomainId = ReassignmentStoreTest.targetDomainId, ) result <- store .addReassignment(reassignmentData) diff --git a/sdk/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/conflictdetection/ConflictDetectorTest.scala b/sdk/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/conflictdetection/ConflictDetectorTest.scala index 4983b6f6447b..8cc3d4c1a09c 100644 --- a/sdk/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/conflictdetection/ConflictDetectorTest.scala +++ b/sdk/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/conflictdetection/ConflictDetectorTest.scala @@ -46,7 +46,12 @@ import com.digitalasset.canton.participant.store.memory.{ ReassignmentCacheTest, 
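
The `MessageDispatcherTest` fixes above illustrate a general mockito-scala point: once a stubbed method gains an implicit parameter list (here a `TraceContext`), stubs and verifications must supply a matcher for that list as well. A hypothetical minimal reproduction, with `Publisher` and `TraceContext` as stand-ins rather than Canton types:

import org.mockito.ArgumentMatchersSugar.*
import org.mockito.MockitoSugar.*

object ImplicitMatcherSketch {
  trait TraceContext
  trait Publisher {
    def publish(counter: Long, ts: Long)(implicit tc: TraceContext): Unit
  }

  def demo(): Unit = {
    val publisher = mock[Publisher]
    implicit val tc: TraceContext = new TraceContext {}
    publisher.publish(1L, 2L)
    // Without the trailing (any[TraceContext]) the verification no longer
    // matches the new signature.
    verify(publisher).publish(eqTo(1L), eqTo(2L))(any[TraceContext])
  }
}
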
} import com.digitalasset.canton.participant.util.{StateChange, TimeOfChange} -import com.digitalasset.canton.protocol.{ExampleTransactionFactory, LfContractId, ReassignmentId} +import com.digitalasset.canton.protocol.{ + ExampleTransactionFactory, + LfContractId, + ReassignmentId, + TargetDomainId, +} import com.digitalasset.canton.util.FutureInstances.* import com.digitalasset.canton.util.{Checked, CheckedT} import com.digitalasset.canton.version.HasTestCloseContext @@ -87,7 +92,10 @@ class ConflictDetectorTest private val active = Active(initialReassignmentCounter) private def defaultReassignmentCache: ReassignmentCache = - new ReassignmentCache(new InMemoryReassignmentStore(targetDomain, loggerFactory), loggerFactory) + new ReassignmentCache( + new InMemoryReassignmentStore(targetDomainId, loggerFactory), + loggerFactory, + ) private def mkCd( acs: ActiveContractStore = mkEmptyAcs(), @@ -1624,7 +1632,10 @@ class ConflictDetectorTest "detect conflicts between racing assignments" in { val reassignmentStore = - new InMemoryReassignmentStore(ReassignmentStoreTest.targetDomain, loggerFactory) + new InMemoryReassignmentStore( + TargetDomainId(ReassignmentStoreTest.indexedTargetDomain.domainId), + loggerFactory, + ) val hookedStore = new ReassignmentCacheTest.HookReassignmentStore(reassignmentStore) for { reassignmentCache <- mkReassignmentCache(loggerFactory, hookedStore)( diff --git a/sdk/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/conflictdetection/NaiveRequestTrackerTest.scala b/sdk/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/conflictdetection/NaiveRequestTrackerTest.scala index a3c197d7f495..f792bed6e2e9 100644 --- a/sdk/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/conflictdetection/NaiveRequestTrackerTest.scala +++ b/sdk/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/conflictdetection/NaiveRequestTrackerTest.scala @@ -31,7 +31,10 @@ class NaiveRequestTrackerTest ): NaiveRequestTracker = { val reassignmentCache = new ReassignmentCache( - new InMemoryReassignmentStore(ReassignmentStoreTest.targetDomain, loggerFactory), + new InMemoryReassignmentStore( + ReassignmentStoreTest.targetDomainId, + loggerFactory, + ), loggerFactory, ) diff --git a/sdk/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/pruning/AcsCommitmentProcessorTest.scala b/sdk/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/pruning/AcsCommitmentProcessorTest.scala index 76a78ac77062..55a6f156eb19 100644 --- a/sdk/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/pruning/AcsCommitmentProcessorTest.scala +++ b/sdk/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/pruning/AcsCommitmentProcessorTest.scala @@ -136,7 +136,7 @@ sealed trait AcsCommitmentProcessorBaseTest contracts: Map[LfContractId, NonEmpty[Seq[Lifespan]]] )(implicit ec: ExecutionContext, traceContext: TraceContext): Future[ActiveContractSnapshot] = { val acs = - new InMemoryActiveContractStore(indexedStringStore, testedProtocolVersion, loggerFactory) + new InMemoryActiveContractStore(indexedStringStore, loggerFactory) contracts.toList .flatMap { case (cid, seq) => seq.forgetNE.map(lifespan => (cid, lifespan)) } .parTraverse_ { case (cid, lifespan) => @@ -339,7 +339,7 @@ sealed trait AcsCommitmentProcessorBaseTest DefaultProcessingTimeouts.testing 
.copy(storageMaxRetryInterval = NonNegativeDuration.tryFromDuration(1.millisecond)), futureSupervisor, - new InMemoryActiveContractStore(indexedStringStore, testedProtocolVersion, loggerFactory), + new InMemoryActiveContractStore(indexedStringStore, loggerFactory), new InMemoryContractStore(loggerFactory), // no additional consistency checks; if enabled, one needs to populate the above ACS and contract stores // correctly, otherwise the test will fail diff --git a/sdk/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/store/ContractStoreTest.scala b/sdk/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/store/ContractStoreTest.scala index c43af8d0a306..d0f65f7f3857 100644 --- a/sdk/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/store/ContractStoreTest.scala +++ b/sdk/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/store/ContractStoreTest.scala @@ -92,7 +92,7 @@ trait ContractStoreTest { this: AsyncWordSpec & BaseTest => } } - "update a created contract with instance size > 32kB (oracle related, see DbContractStore)" in { + "update a created contract with instance size > 32kB" in { val store = mk() val manySignatories = 1 diff --git a/sdk/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/store/DamlLfSerializersTest.scala b/sdk/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/store/DamlLfSerializersTest.scala deleted file mode 100644 index dd719e3609e3..000000000000 --- a/sdk/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/store/DamlLfSerializersTest.scala +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright (c) 2024 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.participant.store - -import com.digitalasset.canton.protocol.WellFormedTransaction.WithSuffixes -import com.digitalasset.canton.protocol.{ExampleTransactionFactory, WellFormedTransaction} -import com.digitalasset.canton.{BaseTest, HasExecutionContext} -import org.scalatest.wordspec.AsyncWordSpec - -class DamlLfSerializersTest extends AsyncWordSpec with BaseTest with HasExecutionContext { - - "transaction serialization and deserialization" when { - - val etf = new ExampleTransactionFactory()() - forEvery(etf.standardHappyCases) { transaction => - transaction.toString must { - "have the same value after serialization and deserialization" in { - val wellFormedTransaction = transaction.wellFormedSuffixedTransaction - val proto = valueOrFail( - DamlLfSerializers.serializeTransaction(wellFormedTransaction.unwrap) - )("failed to serialize transaction") - val decodedVersionedTx = DamlLfSerializers.deserializeTransaction(proto).value - val wellFormedDecoded = - WellFormedTransaction.normalizeAndAssert( - decodedVersionedTx, - wellFormedTransaction.metadata, - WithSuffixes, - ) - - wellFormedDecoded shouldEqual wellFormedTransaction - } - } - } - } -} diff --git a/sdk/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/store/ReassignmentStoreTest.scala b/sdk/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/store/ReassignmentStoreTest.scala index 4c7bfac8f189..f29826c51756 100644 --- a/sdk/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/store/ReassignmentStoreTest.scala +++ b/sdk/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/store/ReassignmentStoreTest.scala @@ -42,6 +42,7 @@ import com.digitalasset.canton.protocol.{ } import com.digitalasset.canton.sequencing.protocol.* import com.digitalasset.canton.sequencing.traffic.TrafficReceipt +import com.digitalasset.canton.store.IndexedDomain import com.digitalasset.canton.time.TimeProofTestUtil import com.digitalasset.canton.topology.MediatorGroup.MediatorGroupIndex import com.digitalasset.canton.topology.* @@ -75,7 +76,7 @@ trait ReassignmentStoreTest { private implicit def toGlobalOffset(i: Long): GlobalOffset = GlobalOffset.tryFromLong(i) - protected def reassignmentStore(mk: TargetDomainId => ReassignmentStore): Unit = { + protected def reassignmentStore(mk: IndexedDomain => ReassignmentStore): Unit = { val reassignmentData = config .NonNegativeFiniteDuration(10.seconds) @@ -114,7 +115,7 @@ trait ReassignmentStoreTest { "lookup" should { "find previously stored reassignments" in { - val store = mk(targetDomain) + val store = mk(indexedTargetDomain) for { _ <- valueOrFail(store.addReassignment(reassignmentData).failOnShutdown)("add failed") lookup10 <- valueOrFail(store.lookup(reassignment10))( @@ -124,7 +125,7 @@ trait ReassignmentStoreTest { } "not invent reassignments" in { - val store = mk(targetDomain) + val store = mk(indexedTargetDomain) for { _ <- valueOrFail(store.addReassignment(reassignmentData).failOnShutdown)("add failed") lookup10 <- store.lookup(reassignment11).value @@ -137,7 +138,7 @@ trait ReassignmentStoreTest { "find" should { "filter by party" in { - val store = mk(targetDomain) + val store = mk(indexedTargetDomain) for { aliceReassignment <- mkReassignmentData( reassignment10, @@ -166,7 +167,7 @@ trait ReassignmentStoreTest { } "filter by timestamp" in { - val store = mk(targetDomain) + val store = mk(indexedTargetDomain) for { 
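
From this point on, every `mk(targetDomain)` call in `ReassignmentStoreTest` becomes `mk(indexedTargetDomain)`: the store factory is now driven by the `IndexedDomain` fixture defined later in this file's hunks, rather than by a bare `TargetDomainId`. An illustrative-only model of how the two fixtures relate (the real types live in Canton's store and protocol packages):

object DomainFixtureSketch {
  final case class DomainId(identifier: String, namespace: String)
  final case class TargetDomainId(unwrap: DomainId)

  // An IndexedDomain pairs the domain id with the small positive integer used
  // as its database key (the `domain_idx` column touched later in this patch).
  final case class IndexedDomain(domainId: DomainId, index: Int) {
    require(index > 0, "index must be positive")
  }

  val indexedTargetDomain: IndexedDomain =
    IndexedDomain(DomainId("target", "DOMAIN"), index = 1)
  val targetDomainId: TargetDomainId =
    TargetDomainId(indexedTargetDomain.domainId)
}
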
reassignment1 <- mkReassignmentData( @@ -190,7 +191,7 @@ trait ReassignmentStoreTest { } } "filter by domain" in { - val store = mk(targetDomain) + val store = mk(indexedTargetDomain) for { reassignment1 <- mkReassignmentData( ReassignmentId(sourceDomain1, CantonTimestamp.ofEpochMilli(100L)), @@ -208,7 +209,7 @@ trait ReassignmentStoreTest { } } "limit the number of results" in { - val store = mk(targetDomain) + val store = mk(indexedTargetDomain) for { reassignmentData10 <- mkReassignmentData(reassignment10, mediator1) reassignmentData11 <- mkReassignmentData(reassignment11, mediator1) @@ -228,7 +229,7 @@ trait ReassignmentStoreTest { } } "apply filters conjunctively" in { - val store = mk(targetDomain) + val store = mk(indexedTargetDomain) for { // Correct timestamp @@ -302,7 +303,7 @@ trait ReassignmentStoreTest { } yield (List(reassignment1, reassignment2, reassignment3, reassignment4)) "order pending reassignments" in { - val store = mk(targetDomain) + val store = mk(indexedTargetDomain) for { reassignments <- populate(store) @@ -315,7 +316,7 @@ trait ReassignmentStoreTest { } "give pending reassignments after the given timestamp" in { - val store = mk(targetDomain) + val store = mk(indexedTargetDomain) for { reassignments <- populate(store) @@ -331,13 +332,13 @@ trait ReassignmentStoreTest { } } "give no pending reassignments when empty" in { - val store = mk(targetDomain) + val store = mk(indexedTargetDomain) for { lookup <- store.findAfter(None, 10) } yield { lookup shouldBe empty } } "limit the results" in { - val store = mk(targetDomain) + val store = mk(indexedTargetDomain) for { reassignments <- populate(store) @@ -349,14 +350,14 @@ trait ReassignmentStoreTest { } } "exclude completed reassignments" in { - val store = mk(targetDomain) + val store = mk(indexedTargetDomain) for { reassignments <- populate(store) List(reassignment1, reassignment2, reassignment3, reassignment4) = reassignments: @unchecked checked <- store - .completeReasignment( + .completeReassignment( reassignment2.reassignmentId, TimeOfChange(RequestCounter(3), CantonTimestamp.Epoch.plusSeconds(3)), ) @@ -388,7 +389,7 @@ trait ReassignmentStoreTest { ) "allow batch updates" in { - val store = mk(targetDomain) + val store = mk(indexedTargetDomain) val data = (1L until 13).flatMap { i => val tid = reassignmentId.copy(unassignmentTs = CantonTimestamp.ofEpochSecond(i)) @@ -447,7 +448,7 @@ trait ReassignmentStoreTest { } "be idempotent" in { - val store = mk(targetDomain) + val store = mk(indexedTargetDomain) for { _ <- valueOrFail(store.addReassignment(reassignmentData).failOnShutdown)("add") @@ -497,7 +498,7 @@ trait ReassignmentStoreTest { } "return an error if assignment offset is the same as the unassignment" in { - val store = mk(targetDomain) + val store = mk(indexedTargetDomain) for { _ <- valueOrFail(store.addReassignment(reassignmentData).failOnShutdown)("add") @@ -518,7 +519,7 @@ trait ReassignmentStoreTest { } "return an error if unassignment offset is the same as the assignment" in { - val store = mk(targetDomain) + val store = mk(indexedTargetDomain) for { _ <- valueOrFail(store.addReassignment(reassignmentData).failOnShutdown)("add") @@ -539,7 +540,7 @@ trait ReassignmentStoreTest { } "return an error if the new value differs from the old one" in { - val store = mk(targetDomain) + val store = mk(indexedTargetDomain) for { _ <- valueOrFail(store.addReassignment(reassignmentData).failOnShutdown)("add") @@ -613,7 +614,7 @@ trait ReassignmentStoreTest { incompletes.map(_.toReassignmentData) shouldBe 
Seq(expectedReassignmentData) "list incomplete reassignments (unassignment done)" in { - val store = mk(targetDomain) + val store = mk(indexedTargetDomain) val reassignmentId = reassignmentData.reassignmentId val unassignmentOsset = 10L @@ -684,7 +685,7 @@ trait ReassignmentStoreTest { } "list incomplete reassignments (assignment done)" in { - val store = mk(targetDomain) + val store = mk(indexedTargetDomain) val reassignmentId = reassignmentData.reassignmentId val assignmentOffset = 10L @@ -757,7 +758,7 @@ trait ReassignmentStoreTest { } "take stakeholders filter into account" in { - val store = mk(targetDomain) + val store = mk(indexedTargetDomain) val alice = ReassignmentStoreTest.alice val bob = ReassignmentStoreTest.bob @@ -812,7 +813,7 @@ trait ReassignmentStoreTest { } "take domain filter into account" in { - val store = mk(targetDomain) + val store = mk(indexedTargetDomain) val offset = 10L val reassignment = @@ -832,7 +833,7 @@ trait ReassignmentStoreTest { } "limit the results" in { - val store = mk(targetDomain) + val store = mk(indexedTargetDomain) val offset = 42L for { @@ -856,7 +857,7 @@ trait ReassignmentStoreTest { "find first incomplete" should { "find incomplete reassignments (unassignment done)" in { - val store = mk(targetDomain) + val store = mk(indexedTargetDomain) val reassignmentId = reassignmentData.reassignmentId val unassignmentOffset = 10L @@ -891,7 +892,7 @@ trait ReassignmentStoreTest { } "find incomplete reassignments (assignment done)" in { - val store = mk(targetDomain) + val store = mk(indexedTargetDomain) val reassignmentId = reassignmentData.reassignmentId val unassignmentOffset = 10L @@ -927,7 +928,7 @@ trait ReassignmentStoreTest { } "returns None when reassignment store is empty or each reassignment is either complete or has no offset information" in { - val store = mk(targetDomain) + val store = mk(indexedTargetDomain) val reassignmentId1 = reassignmentData.reassignmentId val reassignmentId3 = reassignmentData3.reassignmentId @@ -988,7 +989,7 @@ trait ReassignmentStoreTest { } "works in complex scenario" in { - val store = mk(targetDomain) + val store = mk(indexedTargetDomain) val reassignmentId1 = reassignmentData.reassignmentId val reassignmentId2 = reassignmentData2.reassignmentId val reassignmentId3 = reassignmentData3.reassignmentId @@ -1059,7 +1060,7 @@ trait ReassignmentStoreTest { "addReassignment" should { "be idempotent" in { - val store = mk(targetDomain) + val store = mk(indexedTargetDomain) for { _ <- valueOrFail(store.addReassignment(reassignmentData).failOnShutdown)( "first add failed" @@ -1071,7 +1072,7 @@ trait ReassignmentStoreTest { } "detect modified reassignment data" in { - val store = mk(targetDomain) + val store = mk(indexedTargetDomain) val modifiedContract = asSerializable( reassignmentData.contract.contractId, @@ -1093,7 +1094,7 @@ trait ReassignmentStoreTest { } "handle unassignment results" in { - val store = mk(targetDomain) + val store = mk(indexedTargetDomain) for { _ <- valueOrFail(store.addReassignment(withUnassignmentResult).failOnShutdown)( "first add failed" @@ -1109,7 +1110,7 @@ trait ReassignmentStoreTest { } "add several reassignments" in { - val store = mk(targetDomain) + val store = mk(indexedTargetDomain) for { reassignmentData10 <- mkReassignmentData(reassignment10, mediator1) reassignmentData11 <- mkReassignmentData(reassignment11, mediator1) @@ -1134,7 +1135,7 @@ trait ReassignmentStoreTest { } "complain about reassignments for a different domain" in { - val store = 
mk(TargetDomainId(sourceDomain1.unwrap)) + val store = mk(IndexedDomain.tryCreate(sourceDomain1.unwrap, 2)) loggerFactory.assertInternalError[IllegalArgumentException]( store.addReassignment(reassignmentData), _.getMessage shouldBe "Domain domain1::DOMAIN1: Reassignment store cannot store reassignment for domain target::DOMAIN", @@ -1145,14 +1146,14 @@ trait ReassignmentStoreTest { "addUnassignmentResult" should { "report missing reassignments" in { - val store = mk(targetDomain) + val store = mk(indexedTargetDomain) for { missing <- store.addUnassignmentResult(unassignmentResult).failOnShutdown.value } yield missing shouldBe Left(UnknownReassignmentId(reassignment10)) } "add the result" in { - val store = mk(targetDomain) + val store = mk(indexedTargetDomain) for { _ <- valueOrFail(store.addReassignment(reassignmentData).failOnShutdown)("add failed") _ <- valueOrFail(store.addUnassignmentResult(unassignmentResult).failOnShutdown)( @@ -1166,7 +1167,7 @@ trait ReassignmentStoreTest { } "report mismatching results" in { - val store = mk(targetDomain) + val store = mk(indexedTargetDomain) val modifiedUnassignmentResult = unassignmentResult.copy( result = unassignmentResult.result.copy( content = @@ -1201,38 +1202,38 @@ trait ReassignmentStoreTest { "completeReassignment" should { "mark the reassignment as completed" in { - val store = mk(targetDomain) + val store = mk(indexedTargetDomain) for { _ <- valueOrFail(store.addReassignment(reassignmentData).failOnShutdown)("add failed") _ <- valueOrFail(store.addUnassignmentResult(unassignmentResult).failOnShutdown)( "addResult failed" ) - _ <- valueOrFail(store.completeReasignment(reassignment10, toc))("completion failed") + _ <- valueOrFail(store.completeReassignment(reassignment10, toc))("completion failed") lookup <- store.lookup(reassignment10).value } yield lookup shouldBe Left(ReassignmentCompleted(reassignment10, toc)) } "be idempotent" in { - val store = mk(targetDomain) + val store = mk(indexedTargetDomain) for { _ <- valueOrFail(store.addReassignment(reassignmentData).failOnShutdown)("add failed") _ <- valueOrFail(store.addUnassignmentResult(unassignmentResult).failOnShutdown)( "addResult failed" ) - _ <- valueOrFail(store.completeReasignment(reassignment10, toc))( + _ <- valueOrFail(store.completeReassignment(reassignment10, toc))( "first completion failed" ) - _ <- valueOrFail(store.completeReasignment(reassignment10, toc))( + _ <- valueOrFail(store.completeReassignment(reassignment10, toc))( "second completion failed" ) } yield succeed } "be allowed before the result" in { - val store = mk(targetDomain) + val store = mk(indexedTargetDomain) for { _ <- valueOrFail(store.addReassignment(reassignmentData).failOnShutdown)("add failed") - _ <- valueOrFail(store.completeReasignment(reassignment10, toc))( + _ <- valueOrFail(store.completeReassignment(reassignment10, toc))( "first completion failed" ) lookup1 <- store.lookup(reassignment10).value @@ -1240,7 +1241,7 @@ trait ReassignmentStoreTest { "addResult failed" ) lookup2 <- store.lookup(reassignment10).value - _ <- valueOrFail(store.completeReasignment(reassignment10, toc))( + _ <- valueOrFail(store.completeReassignment(reassignment10, toc))( "second completion failed" ) } yield { @@ -1250,7 +1251,7 @@ trait ReassignmentStoreTest { } "detect mismatches" in { - val store = mk(targetDomain) + val store = mk(indexedTargetDomain) val toc2 = TimeOfChange(RequestCounter(0), CantonTimestamp.ofEpochSecond(4)) val modifiedReassignmentData = reassignmentData.copy(unassignmentRequestCounter = 
RequestCounter(100)) @@ -1265,10 +1266,10 @@ trait ReassignmentStoreTest { _ <- valueOrFail(store.addUnassignmentResult(unassignmentResult).failOnShutdown)( "addResult failed" ) - _ <- valueOrFail(store.completeReasignment(reassignment10, toc))( + _ <- valueOrFail(store.completeReassignment(reassignment10, toc))( "first completion failed" ) - complete2 <- store.completeReasignment(reassignment10, toc2).value + complete2 <- store.completeReassignment(reassignment10, toc2).value add2 <- store.addReassignment(modifiedReassignmentData).failOnShutdown.value addResult2 <- store.addUnassignmentResult(modifiedUnassignmentResult).failOnShutdown.value } yield { @@ -1287,17 +1288,17 @@ trait ReassignmentStoreTest { } "store the first completion" in { - val store = mk(targetDomain) + val store = mk(indexedTargetDomain) val toc2 = TimeOfChange(RequestCounter(1), CantonTimestamp.ofEpochSecond(4)) for { _ <- valueOrFail(store.addReassignment(reassignmentData).failOnShutdown)("add failed") _ <- valueOrFail(store.addUnassignmentResult(unassignmentResult).failOnShutdown)( "addResult failed" ) - _ <- valueOrFail(store.completeReasignment(reassignment10, toc2))( + _ <- valueOrFail(store.completeReassignment(reassignment10, toc2))( "later completion failed" ) - complete2 <- store.completeReasignment(reassignment10, toc).value + complete2 <- store.completeReassignment(reassignment10, toc).value lookup <- store.lookup(reassignment10).value } yield { complete2 shouldBe Checked.continue(ReassignmentAlreadyCompleted(reassignment10, toc)) @@ -1308,7 +1309,7 @@ trait ReassignmentStoreTest { "delete" should { "remove the reassignment" in { - val store = mk(targetDomain) + val store = mk(indexedTargetDomain) for { _ <- valueOrFail(store.addReassignment(reassignmentData).failOnShutdown)("add failed") _ <- valueOrFail(store.addUnassignmentResult(unassignmentResult).failOnShutdown)( @@ -1320,26 +1321,26 @@ trait ReassignmentStoreTest { } "purge completed reassignments" in { - val store = mk(targetDomain) + val store = mk(indexedTargetDomain) for { _ <- valueOrFail(store.addReassignment(reassignmentData).failOnShutdown)("add failed") _ <- valueOrFail(store.addUnassignmentResult(unassignmentResult).failOnShutdown)( "addResult failed" ) - _ <- valueOrFail(store.completeReasignment(reassignment10, toc))("completion failed") + _ <- valueOrFail(store.completeReassignment(reassignment10, toc))("completion failed") _ <- store.deleteReassignment(reassignment10) } yield succeed } "ignore unknown reassignment IDs" in { - val store = mk(targetDomain) + val store = mk(indexedTargetDomain) for { () <- store.deleteReassignment(reassignment10) } yield succeed } "be idempotent" in { - val store = mk(targetDomain) + val store = mk(indexedTargetDomain) for { _ <- valueOrFail(store.addReassignment(reassignmentData).failOnShutdown)("add failed") () <- store.deleteReassignment(reassignment10) @@ -1349,8 +1350,8 @@ trait ReassignmentStoreTest { } "reassignment stores should be isolated" in { - val storeTarget = mk(targetDomain) - val store1 = mk(TargetDomainId(sourceDomain1.unwrap)) + val storeTarget = mk(indexedTargetDomain) + val store1 = mk(IndexedDomain.tryCreate(sourceDomain1.unwrap, 2)) for { _ <- valueOrFail(storeTarget.addReassignment(reassignmentData).failOnShutdown)("add failed") found <- store1.lookup(reassignmentData.reassignmentId).value @@ -1359,7 +1360,7 @@ trait ReassignmentStoreTest { "deleteCompletionsSince" should { "remove the completions from the criterion on" in { - val store = mk(targetDomain) + val store = 
mk(indexedTargetDomain) val toc1 = TimeOfChange(RequestCounter(1), CantonTimestamp.ofEpochSecond(5)) val toc2 = TimeOfChange(RequestCounter(2), CantonTimestamp.ofEpochSecond(7)) @@ -1381,19 +1382,23 @@ trait ReassignmentStoreTest { ) _ <- valueOrFail(store.addReassignment(bobReassignment).failOnShutdown)("add bob failed") _ <- valueOrFail(store.addReassignment(eveReassignment).failOnShutdown)("add eve failed") - _ <- valueOrFail(store.completeReasignment(reassignment10, toc))( + _ <- valueOrFail(store.completeReassignment(reassignment10, toc))( "completion alice failed" ) - _ <- valueOrFail(store.completeReasignment(reassignment11, toc1))("completion bob failed") - _ <- valueOrFail(store.completeReasignment(reassignment20, toc2))("completion eve failed") + _ <- valueOrFail(store.completeReassignment(reassignment11, toc1))( + "completion bob failed" + ) + _ <- valueOrFail(store.completeReassignment(reassignment20, toc2))( + "completion eve failed" + ) _ <- store.deleteCompletionsSince(RequestCounter(1)) alice <- leftOrFail(store.lookup(reassignment10))("alice must still be completed") bob <- valueOrFail(store.lookup(reassignment11))("bob must not be completed") eve <- valueOrFail(store.lookup(reassignment20))("eve must not be completed") - _ <- valueOrFail(store.completeReasignment(reassignment11, toc2))( + _ <- valueOrFail(store.completeReassignment(reassignment11, toc2))( "second completion bob failed" ) - _ <- valueOrFail(store.completeReasignment(reassignment20, toc1))( + _ <- valueOrFail(store.completeReassignment(reassignment20, toc1))( "second completion eve failed" ) } yield { @@ -1438,7 +1443,9 @@ object ReassignmentStoreTest extends EitherValues with NoTracing { val targetDomain2 = TargetDomainId(DomainId(UniqueIdentifier.tryCreate("domain2", "DOMAIN2"))) val mediator2 = MediatorGroupRecipient(MediatorGroupIndex.one) - val targetDomain = TargetDomainId(DomainId(UniqueIdentifier.tryCreate("target", "DOMAIN"))) + val indexedTargetDomain = + IndexedDomain.tryCreate(DomainId(UniqueIdentifier.tryCreate("target", "DOMAIN")), 1) + val targetDomainId = TargetDomainId(indexedTargetDomain.domainId) val reassignment10 = ReassignmentId(sourceDomain1, CantonTimestamp.Epoch) val reassignment11 = ReassignmentId(sourceDomain1, CantonTimestamp.ofEpochMilli(1)) @@ -1553,7 +1560,7 @@ object ReassignmentStoreTest extends EitherValues with NoTracing { reassignmentId, sourceMediator, submitter, - targetDomain, + targetDomainId, creatingTransactionId, contract, unassignmentGlobalOffset, diff --git a/sdk/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/store/db/DbActiveContractStoreTest.scala b/sdk/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/store/db/DbActiveContractStoreTest.scala index 75f63f1b77d9..391278eaeaf4 100644 --- a/sdk/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/store/db/DbActiveContractStoreTest.scala +++ b/sdk/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/store/db/DbActiveContractStoreTest.scala @@ -5,7 +5,6 @@ package com.digitalasset.canton.participant.store.db import com.daml.nameof.NameOf.functionFullName import com.digitalasset.canton.BaseTest -import com.digitalasset.canton.config.RequireTypes.PositiveNumeric import com.digitalasset.canton.participant.store.ActiveContractStoreTest import com.digitalasset.canton.participant.store.db.DbActiveContractStoreTest.maxDomainIndex import 
com.digitalasset.canton.participant.store.db.DbContractStoreTest.createDbContractStoreForTesting @@ -28,7 +27,7 @@ trait DbActiveContractStoreTest extends AsyncWordSpec with BaseTest with ActiveC DBIO.seq( sqlu"truncate table par_active_contracts", sqlu"truncate table par_active_contract_pruning", - sqlu"delete from par_contracts where domain_id >= $domainIndex and domain_id <= $maxDomainIndex", + sqlu"delete from par_contracts where domain_idx >= $domainIndex and domain_idx <= $maxDomainIndex", ), functionFullName, ) @@ -49,10 +48,8 @@ trait DbActiveContractStoreTest extends AsyncWordSpec with BaseTest with ActiveC storage, domainId, enableAdditionalConsistencyChecks = true, - maxContractIdSqlInListSize = PositiveNumeric.tryCreate(10), PrunableByTimeParameters.testingParams, indexStore, - testedProtocolVersion, timeouts, loggerFactory, )(ec) diff --git a/sdk/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/store/db/DbContractStoreTest.scala b/sdk/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/store/db/DbContractStoreTest.scala index cd8a75fcde9a..07ef0a3c9a86 100644 --- a/sdk/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/store/db/DbContractStoreTest.scala +++ b/sdk/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/store/db/DbContractStoreTest.scala @@ -5,7 +5,6 @@ package com.digitalasset.canton.participant.store.db import com.daml.nameof.NameOf.functionFullName import com.digitalasset.canton.BaseTest -import com.digitalasset.canton.config.RequireTypes.PositiveNumeric import com.digitalasset.canton.config.{ BatchAggregatorConfig, CachingConfigs, @@ -31,7 +30,10 @@ trait DbContractStoreTest extends AsyncWordSpec with BaseTest with ContractStore override def cleanDb(storage: DbStorage): Future[Int] = { import storage.api.* - storage.update(sqlu"delete from par_contracts where domain_id = $domainIndex", functionFullName) + storage.update( + sqlu"delete from par_contracts where domain_idx = $domainIndex", + functionFullName, + ) } "DbContractStore" should { @@ -57,12 +59,11 @@ object DbContractStoreTest { )(implicit ec: ExecutionContext): DbContractStore = new DbContractStore( storage = storage, - domainIdIndexed = IndexedDomain.tryCreate( + indexedDomain = IndexedDomain.tryCreate( domainId, domainIndex, ), protocolVersion = protocolVersion, - maxContractIdSqlInListSize = PositiveNumeric.tryCreate(2), cacheConfig = CachingConfigs.testing.contractStore, dbQueryBatcherConfig = BatchAggregatorConfig.defaultsForTesting, insertBatchAggregatorConfig = BatchAggregatorConfig.defaultsForTesting, diff --git a/sdk/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/store/db/DbDamlPackageStoreTest.scala b/sdk/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/store/db/DbDamlPackageStoreTest.scala index a68834b1e933..33dee4690191 100644 --- a/sdk/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/store/db/DbDamlPackageStoreTest.scala +++ b/sdk/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/store/db/DbDamlPackageStoreTest.scala @@ -4,7 +4,6 @@ package com.digitalasset.canton.participant.store.db import com.daml.nameof.NameOf.functionFullName -import com.digitalasset.canton.config.RequireTypes.PositiveNumeric import com.digitalasset.canton.participant.store.DamlPackageStoreTest import com.digitalasset.canton.resource.DbStorage import 
com.digitalasset.canton.store.db.{DbTest, H2Test, PostgresTest} @@ -29,7 +28,6 @@ trait DbDamlPackageStoreTest extends DamlPackageStoreTest { "DbDamlPackagesDarsStore" should { behave like damlPackageStore(() => new DbDamlPackageStore( - PositiveNumeric.tryCreate(2), storage, timeouts, futureSupervisor, diff --git a/sdk/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/store/db/DbInFlightSubmissionStoreTest.scala b/sdk/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/store/db/DbInFlightSubmissionStoreTest.scala index f75bb5c3fb10..b23891ce6baf 100644 --- a/sdk/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/store/db/DbInFlightSubmissionStoreTest.scala +++ b/sdk/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/store/db/DbInFlightSubmissionStoreTest.scala @@ -5,7 +5,6 @@ package com.digitalasset.canton.participant.store.db import com.digitalasset.canton.BaseTest import com.digitalasset.canton.config.BatchAggregatorConfig -import com.digitalasset.canton.config.RequireTypes.PositiveNumeric import com.digitalasset.canton.participant.store.InFlightSubmissionStoreTest import com.digitalasset.canton.resource.DbStorage import com.digitalasset.canton.store.db.{DbTest, H2Test, PostgresTest} @@ -31,7 +30,6 @@ trait DbInFlightSubmissionStoreTest behave like inFlightSubmissionStore { () => new DbInFlightSubmissionStore( storage, - PositiveNumeric.tryCreate(2), BatchAggregatorConfig.defaultsForTesting, testedReleaseProtocolVersion, timeouts, diff --git a/sdk/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/store/db/DbReassignmentStoreTest.scala b/sdk/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/store/db/DbReassignmentStoreTest.scala index 72b7edb198bc..9c3683b016fb 100644 --- a/sdk/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/store/db/DbReassignmentStoreTest.scala +++ b/sdk/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/store/db/DbReassignmentStoreTest.scala @@ -9,6 +9,7 @@ import com.digitalasset.canton.crypto.provider.symbolic.SymbolicPureCrypto import com.digitalasset.canton.participant.store.ReassignmentStoreTest import com.digitalasset.canton.resource.DbStorage import com.digitalasset.canton.store.db.{DbTest, H2Test, PostgresTest} +import com.digitalasset.canton.store.memory.InMemoryIndexedStringStore import com.digitalasset.canton.version.Reassignment.TargetProtocolVersion import org.scalatest.wordspec.AsyncWordSpec @@ -23,10 +24,14 @@ trait DbReassignmentStoreTest extends AsyncWordSpec with BaseTest with Reassignm } "DbReassignmentStore" should { + + val indexStore = new InMemoryIndexedStringStore(minIndex = 1, maxIndex = 100) + behave like reassignmentStore(domainId => new DbReassignmentStore( storage, domainId, + indexStore, TargetProtocolVersion(testedProtocolVersion), new SymbolicPureCrypto, futureSupervisor, diff --git a/sdk/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/store/db/DbRequestJournalStoreTest.scala b/sdk/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/store/db/DbRequestJournalStoreTest.scala index b781b6c1408c..118c78469963 100644 --- a/sdk/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/store/db/DbRequestJournalStoreTest.scala +++ 
b/sdk/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/store/db/DbRequestJournalStoreTest.scala @@ -6,7 +6,6 @@ package com.digitalasset.canton.participant.store.db import com.daml.nameof.NameOf.functionFullName import com.digitalasset.canton.BaseTest import com.digitalasset.canton.config.BatchAggregatorConfig -import com.digitalasset.canton.config.RequireTypes.PositiveNumeric import com.digitalasset.canton.participant.store.RequestJournalStoreTest import com.digitalasset.canton.resource.DbStorage import com.digitalasset.canton.store.IndexedDomain @@ -34,7 +33,6 @@ trait DbRequestJournalStoreTest extends AsyncWordSpec with BaseTest with Request new DbRequestJournalStore( IndexedDomain.tryCreate(domainId, 1), storage, - PositiveNumeric.tryCreate(2), BatchAggregatorConfig.defaultsForTesting, BatchAggregatorConfig.defaultsForTesting, timeouts, diff --git a/sdk/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/store/memory/ActiveContractStoreTestInMemory.scala b/sdk/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/store/memory/ActiveContractStoreTestInMemory.scala index 5a5708af89c7..ce9907da782b 100644 --- a/sdk/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/store/memory/ActiveContractStoreTestInMemory.scala +++ b/sdk/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/store/memory/ActiveContractStoreTestInMemory.scala @@ -27,7 +27,7 @@ class ActiveContractStoreTestInMemory "InMemoryActiveContractStore" should { behave like activeContractStore( ec => - new InMemoryActiveContractStore(indexedStringStore, testedProtocolVersion, loggerFactory)( + new InMemoryActiveContractStore(indexedStringStore, loggerFactory)( ec ), ec => new InMemoryContractStore(loggerFactory)(ec), diff --git a/sdk/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/store/memory/ReassignmentCacheTest.scala b/sdk/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/store/memory/ReassignmentCacheTest.scala index c59633553b83..1fca12a5579b 100644 --- a/sdk/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/store/memory/ReassignmentCacheTest.scala +++ b/sdk/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/store/memory/ReassignmentCacheTest.scala @@ -37,12 +37,15 @@ class ReassignmentCacheTest extends AsyncWordSpec with BaseTest with HasExecutor mkReassignmentDataForDomain( reassignment10, mediator1, - targetDomainId = ReassignmentStoreTest.targetDomain, + targetDomainId = targetDomainId, ) val toc = TimeOfChange(RequestCounter(0), CantonTimestamp.Epoch) + def createStore: InMemoryReassignmentStore = + new InMemoryReassignmentStore(targetDomainId, loggerFactory) + "find reassignments in the backing store" in { - val store = new InMemoryReassignmentStore(targetDomain, loggerFactory) + val store = createStore val cache = new ReassignmentCache(store, loggerFactory) for { @@ -60,7 +63,7 @@ class ReassignmentCacheTest extends AsyncWordSpec with BaseTest with HasExecutor "completeReassignment" should { "immediately report the reassignment as completed" in { - val backingStore = new InMemoryReassignmentStore(targetDomain, loggerFactory) + val backingStore = createStore val store = new HookReassignmentStore(backingStore) val cache = new ReassignmentCache(store, loggerFactory) for { @@ -84,7 +87,7 @@ class ReassignmentCacheTest extends AsyncWordSpec with BaseTest with 
HasExecutor } "report missing reassignments" in { - val store = new InMemoryReassignmentStore(targetDomain, loggerFactory) + val store = createStore val cache = new ReassignmentCache(store, loggerFactory) for { @@ -95,7 +98,7 @@ class ReassignmentCacheTest extends AsyncWordSpec with BaseTest with HasExecutor } "report mismatches" in { - val backingStore = new InMemoryReassignmentStore(targetDomain, loggerFactory) + val backingStore = createStore val store = new HookReassignmentStore(backingStore) val cache = new ReassignmentCache(store, loggerFactory) val toc2 = TimeOfChange(RequestCounter(0), CantonTimestamp.ofEpochSecond(1)) @@ -127,7 +130,7 @@ class ReassignmentCacheTest extends AsyncWordSpec with BaseTest with HasExecutor } "report mismatches coming from the store" in { - val backingStore = new InMemoryReassignmentStore(targetDomain, loggerFactory) + val backingStore = createStore val store = new HookReassignmentStore(backingStore) val cache = new ReassignmentCache(store, loggerFactory) val toc2 = TimeOfChange(RequestCounter(0), CantonTimestamp.ofEpochSecond(1)) @@ -137,7 +140,9 @@ class ReassignmentCacheTest extends AsyncWordSpec with BaseTest with HasExecutor for { reassignmentData <- reassignmentDataF _ <- valueOrFail(store.addReassignment(reassignmentData).failOnShutdown)("add failed") - _ <- valueOrFail(store.completeReasignment(reassignment10, toc2))("first completion failed") + _ <- valueOrFail(store.completeReassignment(reassignment10, toc2))( + "first completion failed" + ) _ = store.preComplete { (reassignmentId, _) => assert(reassignmentId == reassignment10) promise.completeWith(cache.completeReassignment(reassignment10, toc).value) @@ -154,7 +159,7 @@ class ReassignmentCacheTest extends AsyncWordSpec with BaseTest with HasExecutor } "complete only after having persisted the completion" in { - val backingStore = new InMemoryReassignmentStore(targetDomain, loggerFactory) + val backingStore = createStore val store = new HookReassignmentStore(backingStore) val cache = new ReassignmentCache(store, loggerFactory) @@ -186,7 +191,7 @@ class ReassignmentCacheTest extends AsyncWordSpec with BaseTest with HasExecutor TimeOfChange(RequestCounter(2), CantonTimestamp.ofEpochSecond(2)) "store the first completing request" in { - val store = new InMemoryReassignmentStore(targetDomain, loggerFactory) + val store = createStore val cache = new ReassignmentCache(store, loggerFactory) for { @@ -209,7 +214,7 @@ class ReassignmentCacheTest extends AsyncWordSpec with BaseTest with HasExecutor import cats.implicits.* implicit val ec: ExecutionContextIdlenessExecutorService = executorService - val store = new InMemoryReassignmentStore(targetDomain, loggerFactory) + val store = createStore val cache = new ReassignmentCache(store, loggerFactory)(executorService) val timestamps = (1L to 100L).toList.map { ts => @@ -287,7 +292,7 @@ object ReassignmentCacheTest { ): EitherT[FutureUnlessShutdown, ReassignmentStoreError, Unit] = baseStore.addReassignmentsOffsets(offsets) - override def completeReasignment( + override def completeReassignment( reassignmentId: ReassignmentId, timeOfCompletion: TimeOfChange, )(implicit @@ -295,7 +300,7 @@ object ReassignmentCacheTest { ): CheckedT[Future, Nothing, ReassignmentStoreError, Unit] = { val hook = preCompleteHook.getAndSet(HookReassignmentStore.preCompleteNoHook) hook(reassignmentId, timeOfCompletion).flatMap[Nothing, ReassignmentStoreError, Unit](_ => - baseStore.completeReasignment(reassignmentId, timeOfCompletion) + baseStore.completeReassignment(reassignmentId, 
timeOfCompletion) ) } diff --git a/sdk/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/store/memory/ReassignmentStoreTestInMemory.scala b/sdk/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/store/memory/ReassignmentStoreTestInMemory.scala index 41278d5b0c05..b51a9f8985c3 100644 --- a/sdk/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/store/memory/ReassignmentStoreTestInMemory.scala +++ b/sdk/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/store/memory/ReassignmentStoreTestInMemory.scala @@ -5,6 +5,7 @@ package com.digitalasset.canton.participant.store.memory import com.digitalasset.canton.participant.store.ReassignmentStoreTest import com.digitalasset.canton.protocol.TargetDomainId +import com.digitalasset.canton.store.IndexedDomain import com.digitalasset.canton.{BaseTest, HasExecutionContext} import org.scalatest.wordspec.AsyncWordSpec @@ -14,8 +15,8 @@ class ReassignmentStoreTestInMemory with HasExecutionContext with ReassignmentStoreTest { - private def mk(domain: TargetDomainId): InMemoryReassignmentStore = - new InMemoryReassignmentStore(domain, loggerFactory) + private def mk(domain: IndexedDomain): InMemoryReassignmentStore = + new InMemoryReassignmentStore(TargetDomainId(domain.domainId), loggerFactory) "ReassignmentStoreTestInMemory" should { behave like reassignmentStore(mk) diff --git a/sdk/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/sync/TimelyRejectNotifierTest.scala b/sdk/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/sync/TimelyRejectNotifierTest.scala index acda5e358df4..e2cb30dd0f13 100644 --- a/sdk/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/sync/TimelyRejectNotifierTest.scala +++ b/sdk/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/sync/TimelyRejectNotifierTest.scala @@ -42,7 +42,7 @@ class TimelyRejectNotifierTest extends AnyWordSpec with BaseTest with HasExecuti traceContext: TraceContext ): FutureUnlessShutdown[Unit] = ??? } - val notifier = new TimelyRejectNotifier(rejecter, None, loggerFactory) + val notifier = new TimelyRejectNotifier(rejecter, CantonTimestamp.MinValue, loggerFactory) cell.putIfAbsent(notifier) notifier.notifyAsync(Traced(CursorPrehead(SequencerCounter.MinValue, CantonTimestamp.Epoch))) eventually() { @@ -82,7 +82,7 @@ class TimelyRejectNotifierTest extends AnyWordSpec with BaseTest with HasExecuti traceContext: TraceContext ): FutureUnlessShutdown[Unit] = ??? 
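
With the initial bound now mandatory, these tests encode "nothing notified yet" as `CantonTimestamp.MinValue`. Assuming `notifyIfInPastAsync` takes the `increaseBound = false` branch of the predicate in the main hunk, a timestamp counts as "in the past" exactly when it is at or below the current bound, which is why the negative test below probes `MinValue.immediateSuccessor` instead of `MinValue`. A tiny model, with `Long` standing in for `CantonTimestamp`:

object InPastSketch {
  // increaseBound = false: notify iff !(oldBound < ts), i.e. iff ts <= oldBound.
  def isInPast(currentBound: Long, ts: Long): Boolean = ts <= currentBound

  def demo(): Unit = {
    assert(isInPast(Long.MinValue, Long.MinValue))      // the bound itself would notify
    assert(!isInPast(Long.MinValue, Long.MinValue + 1)) // its successor would not
  }
}
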
diff --git a/sdk/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/sync/TimelyRejectNotifierTest.scala b/sdk/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/sync/TimelyRejectNotifierTest.scala
index acda5e358df4..e2cb30dd0f13 100644
--- a/sdk/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/sync/TimelyRejectNotifierTest.scala
+++ b/sdk/canton/community/participant/src/test/scala/com/digitalasset/canton/participant/sync/TimelyRejectNotifierTest.scala
@@ -42,7 +42,7 @@ class TimelyRejectNotifierTest extends AnyWordSpec with BaseTest with HasExecuti
           traceContext: TraceContext
       ): FutureUnlessShutdown[Unit] = ???
     }
-    val notifier = new TimelyRejectNotifier(rejecter, None, loggerFactory)
+    val notifier = new TimelyRejectNotifier(rejecter, CantonTimestamp.MinValue, loggerFactory)
     cell.putIfAbsent(notifier)
     notifier.notifyAsync(Traced(CursorPrehead(SequencerCounter.MinValue, CantonTimestamp.Epoch)))
     eventually() {
@@ -82,7 +82,7 @@ class TimelyRejectNotifierTest extends AnyWordSpec with BaseTest with HasExecuti
           traceContext: TraceContext
       ): FutureUnlessShutdown[Unit] = ???
     }
-    val notifier = new TimelyRejectNotifier(rejecter, None, loggerFactory)
+    val notifier = new TimelyRejectNotifier(rejecter, CantonTimestamp.MinValue, loggerFactory)
     cell.putIfAbsent(notifier)
     notifier.notifyAsync(Traced(CursorPrehead(SequencerCounter.MinValue, CantonTimestamp.Epoch)))
     eventually() {
@@ -97,9 +97,13 @@ class TimelyRejectNotifierTest extends AnyWordSpec with BaseTest with HasExecuti
 
   "notify only if the timestamp is in the correct relation with the current bound" in {
     val rejecter = new MockTimelyRejecter(abort = false)
-    val notifier = new TimelyRejectNotifier(rejecter, None, loggerFactory)
+    val notifier = new TimelyRejectNotifier(
+      rejecter,
+      CantonTimestamp.MinValue,
+      loggerFactory,
+    )
 
-    notifier.notifyIfInPastAsync(CantonTimestamp.MinValue) shouldBe false
+    notifier.notifyIfInPastAsync(CantonTimestamp.MinValue.immediateSuccessor) shouldBe false
     always(timeout) {
       rejecter.invocations shouldBe Seq.empty
     }
@@ -171,7 +175,7 @@ class TimelyRejectNotifierTest extends AnyWordSpec with BaseTest with HasExecuti
         FutureUnlessShutdown.unit
       }
     }
-    val notifier = new TimelyRejectNotifier(rejecter, None, loggerFactory)
+    val notifier = new TimelyRejectNotifier(rejecter, CantonTimestamp.MinValue, loggerFactory)
     cell.putIfAbsent(notifier)
 
     notifier.notifyAsync(Traced(CursorPrehead(SequencerCounter.MinValue, CantonTimestamp.Epoch)))
@@ -187,7 +191,7 @@ class TimelyRejectNotifierTest extends AnyWordSpec with BaseTest with HasExecuti
 
   "stop upon AbortedDueToShutdown" in {
     val rejecter = new MockTimelyRejecter(abort = true)
-    val notifier = new TimelyRejectNotifier(rejecter, None, loggerFactory)
+    val notifier = new TimelyRejectNotifier(rejecter, CantonTimestamp.MinValue, loggerFactory)
 
     notifier.notifyAsync(Traced(CursorPrehead(SequencerCounter.Genesis, CantonTimestamp.Epoch)))
     eventually(timeout) {
@@ -210,7 +214,7 @@ class TimelyRejectNotifierTest extends AnyWordSpec with BaseTest with HasExecuti
 
   "deal with a lot of concurrent aborts" in {
     val rejecter = new MockTimelyRejecter(abort = true)
-    val notifier = new TimelyRejectNotifier(rejecter, None, loggerFactory)
+    val notifier = new TimelyRejectNotifier(rejecter, CantonTimestamp.MinValue, loggerFactory)
 
     for (i <- 1 to 100) {
       notifier.notifyAsync(
@@ -228,7 +232,7 @@ class TimelyRejectNotifierTest extends AnyWordSpec with BaseTest with HasExecuti
   "take initial bound into account" in {
     val rejecter = new MockTimelyRejecter(abort = false)
     val notifier =
-      new TimelyRejectNotifier(rejecter, Some(CantonTimestamp.ofEpochSecond(1)), loggerFactory)
+      new TimelyRejectNotifier(rejecter, CantonTimestamp.ofEpochSecond(1), loggerFactory)
 
     notifier.notifyAsync(Traced(CursorPrehead(SequencerCounter.Genesis, CantonTimestamp.Epoch)))
     always(timeout) {
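The second TimelyRejectNotifier constructor argument changes from Option[CantonTimestamp] to a plain initial bound, with CantonTimestamp.MinValue standing in for "nothing observed yet"; accordingly the bound test now probes MinValue.immediateSuccessor. A sketch of the bound bookkeeping under an assumed comparison rule (timestamps at or before the bound count as past; the real notifier also handles async notification and shutdown):

    import java.util.concurrent.atomic.AtomicLong

    // Sketch only: monotone bound tracking with Long as a stand-in timestamp.
    final class BoundTracker(initialBound: Long) {
      private val bound = new AtomicLong(initialBound)

      // Raise the bound monotonically as sequenced timestamps are observed.
      def advanceTo(ts: Long): Unit = {
        val _ = bound.updateAndGet(current => math.max(current, ts))
      }

      // Assumed rule: a timestamp is "in the past" iff it is <= the current bound.
      def isInPast(ts: Long): Boolean = ts <= bound.get()
    }

    // With initialBound = Long.MinValue (the analogue of CantonTimestamp.MinValue),
    // isInPast(Long.MinValue + 1) is false until the bound advances, matching the
    // updated expectation in the test above.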
diff --git a/sdk/canton/community/sequencer-driver/src/main/scala/com/digitalasset/canton/domain/block/BlockFormat.scala b/sdk/canton/community/sequencer-driver/src/main/scala/com/digitalasset/canton/domain/block/BlockFormat.scala
index bc6110232c29..66e2a2df6662 100644
--- a/sdk/canton/community/sequencer-driver/src/main/scala/com/digitalasset/canton/domain/block/BlockFormat.scala
+++ b/sdk/canton/community/sequencer-driver/src/main/scala/com/digitalasset/canton/domain/block/BlockFormat.scala
@@ -11,9 +11,14 @@ object BlockFormat {
 
   val DefaultFirstBlockHeight: Long = 0
 
+  /** @param tickTopology Whether the block should tick the sequencer's topology processor, so that it can return
+    *                     an up-to-date topology. Set to `true` by block orderers whenever they assess they may need
+    *                     to retrieve an up-to-date topology.
+    */
   final case class Block(
       blockHeight: Long,
       requests: Seq[Traced[OrderedRequest]],
+      tickTopology: Boolean,
   )
 
   final case class OrderedRequest(
@@ -26,7 +31,7 @@ object BlockFormat {
       logger: TracedLogger
   )(block: Block): RawLedgerBlock =
     block match {
-      case Block(blockHeight, requests) =>
+      case Block(blockHeight, requests, tickTopology) =>
         RawLedgerBlock(
           blockHeight,
           requests.map { case event @ Traced(OrderedRequest(orderingTime, tag, body)) =>
@@ -43,6 +48,7 @@ object BlockFormat {
                 sys.exit(1)
             }
           },
+          tickTopology,
         )
     }
 
diff --git a/sdk/canton/community/sequencer-driver/src/main/scala/com/digitalasset/canton/domain/block/SequencerDriver.scala b/sdk/canton/community/sequencer-driver/src/main/scala/com/digitalasset/canton/domain/block/SequencerDriver.scala
index c89a7098b8ea..add6299628ff 100644
--- a/sdk/canton/community/sequencer-driver/src/main/scala/com/digitalasset/canton/domain/block/SequencerDriver.scala
+++ b/sdk/canton/community/sequencer-driver/src/main/scala/com/digitalasset/canton/domain/block/SequencerDriver.scala
@@ -178,10 +178,13 @@ object SequencerDriver {
   *
   * @param blockHeight The height of the block. Block heights must be consecutive.
   * @param events The events in the given block.
+  * @param tickTopology Whether the sequencer's topology processor should be ticked after processing this block, so
+  *                     that it can return an up-to-date topology.
   */
 final case class RawLedgerBlock(
     blockHeight: Long,
     events: Seq[Traced[RawLedgerBlock.RawBlockEvent]],
+    tickTopology: Boolean,
 )
 
 object RawLedgerBlock {
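The tickTopology flag introduced above travels from the orderer-facing Block into the driver-facing RawLedgerBlock, so the sequencer knows when to tick its topology processor after processing a block. A minimal sketch of that propagation, using stand-in types (only the field names come from this patch; Traced, OrderedRequest, and the epoch-boundary criterion are simplified assumptions):

    object TickTopologySketch {
      // Stand-ins for the real canton types; only the shapes matter here.
      final case class Traced[A](value: A)
      final case class OrderedRequest(orderingTime: Long, tag: String, body: String)

      final case class Block(
          blockHeight: Long,
          requests: Seq[Traced[OrderedRequest]],
          tickTopology: Boolean,
      )
      final case class RawLedgerBlock(
          blockHeight: Long,
          events: Seq[Traced[OrderedRequest]],
          tickTopology: Boolean,
      )

      // The flag is propagated unchanged when converting to the raw representation.
      def toRaw(block: Block): RawLedgerBlock =
        RawLedgerBlock(block.blockHeight, block.requests, block.tickTopology)

      // A block orderer would set it whenever it may need fresh topology,
      // e.g. around an epoch boundary (hypothetical criterion).
      def mkBlock(h: Long, reqs: Seq[Traced[OrderedRequest]], epochBoundary: Boolean): Block =
        Block(h, reqs, tickTopology = epochBoundary)
    }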
diff --git a/sdk/canton/community/testing/src/main/scala/com/digitalasset/canton/store/db/DbStorageSetup.scala b/sdk/canton/community/testing/src/main/scala/com/digitalasset/canton/store/db/DbStorageSetup.scala
index 73aba09e5ad8..5a64b8a3e913 100644
--- a/sdk/canton/community/testing/src/main/scala/com/digitalasset/canton/store/db/DbStorageSetup.scala
+++ b/sdk/canton/community/testing/src/main/scala/com/digitalasset/canton/store/db/DbStorageSetup.scala
@@ -164,7 +164,6 @@ class PostgresCISetup(
     5432,
   )
 
-  @SuppressWarnings(Array("com.digitalasset.canton.SlickString"))
   override protected def prepareDatabase(): Unit =
     if (useDb != envDb) { // for provided db name or dev migrations
       val envDbConfig =
@@ -360,14 +359,5 @@ object DbStorageSetup {
       )
 
     def toH2DbConfig: H2 = H2(toH2Config)
-
-    def toOracleConfig: Config = configOfMap(
-      Map(
-        "driver" -> "oracle.jdbc.OracleDriver",
-        "url" -> DbConfig.oracleUrl(host, port, dbName),
-        "user" -> username,
-        "password" -> password,
-      )
-    )
   }
 }
diff --git a/sdk/canton/community/testing/src/main/scala/com/digitalasset/canton/topology/TestingIdentityFactory.scala b/sdk/canton/community/testing/src/main/scala/com/digitalasset/canton/topology/TestingIdentityFactory.scala
index 86856c8f6b46..0ea8a1f62e49 100644
--- a/sdk/canton/community/testing/src/main/scala/com/digitalasset/canton/topology/TestingIdentityFactory.scala
+++ b/sdk/canton/community/testing/src/main/scala/com/digitalasset/canton/topology/TestingIdentityFactory.scala
@@ -293,7 +293,10 @@ class TestingIdentityFactory(
   override def trySnapshot(timestamp: CantonTimestamp)(implicit
       traceContext: TraceContext
   ): TopologySnapshot = {
-    require(timestamp <= upToInclusive, "Topology information not yet available")
+    require(
+      timestamp <= upToInclusive,
+      s"Topology information not yet available for $timestamp",
+    )
     topologySnapshot(domainId, timestampForDomainParameters = timestamp)
   }
diff --git a/sdk/canton/community/util-logging/src/main/scala/com/digitalasset/canton/tracing/TraceContext.scala b/sdk/canton/community/util-logging/src/main/scala/com/digitalasset/canton/tracing/TraceContext.scala
index f0c0f7463118..76c7e185b4bd 100644
--- a/sdk/canton/community/util-logging/src/main/scala/com/digitalasset/canton/tracing/TraceContext.scala
+++ b/sdk/canton/community/util-logging/src/main/scala/com/digitalasset/canton/tracing/TraceContext.scala
@@ -101,6 +101,8 @@ object TraceContext {
     result
   }
 
+  def createNew(): TraceContext = withNewTraceContext(identity)
+
   def wrapWithNewTraceContext[A](item: A): Traced[A] =
     withNewTraceContext(implicit traceContext => Traced(item))
 
diff --git a/sdk/canton/ref b/sdk/canton/ref
index 23f74d547452..7312fcfce300 100644
--- a/sdk/canton/ref
+++ b/sdk/canton/ref
@@ -1 +1 @@
-20240930.14177.v70a8a41f
+20241003.14216.vfdbf1885
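The new TraceContext.createNew() is definitionally equivalent to withNewTraceContext(identity): it returns the fresh context directly instead of threading it through a closure. A usage sketch grounded in the two definitions shown in the hunk above (imports assumed from the same package):

    import com.digitalasset.canton.tracing.{TraceContext, Traced}

    // Both forms yield a fresh TraceContext; createNew avoids the closure when
    // no scoped work is needed.
    val viaClosure: TraceContext = TraceContext.withNewTraceContext(identity)
    val direct: TraceContext = TraceContext.createNew()

    // wrapWithNewTraceContext still fits when pairing a payload with its context:
    val traced: Traced[String] = TraceContext.wrapWithNewTraceContext("payload")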