diff --git a/.github/workflows/reviewdog.yml b/.github/workflows/reviewdog.yml index 22d205443b..3b29a5e96f 100644 --- a/.github/workflows/reviewdog.yml +++ b/.github/workflows/reviewdog.yml @@ -11,6 +11,16 @@ jobs: - name: Checkout repository uses: actions/checkout@v2 + with: + fetch-depth: 0 # required for new-from-rev option in .golangci.yml + + - uses: actions/setup-go@v2 + with: + go-version: "^1.16.2" + + - uses: actions/setup-go@v2 + with: + go-version: "^1.16.2" - name: Run golangci-lint # reviewdog v1.19.0, golangci-lint v1.38.0 uses: reviewdog/action-golangci-lint@93be4324306dcbba508544d891a7b0576bb28ddd diff --git a/CHANGELOG.md b/CHANGELOG.md index f758316182..a61b7ccb70 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,18 @@ +# v0.5.4 - 2021-03-29 +* Add new diagnostic APIs +* Add new docs sections +* Add branch inclusion state check before issuing new transactions +* Refactor the Faucet plugin +* Optimize transaction's past cone check +* Make issued messages pass through the parser filters +* Fix Faucet time usage +* Fix markers issue +* Fix max inputs count check +* Fix nil pointer in diagnostic API +* Update to latest hive.go +* Enhance golangci-lint +* **Breaking**: bumps network and database versions + # v0.5.3 - 2021-03-25 * Added new API endpoints * Added models navigation through the Dashboard Explorer diff --git a/README.md b/README.md index 0349a0f82d..432b1ce0a3 100644 --- a/README.md +++ b/README.md @@ -1,12 +1,12 @@


- +

Prototype node software for an IOTA network without the Coordinator

- + Developer documentation portal

@@ -87,11 +87,11 @@ The following Coordicide modules are a work in progress:
 
 You can use the Go client-library to interact with GoShimmer (located under `github.com/iotaledger/goshimmer/client`).
-You can find more info about this on our [client-lib](https://github.com/iotaledger/goshimmer/wiki/Client-Lib:-Interaction-with-layers) wiki page.
+You can find more info about this in our [client-lib](https://goshimmer.docs.iota.org/apis/api.html) and [Web API](https://goshimmer.docs.iota.org/apis/webAPI.html) documentation pages.
 
 ## Getting started
 
-You can find tutorials on how to [setup a GoShimmer node](https://github.com/iotaledger/goshimmer/wiki/Setup-up-a-GoShimmer-node-(Joining-the-pollen-testnet)), [writing a dApp](https://github.com/iotaledger/goshimmer/wiki/How-to-create-a-simple-dApp), [obtaining tokens from the faucet](https://github.com/iotaledger/goshimmer/wiki/How-to-obtain-tokens-from-the-faucet) and more on our [wiki](https://github.com/iotaledger/goshimmer/wiki).
+You can find tutorials on how to [set up a GoShimmer node](https://goshimmer.docs.iota.org/tutorials/setup.html), [write a dApp](https://goshimmer.docs.iota.org/tutorials/dApp.html), [obtain tokens from the faucet](https://goshimmer.docs.iota.org/tutorials/request_funds.html) and more in our [documentation portal](https://goshimmer.docs.iota.org/goshimmer.html).
 
 ## Supporting the project
 
@@ -107,4 +107,4 @@ When creating a pull request, we recommend that you do the following:
 
 ## Joining the discussion
 
-If you want to get involved in the community, need help getting started, have any issues related to the repository or just want to discuss blockchain, distributed ledgers, and IoT with other people, feel free to join our [Discord](https://discord.iota.org/).
\ No newline at end of file
+If you want to get involved in the community, need help getting started, have any issues related to the repository or just want to discuss blockchain, distributed ledgers, and IoT with other people, feel free to join our [Discord](https://discord.iota.org/).
diff --git a/docs/implementation_design/object_storage.md b/docs/implementation_design/object_storage.md
index 649f239c9f..e564a235cc 100644
--- a/docs/implementation_design/object_storage.md
+++ b/docs/implementation_design/object_storage.md
@@ -1 +1,177 @@
 # Object storage
+In GoShimmer `ObjectStorage` is used as a base data structure for many data collection elements such as `branchStorage`, `conflictStorage`, `messageStorage` and others.
+Its main characteristics are that it:
+- is a manual cache which keeps objects in memory as long as consumers are using them
+- uses a key-value store
+- provides mutex options for guarding shared variables and preventing multiple goroutines from changing the object state at the same time
+- takes care of dynamically creating different object types depending on the key and the serialized data it receives, through the utility `objectstorage.Factory`
+- helps with the creation and automatic configuration of multiple `ObjectStorage` instances from the same package.
+
+To create an object storage we need to provide the underlying `kvstore.KVStore` structure backed by the database.
+
+## Database
+GoShimmer stores data in the form of an object storage system. The data is kept in one large repository with a flat structure. This scales well and allows for fast retrieval, because objects are grouped under key prefixes (realms).
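+
+As a point of reference, the sketch below shows how such an underlying store can be provided by hand, e.g. in a unit test. It is only an illustration and assumes the in-memory `mapdb` package from hive.go that is introduced below; in a running node the store is created and managed by the database plugin instead.
+```Go
+package example
+
+import (
+    "github.com/iotaledger/hive.go/kvstore"
+    "github.com/iotaledger/hive.go/kvstore/mapdb"
+)
+
+// newInMemoryStore returns a kvstore.KVStore backed by a plain Go map.
+// Such a store can be handed to objectstorage.NewFactory (see the
+// "ObjectStorage factory" section below) without creating a database on disk.
+func newInMemoryStore() kvstore.KVStore {
+    return mapdb.NewMapDB()
+}
+```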
+
+Additionally, GoShimmer can keep the data only in memory, which is enabled with the `CfgDatabaseInMemory` parameter. In-memory storage is purely based on a Go map, provided by the `mapdb` package from hive.go.
+For persistent storage it uses the `badger` package from hive.go, a simple and fast key-value database that performs well for simultaneous reads and writes.
+
+Both solutions are implemented in the `database` package, along with prefix definitions that can be used during the creation of new object storage elements.
+
+The database plugin is responsible for creating a `store` instance of the chosen database under the directory specified with the `CfgDatabaseDir` parameter. It also manages a proper closure of the database upon receiving a shutdown signal: the database is marked as unhealthy during startup and marked as healthy again on shutdown, after which the garbage collector is run and the database can be closed.
+
+## ObjectStorage
+
+Assume we need to store data for some newly created object `A`. Then we need to define a new prefix for our package in the `database` package, and prefixes for the individual storage objects. They will later be used during `ObjectStorage` creation. A package prefix is combined with a store-specific prefix to create a specific realm.
+```Go
+package example
+
+type Storage struct {
+    A *objectstorage.ObjectStorage
+    ...
+    shutdownOnce sync.Once
+}
+```
+
+### ObjectStorage factory
+To easily create multiple storage object instances for one package, the most convenient way is to use the factory function:
+```Go
+osFactory := objectstorage.NewFactory(store, database.Prefix)
+```
+It needs two parameters:
+- `store` - the key-value `kvstore` instance
+- `database.Prefix` - the prefix defined in the `database` package for our new `example` package. It is responsible for the automatic configuration of the newly provided `kvstore` instance.
+
+After defining the storage factory for the group of objects, we can use it to create an `*objectstorage.ObjectStorage` instance:
+```Go
+AStorage = osFactory.New(objPrefix, FromObjectStorage)
+AStorage = osFactory.New(objPrefix, FromObjectStorage, optionalOptions...)
+```
+For the function parameters we should provide:
+- `objPrefix` - the object-specific prefix mentioned before.
+- `FromObjectStorage` - a function that allows the dynamic creation of different object types depending on the stored data.
+- `optionalOptions` - an optional parameter provided in the form of an options array `[]objectstorage.Option`. All possible options are defined in `objectstorage.Options`. If we do not specify them during creation, default values are used, such as enabled persistence or a cache time of 0.
+
+### StorableObject
+`StorableObject` is an interface that allows the dynamic creation of different object types depending on the stored data. To use the object storage factory, we need to make sure that all methods required by the interface are implemented:
+
+- `SetModified` - marks the object as modified, so that it will be written to disk (if persistence is enabled)
+- `IsModified` - returns true if the object is marked as modified
+- `Delete` - marks the object to be deleted from the persistence layer
+- `IsDeleted` - returns true if the object was marked as deleted
+- `Persist` - enables or disables persistence for this object
+- `ShouldPersist` - returns true if this object is going to be persisted
+- `Update` - updates the object with the values of another object - requires an explicit implementation
+- `ObjectStorageKey` - returns the key that is used to store the object in the database - requires an explicit implementation
+- `ObjectStorageValue` - marshals the object data into a sequence of bytes that are used as the value part in the object storage - requires an explicit implementation
+
+Most of these have their default implementation in the `objectstorage` library, except for `Update`, `ObjectStorageKey` and `ObjectStorageValue`, which need to be provided.
+
+### StorableObjectFactory function
+The function `ObjectFromObjectStorage` from the object storage provides functionality to restore objects from the `ObjectStorage`. By convention, the implementation of this function usually follows the schema: `ObjectFromObjectStorage` uses `ObjectFromBytes`.
+```Go
+func ObjectFromObjectStorage(key []byte, data []byte) (result StorableObject, err error) {
+    result, _, err = ObjectFromBytes(data)
+    ...
+    return
+}
+```
+
+`ObjectFromBytes` unmarshals the object's sequence of bytes with the help of the `marshalutil` library. The returned `consumedBytes` can be used for testing purposes.
+The created `marshalUtil` instance stores the stream of bytes and keeps track of what has already been read (`readOffset`).
+```Go
+func ObjectFromBytes(bytes []byte) (object *ObjectType, consumedBytes int, err error) {
+    marshalUtil := marshalutil.New(bytes)
+    if object, err = ObjectFromMarshalUtil(marshalUtil); err != nil {
+        ...
+    }
+    consumedBytes = marshalUtil.ReadOffset()
+    return
+}
+```
+The key logic is implemented in `ObjectFromMarshalUtil`, which takes the marshaled object and transforms it into an object of the specified type.
+Because the data is stored as a sequence of bytes, it carries no information about the form of the object and the data types it had before being written to the database.
+Thus, we need to serialize any data into a stream of bytes in order to write it (marshaling), and deserialize the stream of bytes back into the correct data structures when reading it (unmarshaling).
+As an example, let's consider the unmarshaling of the `Approver` object.
+```Go
+type Approver struct {
+    approverType        ApproverType // 8 bytes
+    referencedMessageID MessageID    // 32 bytes
+    approverMessageID   MessageID    // 32 bytes
+}
+```
+
+The order in which we read the bytes has to reflect the order in which they were written during marshaling. As in the example, the order `referencedMessageID`, `approverType`, `approverMessageID` is the same in both marshalling and unmarshalling.
+
+```Go
+// Unmarshalling
+func ApproverFromMarshalUtil(marshalUtil *marshalutil.MarshalUtil) (result *Approver) {
+    result = &Approver{}
+    result.referencedMessageID = MessageIDFromMarshalUtil(marshalUtil)
+    result.approverType = ApproverTypeFromMarshalUtil(marshalUtil)
+    result.approverMessageID = MessageIDFromMarshalUtil(marshalUtil)
+    return
+}
+
+// Marshalling
+func (a *Approver) ObjectStorageKey() []byte {
+    return marshalutil.New().
+        Write(a.referencedMessageID).
+        Write(a.approverType).
+        Write(a.approverMessageID).
+        Bytes()
+}
+```
+
+We continue to decompose our object into smaller pieces with the help of the `MarshalUtil` struct, which keeps track of the bytes and a read offset.
+Then we use the `marshalutil` built-in methods on the appropriate parts of the byte stream, with their length defined by the data type of the struct field. This way, we are able to parse the bytes into the correct Go data structures.
+
+### ObjectStorage methods
+After defining the marshalling and unmarshalling mechanisms for the `objectStorage` bytes conversion,
+we can start using it for its actual purpose: storing and reading the particular parts of the project elements.
+
+- `Load` - retrieves the corresponding object based on the provided id. For example, the method on the message `objectStorage` below gets the cached object.
+- To convert an object retrieved in the form of a cache into its corresponding type, we can use `Unwrap`.
+  In the code below it will return the message wrapped by the cached object.
+- `Exists` - checks whether the object exists, i.e. has not been deleted. If it does not, we release it from memory with the `Release` method.
+  ```Go
+  func (s *Storage) Message(messageID MessageID) *CachedMessage {
+      return &CachedMessage{CachedObject: s.messageStorage.Load(messageID[:])}
+  }
+
+  cachedMessage := messagelayer.Tangle().Storage.Message(msgID)
+  if !cachedMessage.Exists() {
+      cachedMessage.Release()
+  }
+  message := cachedMessage.Unwrap()
+  ```
+- `Consume` is useful when we want to apply a function to the cached object. `Consume` unwraps the `CachedObject` and passes a type-casted version to the consumer function.
+  Right after the object is consumed and the callback is finished, the object is released.
+
+  ```Go
+  cachedMessage.Consume(func(message *tangle.Message) {
+      doSomething(message)
+  })
+  ```
+- `ForEach` - applies a `Consumer` function to every object residing within the cache and the underlying persistence layer.
+  For example, this is how we can count the number of messages.
+  ```Go
+  messageCount := 0
+  messageStorage.ForEach(func(key []byte, cachedObject objectstorage.CachedObject) bool {
+      cachedObject.Consume(func(object objectstorage.StorableObject) {
+          messageCount++
+      })
+      return true
+  })
+  ```
+- `Store` - stores an object in the objectStorage. An extended version is the `StoreIfAbsent` method,
+  which stores the object only if it was not stored before and returns a boolean indicating whether the object was stored.
+  `ComputeIfAbsent` works similarly but does not access the value log.
+  ```Go
+  cachedMessage := messageStorage.Store(newMessage)
+  cachedMessage, stored := messageStorage.StoreIfAbsent(newMessage)
+  cachedMessage := messageStorage.ComputeIfAbsent(newMessage, remappingFunction)
+  ```
+
diff --git a/packages/consensus/fcob/consensusmechanism.go b/packages/consensus/fcob/consensusmechanism.go
index 863458d663..e502214ff8 100644
--- a/packages/consensus/fcob/consensusmechanism.go
+++ b/packages/consensus/fcob/consensusmechanism.go
@@ -61,8 +61,16 @@ func (f *ConsensusMechanism) Setup() {
 // TransactionLiked returns a boolean value indicating whether the given Transaction is liked.
func (f *ConsensusMechanism) TransactionLiked(transactionID ledgerstate.TransactionID) (liked bool) { - f.storage.Opinion(transactionID).Consume(func(opinion *Opinion) { - liked = opinion.OpinionEssence.liked + f.tangle.LedgerState.TransactionMetadata(transactionID).Consume(func(transactionMetadata *ledgerstate.TransactionMetadata) { + f.tangle.LedgerState.BranchDAG.Branch(transactionMetadata.BranchID()).Consume(func(branch ledgerstate.Branch) { + if !branch.MonotonicallyLiked() { + return + } + + f.storage.Opinion(transactionID).Consume(func(opinion *Opinion) { + liked = opinion.OpinionEssence.liked + }) + }) }) return diff --git a/packages/ledgerstate/input.go b/packages/ledgerstate/input.go index 238894ea0a..b6705b5191 100644 --- a/packages/ledgerstate/input.go +++ b/packages/ledgerstate/input.go @@ -20,6 +20,12 @@ import ( const ( // UTXOInputType is the type of an Input that references an UTXO Output. UTXOInputType InputType = iota + + // MinInputCount defines the minimum amount of Inputs in a Transaction. + MinInputCount = 1 + + // MaxInputCount defines the maximum amount of Inputs in a Transaction. + MaxInputCount = 127 ) // InputType represents the type of an Input. @@ -161,6 +167,14 @@ func InputsFromMarshalUtil(marshalUtil *marshalutil.MarshalUtil) (inputs Inputs, err = xerrors.Errorf("failed to parse inputs count (%v): %w", err, cerrors.ErrParseBytesFailed) return } + if inputsCount < MinInputCount { + err = xerrors.Errorf("amount of Inputs (%d) failed to reach MinInputCount (%d): %w", inputsCount, MinInputCount, cerrors.ErrParseBytesFailed) + return + } + if inputsCount > MaxInputCount { + err = xerrors.Errorf("amount of Inputs (%d) exceeds MaxInputCount (%d): %w", inputsCount, MaxInputCount, cerrors.ErrParseBytesFailed) + return + } var previousInput Input parsedInputs := make([]Input, inputsCount) diff --git a/packages/mana/consensusbasevector.go b/packages/mana/consensusbasevector.go index 31b34ca515..1770e0ea2f 100644 --- a/packages/mana/consensusbasevector.go +++ b/packages/mana/consensusbasevector.go @@ -41,7 +41,6 @@ func (c *ConsensusBaseManaVector) Has(nodeID identity.ID) bool { // BuildPastBaseVector builds a consensus base mana vector from past events upto time `t`. // `eventLogs` is expected to be sorted chronologically. func (c *ConsensusBaseManaVector) BuildPastBaseVector(eventsLog []Event, t time.Time) error { - emptyID := identity.ID{} if c.vector == nil { c.vector = make(map[identity.ID]*ConsensusBaseMana) } @@ -52,9 +51,6 @@ func (c *ConsensusBaseManaVector) BuildPastBaseVector(eventsLog []Event, t time. if ev.Time.After(t) { return nil } - if ev.NodeID == emptyID { - continue - } if _, exist := c.vector[ev.NodeID]; !exist { c.vector[ev.NodeID] = &ConsensusBaseMana{} } @@ -64,9 +60,6 @@ func (c *ConsensusBaseManaVector) BuildPastBaseVector(eventsLog []Event, t time. 
if ev.Time.After(t) { return nil } - if ev.NodeID == emptyID { - continue - } if _, exist := c.vector[ev.NodeID]; !exist { c.vector[ev.NodeID] = &ConsensusBaseMana{} } diff --git a/packages/markers/manager.go b/packages/markers/manager.go index e8c770b6e9..f995c22955 100644 --- a/packages/markers/manager.go +++ b/packages/markers/manager.go @@ -87,6 +87,8 @@ func (m *Manager) InheritStructureDetails(referencedStructureDetails []*Structur inheritedStructureDetails.IsPastMarker = true // sequence has just been created, so lowestIndex = highestIndex inheritedStructureDetails.PastMarkers = NewMarkers(&Marker{sequenceID: sequence.id, index: sequence.lowestIndex}) + + m.registerReferencingMarker(referencedMarkers, NewMarker(sequence.id, sequence.lowestIndex)) }) return } @@ -101,6 +103,9 @@ func (m *Manager) InheritStructureDetails(referencedStructureDetails []*Structur if newIndex, increased := sequence.IncreaseHighestIndex(referencedMarkers); increased { inheritedStructureDetails.IsPastMarker = true inheritedStructureDetails.PastMarkers = NewMarkers(&Marker{sequenceID: sequence.id, index: newIndex}) + + m.registerReferencingMarker(referencedMarkers, NewMarker(sequence.id, newIndex)) + return } } @@ -108,13 +113,13 @@ func (m *Manager) InheritStructureDetails(referencedStructureDetails []*Structur inheritedStructureDetails.PastMarkers = referencedMarkers }) - return + return inheritedStructureDetails, newSequenceCreated } // UpdateStructureDetails updates the StructureDetails of an existing node in the DAG by propagating new Markers of its // children into its future Markers. It returns two boolean flags that indicate if the future Markers were updated and // if the new Marker should be propagated further to the parents of the given node. -func (m *Manager) UpdateStructureDetails(structureDetailsToUpdate *StructureDetails, markerToInherit *Marker) (futureMarkersUpdated bool, inheritFutureMarkerFurther bool) { +func (m *Manager) UpdateStructureDetails(structureDetailsToUpdate *StructureDetails, markerToInherit *Marker) (futureMarkersUpdated, inheritFutureMarkerFurther bool) { structureDetailsToUpdate.futureMarkersUpdateMutex.Lock() defer structureDetailsToUpdate.futureMarkersUpdateMutex.Unlock() @@ -125,18 +130,14 @@ func (m *Manager) UpdateStructureDetails(structureDetailsToUpdate *StructureDeta structureDetailsToUpdate.FutureMarkers.Set(markerToInherit.sequenceID, markerToInherit.index) futureMarkersUpdated = true - // stop propagating further if structureDetailsToUpadate is a marker + // stop propagating further if structureDetailsToUpdate is a marker inheritFutureMarkerFurther = !structureDetailsToUpdate.IsPastMarker return } // IsInPastCone checks if the earlier node is directly or indirectly referenced by the later node in the DAG. 
-func (m *Manager) IsInPastCone(earlierStructureDetails *StructureDetails, laterStructureDetails *StructureDetails) (isInPastCone types.TriBool) { - if earlierStructureDetails.FutureMarkers.Size() == 0 && laterStructureDetails.FutureMarkers.Size() == 0 { - return types.Maybe - } - +func (m *Manager) IsInPastCone(earlierStructureDetails, laterStructureDetails *StructureDetails) (isInPastCone types.TriBool) { if earlierStructureDetails.Rank >= laterStructureDetails.Rank { return types.False } @@ -188,7 +189,7 @@ func (m *Manager) IsInPastCone(earlierStructureDetails *StructureDetails, laterS // Iterate the future markers of laterStructureDetails and check if the earlier one has future markers in the same sequence, // if yes, then make sure the index is smaller than the one of laterStructureDetails. - if !laterStructureDetails.FutureMarkers.ForEach(func(sequenceID SequenceID, laterIndex Index) bool { + if laterStructureDetails.FutureMarkers.Size() != 0 && !laterStructureDetails.FutureMarkers.ForEach(func(sequenceID SequenceID, laterIndex Index) bool { earlierIndex, similarSequenceExists := earlierStructureDetails.FutureMarkers.Get(sequenceID) return !similarSequenceExists || earlierIndex < laterIndex }) { @@ -214,7 +215,7 @@ func (m *Manager) IsInPastCone(earlierStructureDetails *StructureDetails, laterS } } - if m.markersReferenceMarkers(laterStructureDetails.PastMarkers, earlierStructureDetails.FutureMarkers, false) { + if earlierStructureDetails.FutureMarkers.Size() != 0 && m.markersReferenceMarkers(laterStructureDetails.PastMarkers, earlierStructureDetails.FutureMarkers, false) { return types.True } @@ -222,13 +223,22 @@ func (m *Manager) IsInPastCone(earlierStructureDetails *StructureDetails, laterS return types.False } - if m.markersReferenceMarkers(earlierStructureDetails.FutureMarkers, laterStructureDetails.PastMarkers, true) { + if earlierStructureDetails.FutureMarkers.Size() != 0 && m.markersReferenceMarkers(earlierStructureDetails.FutureMarkers, laterStructureDetails.PastMarkers, true) { + return types.Maybe + } + + if earlierStructureDetails.FutureMarkers.Size() == 0 && laterStructureDetails.FutureMarkers.Size() == 0 { return types.Maybe } return types.False } +// Sequence retrieves a Sequence from the object storage. +func (m *Manager) Sequence(sequenceID SequenceID) *CachedSequence { + return &CachedSequence{CachedObject: m.sequenceStore.Load(sequenceID.Bytes())} +} + // Shutdown shuts down the Manager and persists its state. 
func (m *Manager) Shutdown() { m.shutdownOnce.Do(func() { @@ -274,9 +284,9 @@ func (m *Manager) normalizeMarkers(markers *Markers) (normalizedMarkersByRank *m return false } - if !(&CachedSequence{CachedObject: m.sequenceStore.Load(sequenceID.Bytes())}).Consume(func(sequence *Sequence) { + if !m.Sequence(sequenceID).Consume(func(sequence *Sequence) { // for each of the parentMarkers of this particular index - sequence.HighestReferencedParentMarkers(index).ForEach(func(referencedSequenceID SequenceID, referencedIndex Index) bool { + sequence.ReferencedMarkers(index).ForEach(func(referencedSequenceID SequenceID, referencedIndex Index) bool { // of this marker delete the referenced sequences since they are no sequence tips anymore in the sequence DAG delete(normalizedSequences, referencedSequenceID) @@ -315,12 +325,12 @@ func (m *Manager) normalizeMarkers(markers *Markers) (normalizedMarkersByRank *m } } - return + return normalizedMarkersByRank, normalizedSequences } // markersReferenceMarkersOfSameSequence is an internal utility function that determines if the given markers reference // each other as part of the same Sequence. -func (m *Manager) markersReferenceMarkersOfSameSequence(laterMarkers *Markers, earlierMarkers *Markers, requireBiggerMarkers bool) (sameSequenceFound bool, referenceFound bool) { +func (m *Manager) markersReferenceMarkersOfSameSequence(laterMarkers, earlierMarkers *Markers, requireBiggerMarkers bool) (sameSequenceFound, referenceFound bool) { sameSequenceFound = !laterMarkers.ForEach(func(sequenceID SequenceID, laterIndex Index) bool { earlierIndex, sequenceExists := earlierMarkers.Get(sequenceID) if !sequenceExists { @@ -340,7 +350,7 @@ func (m *Manager) markersReferenceMarkersOfSameSequence(laterMarkers *Markers, e // markersReferenceMarkers is an internal utility function that returns true if the later Markers reference the earlier // Markers. If requireBiggerMarkers is false then a Marker with an equal Index is considered to be a valid reference. -func (m *Manager) markersReferenceMarkers(laterMarkers *Markers, earlierMarkers *Markers, requireBiggerMarkers bool) (result bool) { +func (m *Manager) markersReferenceMarkers(laterMarkers, earlierMarkers *Markers, requireBiggerMarkers bool) (result bool) { rankCache := make(map[SequenceID]uint64) futureMarkersByRank := newMarkersByRank() @@ -362,8 +372,8 @@ func (m *Manager) markersReferenceMarkers(laterMarkers *Markers, earlierMarkers // queue parents for additional checks laterMarkers.ForEach(func(sequenceID SequenceID, index Index) bool { - (&CachedSequence{CachedObject: m.sequenceStore.Load(sequenceID.Bytes())}).Consume(func(sequence *Sequence) { - sequence.HighestReferencedParentMarkers(index).ForEach(func(referencedSequenceID SequenceID, referencedIndex Index) bool { + m.Sequence(sequenceID).Consume(func(sequence *Sequence) { + sequence.ReferencedMarkers(index).ForEach(func(referencedSequenceID SequenceID, referencedIndex Index) bool { futureMarkersByRank.Add(m.rankOfSequence(referencedSequenceID, rankCache), referencedSequenceID, referencedIndex) return true }) @@ -398,7 +408,19 @@ func (m *Manager) markersReferenceMarkers(laterMarkers *Markers, earlierMarkers } } - return + return result +} + +// registerReferencingMarker is an internal utility function that adds a referencing Marker to the internal data +// structure. 
+func (m *Manager) registerReferencingMarker(referencedMarkers *Markers, marker *Marker) { + referencedMarkers.ForEach(func(sequenceID SequenceID, index Index) bool { + m.Sequence(sequenceID).Consume(func(sequence *Sequence) { + sequence.AddReferencingMarker(index, marker) + }) + + return true + }) } // fetchSequence is an internal utility function that retrieves or creates the Sequence that represents the given @@ -429,7 +451,7 @@ func (m *Manager) fetchSequence(referencedMarkers *Markers, rank uint64, sequenc } cachedSequenceAliasMapping.Consume(func(sequenceAliasMapping *SequenceAliasMapping) { - cachedSequence = &CachedSequence{CachedObject: m.sequenceStore.Load(sequenceAliasMapping.SequenceID().Bytes())} + cachedSequence = m.Sequence(sequenceAliasMapping.SequenceID()) }) return @@ -441,7 +463,7 @@ func (m *Manager) rankOfSequence(sequenceID SequenceID, ranksCache map[SequenceI return rank } - if !(&CachedSequence{CachedObject: m.sequenceStore.Load(sequenceID.Bytes())}).Consume(func(sequence *Sequence) { + if !m.Sequence(sequenceID).Consume(func(sequence *Sequence) { ranksCache[sequenceID] = sequence.rank }) { panic(fmt.Sprintf("failed to load Sequence with %s", sequenceID)) diff --git a/packages/markers/manager_test.go b/packages/markers/manager_test.go index 79626b612b..db33491dfc 100644 --- a/packages/markers/manager_test.go +++ b/packages/markers/manager_test.go @@ -153,7 +153,7 @@ func TestManager(t *testing.T) { } } -func messageReferencesMessage(laterMessage *message, earlierMessage *message, messageDB map[string]*message) types.TriBool { +func messageReferencesMessage(laterMessage, earlierMessage *message, messageDB map[string]*message) types.TriBool { for _, parentID := range laterMessage.parents { if parentID == earlierMessage.id { return types.True diff --git a/packages/markers/marker.go b/packages/markers/marker.go index 32e4c47f75..e50e7c7c8b 100644 --- a/packages/markers/marker.go +++ b/packages/markers/marker.go @@ -2,11 +2,13 @@ package markers import ( "fmt" + "sort" "strconv" "strings" "sync" "github.com/iotaledger/hive.go/cerrors" + "github.com/iotaledger/hive.go/datastructure/thresholdmap" "github.com/iotaledger/hive.go/marshalutil" "github.com/iotaledger/hive.go/stringify" "golang.org/x/xerrors" @@ -179,7 +181,7 @@ func (m *Markers) Get(sequenceID SequenceID) (index Index, exists bool) { // Set adds a new Marker to the collection and updates the Index of an existing entry if it is higher than a possible // previously stored one. The method returns two boolean flags that indicate if an entry was updated and/or added. 
-func (m *Markers) Set(sequenceID SequenceID, index Index) (updated bool, added bool) { +func (m *Markers) Set(sequenceID SequenceID, index Index) (updated, added bool) { m.markersMutex.Lock() defer m.markersMutex.Unlock() @@ -212,10 +214,8 @@ func (m *Markers) Set(sequenceID SequenceID, index Index) (updated bool, added b } m.markers[sequenceID] = index - updated = true - added = true - return + return true, true } // Delete removes the Marker with the given SequenceID from the collection and returns a boolean flag that indicates if @@ -369,8 +369,6 @@ func (m *Markers) String() (humanReadableMarkers string) { return true }) - structBuilder.AddField(stringify.StructField("lowestIndex", m.LowestIndex())) - structBuilder.AddField(stringify.StructField("highestIndex", m.HighestIndex())) return structBuilder.String() } @@ -388,6 +386,319 @@ func (m *Markers) SequenceToString() (s string) { // endregion /////////////////////////////////////////////////////////////////////////////////////////////////////////// +// region ReferencingMarkers /////////////////////////////////////////////////////////////////////////////////////////// + +// ReferencingMarkers is a data structure that allows to denote which Markers of child Sequences in the Sequence DAG +// reference a given Marker in a Sequence. +type ReferencingMarkers struct { + referencingIndexesBySequence markerReferences + mutex sync.RWMutex +} + +// NewReferencingMarkers is the constructor for the ReferencingMarkers. +func NewReferencingMarkers() (referencingMarkers *ReferencingMarkers) { + referencingMarkers = &ReferencingMarkers{ + referencingIndexesBySequence: make(map[SequenceID]*thresholdmap.ThresholdMap), + } + + return +} + +// ReferencingMarkersFromBytes unmarshals ReferencingMarkers from a sequence of bytes. +func ReferencingMarkersFromBytes(referencingMarkersBytes []byte) (referencingMarkers *ReferencingMarkers, consumedBytes int, err error) { + marshalUtil := marshalutil.New(referencingMarkersBytes) + if referencingMarkers, err = ReferencingMarkersFromMarshalUtil(marshalUtil); err != nil { + err = xerrors.Errorf("failed to parse ReferencingMarkers from MarshalUtil: %w", err) + return + } + consumedBytes = marshalUtil.ReadOffset() + + return +} + +// ReferencingMarkersFromMarshalUtil unmarshals ReferencingMarkers using a MarshalUtil (for easier unmarshaling). +func ReferencingMarkersFromMarshalUtil(marshalUtil *marshalutil.MarshalUtil) (referencingMarkers *ReferencingMarkers, err error) { + referencingMarkers = &ReferencingMarkers{ + referencingIndexesBySequence: make(map[SequenceID]*thresholdmap.ThresholdMap), + } + + referencingMarkers.referencingIndexesBySequence, err = markerReferencesFromMarshalUtil(marshalUtil, thresholdmap.UpperThresholdMode) + return referencingMarkers, err +} + +// Add adds a new referencing Marker to the ReferencingMarkers. +func (r *ReferencingMarkers) Add(index Index, referencingMarker *Marker) { + r.mutex.Lock() + defer r.mutex.Unlock() + + thresholdMap, thresholdMapExists := r.referencingIndexesBySequence[referencingMarker.SequenceID()] + if !thresholdMapExists { + thresholdMap = thresholdmap.New(thresholdmap.UpperThresholdMode) + r.referencingIndexesBySequence[referencingMarker.SequenceID()] = thresholdMap + } + + thresholdMap.Set(uint64(index), referencingMarker.Index()) +} + +// Get returns the Markers of child Sequences that reference the given Index. 
+func (r *ReferencingMarkers) Get(index Index) (referencingMarkers *Markers) { + r.mutex.RLock() + defer r.mutex.RUnlock() + + referencingMarkers = NewMarkers() + for sequenceID, thresholdMap := range r.referencingIndexesBySequence { + if referencingIndex, exists := thresholdMap.Get(uint64(index)); exists { + referencingMarkers.Set(sequenceID, referencingIndex.(Index)) + } + } + + return +} + +// Bytes returns a marshaled version of the ReferencingMarkers. +func (r *ReferencingMarkers) Bytes() (marshaledReferencingMarkers []byte) { + r.mutex.RLock() + defer r.mutex.RUnlock() + + marshalUtil := marshalutil.New() + marshalUtil.WriteUint64(uint64(len(r.referencingIndexesBySequence))) + for sequenceID, thresholdMap := range r.referencingIndexesBySequence { + marshalUtil.Write(sequenceID) + marshalUtil.WriteUint64(uint64(thresholdMap.Size())) + thresholdMap.ForEach(func(node *thresholdmap.Element) bool { + marshalUtil.WriteUint64(node.Key().(uint64)) + marshalUtil.WriteUint64(uint64(node.Value().(Index))) + + return true + }) + } + + return marshalUtil.Bytes() +} + +// String returns a human readable version of the ReferencingMarkers. +func (r *ReferencingMarkers) String() (humanReadableReferencingMarkers string) { + r.mutex.RLock() + defer r.mutex.RUnlock() + + indexes := make([]Index, 0) + referencingMarkersByReferencingIndex := make(map[Index]*Markers) + for sequenceID, thresholdMap := range r.referencingIndexesBySequence { + thresholdMap.ForEach(func(node *thresholdmap.Element) bool { + index := Index(node.Key().(uint64)) + referencingIndex := node.Value().(Index) + if _, exists := referencingMarkersByReferencingIndex[index]; !exists { + referencingMarkersByReferencingIndex[index] = NewMarkers() + + indexes = append(indexes, index) + } + + referencingMarkersByReferencingIndex[index].Set(sequenceID, referencingIndex) + + return true + }) + } + sort.Slice(indexes, func(i, j int) bool { + return indexes[i] < indexes[j] + }) + + for i, index := range indexes { + for j := i + 1; j < len(indexes); j++ { + referencingMarkersByReferencingIndex[indexes[j]].ForEach(func(referencingSequenceID SequenceID, referencingIndex Index) bool { + if _, exists := referencingMarkersByReferencingIndex[index].Get(referencingSequenceID); exists { + return true + } + + referencingMarkersByReferencingIndex[index].Set(referencingSequenceID, referencingIndex) + + return true + }) + } + } + + thresholdStart := "0" + referencingMarkers := stringify.StructBuilder("ReferencingMarkers") + for _, index := range indexes { + thresholdEnd := strconv.FormatUint(uint64(index), 10) + + if thresholdStart == thresholdEnd { + referencingMarkers.AddField(stringify.StructField("Index("+thresholdStart+")", referencingMarkersByReferencingIndex[index])) + } else { + referencingMarkers.AddField(stringify.StructField("Index("+thresholdStart+" ... "+thresholdEnd+")", referencingMarkersByReferencingIndex[index])) + } + + thresholdStart = strconv.FormatUint(uint64(index)+1, 10) + } + + return referencingMarkers.String() +} + +// endregion /////////////////////////////////////////////////////////////////////////////////////////////////////////// + +// region ReferencedMarkers //////////////////////////////////////////////////////////////////////////////////////////// + +// ReferencedMarkers is a data structure that allows to denote which Marker of a Sequence references which other Markers +// of its parent Sequences in the Sequence DAG. 
+type ReferencedMarkers struct { + referencedIndexesBySequence markerReferences + mutex sync.RWMutex +} + +// NewReferencedMarkers is the constructor for the ReferencedMarkers. +func NewReferencedMarkers(markers *Markers) (referencedMarkers *ReferencedMarkers) { + referencedMarkers = &ReferencedMarkers{ + referencedIndexesBySequence: make(map[SequenceID]*thresholdmap.ThresholdMap), + } + + initialSequenceIndex := markers.HighestIndex() + 1 + markers.ForEach(func(sequenceID SequenceID, index Index) bool { + thresholdMap := thresholdmap.New(thresholdmap.LowerThresholdMode) + thresholdMap.Set(uint64(initialSequenceIndex), index) + + referencedMarkers.referencedIndexesBySequence[sequenceID] = thresholdMap + + return true + }) + + return +} + +// ReferencedMarkersFromBytes unmarshals ReferencedMarkers from a sequence of bytes. +func ReferencedMarkersFromBytes(parentReferencesBytes []byte) (referencedMarkers *ReferencedMarkers, consumedBytes int, err error) { + marshalUtil := marshalutil.New(parentReferencesBytes) + if referencedMarkers, err = ReferencedMarkersFromMarshalUtil(marshalUtil); err != nil { + err = xerrors.Errorf("failed to parse ReferencedMarkers from MarshalUtil: %w", err) + return + } + consumedBytes = marshalUtil.ReadOffset() + + return +} + +// ReferencedMarkersFromMarshalUtil unmarshals ReferencedMarkers using a MarshalUtil (for easier unmarshaling). +func ReferencedMarkersFromMarshalUtil(marshalUtil *marshalutil.MarshalUtil) (referencedMarkers *ReferencedMarkers, err error) { + referencedMarkers = &ReferencedMarkers{ + referencedIndexesBySequence: make(map[SequenceID]*thresholdmap.ThresholdMap), + } + + referencedMarkers.referencedIndexesBySequence, err = markerReferencesFromMarshalUtil(marshalUtil, thresholdmap.LowerThresholdMode) + return referencedMarkers, err +} + +// Add adds new referenced Markers to the ReferencedMarkers. +func (r *ReferencedMarkers) Add(index Index, referencedMarkers *Markers) { + r.mutex.Lock() + defer r.mutex.Unlock() + + referencedMarkers.ForEach(func(referencedSequenceID SequenceID, referencedIndex Index) bool { + thresholdMap, exists := r.referencedIndexesBySequence[referencedSequenceID] + if !exists { + thresholdMap = thresholdmap.New(thresholdmap.LowerThresholdMode) + r.referencedIndexesBySequence[referencedSequenceID] = thresholdMap + } + + thresholdMap.Set(uint64(index), referencedIndex) + + return true + }) +} + +// Get returns the Markers of parent Sequences that were referenced by the given Index. +func (r *ReferencedMarkers) Get(index Index) (referencedMarkers *Markers) { + r.mutex.RLock() + defer r.mutex.RUnlock() + + referencedMarkers = NewMarkers() + for sequenceID, thresholdMap := range r.referencedIndexesBySequence { + if referencedIndex, exists := thresholdMap.Get(uint64(index)); exists { + referencedMarkers.Set(sequenceID, referencedIndex.(Index)) + } + } + + return +} + +// Bytes returns a marshaled version of the ReferencedMarkers. 
+func (r *ReferencedMarkers) Bytes() (marshaledReferencedMarkers []byte) { + r.mutex.RLock() + defer r.mutex.RUnlock() + + marshalUtil := marshalutil.New() + marshalUtil.WriteUint64(uint64(len(r.referencedIndexesBySequence))) + for sequenceID, thresholdMap := range r.referencedIndexesBySequence { + marshalUtil.Write(sequenceID) + marshalUtil.WriteUint64(uint64(thresholdMap.Size())) + thresholdMap.ForEach(func(node *thresholdmap.Element) bool { + marshalUtil.WriteUint64(node.Key().(uint64)) + marshalUtil.WriteUint64(uint64(node.Value().(Index))) + + return true + }) + } + + return marshalUtil.Bytes() +} + +// String returns a human readable version of the ReferencedMarkers. +func (r *ReferencedMarkers) String() (humanReadableReferencedMarkers string) { + r.mutex.RLock() + defer r.mutex.RUnlock() + + indexes := make([]Index, 0) + referencedMarkersByReferencingIndex := make(map[Index]*Markers) + for sequenceID, thresholdMap := range r.referencedIndexesBySequence { + thresholdMap.ForEach(func(node *thresholdmap.Element) bool { + index := Index(node.Key().(uint64)) + referencedIndex := Index(node.Value().(uint64)) + if _, exists := referencedMarkersByReferencingIndex[index]; !exists { + referencedMarkersByReferencingIndex[index] = NewMarkers() + + indexes = append(indexes, index) + } + + referencedMarkersByReferencingIndex[index].Set(sequenceID, referencedIndex) + + return true + }) + } + sort.Slice(indexes, func(i, j int) bool { + return indexes[i] < indexes[j] + }) + + for i, referencedIndex := range indexes { + for j := 0; j < i; j++ { + referencedMarkersByReferencingIndex[indexes[j]].ForEach(func(sequenceID SequenceID, index Index) bool { + if _, exists := referencedMarkersByReferencingIndex[referencedIndex].Get(sequenceID); exists { + return true + } + + referencedMarkersByReferencingIndex[referencedIndex].Set(sequenceID, index) + + return true + }) + } + } + + referencedMarkers := stringify.StructBuilder("ReferencedMarkers") + for i, index := range indexes { + thresholdStart := strconv.FormatUint(uint64(index), 10) + thresholdEnd := "INF" + if len(indexes) > i+1 { + thresholdEnd = strconv.FormatUint(uint64(indexes[i+1])-1, 10) + } + + if thresholdStart == thresholdEnd { + referencedMarkers.AddField(stringify.StructField("Index("+thresholdStart+")", referencedMarkersByReferencingIndex[index])) + } else { + referencedMarkers.AddField(stringify.StructField("Index("+thresholdStart+" ... "+thresholdEnd+")", referencedMarkersByReferencingIndex[index])) + } + } + + return referencedMarkers.String() +} + +// endregion /////////////////////////////////////////////////////////////////////////////////////////////////////////// + // region markersByRank //////////////////////////////////////////////////////////////////////////////////////////////// // markersByRank is a collection of Markers that groups them by the rank of their Sequence. @@ -411,7 +722,7 @@ func newMarkersByRank() (newMarkersByRank *markersByRank) { // Add adds a new Marker to the collection and returns two boolean flags that indicate if a Marker was added and/or // updated. -func (m *markersByRank) Add(rank uint64, sequenceID SequenceID, index Index) (updated bool, added bool) { +func (m *markersByRank) Add(rank uint64, sequenceID SequenceID, index Index) (updated, added bool) { m.markersByRankMutex.Lock() defer m.markersByRankMutex.Unlock() @@ -516,7 +827,7 @@ func (m *markersByRank) Delete(rank uint64, sequenceID SequenceID) (deleted bool } } - return + return deleted } // LowestRank returns the lowest rank that has Markers. 
@@ -581,3 +892,73 @@ func (m *markersByRank) String() (humanReadableMarkersByRank string) { } // endregion /////////////////////////////////////////////////////////////////////////////////////////////////////////// + +// region markerReferences ///////////////////////////////////////////////////////////////////////////////////////////// + +// markerReferences represents a type that encodes the reference between Markers of different Sequences. +type markerReferences map[SequenceID]*thresholdmap.ThresholdMap + +// markerReferencesFromMarshalUtil unmarshals markerReferences using a MarshalUtil (for easier unmarshaling). +func markerReferencesFromMarshalUtil(marshalUtil *marshalutil.MarshalUtil, mode thresholdmap.Mode) (referenceMarkers markerReferences, err error) { + referenceMarkers = make(map[SequenceID]*thresholdmap.ThresholdMap) + + sequenceCount, err := marshalUtil.ReadUint64() + if err != nil { + err = xerrors.Errorf("failed to parse Sequence count (%v): %w", err, cerrors.ErrParseBytesFailed) + return + } + for i := uint64(0); i < sequenceCount; i++ { + sequenceID, sequenceIDErr := SequenceIDFromMarshalUtil(marshalUtil) + if sequenceIDErr != nil { + err = xerrors.Errorf("failed to parse SequenceID from MarshalUtil: %w", sequenceIDErr) + return + } + + referenceCount, referenceCountErr := marshalUtil.ReadUint64() + if referenceCountErr != nil { + err = xerrors.Errorf("failed to parse reference count (%v): %w", referenceCountErr, cerrors.ErrParseBytesFailed) + return + } + thresholdMap := thresholdmap.New(mode) + switch mode { + case thresholdmap.LowerThresholdMode: + for j := uint64(0); j < referenceCount; j++ { + referencingIndex, referencingIndexErr := marshalUtil.ReadUint64() + if referencingIndexErr != nil { + err = xerrors.Errorf("failed to read referencing Index (%v): %w", referencingIndexErr, cerrors.ErrParseBytesFailed) + return + } + + referencedIndex, referencedIndexErr := marshalUtil.ReadUint64() + if referencedIndexErr != nil { + err = xerrors.Errorf("failed to read referenced Index (%v): %w", referencedIndexErr, cerrors.ErrParseBytesFailed) + return + } + + thresholdMap.Set(referencingIndex, Index(referencedIndex)) + } + case thresholdmap.UpperThresholdMode: + for j := uint64(0); j < referenceCount; j++ { + referencedIndex, referencedIndexErr := marshalUtil.ReadUint64() + if referencedIndexErr != nil { + err = xerrors.Errorf("failed to read referenced Index (%v): %w", referencedIndexErr, cerrors.ErrParseBytesFailed) + return + } + + referencingIndex, referencingIndexErr := marshalUtil.ReadUint64() + if referencingIndexErr != nil { + err = xerrors.Errorf("failed to read referencing Index (%v): %w", referencingIndexErr, cerrors.ErrParseBytesFailed) + return + } + + thresholdMap.Set(referencedIndex, Index(referencingIndex)) + } + } + + referenceMarkers[sequenceID] = thresholdMap + } + + return referenceMarkers, err +} + +// endregion /////////////////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/packages/markers/marker_test.go b/packages/markers/marker_test.go index 4d2d7b6ba4..8b42940647 100644 --- a/packages/markers/marker_test.go +++ b/packages/markers/marker_test.go @@ -1,6 +1,7 @@ package markers import ( + "fmt" "testing" "github.com/stretchr/testify/assert" @@ -77,3 +78,158 @@ func TestMarkersByRank(t *testing.T) { assert.Equal(t, uint64(0), markersByRank.HighestRank()) assert.Equal(t, uint64(0), markersByRank.Size()) } + +func TestReferencedMarkers(t *testing.T) { + referencedMarkers := 
NewReferencedMarkers(NewMarkers( + &Marker{1, 3}, + &Marker{2, 7}, + )) + + referencedMarkers.Add(8, NewMarkers( + &Marker{4, 9}, + )) + + referencedMarkers.Add(9, NewMarkers( + &Marker{1, 5}, + &Marker{2, 8}, + )) + + referencedMarkers.Add(12, NewMarkers( + &Marker{1, 7}, + &Marker{2, 10}, + )) + + assert.Equal(t, NewMarkers( + &Marker{1, 3}, + &Marker{2, 7}, + &Marker{4, 9}, + ), referencedMarkers.Get(8)) + + assert.Equal(t, NewMarkers( + &Marker{1, 5}, + &Marker{2, 8}, + &Marker{4, 9}, + ), referencedMarkers.Get(10)) + + assert.Equal(t, NewMarkers( + &Marker{1, 5}, + &Marker{2, 8}, + &Marker{4, 9}, + ), referencedMarkers.Get(11)) + + assert.Equal(t, NewMarkers( + &Marker{1, 7}, + &Marker{2, 10}, + &Marker{4, 9}, + ), referencedMarkers.Get(12)) + + marshaledReferencedMarkers := referencedMarkers.Bytes() + unmarshaledReferencedMarkers, consumedBytes, err := ReferencedMarkersFromBytes(marshaledReferencedMarkers) + require.NoError(t, err) + assert.Equal(t, len(marshaledReferencedMarkers), consumedBytes) + + assert.Equal(t, NewMarkers( + &Marker{1, 3}, + &Marker{2, 7}, + &Marker{4, 9}, + ), unmarshaledReferencedMarkers.Get(8)) + + assert.Equal(t, NewMarkers( + &Marker{1, 5}, + &Marker{2, 8}, + &Marker{4, 9}, + ), unmarshaledReferencedMarkers.Get(10)) + + assert.Equal(t, NewMarkers( + &Marker{1, 5}, + &Marker{2, 8}, + &Marker{4, 9}, + ), unmarshaledReferencedMarkers.Get(11)) + + assert.Equal(t, NewMarkers( + &Marker{1, 7}, + &Marker{2, 10}, + &Marker{4, 9}, + ), unmarshaledReferencedMarkers.Get(12)) + + fmt.Println(unmarshaledReferencedMarkers) +} + +func TestReferencedMarkersPanic(t *testing.T) { + referencedMarkers := NewReferencedMarkers(NewMarkers( + &Marker{1, 3}, + )) + + referencedMarkers.Add(7, NewMarkers( + &Marker{4, 9}, + )) + + assert.Equal(t, NewMarkers( + &Marker{1, 3}, + ), referencedMarkers.Get(4)) +} + +func TestReferencingMarkers(t *testing.T) { + referencingMarkers := NewReferencingMarkers() + referencingMarkers.Add(9, &Marker{1, 5}) + referencingMarkers.Add(10, &Marker{3, 4}) + referencingMarkers.Add(12, &Marker{1, 7}) + referencingMarkers.Add(12, &Marker{2, 10}) + + assert.Equal(t, NewMarkers( + &Marker{1, 5}, + &Marker{2, 10}, + &Marker{3, 4}, + ), referencingMarkers.Get(8)) + + assert.Equal(t, NewMarkers( + &Marker{1, 5}, + &Marker{2, 10}, + &Marker{3, 4}, + ), referencingMarkers.Get(9)) + + assert.Equal(t, NewMarkers( + &Marker{1, 7}, + &Marker{2, 10}, + &Marker{3, 4}, + ), referencingMarkers.Get(10)) + + assert.Equal(t, NewMarkers( + &Marker{1, 7}, + &Marker{2, 10}, + ), referencingMarkers.Get(12)) + + assert.Equal(t, NewMarkers(), referencingMarkers.Get(13)) + + marshaledReferencingMarkers := referencingMarkers.Bytes() + unmarshaledReferencingMarkers, consumedBytes, err := ReferencingMarkersFromBytes(marshaledReferencingMarkers) + require.NoError(t, err) + assert.Equal(t, len(marshaledReferencingMarkers), consumedBytes) + + assert.Equal(t, NewMarkers( + &Marker{1, 5}, + &Marker{2, 10}, + &Marker{3, 4}, + ), unmarshaledReferencingMarkers.Get(8)) + + assert.Equal(t, NewMarkers( + &Marker{1, 5}, + &Marker{2, 10}, + &Marker{3, 4}, + ), unmarshaledReferencingMarkers.Get(9)) + + assert.Equal(t, NewMarkers( + &Marker{1, 7}, + &Marker{2, 10}, + &Marker{3, 4}, + ), unmarshaledReferencingMarkers.Get(10)) + + assert.Equal(t, NewMarkers( + &Marker{1, 7}, + &Marker{2, 10}, + ), unmarshaledReferencingMarkers.Get(12)) + + assert.Equal(t, NewMarkers(), unmarshaledReferencingMarkers.Get(13)) + + fmt.Println(unmarshaledReferencingMarkers) +} diff --git 
a/packages/markers/objecstorage.go b/packages/markers/objecstorage.go index acc56760e4..956a1e12e5 100644 --- a/packages/markers/objecstorage.go +++ b/packages/markers/objecstorage.go @@ -12,9 +12,12 @@ const ( // PrefixSequenceAliasMapping defines the storage prefix for the SequenceAliasMapping object storage. PrefixSequenceAliasMapping + + // CacheTime defines how long objects are cached in the object storage. + CacheTime = 60 * time.Second ) // objectStorageOptions contains a list of default settings for the object storage. var objectStorageOptions = []objectstorage.Option{ - objectstorage.CacheTime(60 * time.Second), + objectstorage.CacheTime(CacheTime), } diff --git a/packages/markers/parent_references.go b/packages/markers/parent_references.go deleted file mode 100644 index f0b333f6d6..0000000000 --- a/packages/markers/parent_references.go +++ /dev/null @@ -1,251 +0,0 @@ -package markers - -import ( - "fmt" - "sort" - "strconv" - "sync" - - "github.com/iotaledger/hive.go/cerrors" - "github.com/iotaledger/hive.go/datastructure/thresholdmap" - "github.com/iotaledger/hive.go/marshalutil" - "github.com/iotaledger/hive.go/stringify" - "golang.org/x/xerrors" -) - -// region ParentReferences ///////////////////////////////////////////////////////////////////////////////////////////// - -// ParentReferences models the relationship between Sequences by providing a way to encode which Marker references which -// other Markers of other Sequences. -type ParentReferences struct { - parentSequences SequenceIDs - references map[SequenceID]*thresholdmap.ThresholdMap - referencesMutex sync.RWMutex -} - -// NewParentReferences creates a new set of ParentReferences. -func NewParentReferences(referencedMarkers *Markers) (newParentReferences *ParentReferences) { - newParentReferences = &ParentReferences{ - parentSequences: referencedMarkers.SequenceIDs(), - references: make(map[SequenceID]*thresholdmap.ThresholdMap), - } - - initialSequenceIndex := referencedMarkers.HighestIndex() + 1 - referencedMarkers.ForEach(func(sequenceID SequenceID, index Index) bool { - thresholdMap := thresholdmap.New(thresholdmap.LowerThresholdMode) - - thresholdMap.Set(uint64(initialSequenceIndex), uint64(index)) - newParentReferences.references[sequenceID] = thresholdMap - - return true - }) - - return -} - -// ParentReferencesFromBytes unmarshals a ParentReferences from a sequence of bytes. -func ParentReferencesFromBytes(parentReferencesBytes []byte) (parentReferences *ParentReferences, consumedBytes int, err error) { - marshalUtil := marshalutil.New(parentReferencesBytes) - if parentReferences, err = ParentReferencesFromMarshalUtil(marshalUtil); err != nil { - err = xerrors.Errorf("failed to parse Markers from MarshalUtil: %w", err) - return - } - consumedBytes = marshalUtil.ReadOffset() - - return -} - -// ParentReferencesFromMarshalUtil unmarshals a ParentReferences object using a MarshalUtil (for easier unmarshaling). 
-func ParentReferencesFromMarshalUtil(marshalUtil *marshalutil.MarshalUtil) (parentReferences *ParentReferences, err error) { - parentReferences = &ParentReferences{ - references: make(map[SequenceID]*thresholdmap.ThresholdMap), - } - if parentReferences.parentSequences, err = SequenceIDsFromMarshalUtil(marshalUtil); err != nil { - err = xerrors.Errorf("failed to parse parent SequenceIDs from MarshalUtil: %w", err) - return - } - - sequenceCount, err := marshalUtil.ReadUint64() - if err != nil { - err = xerrors.Errorf("failed to parse Sequence count (%v): %w", err, cerrors.ErrParseBytesFailed) - return - } - for i := uint64(0); i < sequenceCount; i++ { - sequenceID, sequenceIDErr := SequenceIDFromMarshalUtil(marshalUtil) - if sequenceIDErr != nil { - err = xerrors.Errorf("failed to parse SequenceID from MarshalUtil: %w", sequenceIDErr) - return - } - - referenceCount, referenceCountErr := marshalUtil.ReadUint64() - if referenceCountErr != nil { - err = xerrors.Errorf("failed to parse reference count (%v): %w", referenceCountErr, cerrors.ErrParseBytesFailed) - return - } - thresholdMap := thresholdmap.New(thresholdmap.LowerThresholdMode) - for j := uint64(0); j < referenceCount; j++ { - referencingIndex, referencingIndexErr := marshalUtil.ReadUint64() - if referencingIndexErr != nil { - err = xerrors.Errorf("failed to read referencing Index (%v): %w", referencingIndexErr, cerrors.ErrParseBytesFailed) - return - } - - referencedIndex, referencedIndexErr := marshalUtil.ReadUint64() - if referencedIndexErr != nil { - err = xerrors.Errorf("failed to read referenced Index (%v): %w", referencedIndexErr, cerrors.ErrParseBytesFailed) - return - } - - thresholdMap.Set(referencingIndex, referencedIndex) - } - parentReferences.references[sequenceID] = thresholdMap - } - - return -} - -// ParentSequences returns the parent Sequences of the ParentReferences. -func (p *ParentReferences) ParentSequences() (parentSequences SequenceIDs) { - return p.parentSequences -} - -// AddReferences adds referenced Markers to the ParentReferences. -func (p *ParentReferences) AddReferences(referencedMarkers *Markers, referencingIndex Index) { - p.referencesMutex.Lock() - defer p.referencesMutex.Unlock() - - referencedMarkers.ForEach(func(referencedSequenceID SequenceID, referencedIndex Index) bool { - thresholdMap, exists := p.references[referencedSequenceID] - if !exists { - thresholdMap = thresholdmap.New(thresholdmap.LowerThresholdMode) - p.references[referencedSequenceID] = thresholdMap - } - - thresholdMap.Set(uint64(referencingIndex), uint64(referencedIndex)) - - return true - }) -} - -// HighestReferencedMarker returns the referenced Marker with the highest Index of a given Sequence. -func (p *ParentReferences) HighestReferencedMarker(sequenceID SequenceID, referencingIndex Index) (highestReferencedMarker *Marker) { - p.referencesMutex.RLock() - defer p.referencesMutex.RUnlock() - - thresholdMap, exists := p.references[sequenceID] - if !exists { - panic(fmt.Sprintf("Sequence with %s does not exist in ParentReferences", sequenceID)) - } - - highestReferencedIndex, exists := thresholdMap.Get(uint64(referencingIndex)) - if !exists { - panic(fmt.Sprintf("%s references an unknown Index", referencingIndex)) - } - - return &Marker{ - sequenceID: sequenceID, - index: Index(highestReferencedIndex.(uint64)), - } -} - -// HighestReferencedMarkers returns a collection of Markers that were referenced by the given Index. 
-func (p *ParentReferences) HighestReferencedMarkers(index Index) (highestReferencedMarkers *Markers) { - p.referencesMutex.RLock() - defer p.referencesMutex.RUnlock() - - highestReferencedMarkers = NewMarkers() - for sequenceID, thresholdMap := range p.references { - referencedIndex, exists := thresholdMap.Get(uint64(index)) - if !exists { - panic(fmt.Sprintf("%s is smaller than the lowest known Index", index)) - } - highestReferencedMarkers.Set(sequenceID, Index(referencedIndex.(uint64))) - } - - return -} - -// SequenceIDs returns the SequenceIDs of all referenced Sequences (and not just the parents in the Sequence DAG). -func (p *ParentReferences) SequenceIDs() (sequenceIDs SequenceIDs) { - p.referencesMutex.RLock() - defer p.referencesMutex.RUnlock() - - sequenceIDsSlice := make([]SequenceID, 0, len(p.references)) - for sequenceID := range p.references { - sequenceIDsSlice = append(sequenceIDsSlice, sequenceID) - } - - return NewSequenceIDs(sequenceIDsSlice...) -} - -// Bytes returns a marshaled version of the ParentReferences. -func (p *ParentReferences) Bytes() (marshaledParentReferences []byte) { - p.referencesMutex.RLock() - defer p.referencesMutex.RUnlock() - - marshalUtil := marshalutil.New() - marshalUtil.Write(p.parentSequences) - marshalUtil.WriteUint64(uint64(len(p.references))) - for sequenceID, thresholdMap := range p.references { - marshalUtil.Write(sequenceID) - marshalUtil.WriteUint64(uint64(thresholdMap.Size())) - thresholdMap.ForEach(func(node *thresholdmap.Element) bool { - marshalUtil.WriteUint64(node.Key().(uint64)) - marshalUtil.WriteUint64(node.Value().(uint64)) - - return true - }) - } - - return marshalUtil.Bytes() -} - -// String returns a human readable version of the ParentReferences. -func (p *ParentReferences) String() (humanReadableParentReferences string) { - p.referencesMutex.RLock() - defer p.referencesMutex.RUnlock() - - referencingIndexes := make([]Index, 0) - referencedMarkersByReferencingIndex := make(map[Index][]*Marker) - for sequenceID, thresholdMap := range p.references { - thresholdMap.ForEach(func(node *thresholdmap.Element) bool { - referencingIndex := Index(node.Key().(uint64)) - referencedIndex := Index(node.Value().(uint64)) - if _, exists := referencedMarkersByReferencingIndex[referencingIndex]; !exists { - referencedMarkersByReferencingIndex[referencingIndex] = make([]*Marker, 0) - - referencingIndexes = append(referencingIndexes, referencingIndex) - } - - referencedMarkersByReferencingIndex[referencingIndex] = append(referencedMarkersByReferencingIndex[referencingIndex], &Marker{sequenceID, referencedIndex}) - - return true - }) - } - sort.Slice(referencingIndexes, func(i, j int) bool { - return referencingIndexes[i] < referencingIndexes[j] - }) - - referencedMarkers := stringify.StructBuilder("ReferencedMarkers") - for i, referencingIndex := range referencingIndexes { - thresholdStart := strconv.FormatUint(uint64(referencingIndex), 10) - thresholdEnd := "INF" - if len(referencingIndexes) > i+1 { - thresholdEnd = strconv.FormatUint(uint64(referencingIndexes[i+1])-1, 10) - } - - if thresholdStart == thresholdEnd { - referencedMarkers.AddField(stringify.StructField("Index("+thresholdStart+")", referencedMarkersByReferencingIndex[referencingIndex])) - } else { - referencedMarkers.AddField(stringify.StructField("Index("+thresholdStart+" ... 
"+thresholdEnd+")", referencedMarkersByReferencingIndex[referencingIndex])) - } - } - - return stringify.Struct("ParentReferences", - stringify.StructField("parentSequences", p.ParentSequences()), - stringify.StructField("referencedSequenceIDs", p.SequenceIDs()), - stringify.StructField("referencedMarkers", referencedMarkers), - ) -} - -// endregion /////////////////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/packages/markers/parent_references_test.go b/packages/markers/parent_references_test.go deleted file mode 100644 index 70174448b0..0000000000 --- a/packages/markers/parent_references_test.go +++ /dev/null @@ -1,46 +0,0 @@ -package markers - -import ( - "fmt" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestParentReferences(t *testing.T) { - parentReferences := NewParentReferences(NewMarkers( - &Marker{1, 3}, - &Marker{2, 7}, - )) - assert.Equal(t, NewSequenceIDs(1, 2), parentReferences.ParentSequences()) - - parentReferences.AddReferences(NewMarkers( - &Marker{1, 5}, - &Marker{2, 8}, - ), 9) - - parentReferences.AddReferences(NewMarkers( - &Marker{1, 7}, - &Marker{2, 10}, - ), 12) - - assert.Equal(t, NewSequenceIDs(1, 2), parentReferences.SequenceIDs()) - assert.Equal(t, &Marker{1, 3}, parentReferences.HighestReferencedMarker(1, 8)) - assert.Equal(t, &Marker{1, 5}, parentReferences.HighestReferencedMarker(1, 10)) - assert.Equal(t, &Marker{1, 5}, parentReferences.HighestReferencedMarker(1, 11)) - assert.Equal(t, &Marker{1, 7}, parentReferences.HighestReferencedMarker(1, 12)) - - marshaledParentReferences := parentReferences.Bytes() - unmarshalParentReferences, consumedBytes, err := ParentReferencesFromBytes(marshaledParentReferences) - require.NoError(t, err) - assert.Equal(t, len(marshaledParentReferences), consumedBytes) - - assert.Equal(t, NewSequenceIDs(1, 2), unmarshalParentReferences.SequenceIDs()) - assert.Equal(t, &Marker{1, 3}, unmarshalParentReferences.HighestReferencedMarker(1, 8)) - assert.Equal(t, &Marker{1, 5}, unmarshalParentReferences.HighestReferencedMarker(1, 10)) - assert.Equal(t, &Marker{1, 5}, unmarshalParentReferences.HighestReferencedMarker(1, 11)) - assert.Equal(t, &Marker{1, 7}, unmarshalParentReferences.HighestReferencedMarker(1, 12)) - - fmt.Println(unmarshalParentReferences) -} diff --git a/packages/markers/sequence.go b/packages/markers/sequence.go index 380abf396e..8b6241c23f 100644 --- a/packages/markers/sequence.go +++ b/packages/markers/sequence.go @@ -20,12 +20,13 @@ import ( // Sequence represents a set of ever increasing Indexes that are encapsulating a certain part of the DAG. 
type Sequence struct { - id SequenceID - parentReferences *ParentReferences - rank uint64 - lowestIndex Index - highestIndex Index - highestIndexMutex sync.RWMutex + id SequenceID + referencedMarkers *ReferencedMarkers + referencingMarkers *ReferencingMarkers + rank uint64 + lowestIndex Index + highestIndex Index + highestIndexMutex sync.RWMutex objectstorage.StorableObjectFlags } @@ -35,11 +36,12 @@ func NewSequence(id SequenceID, referencedMarkers *Markers, rank uint64) *Sequen initialIndex := referencedMarkers.HighestIndex() + 1 return &Sequence{ - id: id, - parentReferences: NewParentReferences(referencedMarkers), - rank: rank, - lowestIndex: initialIndex, - highestIndex: initialIndex, + id: id, + referencedMarkers: NewReferencedMarkers(referencedMarkers), + referencingMarkers: NewReferencingMarkers(), + rank: rank, + lowestIndex: initialIndex, + highestIndex: initialIndex, } } @@ -62,8 +64,12 @@ func SequenceFromMarshalUtil(marshalUtil *marshalutil.MarshalUtil) (sequence *Se err = xerrors.Errorf("failed to parse SequenceID from MarshalUtil: %w", err) return } - if sequence.parentReferences, err = ParentReferencesFromMarshalUtil(marshalUtil); err != nil { - err = xerrors.Errorf("failed to parse ParentReferences from MarshalUtil: %w", err) + if sequence.referencedMarkers, err = ReferencedMarkersFromMarshalUtil(marshalUtil); err != nil { + err = xerrors.Errorf("failed to parse ReferencedMarkers from MarshalUtil: %w", err) + return + } + if sequence.referencingMarkers, err = ReferencingMarkersFromMarshalUtil(marshalUtil); err != nil { + err = xerrors.Errorf("failed to parse ReferencingMarkers from MarshalUtil: %w", err) return } if sequence.rank, err = marshalUtil.ReadUint64(); err != nil { @@ -83,7 +89,7 @@ func SequenceFromMarshalUtil(marshalUtil *marshalutil.MarshalUtil) (sequence *Se } // SequenceFromObjectStorage restores an Sequence that was stored in the object storage. -func SequenceFromObjectStorage(key []byte, data []byte) (sequence objectstorage.StorableObject, err error) { +func SequenceFromObjectStorage(key, data []byte) (sequence objectstorage.StorableObject, err error) { if sequence, _, err = SequenceFromBytes(byteutils.ConcatBytes(key, data)); err != nil { err = xerrors.Errorf("failed to parse Sequence from bytes: %w", err) return @@ -97,14 +103,14 @@ func (s *Sequence) ID() SequenceID { return s.id } -// ParentSequences returns the SequenceIDs of the parent Sequences in the Sequence DAG. -func (s *Sequence) ParentSequences() SequenceIDs { - return s.parentReferences.SequenceIDs() +// ReferencedMarkers returns a collection of Markers that were referenced by the given Index. +func (s *Sequence) ReferencedMarkers(index Index) *Markers { + return s.referencedMarkers.Get(index) } -// HighestReferencedParentMarkers returns a collection of Markers that were referenced by the given Index. -func (s *Sequence) HighestReferencedParentMarkers(index Index) *Markers { - return s.parentReferences.HighestReferencedMarkers(index) +// ReferencingMarkers returns a collection of Markers that reference the given Index. +func (s *Sequence) ReferencingMarkers(index Index) *Markers { + return s.referencingMarkers.Get(index) } // Rank returns the rank of the Sequence (maximum distance from the root of the Sequence DAG). 
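// A minimal sketch of how the reworked Sequence API reads, assuming it lives in package
// markers like the deleted parent_references_test.go above; the concrete SequenceIDs,
// Indexes and the fmt import are illustrative assumptions, not part of this change.
func ExampleSequence_references() {
	// The old ParentReferences is split in two: ReferencedMarkers tracks what this
	// Sequence references in its past cone, while ReferencingMarkers tracks which
	// Markers of other Sequences reference an Index of this Sequence.
	sequence := NewSequence(1337, NewMarkers(
		&Marker{sequenceID: 1, index: 3},
		&Marker{sequenceID: 2, index: 7},
	), 7)

	// Markers referenced by Index 8 of this Sequence.
	referenced := sequence.ReferencedMarkers(8)

	// Markers of other Sequences that reference Index 8 of this Sequence,
	// registered via AddReferencingMarker as new Sequences branch off.
	referencing := sequence.ReferencingMarkers(8)

	fmt.Println(referenced, referencing)
}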
@@ -143,7 +149,7 @@ func (s *Sequence) IncreaseHighestIndex(referencedMarkers *Markers) (index Index if referencedMarkers.Size() > 1 { referencedMarkers.Delete(s.id) - s.parentReferences.AddReferences(referencedMarkers, s.highestIndex) + s.referencedMarkers.Add(s.highestIndex, referencedMarkers) } s.SetModified() @@ -153,6 +159,13 @@ func (s *Sequence) IncreaseHighestIndex(referencedMarkers *Markers) (index Index return } +// AddReferencingMarker register a Marker that referenced the given Index of this Sequence. +func (s *Sequence) AddReferencingMarker(index Index, referencingMarker *Marker) { + s.referencingMarkers.Add(index, referencingMarker) + + s.SetModified() +} + // String returns a human readable version of the Sequence. func (s *Sequence) String() string { return stringify.Struct("Sequence", @@ -183,7 +196,8 @@ func (s *Sequence) ObjectStorageKey() []byte { // a key in the object storage. func (s *Sequence) ObjectStorageValue() []byte { return marshalutil.New(). - Write(s.parentReferences). + Write(s.referencedMarkers). + Write(s.referencingMarkers). WriteUint64(s.rank). Write(s.lowestIndex). Write(s.HighestIndex()). @@ -353,11 +367,14 @@ func (s SequenceIDs) Bytes() (marshaledSequenceIDs []byte) { // String returns a human readable version of the SequenceIDs. func (s SequenceIDs) String() (humanReadableSequenceIDs string) { result := "SequenceIDs(" + firstItem := true for sequenceID := range s { - if len(result) != 12 { + if !firstItem { result += ", " } result += strconv.FormatUint(uint64(sequenceID), 10) + + firstItem = false } result += ")" @@ -481,7 +498,7 @@ func SequenceAliasMappingFromMarshalUtil(marshalUtil *marshalutil.MarshalUtil) ( } // SequenceAliasMappingFromObjectStorage restores a SequenceAlias that was stored in the object storage. -func SequenceAliasMappingFromObjectStorage(key []byte, data []byte) (mapping objectstorage.StorableObject, err error) { +func SequenceAliasMappingFromObjectStorage(key, data []byte) (mapping objectstorage.StorableObject, err error) { if mapping, _, err = SequenceAliasMappingFromBytes(byteutils.ConcatBytes(key, data)); err != nil { err = xerrors.Errorf("failed to parse SequenceAliasMapping from bytes: %w", err) return diff --git a/packages/markers/sequence_test.go b/packages/markers/sequence_test.go index c44a8538a4..e4bbf60f43 100644 --- a/packages/markers/sequence_test.go +++ b/packages/markers/sequence_test.go @@ -14,7 +14,6 @@ func TestSequence(t *testing.T) { ), 7) assert.Equal(t, SequenceID(1337), sequence.ID()) - assert.Equal(t, NewSequenceIDs(1, 2), sequence.ParentSequences()) assert.Equal(t, uint64(7), sequence.Rank()) assert.Equal(t, Index(7), sequence.HighestIndex()) @@ -25,5 +24,4 @@ func TestSequence(t *testing.T) { assert.Equal(t, sequence.ID(), unmarshaledSequence.ID()) assert.Equal(t, sequence.Rank(), unmarshaledSequence.Rank()) assert.Equal(t, sequence.HighestIndex(), unmarshaledSequence.HighestIndex()) - assert.Equal(t, sequence.ParentSequences(), unmarshaledSequence.ParentSequences()) } diff --git a/packages/tangle/errors.go b/packages/tangle/errors.go index 2601dc42e7..93b869a8ca 100644 --- a/packages/tangle/errors.go +++ b/packages/tangle/errors.go @@ -2,5 +2,9 @@ package tangle import "errors" -// ErrNotSynced is triggered when somebody tries to issue a Payload before the Tangle is fully synced. -var ErrNotSynced = errors.New("tangle not synced") +var ( + // ErrNotSynced is triggered when somebody tries to issue a Payload before the Tangle is fully synced. 
+ ErrNotSynced = errors.New("tangle not synced") + // ErrInvalidInputs is returned when one or more inputs are rejected or non-monotonically liked. + ErrInvalidInputs = errors.New("one or more inputs are rejected or non-monotonically liked") +) diff --git a/packages/tangle/storage.go b/packages/tangle/storage.go index 1fa34116b7..4b9828c5a3 100644 --- a/packages/tangle/storage.go +++ b/packages/tangle/storage.go @@ -91,7 +91,6 @@ func (s *Storage) Setup() { s.tangle.Parser.Events.MessageParsed.Attach(events.NewClosure(func(msgParsedEvent *MessageParsedEvent) { s.tangle.Storage.StoreMessage(msgParsedEvent.Message) })) - s.tangle.MessageFactory.Events.MessageConstructed.Attach(events.NewClosure(s.StoreMessage)) } // StoreMessage stores a new message to the message store. diff --git a/packages/tangle/tangle.go b/packages/tangle/tangle.go index 2d838d3987..7ea6762aa7 100644 --- a/packages/tangle/tangle.go +++ b/packages/tangle/tangle.go @@ -1,6 +1,7 @@ package tangle import ( + "strings" "sync" "github.com/iotaledger/hive.go/autopeering/peer" @@ -12,6 +13,7 @@ import ( "github.com/mr-tron/base58" "golang.org/x/xerrors" + "github.com/iotaledger/goshimmer/packages/ledgerstate" "github.com/iotaledger/goshimmer/packages/markers" "github.com/iotaledger/goshimmer/packages/tangle/payload" ) @@ -114,6 +116,25 @@ func (t *Tangle) IssuePayload(payload payload.Payload) (message *Message, err er return } + if payload.Type() == ledgerstate.TransactionType { + var invalidInputs []string + transaction := payload.(*ledgerstate.Transaction) + for _, input := range transaction.Essence().Inputs() { + if input.Type() == ledgerstate.UTXOInputType { + t.LedgerState.OutputMetadata(input.(*ledgerstate.UTXOInput).ReferencedOutputID()).Consume(func(outputMetadata *ledgerstate.OutputMetadata) { + t.LedgerState.BranchDAG.Branch(outputMetadata.BranchID()).Consume(func(branch ledgerstate.Branch) { + if branch.InclusionState() == ledgerstate.Rejected || !branch.MonotonicallyLiked() { + invalidInputs = append(invalidInputs, input.Base58()) + } + }) + }) + } + } + if len(invalidInputs) > 0 { + return nil, xerrors.Errorf("invalid inputs: %s: %w", strings.Join(invalidInputs, ","), ErrInvalidInputs) + } + } + return t.MessageFactory.IssuePayload(payload) } diff --git a/packages/tangle/tipmanager.go b/packages/tangle/tipmanager.go index 93403f746c..9b3663efb6 100644 --- a/packages/tangle/tipmanager.go +++ b/packages/tangle/tipmanager.go @@ -385,6 +385,25 @@ func (t *TipManager) selectWeakTips(count int) (parents MessageIDs) { return } +// AllWeakTips returns a list of all weak tips that are stored in the TipManger. +func (t *TipManager) AllWeakTips() MessageIDs { + return retrieveAllTips(t.weakTips) +} + +// AllStrongTips returns a list of all strong tips that are stored in the TipManger. +func (t *TipManager) AllStrongTips() MessageIDs { + return retrieveAllTips(t.strongTips) +} + +func retrieveAllTips(tipsMap *randommap.RandomMap) MessageIDs { + mapKeys := tipsMap.Keys() + tips := make(MessageIDs, len(mapKeys)) + for i, key := range mapKeys { + tips[i] = key.(MessageID) + } + return tips +} + // StrongTipCount the amount of strong tips. 
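// With the branch inclusion check added to Tangle.IssuePayload above, rejected or
// non-monotonically liked inputs now surface as a wrapped ErrInvalidInputs. A hedged sketch
// of how a caller might react to it (tx, log and the messagelayer import are assumed to be
// in scope):
msg, err := messagelayer.Tangle().IssuePayload(tx)
if xerrors.Is(err, tangle.ErrInvalidInputs) {
	// at least one consumed output sits on a rejected or non-monotonically liked branch;
	// re-selecting inputs is more useful than retrying the same transaction
	log.Warnf("dropping transaction %s: %s", tx.ID().Base58(), err)
} else if err == nil {
	log.Infof("issued transaction %s in message %s", tx.ID().Base58(), msg.ID())
}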
func (t *TipManager) StrongTipCount() int { return t.strongTips.Size() diff --git a/packages/tangle/utils.go b/packages/tangle/utils.go index 5fdd7cf698..e6f23a4a08 100644 --- a/packages/tangle/utils.go +++ b/packages/tangle/utils.go @@ -122,6 +122,7 @@ func (u *Utils) TransactionApprovedByMessage(transactionID ledgerstate.Transacti continue } + bookedParents := make(MessageIDs, 0) u.tangle.Storage.Message(messageID).Consume(func(message *Message) { for _, parentID := range message.StrongParents() { var parentBooked bool @@ -132,13 +133,26 @@ func (u *Utils) TransactionApprovedByMessage(transactionID ledgerstate.Transacti continue } - if u.MessageApprovedBy(attachmentMessageID, parentID) { + // First check all of the parents to avoid unnecessary checks and possible walking. + if attachmentMessageID == parentID { approved = true return } + + bookedParents = append(bookedParents, parentID) } }) + if approved { + return + } + // Only now check all parents. + for _, bookedParent := range bookedParents { + if u.MessageApprovedBy(attachmentMessageID, bookedParent) { + approved = true + return + } + } if approved { return } diff --git a/plugins/autopeering/parameters.go b/plugins/autopeering/parameters.go index bb99c6cc22..9b0c796aa7 100644 --- a/plugins/autopeering/parameters.go +++ b/plugins/autopeering/parameters.go @@ -13,5 +13,5 @@ const ( func init() { flag.StringSlice(CfgEntryNodes, []string{"2PV5487xMw5rasGBXXWeqSi4hLz7r19YBt8Y1TGAsQbj@ressims.iota.cafe:15626", "5EDH4uY78EA6wrBkHHAVBWBMDt7EcksRq6pjzipoW15B@entryshimmer.tanglebay.com:14646"}, "list of trusted entry nodes for auto peering") - flag.Int(CfgNetworkVersion, 21, "autopeering network version") + flag.Int(CfgNetworkVersion, 22, "autopeering network version") } diff --git a/plugins/banner/plugin.go b/plugins/banner/plugin.go index 026127e14a..d20831d0e0 100644 --- a/plugins/banner/plugin.go +++ b/plugins/banner/plugin.go @@ -17,7 +17,7 @@ var ( once sync.Once // AppVersion version number - AppVersion = "v0.5.3" + AppVersion = "v0.5.4" // SimplifiedAppVersion is the version number without commit hash SimplifiedAppVersion = simplifiedVersion(AppVersion) ) diff --git a/plugins/dashboard/explorer_routes.go b/plugins/dashboard/explorer_routes.go index f19795890d..15db47aa9a 100644 --- a/plugins/dashboard/explorer_routes.go +++ b/plugins/dashboard/explorer_routes.go @@ -128,36 +128,16 @@ func setupExplorerRoutes(routeGroup *echo.Group) { return c.JSON(http.StatusOK, addr) }) - routeGroup.GET("/transaction/:transactionID", func(c echo.Context) error { - return ledgerstateAPI.GetTransaction(c) - }) - routeGroup.GET("/transaction/:transactionID/metadata", func(c echo.Context) error { - return ledgerstateAPI.GetTransactionMetadata(c) - }) - routeGroup.GET("/transaction/:transactionID/attachments", func(c echo.Context) error { - return ledgerstateAPI.GetTransactionAttachments(c) - }) - routeGroup.GET("/output/:outputID", func(c echo.Context) error { - return ledgerstateAPI.GetOutput(c) - }) - routeGroup.GET("/output/:outputID/metadata", func(c echo.Context) error { - return ledgerstateAPI.GetOutputMetadata(c) - }) - routeGroup.GET("/output/:outputID/consumers", func(c echo.Context) error { - return ledgerstateAPI.GetOutputConsumers(c) - }) - routeGroup.GET("/mana/pending", func(c echo.Context) error { - return manaAPI.GetPendingMana(c) - }) - routeGroup.GET("/branch/:branchID", func(c echo.Context) error { - return ledgerstateAPI.GetBranch(c) - }) - routeGroup.GET("/branch/:branchID/children", func(c echo.Context) error { - return 
ledgerstateAPI.GetBranchChildren(c) - }) - routeGroup.GET("/branch/:branchID/conflicts", func(c echo.Context) error { - return ledgerstateAPI.GetBranchConflicts(c) - }) + routeGroup.GET("/transaction/:transactionID", ledgerstateAPI.GetTransaction) + routeGroup.GET("/transaction/:transactionID/metadata", ledgerstateAPI.GetTransactionMetadata) + routeGroup.GET("/transaction/:transactionID/attachments", ledgerstateAPI.GetTransactionAttachments) + routeGroup.GET("/output/:outputID", ledgerstateAPI.GetOutput) + routeGroup.GET("/output/:outputID/metadata", ledgerstateAPI.GetOutputMetadata) + routeGroup.GET("/output/:outputID/consumers", ledgerstateAPI.GetOutputConsumers) + routeGroup.GET("/mana/pending", manaAPI.GetPendingMana) + routeGroup.GET("/branch/:branchID", ledgerstateAPI.GetBranch) + routeGroup.GET("/branch/:branchID/children", ledgerstateAPI.GetBranchChildren) + routeGroup.GET("/branch/:branchID/conflicts", ledgerstateAPI.GetBranchConflicts) routeGroup.GET("/search/:search", func(c echo.Context) error { search := c.Param("search") diff --git a/plugins/database/versioning.go b/plugins/database/versioning.go index 7bc35fd627..37e1ef26be 100644 --- a/plugins/database/versioning.go +++ b/plugins/database/versioning.go @@ -11,7 +11,7 @@ import ( const ( // DBVersion defines the version of the database schema this version of GoShimmer supports. // Every time there's a breaking change regarding the stored data, this version flag should be adjusted. - DBVersion = 23 + DBVersion = 24 ) var ( diff --git a/plugins/faucet/component.go b/plugins/faucet/component.go deleted file mode 100644 index d98f1e5759..0000000000 --- a/plugins/faucet/component.go +++ /dev/null @@ -1,367 +0,0 @@ -package faucet - -import ( - "sync" - "time" - - "github.com/iotaledger/hive.go/crypto/ed25519" - "github.com/iotaledger/hive.go/datastructure/orderedmap" - "github.com/iotaledger/hive.go/identity" - "golang.org/x/xerrors" - - walletseed "github.com/iotaledger/goshimmer/client/wallet/packages/seed" - "github.com/iotaledger/goshimmer/packages/clock" - "github.com/iotaledger/goshimmer/packages/ledgerstate" - "github.com/iotaledger/goshimmer/packages/tangle" - "github.com/iotaledger/goshimmer/plugins/autopeering/local" - "github.com/iotaledger/goshimmer/plugins/messagelayer" -) - -// New creates a new faucet component using the given seed and tokensPerRequest config. -func New(seed []byte, tokensPerRequest int64, blacklistCapacity int, maxTxBookedAwaitTime time.Duration, preparedOutputsCount int) *Component { - return &Component{ - tokensPerRequest: tokensPerRequest, - seed: walletseed.NewSeed(seed), - maxTxBookedAwaitTime: maxTxBookedAwaitTime, - blacklist: orderedmap.New(), - blacklistCapacity: blacklistCapacity, - preparedOutputsCount: preparedOutputsCount, - } -} - -// Component implements a faucet component which will send tokens to actors requesting tokens. -type Component struct { - sync.Mutex - // the amount of tokens to send to every request - tokensPerRequest int64 - // the seed instance of the faucet holding the tokens - seed *walletseed.Seed - // the time to await for the transaction fulfilling a funding request - // to become booked in the value layer - maxTxBookedAwaitTime time.Duration - blacklistCapacity int - blacklist *orderedmap.OrderedMap - preparedOutputsCount int -} - -// IsAddressBlacklisted checks whether the given address is currently blacklisted. 
-func (c *Component) IsAddressBlacklisted(addr ledgerstate.Address) bool { - _, blacklisted := c.blacklist.Get(addr.Base58()) - return blacklisted -} - -// adds the given address to the blacklist and removes the oldest blacklist entry -// if it would go over capacity. -func (c *Component) addAddressToBlacklist(addr ledgerstate.Address) { - c.blacklist.Set(addr.Base58(), true) - if c.blacklist.Size() > c.blacklistCapacity { - var headKey interface{} - c.blacklist.ForEach(func(key, value interface{}) bool { - headKey = key - return false - }) - c.blacklist.Delete(headKey) - } -} - -// SendFunds sends IOTA tokens to the address from faucet request. -func (c *Component) SendFunds(msg *tangle.Message) (m *tangle.Message, txID string, err error) { - // ensure that only one request is being processed any given time - c.Lock() - defer c.Unlock() - - addr := msg.Payload().(*Request).Address() - - if c.IsAddressBlacklisted(addr) { - return nil, "", ErrAddressIsBlacklisted - } - - // get the outputs for the inputs and remainder balance - inputs, addrsIndices, remainder := c.collectUTXOsForFunding() - var outputs ledgerstate.Outputs - output := ledgerstate.NewSigLockedColoredOutput(ledgerstate.NewColoredBalances(map[ledgerstate.Color]uint64{ - ledgerstate.ColorIOTA: uint64(c.tokensPerRequest), - }), addr) - outputs = append(outputs, output) - // add remainder address if needed - if remainder > 0 { - remainAddr := c.nextUnusedAddress() - output := ledgerstate.NewSigLockedColoredOutput(ledgerstate.NewColoredBalances(map[ledgerstate.Color]uint64{ - ledgerstate.ColorIOTA: uint64(remainder), - }), remainAddr) - outputs = append(outputs, output) - } - - nodeID := identity.NewID(msg.IssuerPublicKey()) - txEssence := ledgerstate.NewTransactionEssence(0, clock.SyncedTime(), nodeID, nodeID, ledgerstate.NewInputs(inputs...), ledgerstate.NewOutputs(outputs...)) - - unlockBlocks := make([]ledgerstate.UnlockBlock, len(txEssence.Inputs())) - inputIndex := 0 - for i, inputs := range addrsIndices { - w := wallet{ - keyPair: *c.seed.KeyPair(i), - } - for range inputs { - unlockBlock := ledgerstate.NewSignatureUnlockBlock(w.sign(txEssence)) - unlockBlocks[inputIndex] = unlockBlock - inputIndex++ - } - } - - tx := ledgerstate.NewTransaction(txEssence, unlockBlocks) - - // attach to message layer - issueTransaction := func() (*tangle.Message, error) { - message, e := messagelayer.Tangle().IssuePayload(tx) - if e != nil { - return nil, e - } - return message, nil - } - - // last used address index - lastUsedAddressIndex := uint64(0) - for k := range addrsIndices { - if k > lastUsedAddressIndex { - lastUsedAddressIndex = k - } - } - - // block for a certain amount of time until we know that the transaction - // actually got booked by this node itself - // TODO: replace with an actual more reactive way - msg, err = messagelayer.AwaitMessageToBeBooked(issueTransaction, tx.ID(), c.maxTxBookedAwaitTime) - if err != nil { - return nil, "", xerrors.Errorf("%w: tx %s", err, tx.ID().String()) - } - - _, err = c.prepareMoreOutputs(lastUsedAddressIndex) - if err != nil { - err = xerrors.Errorf("%w: %s", ErrPrepareFaucet, err.Error()) - } - c.addAddressToBlacklist(addr) - - return msg, tx.ID().String(), err -} - -// collectUTXOsForFunding iterates over the faucet's UTXOs until the token threshold is reached. -// this function also returns the remainder balance for the given outputs. 
-func (c *Component) collectUTXOsForFunding() (inputs ledgerstate.Inputs, addrsIndices map[uint64]ledgerstate.Inputs, remainder int64) { - total := c.tokensPerRequest - var i uint64 - addrsIndices = map[uint64]ledgerstate.Inputs{} - - // get a list of address for inputs - for i = 0; total > 0; i++ { - addr := c.seed.Address(i).Address() - cachedOutputs := messagelayer.Tangle().LedgerState.OutputsOnAddress(addr) - cachedOutputs.Consume(func(output ledgerstate.Output) { - messagelayer.Tangle().LedgerState.OutputMetadata(output.ID()).Consume(func(outputMetadata *ledgerstate.OutputMetadata) { - if outputMetadata.ConsumerCount() > 0 || total == 0 { - return - } - var val int64 - output.Balances().ForEach(func(_ ledgerstate.Color, balance uint64) bool { - val += int64(balance) - return true - }) - addrsIndices[i] = append(addrsIndices[i], ledgerstate.NewUTXOInput(output.ID())) - - // get unspent output ids and check if it's conflict - if val <= total { - total -= val - } else { - remainder = val - total - total = 0 - } - inputs = append(inputs, ledgerstate.NewUTXOInput(output.ID())) - }) - }) - } - - return -} - -// nextUnusedAddress generates an unused address from the faucet seed. -func (c *Component) nextUnusedAddress(startIndex ...uint64) ledgerstate.Address { - var index uint64 - if len(startIndex) > 0 { - index = startIndex[0] - } - for ; ; index++ { - addr := c.seed.Address(index).Address() - cachedOutputs := messagelayer.Tangle().LedgerState.OutputsOnAddress(addr) - cachedOutputs.Release() - if len(cachedOutputs.Unwrap()) == 0 { - return addr - } - } -} - -// PrepareGenesisOutput splits genesis output to CfgFaucetPreparedOutputsCount number of outputs. -// If this process has been done before, it'll not do it again. -func (c *Component) PrepareGenesisOutput() (msg *tangle.Message, err error) { - genesisOutputID := ledgerstate.NewOutputID(ledgerstate.GenesisTransactionID, 0) - // get total funds - var faucetTotal int64 - messagelayer.Tangle().LedgerState.Output(genesisOutputID).Consume(func(output ledgerstate.Output) { - output.Balances().ForEach(func(color ledgerstate.Color, balance uint64) bool { - faucetTotal += int64(balance) - return true - }) - }) - - cachedOutputMeta := messagelayer.Tangle().LedgerState.OutputMetadata(genesisOutputID) - defer cachedOutputMeta.Release() - outputMetadata := cachedOutputMeta.Unwrap() - if outputMetadata == nil { - return nil, xerrors.Errorf("can't locate genesis output metadata: %s", genesisOutputID.Base58()) - } - if outputMetadata.ConsumerCount() > 0 { - log.Info("genesis output already spent") - return nil, nil - } - - msg, err = c.splitOutput(genesisOutputID, 0, faucetTotal) - return -} - -// prepareMoreOutputs prepares more outputs on the faucet if most of the already prepared outputs have been consumed. 
-func (c *Component) prepareMoreOutputs(lastUsedAddressIndex uint64) (msg *tangle.Message, err error) { - lastOne := int64(c.preparedOutputsCount) - (int64(lastUsedAddressIndex) % int64(c.preparedOutputsCount)) - if lastOne != int64(c.preparedOutputsCount) { - return - } - var remainderOutputID ledgerstate.OutputID - found := false - var remainder int64 - var remainderAddressIndex uint64 - // find remainder output - for i := lastUsedAddressIndex + 1; !found; i++ { - addr := c.seed.Address(i).Address() - messagelayer.Tangle().LedgerState.OutputsOnAddress(addr).Consume(func(output ledgerstate.Output) { - var consumerCount int - messagelayer.Tangle().LedgerState.OutputMetadata(output.ID()).Consume(func(outputMetadata *ledgerstate.OutputMetadata) { - consumerCount = outputMetadata.ConsumerCount() - }) - if consumerCount > 0 { - return - } - var val int64 - output.Balances().ForEach(func(color ledgerstate.Color, balance uint64) bool { - val += int64(balance) - return true - }) - // not a prepared output. - if val != c.tokensPerRequest { - remainderOutputID = output.ID() - remainderAddressIndex = i - found = true - remainder = val - } - }) - } - - return c.splitOutput(remainderOutputID, remainderAddressIndex, remainder) -} - -// splitOutput splits the remainder into `f.preparedOutputsCount` outputs. -func (c *Component) splitOutput(remainderOutputID ledgerstate.OutputID, remainderAddressIndex uint64, remainder int64) (msg *tangle.Message, err error) { - if remainder < c.tokensPerRequest { - return - } - var totalPrepared int64 - outputs := ledgerstate.Outputs{} - addrIndex := remainderAddressIndex + 1 - for i := 0; i < c.preparedOutputsCount; i++ { - if totalPrepared+c.tokensPerRequest > remainder { - break - } - nextAddr := c.nextUnusedAddress(addrIndex) - addrIndex++ - outputs = append(outputs, ledgerstate.NewSigLockedColoredOutput( - ledgerstate.NewColoredBalances( - map[ledgerstate.Color]uint64{ - ledgerstate.ColorIOTA: uint64(c.tokensPerRequest), - }), - nextAddr, - ), - ) - totalPrepared += c.tokensPerRequest - } - - faucetBalance := remainder - totalPrepared - if faucetBalance > 0 { - nextAddr := c.nextUnusedAddress(addrIndex) - outputs = append(outputs, ledgerstate.NewSigLockedColoredOutput( - ledgerstate.NewColoredBalances( - map[ledgerstate.Color]uint64{ - ledgerstate.ColorIOTA: uint64(faucetBalance), - }), - nextAddr, - ), - ) - } - inputs := ledgerstate.Inputs{ - ledgerstate.NewUTXOInput(remainderOutputID), - } - - essence := ledgerstate.NewTransactionEssence( - 0, - time.Now(), - local.GetInstance().ID(), - local.GetInstance().ID(), - ledgerstate.NewInputs(inputs...), - ledgerstate.NewOutputs(outputs...), - ) - - unlockBlocks := make([]ledgerstate.UnlockBlock, len(essence.Inputs())) - inputIndex := 0 - w := wallet{keyPair: *c.seed.KeyPair(remainderAddressIndex)} - for range inputs { - unlockBlock := ledgerstate.NewSignatureUnlockBlock(w.sign(essence)) - unlockBlocks[inputIndex] = unlockBlock - inputIndex++ - } - - // prepare the transaction with outputs - tx := ledgerstate.NewTransaction( - essence, - unlockBlocks, - ) - - // attach to message layer - issueTransaction := func() (*tangle.Message, error) { - message, e := messagelayer.Tangle().IssuePayload(tx) - if e != nil { - return nil, e - } - return message, nil - } - - // block for a certain amount of time until we know that the transaction - // actually got booked by this node itself - // TODO: replace with an actual more reactive way - msg, err = messagelayer.AwaitMessageToBeBooked(issueTransaction, tx.ID(), 
c.maxTxBookedAwaitTime) - if err != nil { - return nil, xerrors.Errorf("%w: tx %s", err, tx.ID().String()) - } - - return -} - -type wallet struct { - keyPair ed25519.KeyPair -} - -func (w wallet) privateKey() ed25519.PrivateKey { - return w.keyPair.PrivateKey -} - -func (w wallet) publicKey() ed25519.PublicKey { - return w.keyPair.PublicKey -} - -func (w wallet) sign(txEssence *ledgerstate.TransactionEssence) *ledgerstate.ED25519Signature { - return ledgerstate.NewED25519Signature(w.publicKey(), w.privateKey().Sign(txEssence.Bytes())) -} diff --git a/plugins/faucet/component_test.go b/plugins/faucet/component_test.go deleted file mode 100644 index 1e6dd445d9..0000000000 --- a/plugins/faucet/component_test.go +++ /dev/null @@ -1,54 +0,0 @@ -package faucet - -import ( - "testing" - "time" - - "github.com/iotaledger/goshimmer/packages/ledgerstate" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/iotaledger/goshimmer/packages/tangle/payload" - - "github.com/iotaledger/hive.go/crypto/ed25519" - "github.com/iotaledger/hive.go/identity" - - "github.com/iotaledger/goshimmer/packages/tangle" -) - -func TestIsFaucetReq(t *testing.T) { - keyPair := ed25519.GenerateKeyPair() - address := ledgerstate.NewED25519Address(keyPair.PublicKey) - local := identity.NewLocalIdentity(keyPair.PublicKey, keyPair.PrivateKey) - - faucetRequest, err := NewRequest(address, 4) - if err != nil { - require.NoError(t, err) - return - } - faucetMsg := tangle.NewMessage( - []tangle.MessageID{tangle.EmptyMessageID}, - []tangle.MessageID{}, - time.Now(), - local.PublicKey(), - 0, - faucetRequest, - 0, - ed25519.EmptySignature, - ) - - dataMsg := tangle.NewMessage( - []tangle.MessageID{tangle.EmptyMessageID}, - []tangle.MessageID{}, - time.Now(), - local.PublicKey(), - 0, - payload.NewGenericDataPayload([]byte("data")), - 0, - ed25519.EmptySignature, - ) - - assert.Equal(t, true, IsFaucetReq(faucetMsg)) - assert.Equal(t, false, IsFaucetReq(dataMsg)) -} diff --git a/plugins/faucet/errors.go b/plugins/faucet/errors.go index 43afecd3cf..d0dba188ec 100644 --- a/plugins/faucet/errors.go +++ b/plugins/faucet/errors.go @@ -1,10 +1,16 @@ package faucet -import "errors" +import ( + "golang.org/x/xerrors" +) var ( - // ErrAddressIsBlacklisted is returned if a funding can't be processed since the address is blacklisted. - ErrAddressIsBlacklisted = errors.New("can't fund address as it is blacklisted") - // ErrPrepareFaucet is returned if the faucet cannot prepare outputs. - ErrPrepareFaucet = errors.New("can't prepare faucet outputs") + // ErrNotEnoughFundingOutputs if there are not enough funding outputs in the faucet. + ErrNotEnoughFundingOutputs = xerrors.New("not enough funding outputs to complete the request") + // ErrMissingRemainderOutput is returned if the remainder output can not be found. + ErrMissingRemainderOutput = xerrors.New("can't find faucet remainder output") + // ErrNotEnoughFunds is returned when not enough funds are left in the faucet. + ErrNotEnoughFunds = xerrors.New("not enough funds in the faucet") + // ErrConfirmationTimeoutExpired is returned when a faucet transaction was not confirmed in expected time. 
+ ErrConfirmationTimeoutExpired = xerrors.New("tx confirmation time expired") ) diff --git a/plugins/faucet/plugin.go b/plugins/faucet/plugin.go index 9145ca9a8a..5dd49b42f2 100644 --- a/plugins/faucet/plugin.go +++ b/plugins/faucet/plugin.go @@ -2,19 +2,22 @@ package faucet import ( "crypto" - "errors" "runtime" "sync" "time" "github.com/iotaledger/hive.go/daemon" + "github.com/iotaledger/hive.go/datastructure/orderedmap" "github.com/iotaledger/hive.go/events" "github.com/iotaledger/hive.go/logger" "github.com/iotaledger/hive.go/node" "github.com/iotaledger/hive.go/workerpool" "github.com/mr-tron/base58" flag "github.com/spf13/pflag" + "go.uber.org/atomic" + walletseed "github.com/iotaledger/goshimmer/client/wallet/packages/seed" + "github.com/iotaledger/goshimmer/packages/ledgerstate" "github.com/iotaledger/goshimmer/packages/pow" "github.com/iotaledger/goshimmer/packages/shutdown" "github.com/iotaledger/goshimmer/packages/tangle" @@ -26,6 +29,9 @@ const ( // PluginName is the name of the faucet plugin. PluginName = "Faucet" + // WaitForSync defines the time to wait for being in sync. + WaitForSync = 5 * time.Second + // CfgFaucetSeed defines the base58 encoded seed the faucet uses. CfgFaucetSeed = "faucet.seed" // CfgFaucetTokensPerRequest defines the amount of tokens the faucet should send for each request. @@ -40,29 +46,39 @@ const ( CfgFaucetBlacklistCapacity = "faucet.blacklistCapacity" // CfgFaucetPreparedOutputsCount is the number of outputs the faucet prepares for requests. CfgFaucetPreparedOutputsCount = "faucet.preparedOutputsCounts" + // CfgFaucetStartIndex defines from which address index the faucet should start gathering outputs. + CfgFaucetStartIndex = "faucet.startIndex" ) func init() { flag.String(CfgFaucetSeed, "", "the base58 encoded seed of the faucet, must be defined if this faucet is enabled") - flag.Int(CfgFaucetTokensPerRequest, 1337, "the amount of tokens the faucet should send for each request") + flag.Int(CfgFaucetTokensPerRequest, 1000000, "the amount of tokens the faucet should send for each request") flag.Int(CfgFaucetMaxTransactionBookedAwaitTimeSeconds, 5, "the max amount of time for a funding transaction to become booked in the value layer") flag.Int(CfgFaucetPoWDifficulty, 25, "defines the PoW difficulty for faucet payloads") flag.Int(CfgFaucetBlacklistCapacity, 10000, "holds the maximum amount the address blacklist holds") - flag.Int(CfgFaucetPreparedOutputsCount, 100, "number of outputs the faucet prepares") + flag.Int(CfgFaucetPreparedOutputsCount, 126, "number of outputs the faucet prepares") + flag.Int(CfgFaucetStartIndex, 0, "address index to start faucet with") } var ( // Plugin is the "plugin" instance of the faucet application. plugin *node.Plugin pluginOnce sync.Once - _faucet *Component + _faucet *StateManager faucetOnce sync.Once log *logger.Logger powVerifier = pow.New(crypto.BLAKE2b_512) fundingWorkerPool *workerpool.WorkerPool fundingWorkerCount = runtime.GOMAXPROCS(0) fundingWorkerQueueSize = 500 - preparedOutputsCount int + targetPoWDifficulty int + startIndex int + // blacklist makes sure that an address might only request tokens once. + blacklist *orderedmap.OrderedMap + blacklistCapacity int + blackListMutex sync.RWMutex + // signals that the faucet has initialized itself and can start funding requests + initDone atomic.Bool ) // Plugin returns the plugin instance of the faucet plugin. @@ -74,7 +90,7 @@ func Plugin() *node.Plugin { } // Faucet gets the faucet component instance the faucet plugin has initialized. 
-func Faucet() *Component { +func Faucet() *StateManager { faucetOnce.Do(func() { base58Seed := config.Node().String(CfgFaucetSeed) if base58Seed == "" { @@ -92,31 +108,34 @@ func Faucet() *Component { if maxTxBookedAwaitTime <= 0 { log.Fatalf("the max transaction booked await time must be more than 0") } - blacklistCapacity := config.Node().Int(CfgFaucetBlacklistCapacity) - preparedOutputsCount = config.Node().Int(CfgFaucetPreparedOutputsCount) + preparedOutputsCount := config.Node().Int(CfgFaucetPreparedOutputsCount) if preparedOutputsCount <= 0 { log.Fatalf("the number of faucet prepared outputs should be more than 0") } - _faucet = New(seedBytes, tokensPerRequest, blacklistCapacity, time.Duration(maxTxBookedAwaitTime)*time.Second, preparedOutputsCount) + _faucet = NewStateManager( + uint64(tokensPerRequest), + walletseed.NewSeed(seedBytes), + uint64(preparedOutputsCount), + time.Duration(maxTxBookedAwaitTime)*time.Second, + ) }) return _faucet } func configure(*node.Plugin) { log = logger.NewLogger(PluginName) + targetPoWDifficulty = config.Node().Int(CfgFaucetPoWDifficulty) + startIndex = config.Node().Int(CfgFaucetStartIndex) + blacklist = orderedmap.New() + blacklistCapacity = config.Node().Int(CfgFaucetBlacklistCapacity) Faucet() fundingWorkerPool = workerpool.New(func(task workerpool.Task) { msg := task.Param(0).(*tangle.Message) addr := msg.Payload().(*Request).Address() - msg, txID, err := Faucet().SendFunds(msg) + msg, txID, err := Faucet().FulFillFundingRequest(msg) if err != nil { log.Warnf("couldn't fulfill funding request to %s: %s", addr.Base58(), err) - if errors.Is(err, ErrPrepareFaucet) { - log.Warn(err.Error()) - } else { - log.Warnf("couldn't fulfill funding request to %s: %s", addr, err) - } return } log.Infof("sent funds to address %s via tx %s and msg %s", addr.Base58(), txID, msg.ID().String()) @@ -127,12 +146,27 @@ func configure(*node.Plugin) { func run(*node.Plugin) { if err := daemon.BackgroundWorker("[Faucet]", func(shutdownSignal <-chan struct{}) { - fundingWorkerPool.Start() - defer fundingWorkerPool.Stop() - if _, err := Faucet().PrepareGenesisOutput(); err != nil { - log.Errorf("couldn't move all faucet funds: %s", err) + // wait until node is in sync + for !messagelayer.Tangle().Synced() { + log.Infof("Waiting for node to become synced...") + time.Sleep(WaitForSync) + } + log.Infof("Waiting for node to become synced... DONE") + + log.Infof("Deriving faucet state from the ledger...") + // determine state, prepare more outputs if needed + dErr := Faucet().DeriveStateFromTangle(startIndex) + if dErr != nil { + log.Errorf(dErr.Error()) } + log.Infof("Deriving faucet state from the ledger... DONE") + log.Infof("Starting funding workerpools...") + // start the funding workerpool + fundingWorkerPool.Start() + defer fundingWorkerPool.Stop() + log.Infof("Starting funding workerpools... DONE") + initDone.Store(true) <-shutdownSignal }, shutdown.PriorityFaucet); err != nil { log.Panicf("Failed to start daemon: %s", err) @@ -141,15 +175,21 @@ func run(*node.Plugin) { func configureEvents() { messagelayer.Tangle().ConsensusManager.Events.MessageOpinionFormed.Attach(events.NewClosure(func(messageID tangle.MessageID) { + // Do not start picking up request while waiting for initialization. + // If faucet nodes crashes and you restart with a clean db, all previous faucet req msgs will be enqueued + // and addresses will be funded again. Therefore, do not process any faucet request messages until we are in + // sync and initialized. 
+ if !initDone.Load() { + return + } messagelayer.Tangle().Storage.Message(messageID).Consume(func(message *tangle.Message) { if !IsFaucetReq(message) { return } - fundingRequest := message.Payload().(*Request) addr := fundingRequest.Address() - if Faucet().IsAddressBlacklisted(addr) { - log.Debugf("can't fund address %s since it is blacklisted", addr) + if IsAddressBlackListed(addr) { + log.Debugf("can't fund address %s since it is blacklisted", addr.Base58()) return } @@ -159,7 +199,7 @@ func configureEvents() { log.Warnf("couldn't verify PoW of funding request for address %s", addr) return } - targetPoWDifficulty := config.Node().Int(CfgFaucetPoWDifficulty) + if leadingZeroes < targetPoWDifficulty { log.Debugf("funding request for address %s doesn't fulfill PoW requirement %d vs. %d", addr, targetPoWDifficulty, leadingZeroes) return @@ -168,6 +208,7 @@ func configureEvents() { // finally add it to the faucet to be processed _, added := fundingWorkerPool.TrySubmit(message) if !added { + RemoveAddressFromBlacklist(addr) log.Info("dropped funding request for address %s as queue is full", addr.Base58()) return } @@ -175,3 +216,38 @@ func configureEvents() { }) })) } + +// IsAddressBlackListed returns if an address is blacklisted. +// adds the given address to the blacklist and removes the oldest blacklist entry if it would go over capacity. +func IsAddressBlackListed(address ledgerstate.Address) bool { + blackListMutex.Lock() + defer blackListMutex.Unlock() + + // see if it was already blacklisted + _, blacklisted := blacklist.Get(address.Base58()) + + if blacklisted { + return true + } + + // add it to the blacklist + blacklist.Set(address.Base58(), true) + if blacklist.Size() > blacklistCapacity { + var headKey interface{} + blacklist.ForEach(func(key, value interface{}) bool { + headKey = key + return false + }) + blacklist.Delete(headKey) + } + + return false +} + +// RemoveAddressFromBlacklist removes an address from the blacklist. 
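// The blacklist above is a fixed-capacity FIFO set built on hive.go's orderedmap:
// IsAddressBlackListed inserts the address and evicts the oldest entry once the capacity is
// exceeded, while RemoveAddressFromBlacklist below un-blacklists an address whose request
// was dropped because the funding queue was full. A minimal sketch of the eviction pattern
// in isolation, with purely hypothetical names and capacity:
seen := orderedmap.New()
capacity := 3
for _, addr := range []string{"a", "b", "c", "d"} {
	seen.Set(addr, true)
	if seen.Size() > capacity {
		var oldest interface{}
		seen.ForEach(func(key, value interface{}) bool {
			oldest = key // ForEach starts at the head, i.e. the oldest entry
			return false // stop after the first element
		})
		seen.Delete(oldest) // "a" is evicted once "d" arrives
	}
}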
+func RemoveAddressFromBlacklist(address ledgerstate.Address) { + blackListMutex.Lock() + defer blackListMutex.Unlock() + + blacklist.Delete(address.Base58()) +} diff --git a/plugins/faucet/request_test.go b/plugins/faucet/request_test.go index 95dade7c8a..0cd55fbdc9 100644 --- a/plugins/faucet/request_test.go +++ b/plugins/faucet/request_test.go @@ -8,9 +8,11 @@ import ( "github.com/iotaledger/hive.go/crypto/ed25519" "github.com/iotaledger/hive.go/identity" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "github.com/iotaledger/goshimmer/packages/ledgerstate" "github.com/iotaledger/goshimmer/packages/tangle" + "github.com/iotaledger/goshimmer/packages/tangle/payload" ) func ExampleRequest() { @@ -60,3 +62,39 @@ func TestRequest(t *testing.T) { assert.Equal(t, originalRequest.Address(), clonedRequest2.Address()) } + +func TestIsFaucetReq(t *testing.T) { + keyPair := ed25519.GenerateKeyPair() + address := ledgerstate.NewED25519Address(keyPair.PublicKey) + local := identity.NewLocalIdentity(keyPair.PublicKey, keyPair.PrivateKey) + + faucetRequest, err := NewRequest(address, 4) + if err != nil { + require.NoError(t, err) + return + } + faucetMsg := tangle.NewMessage( + []tangle.MessageID{tangle.EmptyMessageID}, + []tangle.MessageID{}, + time.Now(), + local.PublicKey(), + 0, + faucetRequest, + 0, + ed25519.EmptySignature, + ) + + dataMsg := tangle.NewMessage( + []tangle.MessageID{tangle.EmptyMessageID}, + []tangle.MessageID{}, + time.Now(), + local.PublicKey(), + 0, + payload.NewGenericDataPayload([]byte("data")), + 0, + ed25519.EmptySignature, + ) + + assert.Equal(t, true, IsFaucetReq(faucetMsg)) + assert.Equal(t, false, IsFaucetReq(dataMsg)) +} diff --git a/plugins/faucet/state_manager.go b/plugins/faucet/state_manager.go new file mode 100644 index 0000000000..88e9ba4624 --- /dev/null +++ b/plugins/faucet/state_manager.go @@ -0,0 +1,506 @@ +package faucet + +import ( + "container/list" + "sort" + "sync" + "time" + + "github.com/iotaledger/hive.go/crypto/ed25519" + "github.com/iotaledger/hive.go/events" + "github.com/iotaledger/hive.go/identity" + "golang.org/x/xerrors" + + walletseed "github.com/iotaledger/goshimmer/client/wallet/packages/seed" + "github.com/iotaledger/goshimmer/packages/clock" + "github.com/iotaledger/goshimmer/packages/ledgerstate" + "github.com/iotaledger/goshimmer/packages/tangle" + "github.com/iotaledger/goshimmer/plugins/autopeering/local" + "github.com/iotaledger/goshimmer/plugins/messagelayer" +) + +const ( + // GenesisTokenAmount is the total supply. + GenesisTokenAmount = 1000000000000000 + + // RemainderAddressIndex is the RemainderAddressIndex. + RemainderAddressIndex = 0 + + // MinimumFaucetBalance defines the minimum token amount required, before the faucet stops operating. + MinimumFaucetBalance = 0.1 * GenesisTokenAmount + + // MaxFaucetOutputsCount defines the max outputs count for the Facuet as the ledgerstate.MaxOutputCount -1 remainder output. + MaxFaucetOutputsCount = ledgerstate.MaxOutputCount - 1 + + // WaitForConfirmation defines the wait time before considering a transaction confirmed. + WaitForConfirmation = 10 * time.Second +) + +// region FaucetOutput + +// FaucetOutput represents an output controlled by the faucet. +type FaucetOutput struct { + ID ledgerstate.OutputID + Balance uint64 + Address ledgerstate.Address + AddressIndex uint64 +} + +// endregion + +// region StateManager + +// StateManager manages the funds and outputs of the faucet. 
Can derive its state from a synchronized Tangle, can +// carry out funding requests, and prepares more funding outputs when needed. +type StateManager struct { + // ordered list of available outputs to fund faucet requests + fundingOutputs *list.List + // output that holds the remainder funds to the faucet, should always be on address 0 + remainderOutput *FaucetOutput + // the last funding output address index + // when we prepare new funding outputs, we start from lastFundingOutputAddressIndex + 1 + lastFundingOutputAddressIndex uint64 + // mapping base58 encoded addresses to their indices + addressToIndex map[string]uint64 + + // the amount of tokens to send to every request + tokensPerRequest uint64 + // number of funding outputs to prepare if fundingOutputs is exhausted + preparedOutputsCount uint64 + // the seed instance of the faucet holding the tokens + seed *walletseed.Seed + + // the time to await for the transaction fulfilling a funding request + // to become booked in the value layer + maxTxBookedAwaitTime time.Duration + + // ensures that only one goroutine can work on the stateManager at any time + sync.RWMutex +} + +// NewStateManager creates a new state manager for the faucet. +func NewStateManager( + tokensPerRequest uint64, + seed *walletseed.Seed, + preparedOutputsCount uint64, + maxTxBookedTime time.Duration, +) *StateManager { + // currently the max number of outputs in a tx is 127, therefore, when creating the splitting tx, we can have at most + // 126 prepared outputs (+1 remainder output). + // TODO: break down the splitting into more tx steps to be able to create more, than 126 + if preparedOutputsCount > MaxFaucetOutputsCount { + preparedOutputsCount = MaxFaucetOutputsCount + } + res := &StateManager{ + fundingOutputs: list.New(), + addressToIndex: map[string]uint64{ + seed.Address(RemainderAddressIndex).Address().Base58(): RemainderAddressIndex, + }, + + tokensPerRequest: tokensPerRequest, + preparedOutputsCount: preparedOutputsCount, + seed: seed, + maxTxBookedAwaitTime: maxTxBookedTime, + } + + return res +} + +// FundingOutputsCount returns the number of available outputs that can be used to fund a request. +func (s *StateManager) FundingOutputsCount() int { + s.RLock() + defer s.RUnlock() + return s.fundingOutputs.Len() +} + +// DeriveStateFromTangle derives the faucet state from a synchronized Tangle. +// - startIndex defines from which address index to start look for prepared outputs. +// - remainder output should always sit on address 0. +// - if no funding outputs are found, the faucet creates them from the remainder output. 
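// Illustrative arithmetic for the scan performed by DeriveStateFromTangle below: the address
// range to inspect is bounded by how much of the genesis supply has already left the
// remainder output. With the defaults of this release (GenesisTokenAmount = 1e15,
// tokensPerRequest = 1,000,000), a remainder output still holding 999,999,000,000,000 tokens
// gives
//
//	endIndex = (1,000,000,000,000,000 - 999,999,000,000,000) / 1,000,000 = 1,000
//
// so only address indices up to 1,000 need to be checked for prepared or to-be-swept outputs.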
+func (s *StateManager) DeriveStateFromTangle(startIndex int) (err error) { + s.Lock() + defer s.Unlock() + + foundPreparedOutputs := make([]*FaucetOutput, 0) + toBeSweptOutputs := make([]*FaucetOutput, 0) + var foundRemainderOutput *FaucetOutput + + remainderAddress := s.seed.Address(RemainderAddressIndex).Address() + + // remainder output should sit on address 0 + messagelayer.Tangle().LedgerState.OutputsOnAddress(remainderAddress).Consume(func(output ledgerstate.Output) { + messagelayer.Tangle().LedgerState.OutputMetadata(output.ID()).Consume(func(outputMetadata *ledgerstate.OutputMetadata) { + if outputMetadata.ConsumerCount() < 1 { + iotaBalance, ok := output.Balances().Get(ledgerstate.ColorIOTA) + if !ok || iotaBalance < MinimumFaucetBalance { + return + } + if foundRemainderOutput != nil && iotaBalance < foundRemainderOutput.Balance { + // when multiple "big" unspent outputs sit on this address, take the biggest one + return + } + foundRemainderOutput = &FaucetOutput{ + ID: output.ID(), + Balance: iotaBalance, + Address: output.Address(), + AddressIndex: RemainderAddressIndex, + } + } + }) + }) + if foundRemainderOutput == nil { + return xerrors.Errorf("can't find an output on address %s that has at least %d tokens", remainderAddress.Base58(), int(MinimumFaucetBalance)) + } + + endIndex := (GenesisTokenAmount - foundRemainderOutput.Balance) / s.tokensPerRequest + log.Infof("%d indices have already been used based on found remainder output", endIndex) + + log.Infof("Looking for prepared outputs in the Tangle...") + + for i := startIndex; uint64(i) <= endIndex; i++ { + messagelayer.Tangle().LedgerState.OutputsOnAddress(s.seed.Address(uint64(i)).Address()).Consume(func(output ledgerstate.Output) { + messagelayer.Tangle().LedgerState.OutputMetadata(output.ID()).Consume(func(outputMetadata *ledgerstate.OutputMetadata) { + if outputMetadata.ConsumerCount() < 1 { + iotaBalance, colorExist := output.Balances().Get(ledgerstate.ColorIOTA) + if !colorExist { + return + } + switch iotaBalance { + case s.tokensPerRequest: + // we found a prepared output + foundPreparedOutputs = append(foundPreparedOutputs, &FaucetOutput{ + ID: output.ID(), + Balance: iotaBalance, + Address: output.Address(), + AddressIndex: uint64(i), + }) + default: + toBeSweptOutputs = append(toBeSweptOutputs, &FaucetOutput{ + ID: output.ID(), + Balance: iotaBalance, + Address: output.Address(), + AddressIndex: uint64(i), + }) + } + } + }) + }) + } + log.Infof("Found %d prepared outputs in the Tangle", len(foundPreparedOutputs)) + log.Infof("Looking for prepared outputs in the Tangle... DONE") + + s.remainderOutput = foundRemainderOutput + + if len(foundPreparedOutputs) == 0 { + // prepare more funding outputs if we did not find any + err = s.prepareMoreFundingOutputs() + if err != nil { + return xerrors.Errorf("Found no prepared outputs, failed to create them: %w", err) + } + } else { + // else just save the found outputs into the state + s.saveFundingOutputs(foundPreparedOutputs) + } + log.Infof("Remainder output %s had %d funds", s.remainderOutput.ID.Base58(), s.remainderOutput.Balance) + // ignore toBeSweptOutputs + return err +} + +// FulFillFundingRequest fulfills a faucet request by spending the next funding output to the requested address. +// Mana of the transaction is pledged to the requesting node. 
+func (s *StateManager) FulFillFundingRequest(requestMsg *tangle.Message) (m *tangle.Message, txID string, err error) { + s.Lock() + defer s.Unlock() + + addr := requestMsg.Payload().(*Request).Address() + + // get an output that we can spend + fundingOutput, fErr := s.getFundingOutput() + // we don't have funding outputs + if xerrors.Is(fErr, ErrNotEnoughFundingOutputs) { + // try preparing them + log.Infof("Preparing more outputs...") + pErr := s.prepareMoreFundingOutputs() + if pErr != nil { + err = xerrors.Errorf("failed to prepare more outputs: %w", pErr) + return + } + log.Infof("Preparing more outputs... DONE") + // and try getting the output again + fundingOutput, fErr = s.getFundingOutput() + if fErr != nil { + err = xerrors.Errorf("failed to gather funding outputs") + return + } + } else if fErr != nil { + err = xerrors.Errorf("failed to gather funding outputs") + return + } + + // prepare funding tx, pledge mana to requester + pledgeID := identity.NewID(requestMsg.IssuerPublicKey()) + tx := s.prepareFaucetTransaction(addr, fundingOutput, pledgeID) + + // issue funding request + m, err = s.issueTX(tx) + if err != nil { + return + } + txID = tx.ID().Base58() + + return m, txID, err +} + +// prepareFaucetTransaction prepares a funding faucet transaction that spends fundingOutput to destAddr and pledges +// mana to pledgeID. +func (s *StateManager) prepareFaucetTransaction(destAddr ledgerstate.Address, fundingOutput *FaucetOutput, pledgeID identity.ID) (tx *ledgerstate.Transaction) { + inputs := ledgerstate.NewInputs(ledgerstate.NewUTXOInput(fundingOutput.ID)) + + outputs := ledgerstate.NewOutputs(ledgerstate.NewSigLockedColoredOutput( + ledgerstate.NewColoredBalances( + map[ledgerstate.Color]uint64{ + ledgerstate.ColorIOTA: s.tokensPerRequest, + }), + destAddr, + ), + ) + + essence := ledgerstate.NewTransactionEssence( + 0, + clock.SyncedTime(), + pledgeID, + pledgeID, + ledgerstate.NewInputs(inputs...), + ledgerstate.NewOutputs(outputs...), + ) + + w := wallet{keyPair: *s.seed.KeyPair(fundingOutput.AddressIndex)} + unlockBlock := ledgerstate.NewSignatureUnlockBlock(w.sign(essence)) + + tx = ledgerstate.NewTransaction( + essence, + ledgerstate.UnlockBlocks{unlockBlock}, + ) + return +} + +// saveFundingOutputs sorts the given slice of faucet funding outputs based on the address indices, and then saves them in stateManager. +func (s *StateManager) saveFundingOutputs(fundingOutputs []*FaucetOutput) { + // sort prepared outputs based on address index + sort.Slice(fundingOutputs, func(i, j int) bool { + return fundingOutputs[i].AddressIndex < fundingOutputs[j].AddressIndex + }) + + // fill prepared output list + for _, fOutput := range fundingOutputs { + s.fundingOutputs.PushBack(fOutput) + } + s.lastFundingOutputAddressIndex = s.fundingOutputs.Back().Value.(*FaucetOutput).AddressIndex + + log.Infof("Added %d new funding outputs, last used address index is %d", len(fundingOutputs), s.lastFundingOutputAddressIndex) + log.Infof("There are currently %d prepared outputs in the faucet", s.fundingOutputs.Len()) +} + +// getFundingOutput returns the first funding output in the list. 
+func (s *StateManager) getFundingOutput() (fundingOutput *FaucetOutput, err error) { + if s.fundingOutputs.Len() < 1 { + return nil, ErrNotEnoughFundingOutputs + } + fundingOutput = s.fundingOutputs.Remove(s.fundingOutputs.Front()).(*FaucetOutput) + return +} + +// prepareMoreFundingOutputs prepares more funding outputs by splitting up the remainder output, submits the transaction +// to the Tangle, waits for its confirmation, and then updates the internal state of the faucet. +func (s *StateManager) prepareMoreFundingOutputs() (err error) { + // no remainder output present + if s.remainderOutput == nil { + err = ErrMissingRemainderOutput + return + } + + // not enough funds to carry out operation + if s.tokensPerRequest*s.preparedOutputsCount > s.remainderOutput.Balance { + err = ErrNotEnoughFunds + return + } + + // take remainder output and split it up + tx := s.createSplittingTx() + + txConfirmed := make(chan *ledgerstate.Transaction, 1) + + monitorTxConfirmation := events.NewClosure( + func(msgID tangle.MessageID) { + messagelayer.Tangle().Storage.Message(msgID).Consume(func(msg *tangle.Message) { + if msg.Payload().Type() == ledgerstate.TransactionType { + msgTx, _, er := ledgerstate.TransactionFromBytes(msg.Payload().Bytes()) + if er != nil { + // log.Errorf("Message %s contains invalid transaction payload: %w", msgID.String(), err) + return + } + if msgTx.ID() == tx.ID() { + txConfirmed <- msgTx + } + } + }) + }) + + // listen on confirmation + messagelayer.Tangle().ConsensusManager.Events.TransactionConfirmed.Attach(monitorTxConfirmation) + defer messagelayer.Tangle().ConsensusManager.Events.TransactionConfirmed.Detach(monitorTxConfirmation) + + // issue the tx + issuedMsg, issueErr := s.issueTX(tx) + if issueErr != nil { + return issueErr + } + + ticker := time.NewTicker(WaitForConfirmation) + defer ticker.Stop() + timeoutCounter := 0 + maxWaitAttempts := 10 // 100 s max timeout (if fpc voting is in place) + + for { + select { + case confirmedTx := <-txConfirmed: + err = s.updateState(confirmedTx) + return err + case <-ticker.C: + if timeoutCounter >= maxWaitAttempts { + return xerrors.Errorf("Message %s: %w", issuedMsg.ID().String(), ErrConfirmationTimeoutExpired) + } + timeoutCounter++ + } + } +} + +// updateState takes a confirmed transaction (splitting tx), and updates the faucet internal state based on its content. 
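// Illustrative arithmetic for the classification updateState performs below: with the
// defaults of this release (tokensPerRequest = 1,000,000, preparedOutputsCount = 126), a
// splitting transaction built from a remainder output holding 500,000,000 tokens is expected
// to contain
//
//	126 outputs of exactly 1,000,000 tokens (the new funding outputs), and
//	1 output of 500,000,000 - 126 * 1,000,000 = 374,000,000 tokens (the new remainder),
//
// and every output is matched against exactly those two balances; any other balance is
// treated as an error. The prepareMoreFundingOutputs routine above waits at most
// 10 * WaitForConfirmation = 100 seconds for this splitting transaction to confirm before
// giving up with ErrConfirmationTimeoutExpired.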
+func (s *StateManager) updateState(tx *ledgerstate.Transaction) error { + remainingBalance := s.remainderOutput.Balance - s.tokensPerRequest*s.preparedOutputsCount + fundingOutputs := make([]*FaucetOutput, 0, s.preparedOutputsCount) + + // derive information from outputs + for _, output := range tx.Essence().Outputs() { + iotaBalance, hasIota := output.Balances().Get(ledgerstate.ColorIOTA) + if !hasIota { + return xerrors.Errorf("tx outputs don't have IOTA balance ") + } + switch iotaBalance { + case s.tokensPerRequest: + fundingOutputs = append(fundingOutputs, &FaucetOutput{ + ID: output.ID(), + Balance: iotaBalance, + Address: output.Address(), + AddressIndex: s.addressToIndex[output.Address().Base58()], + }) + case remainingBalance: + s.remainderOutput = &FaucetOutput{ + ID: output.ID(), + Balance: iotaBalance, + Address: output.Address(), + AddressIndex: s.addressToIndex[output.Address().Base58()], + } + default: + err := xerrors.Errorf("tx %s should not have output with balance %d", tx.ID().Base58(), iotaBalance) + return err + } + } + // save the info in internal state + s.saveFundingOutputs(fundingOutputs) + return nil +} + +// createSplittingTx takes the current remainder output and creates a transaction that splits it into s.preparedOutputsCount +// funding outputs and one remainder output. +func (s *StateManager) createSplittingTx() *ledgerstate.Transaction { + inputs := ledgerstate.NewInputs(ledgerstate.NewUTXOInput(s.remainderOutput.ID)) + + // prepare s.preparedOutputsCount number of funding outputs. + outputs := make(ledgerstate.Outputs, 0, s.preparedOutputsCount+1) + // start from the last used funding output address index + for i := s.lastFundingOutputAddressIndex + 1; i < s.lastFundingOutputAddressIndex+1+s.preparedOutputsCount; i++ { + outputs = append(outputs, ledgerstate.NewSigLockedColoredOutput( + ledgerstate.NewColoredBalances( + map[ledgerstate.Color]uint64{ + ledgerstate.ColorIOTA: s.tokensPerRequest, + }), + s.seed.Address(i).Address(), + ), + ) + s.addressToIndex[s.seed.Address(i).Address().Base58()] = i + } + + // add the remainder output + outputs = append(outputs, ledgerstate.NewSigLockedColoredOutput( + ledgerstate.NewColoredBalances( + map[ledgerstate.Color]uint64{ + ledgerstate.ColorIOTA: s.remainderOutput.Balance - s.tokensPerRequest*s.preparedOutputsCount, + }), + + s.seed.Address(RemainderAddressIndex).Address(), + ), + ) + + essence := ledgerstate.NewTransactionEssence( + 0, + clock.SyncedTime(), + local.GetInstance().ID(), + // consensus mana is pledged to EmptyNodeID + identity.ID{}, + ledgerstate.NewInputs(inputs...), + ledgerstate.NewOutputs(outputs...), + ) + + w := wallet{keyPair: *s.seed.KeyPair(s.remainderOutput.AddressIndex)} + unlockBlock := ledgerstate.NewSignatureUnlockBlock(w.sign(essence)) + + tx := ledgerstate.NewTransaction( + essence, + ledgerstate.UnlockBlocks{unlockBlock}, + ) + return tx +} + +// issueTX issues a transaction to the Tangle and waits for it to become booked. 
+func (s *StateManager) issueTX(tx *ledgerstate.Transaction) (msg *tangle.Message, err error) { + // attach to message layer + issueTransaction := func() (*tangle.Message, error) { + message, e := messagelayer.Tangle().IssuePayload(tx) + if e != nil { + return nil, e + } + return message, nil + } + + // block for a certain amount of time until we know that the transaction + // actually got booked by this node itself + // TODO: replace with an actual more reactive way + msg, err = messagelayer.AwaitMessageToBeBooked(issueTransaction, tx.ID(), s.maxTxBookedAwaitTime) + if err != nil { + return nil, xerrors.Errorf("%w: tx %s", err, tx.ID().String()) + } + return +} + +// endregion + +// region helper methods + +type wallet struct { + keyPair ed25519.KeyPair +} + +func (w wallet) privateKey() ed25519.PrivateKey { + return w.keyPair.PrivateKey +} + +func (w wallet) publicKey() ed25519.PublicKey { + return w.keyPair.PublicKey +} + +func (w wallet) sign(txEssence *ledgerstate.TransactionEssence) *ledgerstate.ED25519Signature { + return ledgerstate.NewED25519Signature(w.publicKey(), w.privateKey().Sign(txEssence.Bytes())) +} + +// endregion diff --git a/plugins/mana/parameters.go b/plugins/mana/parameters.go index 09467a0e0f..c76b8b299e 100644 --- a/plugins/mana/parameters.go +++ b/plugins/mana/parameters.go @@ -33,8 +33,8 @@ const ( ) func init() { - flag.Float64(CfgEmaCoefficient1, 0.00003209, "coefficient used for Effective Base Mana 1 (moving average) calculation") - flag.Float64(CfgEmaCoefficient2, 0.00003209, "coefficient used for Effective Base Mana 2 (moving average) calculation") + flag.Float64(CfgEmaCoefficient1, 0.00003209, "coefficient used for Effective Base Mana 1 (moving average) calculation") // half-life = 6 hours + flag.Float64(CfgEmaCoefficient2, 0.0057762265, "coefficient used for Effective Base Mana 2 (moving average) calculation") // half-life = 2 minutes flag.Float64(CfgDecay, 0.00003209, "decay coefficient used for Base Mana 2 calculation") flag.StringSlice(CfgAllowedAccessPledge, nil, "list of nodes that access mana is allowed to be pledged to") flag.StringSlice(CfgAllowedConsensusPledge, nil, "list of nodes that consensus mana is allowed to be pledge to") diff --git a/plugins/messagelayer/plugin.go b/plugins/messagelayer/plugin.go index 70b783e846..d26c58f0ce 100644 --- a/plugins/messagelayer/plugin.go +++ b/plugins/messagelayer/plugin.go @@ -50,6 +50,16 @@ func configure(plugin *node.Plugin) { plugin.LogError(err) })) + Tangle().Parser.Events.MessageRejected.Attach(events.NewClosure(func(rejectedEvent *tangle.MessageRejectedEvent, err error) { + plugin.LogError(err) + plugin.LogError(rejectedEvent.Message) + })) + + // Messages created by the node need to pass through the normal flow. 
+ Tangle().MessageFactory.Events.MessageConstructed.Attach(events.NewClosure(func(message *tangle.Message) { + Tangle().ProcessGossipMessage(message.Bytes(), local.GetInstance().Peer) + })) + // read snapshot file if Parameters.Snapshot.File != "" { snapshot := ledgerstate.Snapshot{} diff --git a/plugins/webapi/mana/mana.go b/plugins/webapi/mana/mana.go index ee6a5a12bb..6d5e636588 100644 --- a/plugins/webapi/mana/mana.go +++ b/plugins/webapi/mana/mana.go @@ -4,7 +4,8 @@ import ( "net/http" "time" - "github.com/iotaledger/hive.go/identity" + "golang.org/x/xerrors" + "github.com/labstack/echo" "github.com/mr-tron/base58" @@ -23,18 +24,27 @@ func getManaHandler(c echo.Context) error { if err != nil { return c.JSON(http.StatusBadRequest, GetManaResponse{Error: err.Error()}) } - emptyID := identity.ID{} - if ID == emptyID { + if request.NodeID == "" { ID = local.GetInstance().ID() } t := time.Now() accessMana, tAccess, err := manaPlugin.GetAccessMana(ID, t) if err != nil { - return c.JSON(http.StatusBadRequest, GetManaResponse{Error: err.Error()}) + if xerrors.Is(err, mana.ErrNodeNotFoundInBaseManaVector) { + accessMana = 0 + tAccess = t + } else { + return c.JSON(http.StatusBadRequest, GetManaResponse{Error: err.Error()}) + } } consensusMana, tConsensus, err := manaPlugin.GetConsensusMana(ID, t) if err != nil { - return c.JSON(http.StatusBadRequest, GetManaResponse{Error: err.Error()}) + if xerrors.Is(err, mana.ErrNodeNotFoundInBaseManaVector) { + consensusMana = 0 + tConsensus = t + } else { + return c.JSON(http.StatusBadRequest, GetManaResponse{Error: err.Error()}) + } } return c.JSON(http.StatusOK, GetManaResponse{ diff --git a/plugins/webapi/mana/percentile.go b/plugins/webapi/mana/percentile.go index 5dbfd831cf..a77fd3befc 100644 --- a/plugins/webapi/mana/percentile.go +++ b/plugins/webapi/mana/percentile.go @@ -4,9 +4,9 @@ import ( "net/http" "time" - "github.com/iotaledger/hive.go/identity" "github.com/labstack/echo" "github.com/mr-tron/base58" + "golang.org/x/xerrors" "github.com/iotaledger/goshimmer/packages/mana" "github.com/iotaledger/goshimmer/plugins/autopeering/local" @@ -23,8 +23,7 @@ func getPercentileHandler(c echo.Context) error { if err != nil { return c.JSON(http.StatusBadRequest, GetPercentileResponse{Error: err.Error()}) } - emptyID := identity.ID{} - if ID == emptyID { + if request.NodeID == "" { ID = local.GetInstance().ID() } t := time.Now() @@ -34,7 +33,11 @@ func getPercentileHandler(c echo.Context) error { } accessPercentile, err := access.GetPercentile(ID) if err != nil { - return c.JSON(http.StatusBadRequest, GetPercentileResponse{Error: err.Error()}) + if xerrors.Is(err, mana.ErrNodeNotFoundInBaseManaVector) { + accessPercentile = 0 + } else { + return c.JSON(http.StatusBadRequest, GetManaResponse{Error: err.Error()}) + } } consensus, tConsensus, err := manaPlugin.GetManaMap(mana.ConsensusMana, t) if err != nil { @@ -42,7 +45,11 @@ func getPercentileHandler(c echo.Context) error { } consensusPercentile, err := consensus.GetPercentile(ID) if err != nil { - return c.JSON(http.StatusBadRequest, GetPercentileResponse{Error: err.Error()}) + if xerrors.Is(err, mana.ErrNodeNotFoundInBaseManaVector) { + consensusPercentile = 0 + } else { + return c.JSON(http.StatusBadRequest, GetManaResponse{Error: err.Error()}) + } } return c.JSON(http.StatusOK, GetPercentileResponse{ ShortNodeID: ID.String(), diff --git a/plugins/webapi/tools/message/diagnostic_messages.go b/plugins/webapi/tools/message/diagnostic_messages.go index 3c2df1f3ce..46e1db9926 100644 --- 
a/plugins/webapi/tools/message/diagnostic_messages.go +++ b/plugins/webapi/tools/message/diagnostic_messages.go @@ -1,12 +1,15 @@ package message import ( + "encoding/csv" "fmt" "net/http" "strconv" "strings" "time" + "golang.org/x/xerrors" + "github.com/iotaledger/hive.go/datastructure/walker" "github.com/iotaledger/hive.go/identity" "github.com/labstack/echo" @@ -19,14 +22,12 @@ import ( // DiagnosticMessagesHandler runs the diagnostic over the Tangle. func DiagnosticMessagesHandler(c echo.Context) (err error) { - runDiagnosticMessages(c) - return + return runDiagnosticMessages(c) } // DiagnosticMessagesOnlyFirstWeakReferencesHandler runs the diagnostic over the Tangle. func DiagnosticMessagesOnlyFirstWeakReferencesHandler(c echo.Context) (err error) { - runDiagnosticMessagesOnFirstWeakReferences(c) - return + return runDiagnosticMessagesOnFirstWeakReferences(c) } // DiagnosticMessagesRankHandler runs the diagnostic over the Tangle @@ -36,20 +37,19 @@ func DiagnosticMessagesRankHandler(c echo.Context) (err error) { if err != nil { return c.JSON(http.StatusBadRequest, jsonmodels.NewErrorResponse(err)) } - runDiagnosticMessages(c, rank) - return + return runDiagnosticMessages(c, rank) } // region DiagnosticMessages code implementation ///////////////////////////////////////////////////////////////////////////////// -func runDiagnosticMessages(c echo.Context, rank ...uint64) { +func runDiagnosticMessages(c echo.Context, rank ...uint64) (err error) { // write Header and table description c.Response().Header().Set(echo.HeaderContentType, "text/csv") c.Response().WriteHeader(http.StatusOK) - _, err := fmt.Fprintln(c.Response(), strings.Join(DiagnosticMessagesTableDescription, ",")) - if err != nil { - panic(err) + csvWriter := csv.NewWriter(c.Response()) + if err := csvWriter.Write(DiagnosticMessagesTableDescription); err != nil { + return xerrors.Errorf("failed to write table description row: %w", err) } startRank := uint64(0) @@ -57,54 +57,58 @@ func runDiagnosticMessages(c echo.Context, rank ...uint64) { if len(rank) > 0 { startRank = rank[0] } - + var writeErr error messagelayer.Tangle().Utils.WalkMessageID(func(messageID tangle.MessageID, walker *walker.Walker) { messageInfo := getDiagnosticMessageInfo(messageID) if messageInfo.Rank >= startRank { - _, err = fmt.Fprintln(c.Response(), messageInfo.toCSV()) - if err != nil { - panic(err) + if err := csvWriter.Write(messageInfo.toCSVRow()); err != nil { + writeErr = xerrors.Errorf("failed to write message diagnostic info row: %w", err) + return } - c.Response().Flush() } messagelayer.Tangle().Storage.Approvers(messageID).Consume(func(approver *tangle.Approver) { walker.Push(approver.ApproverMessageID()) }) }, tangle.MessageIDs{tangle.EmptyMessageID}) + if writeErr != nil { + return writeErr + } + csvWriter.Flush() + if err := csvWriter.Error(); err != nil { + return xerrors.Errorf("csv writer failed after flush: %w", err) + } - c.Response().Flush() + return nil } -func runDiagnosticMessagesOnFirstWeakReferences(c echo.Context) { +func runDiagnosticMessagesOnFirstWeakReferences(c echo.Context) (err error) { // write Header and table description c.Response().Header().Set(echo.HeaderContentType, "text/csv") c.Response().WriteHeader(http.StatusOK) - _, err := fmt.Fprintln(c.Response(), strings.Join(DiagnosticMessagesTableDescription, ",")) - if err != nil { - panic(err) + csvWriter := csv.NewWriter(c.Response()) + if err := csvWriter.Write(DiagnosticMessagesTableDescription); err != nil { + return xerrors.Errorf("failed to write table 
description row: %w", err) } - + var writeErr error messagelayer.Tangle().Utils.WalkMessageID(func(messageID tangle.MessageID, walker *walker.Walker) { messageInfo := getDiagnosticMessageInfo(messageID) if len(messageInfo.WeakApprovers) > 0 { - _, err = fmt.Fprintln(c.Response(), messageInfo.toCSV()) - if err != nil { - panic(err) + if err := csvWriter.Write(messageInfo.toCSVRow()); err != nil { + writeErr = xerrors.Errorf("failed to write message diagnostic info row: %w", err) + return } - c.Response().Flush() messagelayer.Tangle().Storage.Message(messageID).Consume(func(message *tangle.Message) { message.ForEachParent(func(parent tangle.Parent) { - messageInfo = getDiagnosticMessageInfo(parent.ID) - _, err = fmt.Fprintln(c.Response(), messageInfo.toCSV()) - if err != nil { - panic(err) + parentMessageInfo := getDiagnosticMessageInfo(parent.ID) + if err := csvWriter.Write(parentMessageInfo.toCSVRow()); err != nil { + writeErr = xerrors.Errorf("failed to write parent message diagnostic info row: %w", err) + return } - c.Response().Flush() }) }) @@ -118,8 +122,16 @@ func runDiagnosticMessagesOnFirstWeakReferences(c echo.Context) { } }) }, tangle.MessageIDs{tangle.EmptyMessageID}) + if writeErr != nil { + return writeErr + } + + csvWriter.Flush() + if err := csvWriter.Error(); err != nil { + return xerrors.Errorf("csv writer failed after flush: %w", err) + } - c.Response().Flush() + return nil } // DiagnosticMessagesTableDescription holds the description of the diagnostic messages. @@ -188,8 +200,8 @@ type DiagnosticMessagesInfo struct { TransactionID string } -func getDiagnosticMessageInfo(messageID tangle.MessageID) DiagnosticMessagesInfo { - msgInfo := DiagnosticMessagesInfo{ +func getDiagnosticMessageInfo(messageID tangle.MessageID) *DiagnosticMessagesInfo { + msgInfo := &DiagnosticMessagesInfo{ ID: messageID.String(), } @@ -217,14 +229,16 @@ func getDiagnosticMessageInfo(messageID tangle.MessageID) DiagnosticMessagesInfo msgInfo.Booked = metadata.IsBooked() msgInfo.Eligible = metadata.IsEligible() msgInfo.Invalid = metadata.IsInvalid() - msgInfo.Rank = metadata.StructureDetails().Rank - msgInfo.IsPastMarker = metadata.StructureDetails().IsPastMarker - msgInfo.PastMarkers = metadata.StructureDetails().PastMarkers.SequenceToString() - msgInfo.PMHI = uint64(metadata.StructureDetails().PastMarkers.HighestIndex()) - msgInfo.PMLI = uint64(metadata.StructureDetails().PastMarkers.LowestIndex()) - msgInfo.FutureMarkers = metadata.StructureDetails().FutureMarkers.SequenceToString() - msgInfo.FMHI = uint64(metadata.StructureDetails().FutureMarkers.HighestIndex()) - msgInfo.FMLI = uint64(metadata.StructureDetails().FutureMarkers.LowestIndex()) + if metadata.StructureDetails() != nil { + msgInfo.Rank = metadata.StructureDetails().Rank + msgInfo.IsPastMarker = metadata.StructureDetails().IsPastMarker + msgInfo.PastMarkers = metadata.StructureDetails().PastMarkers.SequenceToString() + msgInfo.PMHI = uint64(metadata.StructureDetails().PastMarkers.HighestIndex()) + msgInfo.PMLI = uint64(metadata.StructureDetails().PastMarkers.LowestIndex()) + msgInfo.FutureMarkers = metadata.StructureDetails().FutureMarkers.SequenceToString() + msgInfo.FMHI = uint64(metadata.StructureDetails().FutureMarkers.HighestIndex()) + msgInfo.FMLI = uint64(metadata.StructureDetails().FutureMarkers.LowestIndex()) + } branchID = metadata.BranchID() }, false) @@ -237,8 +251,8 @@ func getDiagnosticMessageInfo(messageID tangle.MessageID) DiagnosticMessagesInfo return msgInfo } -func (d DiagnosticMessagesInfo) toCSV() (result string) { 
- row := []string{ +func (d *DiagnosticMessagesInfo) toCSVRow() (row []string) { + row = []string{ d.ID, d.IssuerID, d.IssuerPublicKey, @@ -270,9 +284,7 @@ func (d DiagnosticMessagesInfo) toCSV() (result string) { d.TransactionID, } - result = strings.Join(row, ",") - - return + return row } // rankFromContext determines the marker rank from the rank parameter in an echo.Context. diff --git a/plugins/webapi/tools/message/diagnostic_tips.go b/plugins/webapi/tools/message/diagnostic_tips.go new file mode 100644 index 0000000000..7dd1f7fefb --- /dev/null +++ b/plugins/webapi/tools/message/diagnostic_tips.go @@ -0,0 +1,93 @@ +package message + +import ( + "encoding/csv" + "net/http" + + "github.com/iotaledger/goshimmer/packages/tangle" + "github.com/iotaledger/goshimmer/plugins/messagelayer" + + "github.com/labstack/echo" + "golang.org/x/xerrors" +) + +type tipsDiagnosticType int + +const ( + strongTipsOnly tipsDiagnosticType = iota + weakTipsOnly + allTips +) + +// TipsDiagnosticHandler runs tips diagnostic over the Tangle. +func TipsDiagnosticHandler(c echo.Context) error { + return runTipsDiagnostic(c, allTips) +} + +// StrongTipsDiagnosticHandler runs strong tips diagnostic over the Tangle. +func StrongTipsDiagnosticHandler(c echo.Context) error { + return runTipsDiagnostic(c, strongTipsOnly) +} + +// WeakTipsDiagnosticHandler runs weak tips diagnostic over the Tangle. +func WeakTipsDiagnosticHandler(c echo.Context) error { + return runTipsDiagnostic(c, weakTipsOnly) +} + +var tipsDiagnosticTableDescription = append([]string{"tipType"}, DiagnosticMessagesTableDescription...) + +type tipsDiagnosticInfo struct { + tipType tangle.TipType + *DiagnosticMessagesInfo +} + +func (tdi *tipsDiagnosticInfo) toCSVRow() (row []string) { + messageRow := tdi.DiagnosticMessagesInfo.toCSVRow() + row = make([]string, 0, 1+len(messageRow)) + row = append(row, tdi.tipType.String()) + row = append(row, messageRow...) 
+ return row +} + +func runTipsDiagnostic(c echo.Context, diagnosticType tipsDiagnosticType) (err error) { + response := c.Response() + response.Header().Set(echo.HeaderContentType, "text/csv") + response.WriteHeader(http.StatusOK) + + csvWriter := csv.NewWriter(response) + if err := csvWriter.Write(tipsDiagnosticTableDescription); err != nil { + return xerrors.Errorf("can't write table description row: %w", err) + } + var strongTips, weakTips tangle.MessageIDs + if diagnosticType == strongTipsOnly || diagnosticType == allTips { + strongTips = messagelayer.Tangle().TipManager.AllStrongTips() + } + if diagnosticType == weakTipsOnly || diagnosticType == allTips { + weakTips = messagelayer.Tangle().TipManager.AllWeakTips() + } + if err := buildAndWriteTipsDiagnostic(csvWriter, strongTips, tangle.StrongTip); err != nil { + return xerrors.Errorf("%w", err) + } + if err := buildAndWriteTipsDiagnostic(csvWriter, weakTips, tangle.WeakTip); err != nil { + return xerrors.Errorf("%w", err) + } + csvWriter.Flush() + if err := csvWriter.Error(); err != nil { + return xerrors.Errorf("csv writer failed after flush: %w", err) + } + return nil +} + +func buildAndWriteTipsDiagnostic(w *csv.Writer, tips tangle.MessageIDs, tipType tangle.TipType) (err error) { + for _, tipID := range tips { + messageInfo := getDiagnosticMessageInfo(tipID) + tipInfo := tipsDiagnosticInfo{ + tipType: tipType, + DiagnosticMessagesInfo: messageInfo, + } + if err := w.Write(tipInfo.toCSVRow()); err != nil { + return xerrors.Errorf("failed to write tip diagnostic info row: %w", err) + } + } + return nil +} diff --git a/plugins/webapi/tools/plugin.go b/plugins/webapi/tools/plugin.go index 8578dddd24..5e3bb881b4 100644 --- a/plugins/webapi/tools/plugin.go +++ b/plugins/webapi/tools/plugin.go @@ -41,4 +41,7 @@ func configure(_ *node.Plugin) { webapi.Server().GET("tools/diagnostic/branches", message.DiagnosticBranchesHandler) webapi.Server().GET("tools/diagnostic/branches/lazybooked", message.DiagnosticLazyBookedBranchesHandler) webapi.Server().GET("tools/diagnostic/branches/invalid", message.DiagnosticInvalidBranchesHandler) + webapi.Server().GET("tools/diagnostic/tips", message.TipsDiagnosticHandler) + webapi.Server().GET("tools/diagnostic/tips/strong", message.StrongTipsDiagnosticHandler) + webapi.Server().GET("tools/diagnostic/tips/weak", message.WeakTipsDiagnosticHandler) } diff --git a/tools/docker-network/run.sh b/tools/docker-network/run.sh index 913dcc60fe..52aeddd02a 100755 --- a/tools/docker-network/run.sh +++ b/tools/docker-network/run.sh @@ -8,8 +8,8 @@ fi REPLICAS=$1 GRAFANA=${2:-0} -DOCKER_BUILDKIT=1 -COMPOSE_DOCKER_CLI_BUILD=1 +export DOCKER_BUILDKIT=1 +export COMPOSE_DOCKER_CLI_BUILD=1 echo "Build GoShimmer" docker-compose -f builder/docker-compose.builder.yml up --abort-on-container-exit --exit-code-from builder diff --git a/tools/integration-tests/runTests.sh b/tools/integration-tests/runTests.sh index 81c72b6e63..940da02a7f 100755 --- a/tools/integration-tests/runTests.sh +++ b/tools/integration-tests/runTests.sh @@ -2,8 +2,8 @@ TEST_NAMES='autopeering common drng message value consensus faucet syncbeacon mana' -DOCKER_BUILDKIT=1 -COMPOSE_DOCKER_CLI_BUILD=1 +export DOCKER_BUILDKIT=1 +export COMPOSE_DOCKER_CLI_BUILD=1 echo "Build GoShimmer image" docker build -t iotaledger/goshimmer ../../. 
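The diagnostic handlers added and refactored above all follow the same streaming pattern: set the text/csv content type, write the header row through an encoding/csv writer, stream one row per message or tip, and only report success after Flush() plus a final Error() check (csv.Writer buffers, so an error from the underlying writer can surface only at that point). Below is a minimal sketch of that pattern using only the standard library plus golang.org/x/xerrors; writeDiagnosticCSV, header and rows are illustrative stand-ins rather than names from the patch, and the real handlers write through echo's response object while walking the Tangle.

package example

import (
	"encoding/csv"
	"net/http"

	"golang.org/x/xerrors"
)

// writeDiagnosticCSV streams a CSV table into an HTTP response.
// It mirrors the error handling used by the diagnostic handlers:
// fail fast on a row write error, then Flush and re-check Error().
func writeDiagnosticCSV(w http.ResponseWriter, header []string, rows [][]string) error {
	w.Header().Set("Content-Type", "text/csv")
	w.WriteHeader(http.StatusOK)

	csvWriter := csv.NewWriter(w)
	if err := csvWriter.Write(header); err != nil {
		return xerrors.Errorf("failed to write table description row: %w", err)
	}
	for _, row := range rows {
		if err := csvWriter.Write(row); err != nil {
			return xerrors.Errorf("failed to write diagnostic info row: %w", err)
		}
	}
	// csv.Writer is buffered: errors from the underlying writer only become
	// visible after Flush, hence the final Error() check.
	csvWriter.Flush()
	if err := csvWriter.Error(); err != nil {
		return xerrors.Errorf("csv writer failed after flush: %w", err)
	}
	return nil
}

Because the new tools/diagnostic/tips, tools/diagnostic/tips/strong and tools/diagnostic/tips/weak routes return this kind of CSV over a plain GET, their output can be consumed with any HTTP client and parsed with encoding/csv's Reader.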
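On the plugins/mana/parameters.go change further up: the comments pair each EMA coefficient with a half-life (6 hours for Effective Base Mana 1, 2 minutes for Effective Base Mana 2), and both constants are consistent with coefficient = ln 2 / half-life in seconds. That derivation is inferred from the values, not stated in the patch, so treat the helper below as an illustrative check rather than project code.

package example

import (
	"math"
	"time"
)

// emaCoefficient returns ln(2) / halfLife in seconds, the relationship the
// configured values appear to follow (assumed, not taken from the patch).
func emaCoefficient(halfLife time.Duration) float64 {
	return math.Ln2 / halfLife.Seconds()
}

// emaCoefficient(6*time.Hour)   ≈ 0.00003209   -> CfgEmaCoefficient1
// emaCoefficient(2*time.Minute) ≈ 0.0057762265 -> CfgEmaCoefficient2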
diff --git a/tools/integration-tests/tester/framework/parameters.go b/tools/integration-tests/tester/framework/parameters.go index 4378e948fc..9dfe2df978 100644 --- a/tools/integration-tests/tester/framework/parameters.go +++ b/tools/integration-tests/tester/framework/parameters.go @@ -24,6 +24,9 @@ const ( syncBeaconSeed = "Dw6dKWvQGbcijpib6A8t1vSiuDU1XWsnT71xhLSzXUGc" syncBeaconPublicKey = "6wuo4zNP4MXzojmj2EXGsPEHPkWJNnbKZ9e17ufdTmp" + + // GenesisTokenAmount is the amount of tokens in the genesis output. + GenesisTokenAmount = 1000000000000000 ) // Parameters to override before calling any peer creation function. diff --git a/tools/integration-tests/tester/tests/faucet/faucetmoveallfunds_test.go b/tools/integration-tests/tester/tests/faucet/faucetmoveallfunds_test.go index 7a63a92311..141d6a6e30 100644 --- a/tools/integration-tests/tester/tests/faucet/faucetmoveallfunds_test.go +++ b/tools/integration-tests/tester/tests/faucet/faucetmoveallfunds_test.go @@ -45,7 +45,7 @@ func TestPrepareFaucet(t *testing.T) { totalSplit += framework.ParaFaucetTokensPerRequest } balance := genesisBalance - totalSplit - faucetAddr := faucet.Seed.Address(i).Address().Base58() + faucetAddr := faucet.Seed.Address(0).Address().Base58() outputs, err := faucet.GetUnspentOutputs([]string{faucetAddr}) require.NoError(t, err) assert.Equal(t, balance, outputs.UnspentOutputs[0].OutputIDs[0].Balances[0].Value) @@ -75,23 +75,26 @@ func TestPrepareFaucet(t *testing.T) { assert.Equal(t, framework.ParaFaucetTokensPerRequest, lastPreparedOutput.UnspentOutputs[0].OutputIDs[0].Balances[0].Value) // check balance is untouched - balanceOutputAddress := faucet.Seed.Address(11).Address().Base58() + balanceOutputAddress := faucet.Seed.Address(0).Address().Base58() balanceOutput, err := faucet.GetUnspentOutputs([]string{balanceOutputAddress}) require.NoError(t, err) assert.Equal(t, genesisBalance-(10*framework.ParaFaucetTokensPerRequest), balanceOutput.UnspentOutputs[0].OutputIDs[0].Balances[0].Value) - // issue 1 more request to split the balance at [11] + // issue 2 more request to split the remainder balance. addr := peer.Seed.Address(10).Address() tests.SendFaucetRequest(t, peer, addr) + addr = peer.Seed.Address(11).Address() + tests.SendFaucetRequest(t, peer, addr) time.Sleep(2 * time.Second) - // check split of balance [11] to [12...21] - _addr := faucet.Seed.Address(11).Address().Base58() + // check split of balance [0] to [11...20] + _addr := faucet.Seed.Address(0).Address().Base58() outputs, err = faucet.GetUnspentOutputs([]string{_addr}) require.NoError(t, err) - assert.Equal(t, 0, len(outputs.UnspentOutputs[0].OutputIDs)) //output is spent. + assert.Equal(t, 1, len(outputs.UnspentOutputs[0].OutputIDs)) // 1 output is unspent. 
+ assert.Equal(t, genesisBalance-20*framework.ParaFaucetTokensPerRequest, outputs.UnspentOutputs[0].OutputIDs[0].Balances[0].Value) - for i := 12; i < 22; i++ { + for i := 11; i < 21; i++ { _addr := faucet.Seed.Address(uint64(i)).Address().Base58() outputs, err = faucet.GetUnspentOutputs([]string{_addr}) require.NoError(t, err) diff --git a/tools/integration-tests/tester/tests/mana/mana_test.go b/tools/integration-tests/tester/tests/mana/mana_test.go index 44c5aeed54..8c243467d9 100644 --- a/tools/integration-tests/tester/tests/mana/mana_test.go +++ b/tools/integration-tests/tester/tests/mana/mana_test.go @@ -5,6 +5,7 @@ import ( "testing" "time" + "github.com/iotaledger/hive.go/identity" "github.com/mr-tron/base58" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -28,7 +29,8 @@ func TestManaPersistence(t *testing.T) { require.NoError(t, err) manaBefore := info.Mana require.Greater(t, manaBefore.Access, 0.0) - require.Greater(t, manaBefore.Consensus, 0.0) + // cons mana is pledged to emptyNodeID + require.Equal(t, manaBefore.Consensus, 0.0) // stop all nodes. Expects mana to be saved successfully for _, peer := range n.Peers() { @@ -49,7 +51,7 @@ func TestManaPersistence(t *testing.T) { require.NoError(t, err) manaAfter := info.Mana require.Greater(t, manaAfter.Access, 0.0) - require.Greater(t, manaAfter.Consensus, 0.0) + require.Equal(t, manaAfter.Consensus, 0.0) } func TestPledgeFilter(t *testing.T) { @@ -171,17 +173,27 @@ func TestApis(t *testing.T) { require.NoError(t, err) defer tests.ShutdownNetwork(t, n) + emptyNodeID := identity.ID{} + peers := n.Peers() for _, p := range peers { fmt.Printf("peer id: %s, short id: %s\n", base58.Encode(p.ID().Bytes()), p.ID().String()) } // Test /mana - resp, err := peers[0].GoShimmerAPI.GetManaFullNodeID(base58.Encode(peers[0].ID().Bytes())) + // consensus mana was pledged to empty nodeID by faucet + resp, err := peers[0].GoShimmerAPI.GetManaFullNodeID(base58.Encode(emptyNodeID.Bytes())) + require.NoError(t, err) + assert.Equal(t, base58.Encode(emptyNodeID.Bytes()), resp.NodeID) + assert.Equal(t, resp.Access, 0.0) + assert.Greater(t, resp.Consensus, 0.0) + + // access mana was pledged to itself by the faucet + resp, err = peers[0].GoShimmerAPI.GetManaFullNodeID(base58.Encode(peers[0].ID().Bytes())) require.NoError(t, err) assert.Equal(t, base58.Encode(peers[0].ID().Bytes()), resp.NodeID) assert.Greater(t, resp.Access, 0.0) - assert.Greater(t, resp.Consensus, 0.0) + assert.Equal(t, resp.Consensus, 0.0) // Test /mana/all resp2, err := peers[0].GoShimmerAPI.GetAllMana() @@ -196,7 +208,7 @@ func TestApis(t *testing.T) { // send funds to node 2 _, err = peers[2].SendFaucetRequest(peers[2].Seed.Address(0).Address().Base58()) require.NoError(t, err) - time.Sleep(12 * time.Second) + time.Sleep(20 * time.Second) require.NoError(t, err) allManaResp, err := peers[0].GoShimmerAPI.GetAllMana() @@ -214,7 +226,12 @@ func TestApis(t *testing.T) { require.Equal(t, 3, len(resp4.Nodes)) for i := 0; i < 3; i++ { assert.Equal(t, base58.Encode(peers[i].ID().Bytes()), resp3.Nodes[i].NodeID) - assert.Equal(t, base58.Encode(peers[i].ID().Bytes()), resp4.Nodes[i].NodeID) + // faucet pledged its cons mana to emptyNodeID... 
+ if i == 0 { + assert.Equal(t, base58.Encode(emptyNodeID.Bytes()), resp4.Nodes[i].NodeID) + } else { + assert.Equal(t, base58.Encode(peers[i].ID().Bytes()), resp4.Nodes[i].NodeID) + } } // Test /mana/percentile @@ -222,6 +239,10 @@ func TestApis(t *testing.T) { require.NoError(t, err) assert.Equal(t, base58.Encode(peers[0].ID().Bytes()), resp5.NodeID) assert.InDelta(t, 66.66, resp5.Access, 0.01) + + resp5, err = peers[0].GoShimmerAPI.GetManaPercentile(base58.Encode(emptyNodeID.Bytes())) + require.NoError(t, err) + assert.Equal(t, base58.Encode(emptyNodeID.Bytes()), resp5.NodeID) assert.InDelta(t, 66.66, resp5.Consensus, 0.01) // Test /mana/online/access @@ -230,16 +251,19 @@ func TestApis(t *testing.T) { resp7, err := peers[0].GoShimmerAPI.GetOnlineConsensusMana() require.NoError(t, err) require.Equal(t, 3, len(resp6.Online)) - fmt.Println("online nodes mana") + // emptyNodeID cannot be online! + require.Equal(t, 2, len(resp7.Online)) + fmt.Println("online nodes access mana") for _, r := range resp6.Online { fmt.Println("node - ", r.ShortID, " -- mana: ", r.Mana) } assert.Equal(t, base58.Encode(peers[0].ID().Bytes()), resp6.Online[0].ID) assert.Equal(t, base58.Encode(peers[1].ID().Bytes()), resp6.Online[1].ID) assert.Equal(t, base58.Encode(peers[2].ID().Bytes()), resp6.Online[2].ID) - assert.Equal(t, base58.Encode(peers[0].ID().Bytes()), resp7.Online[0].ID) - assert.Equal(t, base58.Encode(peers[1].ID().Bytes()), resp7.Online[1].ID) - assert.Equal(t, base58.Encode(peers[2].ID().Bytes()), resp7.Online[2].ID) + + // emptyNodeID cannot be online! + assert.Equal(t, base58.Encode(peers[1].ID().Bytes()), resp7.Online[0].ID) + assert.Equal(t, base58.Encode(peers[2].ID().Bytes()), resp7.Online[1].ID) // Test /mana/pending unspentOutputs, err := peers[1].GetUnspentOutputs([]string{peers[1].Seed.Address(0).Address().Base58()}) @@ -264,8 +288,12 @@ func TestApis(t *testing.T) { for _, c := range resp9.Consensus { m[c.ShortNodeID] = c.Mana } + + mana, ok := m[emptyNodeID.String()] + assert.True(t, ok) + assert.Greater(t, mana, 0.0) // node 3 shouldn't have mana from way back at `timestampPast` - for _, p := range peers[:3] { + for _, p := range peers[1:3] { mana, ok := m[p.ID().String()] assert.True(t, ok) assert.Greater(t, mana, 0.0) @@ -274,20 +302,22 @@ func TestApis(t *testing.T) { // Test /mana/consensus/logs resp10, err := peers[0].GoShimmerAPI.GetConsensusEventLogs([]string{}) require.NoError(t, err) - fmt.Println("consensus mana evnet logs") + fmt.Println("consensus mana event logs") for n, l := range resp10.Logs { fmt.Println("node: ", n, " pledge logs: ", len(l.Pledge), " revoke logs: ", len(l.Revoke)) } - for i := 0; i < len(peers); i++ { + + // emptyNodeID was pledged once (splitting genesis) and revoked 3 (3 requests) + logs, ok := resp10.Logs[base58.Encode(emptyNodeID.Bytes())] + require.True(t, ok) + assert.Equal(t, 1, len(logs.Pledge)) + assert.Equal(t, 3, len(logs.Revoke)) + + for i := 1; i < len(peers); i++ { logs, ok := resp10.Logs[base58.Encode(peers[i].ID().Bytes())] require.True(t, ok) - if i == 0 { - assert.Equal(t, 1, len(logs.Pledge)) - assert.Equal(t, 3, len(logs.Revoke)) - } else { - assert.Equal(t, 1, len(logs.Pledge)) - assert.Equal(t, 0, len(logs.Revoke)) - } + assert.Equal(t, 1, len(logs.Pledge)) + assert.Equal(t, 0, len(logs.Revoke)) } // Test /mana/consensus/pastmetadata diff --git a/tools/integration-tests/tester/tests/testutil.go b/tools/integration-tests/tester/tests/testutil.go index b348edc81d..253c21ce64 100644 --- 
a/tools/integration-tests/tester/tests/testutil.go +++ b/tools/integration-tests/tester/tests/testutil.go @@ -164,8 +164,10 @@ func SendTransactionFromFaucet(t *testing.T, peers []*framework.Peer, sentValue } faucetPeer := peers[0] + // faucet keeps remaining amount on address 0 + addrBalance[faucetPeer.Seed.Address(0).Address().Base58()][ledgerstate.ColorIOTA] = int64(framework.GenesisTokenAmount - framework.ParaFaucetPreparedOutputsCount*int(framework.ParaFaucetTokensPerRequest)) var i uint64 - // faucet has split genesis output into n bits of 1337 each and remainder on n + 1 + // faucet has split genesis output into n bits of 1337 each and remainder on 0 for i = 1; i < uint64(len(peers)); i++ { faucetAddrStr := faucetPeer.Seed.Address(i).Address().Base58() addrBalance[faucetAddrStr] = make(map[ledgerstate.Color]int64)
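The new bookkeeping line in testutil.go encodes the faucet's post-split state: the genesis amount stays on address 0 minus whatever was carved into prepared funding outputs. As a quick sanity check, assuming the framework values implied by the test comments (10 prepared outputs of 1337 tokens each; the real numbers come from ParaFaucetPreparedOutputsCount and ParaFaucetTokensPerRequest):

package example

// Illustrative arithmetic only; the constants are assumptions based on the test comments.
const (
	genesisTokenAmount   int64 = 1000000000000000 // framework.GenesisTokenAmount
	tokensPerRequest     int64 = 1337             // assumed ParaFaucetTokensPerRequest
	preparedOutputsCount int64 = 10               // assumed ParaFaucetPreparedOutputsCount
)

// remainderOnAddressZero is what the test expects to stay on faucet address 0:
// 1_000_000_000_000_000 - 10*1337 = 999_999_999_986_630.
func remainderOnAddressZero() int64 {
	return genesisTokenAmount - preparedOutputsCount*tokensPerRequest
}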