Merge pull request #962 from iotaledger/develop
Merge develop to v0.3.6
capossele authored Feb 12, 2021
2 parents a4518a0 + 4c837de commit d086365
Showing 172 changed files with 20,071 additions and 2,186 deletions.
3 changes: 3 additions & 0 deletions .gitattributes
@@ -0,0 +1,3 @@
# Auto detect text files and perform LF normalization.

*.go text eol=lf
16 changes: 8 additions & 8 deletions .github/workflows/integration-tests.yml
@@ -19,7 +19,7 @@ jobs:

- name: Pull additional Docker images
run: |
docker pull angelocapossele/drand:1.1.3
docker pull angelocapossele/drand:v1.1.4
docker pull gaiaadm/pumba:0.7.2
docker pull gaiadocker/iproute2:latest
@@ -54,7 +54,7 @@ jobs:

- name: Pull additional Docker images
run: |
docker pull angelocapossele/drand:1.1.3
docker pull angelocapossele/drand:v1.1.4
docker pull gaiaadm/pumba:0.7.2
docker pull gaiadocker/iproute2:latest
@@ -88,7 +88,7 @@ jobs:

- name: Pull additional Docker images
run: |
docker pull angelocapossele/drand:1.1.3
docker pull angelocapossele/drand:v1.1.4
docker pull gaiaadm/pumba:0.7.2
docker pull gaiadocker/iproute2:latest
@@ -123,7 +123,7 @@ jobs:

- name: Pull additional Docker images
run: |
docker pull angelocapossele/drand:1.1.3
docker pull angelocapossele/drand:v1.1.4
docker pull gaiaadm/pumba:0.7.2
docker pull gaiadocker/iproute2:latest
@@ -159,7 +159,7 @@ jobs:

- name: Pull additional Docker images
run: |
docker pull angelocapossele/drand:1.1.3
docker pull angelocapossele/drand:v1.1.4
docker pull gaiaadm/pumba:0.7.2
docker pull gaiadocker/iproute2:latest
@@ -195,7 +195,7 @@ jobs:

- name: Pull additional Docker images
run: |
docker pull angelocapossele/drand:1.1.3
docker pull angelocapossele/drand:v1.1.4
docker pull gaiaadm/pumba:0.7.2
docker pull gaiadocker/iproute2:latest
@@ -229,7 +229,7 @@ jobs:

- name: Pull additional Docker images
run: |
docker pull angelocapossele/drand:1.1.3
docker pull angelocapossele/drand:v1.1.4
docker pull gaiaadm/pumba:0.7.2
docker pull gaiadocker/iproute2:latest
@@ -263,7 +263,7 @@ jobs:

- name: Pull additional Docker images
run: |
docker pull angelocapossele/drand:1.1.3
docker pull angelocapossele/drand:v1.1.4
docker pull gaiaadm/pumba:0.7.2
docker pull gaiadocker/iproute2:latest
19 changes: 19 additions & 0 deletions CHANGELOG.md
@@ -1,3 +1,21 @@
# v0.3.6 - 2021-02-12
* Finalize Payload layout
* Update dRNG with finalized payload layout
* Add simple scheduler
* Add message approval analysis API
* Add core logic for timestamp voting (still disabled)
* Refactor message parser
* Refactor solidifier
* Refactor Tangle events
* Update entry node URL
* Update gossip to not gossip requested messages
* Introduce invalid message flag
* Merge the new data flow
* Fix visualizer bug
* Update hive.go
* Update JS dependencies
* **Breaking**: bumps network and database versions

# v0.3.5 - 2021-01-13
* Fix consensus statement bug
* Fix deadlock in RandomMap
@@ -10,6 +28,7 @@
* Update hive.go
* Update JS dependencies
* **Breaking**: bumps network and database versions

# v0.3.4 - 2020-12-11
* Revert Pebble to Badger.
* **Breaking**: bumps network and database versions
2 changes: 1 addition & 1 deletion config.default.json
@@ -14,7 +14,7 @@
"autopeering": {
"entryNodes": [
"2PV5487xMw5rasGBXXWeqSi4hLz7r19YBt8Y1TGAsQbj@ressims.iota.cafe:15626",
"5EDH4uY78EA6wrBkHHAVBWBMDt7EcksRq6pjzipoW15B@entrynode.alphanet.einfachiota.de:14656"
"5EDH4uY78EA6wrBkHHAVBWBMDt7EcksRq6pjzipoW15B@entryshimmer.tanglebay.com:14646"
],
"port": 14626
},
74 changes: 46 additions & 28 deletions dapps/valuetransfers/dapp.go
@@ -21,6 +21,7 @@ import (
"github.com/iotaledger/hive.go/logger"
"github.com/iotaledger/hive.go/node"
flag "github.com/spf13/pflag"
"golang.org/x/xerrors"
)

const (
@@ -148,11 +149,11 @@ func configure(_ *node.Plugin) {
}))

// register SignatureFilter in Parser
messagelayer.MessageParser().AddMessageFilter(valuetangle.NewSignatureFilter())
messagelayer.Tangle().Parser.AddMessageFilter(valuetangle.NewSignatureFilter())

// subscribe to message-layer
receiveMessageClosure = events.NewClosure(onReceiveMessageFromMessageLayer)
messagelayer.Tangle().Events.MessageSolid.Attach(receiveMessageClosure)
messagelayer.Tangle().Scheduler.Events.MessageScheduled.Attach(receiveMessageClosure)
}

func run(*node.Plugin) {
@@ -161,41 +162,33 @@ func run(*node.Plugin) {
// TODO: make this better
// stop listening to stuff from the message tangle. By the time we are here, gossip and autopeering have already
// been shutdown, so no new incoming messages should appear.
messagelayer.Tangle().Events.MessageSolid.Detach(receiveMessageClosure)
messagelayer.Tangle().Solidifier.Events.MessageSolid.Detach(receiveMessageClosure)
// wait one network delay to be sure that all scheduled setPreferred are triggered in fcob. Otherwise, we would
// try to access an already shutdown objectstorage in fcob.
cfgAvgNetworkDelay := config.Node().Int(CfgValueLayerFCOBAverageNetworkDelay)
time.Sleep(time.Duration(cfgAvgNetworkDelay) * time.Second)
_tangle.Shutdown()
}, shutdown.PriorityTangle); err != nil {
}, shutdown.PriorityValueTangle); err != nil {
log.Panicf("Failed to start as daemon: %s", err)
}
}

func onReceiveMessageFromMessageLayer(cachedMessageEvent *tangle.CachedMessageEvent) {
defer cachedMessageEvent.Message.Release()
defer cachedMessageEvent.MessageMetadata.Release()

solidMessage := cachedMessageEvent.Message.Unwrap()
if solidMessage == nil {
log.Debug("failed to unpack solid message from message layer")

return
}

messagePayload := solidMessage.Payload()
if messagePayload.Type() != valuepayload.Type {
return
}
func onReceiveMessageFromMessageLayer(messageID tangle.MessageID) {
messagelayer.Tangle().Storage.Message(messageID).Consume(func(solidMessage *tangle.Message) {
messagePayload := solidMessage.Payload()
if messagePayload.Type() != valuepayload.Type {
return
}

valuePayload, ok := messagePayload.(*valuepayload.Payload)
if !ok {
log.Debug("could not cast payload to value payload")
valuePayload, ok := messagePayload.(*valuepayload.Payload)
if !ok {
log.Debug("could not cast payload to value payload")

return
}
return
}

_tangle.AttachPayload(valuePayload)
_tangle.AttachPayload(valuePayload)
})
}

// TipManager returns the TipManager singleton.
@@ -215,7 +208,8 @@ func ValueObjectFactory() *valuetangle.ValueObjectFactory {
}

// AwaitTransactionToBeBooked awaits maxAwait for the given transaction to get booked.
func AwaitTransactionToBeBooked(txID transaction.ID, maxAwait time.Duration) error {
func AwaitTransactionToBeBooked(f func() (*tangle.Message, error), txID transaction.ID, maxAwait time.Duration) (*tangle.Message, error) {
// first subscribe to the transaction booked event
booked := make(chan struct{}, 1)
// exit is used to let the caller exit if for whatever
// reason the same transaction gets booked multiple times
@@ -234,10 +228,34 @@
})
Tangle().Events.TransactionBooked.Attach(closure)
defer Tangle().Events.TransactionBooked.Detach(closure)

// then issue the message with the tx

// channel to receive the result of issuance
issueResult := make(chan struct {
msg *tangle.Message
err error
}, 1)

go func() {
msg, err := f()
issueResult <- struct {
msg *tangle.Message
err error
}{msg: msg, err: err}
}()

// wait on issuance
result := <-issueResult

if result.err != nil || result.msg == nil {
return nil, xerrors.Errorf("Failed to issue transaction %s: %w", txID.String(), result.err)
}

select {
case <-time.After(maxAwait):
return ErrTransactionWasNotBookedInTime
return nil, ErrTransactionWasNotBookedInTime
case <-booked:
return nil
return result.msg, nil
}
}
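
The reworked AwaitTransactionToBeBooked takes the issuing step as a closure, so the booked-event listener is already attached when the message carrying the transaction is issued, and it now returns the issued message. The following is an editor's sketch of the new call shape, not part of this commit; the issuePayload parameter and the 30-second timeout are assumptions standing in for whatever the caller normally uses to issue messages.

package example // illustrative sketch only

import (
	"time"

	"github.com/iotaledger/goshimmer/dapps/valuetransfers"
	valuepayload "github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/payload"
	"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/transaction"
	"github.com/iotaledger/goshimmer/packages/tangle"
)

// sendAndAwaitBooking issues a value payload through the supplied issue
// function and blocks until the contained transaction is booked or the
// timeout elapses. The issue function is handed straight to
// AwaitTransactionToBeBooked, so issuance happens after the listener is set up.
func sendAndAwaitBooking(
	issuePayload func(*valuepayload.Payload) (*tangle.Message, error),
	p *valuepayload.Payload,
	txID transaction.ID,
) (*tangle.Message, error) {
	return valuetransfers.AwaitTransactionToBeBooked(func() (*tangle.Message, error) {
		return issuePayload(p)
	}, txID, 30*time.Second)
}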
11 changes: 6 additions & 5 deletions dapps/valuetransfers/packages/consensus/fcob.go
@@ -8,6 +8,7 @@ import (
"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/tangle"
"github.com/iotaledger/goshimmer/dapps/valuetransfers/packages/transaction"
"github.com/iotaledger/goshimmer/packages/vote"
"github.com/iotaledger/goshimmer/packages/vote/opinion"
)

// FCOB defines the "Fast Consensus of Barcelona" rules that are used to form the initial opinions of nodes. It uses a
@@ -50,7 +51,7 @@ func (fcob *FCOB) ProcessVoteResult(ev *vote.OpinionEvent) {
return
}

if _, err := fcob.tangle.SetTransactionPreferred(transactionID, ev.Opinion == vote.Like); err != nil {
if _, err := fcob.tangle.SetTransactionPreferred(transactionID, ev.Opinion == opinion.Like); err != nil {
fcob.Events.Error.Trigger(err)
}

@@ -72,7 +73,7 @@ func (fcob *FCOB) onTransactionBooked(cachedTransactionBookEvent *tangle.CachedT
return
}

fcob.Events.Vote.Trigger(transactionMetadata.BranchID().String(), vote.Dislike)
fcob.Events.Vote.Trigger(transactionMetadata.BranchID().String(), opinion.Dislike)

return
}
@@ -152,9 +153,9 @@ func (fcob *FCOB) onFork(forkEvent *tangle.ForkEvent) {

switch transactionMetadata.Preferred() {
case true:
fcob.Events.Vote.Trigger(transactionMetadata.ID().String(), vote.Like)
fcob.Events.Vote.Trigger(transactionMetadata.ID().String(), opinion.Like)
case false:
fcob.Events.Vote.Trigger(transactionMetadata.ID().String(), vote.Dislike)
fcob.Events.Vote.Trigger(transactionMetadata.ID().String(), opinion.Dislike)
}
}

Expand All @@ -168,5 +169,5 @@ type FCOBEvents struct {
}

func voteEvent(handler interface{}, params ...interface{}) {
handler.(func(id string, initOpn vote.Opinion))(params[0].(string), params[1].(vote.Opinion))
handler.(func(id string, initOpn opinion.Opinion))(params[0].(string), params[1].(opinion.Opinion))
}
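
With initial opinions moving from the vote package to the dedicated opinion package, any handler attached to FCOBEvents.Vote has to accept opinion.Opinion, matching the signature asserted in voteEvent above. Below is an editor's sketch of such a handler, not part of this commit; the logging body is purely illustrative.

package example // illustrative sketch only

import (
	"fmt"

	"github.com/iotaledger/goshimmer/packages/vote/opinion"
	"github.com/iotaledger/hive.go/events"
)

// newVoteHandler builds a closure with the signature FCOBEvents.Vote expects
// after the switch: func(id string, initOpn opinion.Opinion).
func newVoteHandler() *events.Closure {
	return events.NewClosure(func(id string, initOpn opinion.Opinion) {
		// initOpn now carries opinion.Like / opinion.Dislike instead of the
		// old vote.Like / vote.Dislike constants.
		fmt.Printf("initial opinion for %s: %v\n", id, initOpn)
	})
}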
4 changes: 2 additions & 2 deletions dapps/valuetransfers/packages/payload/payload.go
@@ -223,8 +223,8 @@ func (p *Payload) ObjectStorageValue() (bytes []byte) {

// marshal fields
payloadLength := IDLength + IDLength + len(transferBytes)
marshalUtil := marshalutil.New(marshalutil.Uint32Size + marshalutil.Uint32Size + payloadLength)
marshalUtil.WriteUint32(uint32(payloadLength))
marshalUtil := marshalutil.New(marshalutil.Uint32Size + payload.TypeLength + payloadLength)
marshalUtil.WriteUint32(payload.TypeLength + uint32(payloadLength))
marshalUtil.WriteBytes(Type.Bytes())
marshalUtil.WriteBytes(p.parent1PayloadID.Bytes())
marshalUtil.WriteBytes(p.parent2PayloadID.Bytes())
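The marshaling fix above makes the length prefix of a value payload cover the type field as well, and sizes the buffer with payload.TypeLength instead of a second uint32. A short editor's sketch of the resulting size arithmetic follows, not part of this commit; the concrete widths are passed in as parameters because their real values live in the payload packages.

package example // illustrative sketch only

// marshaledValuePayloadSize sketches the on-wire size of a value payload
// after this change: a uint32 length prefix, followed by the payload type,
// both parent payload IDs and the transfer bytes. The declared length now
// includes the type field, which the previous code left out.
func marshaledValuePayloadSize(uint32Size, typeLength, idLength, transferLen int) (declaredLength, totalSize int) {
	declaredLength = typeLength + idLength + idLength + transferLen // value written into the length prefix
	totalSize = uint32Size + declaredLength                         // the prefix itself is not counted in declaredLength
	return declaredLength, totalSize
}
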
2 changes: 1 addition & 1 deletion dapps/valuetransfers/packages/tangle/signature_filter.go
@@ -9,7 +9,7 @@ import (
"github.com/iotaledger/hive.go/autopeering/peer"
)

// SignatureFilter represents a filter for the MessageParser that filters out transactions with an invalid signature.
// SignatureFilter represents a filter for the Parser that filters out transactions with an invalid signature.
type SignatureFilter struct {
onAcceptCallback func(message *tangle.Message, peer *peer.Peer)
onRejectCallback func(message *tangle.Message, err error, peer *peer.Peer)
24 changes: 13 additions & 11 deletions dapps/valuetransfers/packages/tangle/signature_filter_test.go
@@ -13,19 +13,19 @@ import (
"github.com/iotaledger/goshimmer/packages/tangle/payload"
"github.com/iotaledger/hive.go/autopeering/peer"
"github.com/iotaledger/hive.go/events"
"github.com/iotaledger/hive.go/identity"
"github.com/iotaledger/hive.go/kvstore/mapdb"
"github.com/iotaledger/hive.go/marshalutil"
"github.com/stretchr/testify/require"
)

func TestSignatureFilter(t *testing.T) {
testTangle := tangle.New()
defer testTangle.Shutdown()

// create parser
messageParser := newSyncMessageParser(NewSignatureFilter())

// create helper instances
seed := newSeed()
messageFactory := tangle.NewMessageFactory(mapdb.NewMapDB(), []byte("sequenceKey"), identity.GenerateLocalIdentity(), tangle.NewMessageTipSelector())

// 1. test value message without signatures
{
@@ -42,7 +42,7 @@
)

// parse message bytes
msg, err := messageFactory.IssuePayload(valuePayload.New(valuePayload.GenesisID, valuePayload.GenesisID, tx))
msg, err := testTangle.MessageFactory.IssuePayload(valuePayload.New(valuePayload.GenesisID, valuePayload.GenesisID, tx))
require.NoError(t, err)
accepted, _, _, err := messageParser.Parse(msg.Bytes(), &peer.Peer{})

@@ -68,7 +68,7 @@
tx.Sign(signaturescheme.ED25519(*seed.KeyPair(0)))

// parse message bytes
msg, err := messageFactory.IssuePayload(valuePayload.New(valuePayload.GenesisID, valuePayload.GenesisID, tx))
msg, err := testTangle.MessageFactory.IssuePayload(valuePayload.New(valuePayload.GenesisID, valuePayload.GenesisID, tx))
require.NoError(t, err)

accepted, _, _, err := messageParser.Parse(msg.Bytes(), &peer.Peer{})
@@ -92,7 +92,7 @@
require.NoError(t, err)

// parse message bytes
msg, err := messageFactory.IssuePayload(dataPayload)
msg, err := testTangle.MessageFactory.IssuePayload(dataPayload)
require.NoError(t, err)
accepted, _, _, err := messageParser.Parse(msg.Bytes(), &peer.Peer{})

@@ -103,15 +103,17 @@
}
}

// newSyncMessageParser creates a wrapped MessageParser that works synchronously by using a WaitGroup to wait for the
// newSyncMessageParser creates a wrapped Parser that works synchronously by using a WaitGroup to wait for the
// parse result.
func newSyncMessageParser(messageFilters ...tangle.MessageFilter) (tester *syncMessageParser) {
// initialize MessageParser
messageParser := tangle.NewMessageParser()
// initialize Parser
messageParser := tangle.NewParser()
for _, messageFilter := range messageFilters {
messageParser.AddMessageFilter(messageFilter)
}

messageParser.Setup()

// create wrapped result
tester = &syncMessageParser{
messageParser: messageParser,
@@ -152,9 +154,9 @@ func newSyncMessageParser(messageFilters ...tangle.MessageFilter) (tester *syncM
return
}

// syncMessageParser is a wrapper for the MessageParser that allows to parse Messages synchronously.
// syncMessageParser is a wrapper for the Parser that allows to parse Messages synchronously.
type syncMessageParser struct {
messageParser *tangle.MessageParser
messageParser *tangle.Parser
result *messageParserResult
wg sync.WaitGroup
}
2 changes: 1 addition & 1 deletion go.mod
@@ -12,7 +12,7 @@ require (
github.com/go-ole/go-ole v1.2.4 // indirect
github.com/golang/protobuf v1.4.3
github.com/gorilla/websocket v1.4.2
github.com/iotaledger/hive.go v0.0.0-20210107100912-23832b944f60
github.com/iotaledger/hive.go v0.0.0-20210209113323-87572778f0d9
github.com/labstack/echo v3.3.10+incompatible
github.com/labstack/gommon v0.3.0
github.com/magiconair/properties v1.8.1