refactor: decouple API from go-waku (#1239)
richard-ramos authored Oct 15, 2024
1 parent 76275f6 commit 37f936d
Showing 8 changed files with 212 additions and 55 deletions.
17 changes: 17 additions & 0 deletions waku/v2/api/common/result.go
@@ -0,0 +1,17 @@
package common

import (
"context"

"github.com/libp2p/go-libp2p/core/peer"
"github.com/waku-org/go-waku/waku/v2/protocol/store"
"github.com/waku-org/go-waku/waku/v2/protocol/store/pb"
)

type StoreRequestResult interface {
Cursor() []byte
IsComplete() bool
PeerID() peer.ID
Next(ctx context.Context, opts ...store.RequestOption) error // TODO: see how to decouple store.RequestOption
Messages() []*pb.WakuMessageKeyValue
}
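
The StoreRequestResult interface is the seam that lets API-level code run against something other than go-waku's own store client. As a hedged illustration (not part of this commit), a minimal single-page implementation could look like the sketch below; the staticResult type, the package name, and its single-page behaviour are assumptions made for the example.

package example

import (
	"context"

	"github.com/libp2p/go-libp2p/core/peer"
	"github.com/waku-org/go-waku/waku/v2/api/common"
	"github.com/waku-org/go-waku/waku/v2/protocol/store"
	"github.com/waku-org/go-waku/waku/v2/protocol/store/pb"
)

// staticResult is a hypothetical in-memory StoreRequestResult, handy for tests
// or for backends that do not use go-waku's store client.
type staticResult struct {
	peerID   peer.ID
	messages []*pb.WakuMessageKeyValue
}

func (r *staticResult) Cursor() []byte                      { return nil }
func (r *staticResult) IsComplete() bool                    { return true }
func (r *staticResult) PeerID() peer.ID                     { return r.peerID }
func (r *staticResult) Messages() []*pb.WakuMessageKeyValue { return r.messages }

// Next is effectively a no-op for a single-page result; it just clears the page.
func (r *staticResult) Next(ctx context.Context, opts ...store.RequestOption) error {
	r.messages = nil
	return nil
}

// Compile-time check that staticResult satisfies the interface.
var _ common.StoreRequestResult = (*staticResult)(nil)
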
33 changes: 33 additions & 0 deletions waku/v2/api/missing/default_requestor.go
@@ -0,0 +1,33 @@
package missing

import (
"context"

"github.com/libp2p/go-libp2p/core/peer"
"github.com/waku-org/go-waku/waku/v2/api/common"
"github.com/waku-org/go-waku/waku/v2/protocol"
"github.com/waku-org/go-waku/waku/v2/protocol/pb"
"github.com/waku-org/go-waku/waku/v2/protocol/store"
)

func NewDefaultStorenodeRequestor(store *store.WakuStore) StorenodeRequestor {
return &defaultStorenodeRequestor{
store: store,
}
}

type defaultStorenodeRequestor struct {
store *store.WakuStore
}

func (d *defaultStorenodeRequestor) GetMessagesByHash(ctx context.Context, peerID peer.ID, pageSize uint64, messageHashes []pb.MessageHash) (common.StoreRequestResult, error) {
return d.store.QueryByHash(ctx, messageHashes, store.WithPeer(peerID), store.WithPaging(false, pageSize))
}

func (d *defaultStorenodeRequestor) QueryWithCriteria(ctx context.Context, peerID peer.ID, pageSize uint64, pubsubTopic string, contentTopics []string, from *int64, to *int64) (common.StoreRequestResult, error) {
return d.store.Query(ctx, store.FilterCriteria{
ContentFilter: protocol.NewContentFilter(pubsubTopic, contentTopics...),
TimeStart: from,
TimeEnd: to,
}, store.WithPeer(peerID), store.WithPaging(false, pageSize), store.IncludeData(false))
}
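
A short usage sketch, not part of the diff: assuming wakuStore is a *store.WakuStore taken from a running go-waku node and storenode is a known peer.ID, the default requestor can be queried as below; the topics, page size, and one-hour time range are placeholder values.

package example

import (
	"context"
	"fmt"
	"time"

	"github.com/libp2p/go-libp2p/core/peer"
	"github.com/waku-org/go-waku/waku/v2/api/missing"
	"github.com/waku-org/go-waku/waku/v2/protocol/store"
)

// queryLastHour is a hedged sketch: wakuStore comes from an already-running
// node, storenode is a storenode peer, and the topics are example values.
func queryLastHour(ctx context.Context, wakuStore *store.WakuStore, storenode peer.ID) error {
	requestor := missing.NewDefaultStorenodeRequestor(wakuStore)

	from := time.Now().Add(-time.Hour).UnixNano()
	to := time.Now().UnixNano()

	result, err := requestor.QueryWithCriteria(
		ctx,
		storenode,
		100, // page size
		"/waku/2/rs/16/32",
		[]string{"/my-app/1/chat/proto"},
		&from,
		&to,
	)
	if err != nil {
		return err
	}

	// Only hashes are returned here because the default requestor passes
	// store.IncludeData(false); page through with result.Next(ctx) as needed.
	fmt.Println("messages in range:", len(result.Messages()))
	return nil
}
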
50 changes: 30 additions & 20 deletions waku/v2/api/missing/missing_messages.go
@@ -11,9 +11,9 @@ import (

"github.com/libp2p/go-libp2p/core/peer"
"github.com/waku-org/go-waku/logging"
"github.com/waku-org/go-waku/waku/v2/api/common"
"github.com/waku-org/go-waku/waku/v2/protocol"
"github.com/waku-org/go-waku/waku/v2/protocol/pb"
"github.com/waku-org/go-waku/waku/v2/protocol/store"
"github.com/waku-org/go-waku/waku/v2/timesource"
"github.com/waku-org/go-waku/waku/v2/utils"
"go.uber.org/zap"
@@ -22,6 +22,7 @@ import (

const maxContentTopicsPerRequest = 10
const maxMsgHashesPerRequest = 50
const messageFetchPageSize = 100

// MessageTracker should keep track of messages it has seen before and
// provide a way to determine whether a message exists or not. This
@@ -30,37 +31,42 @@ type MessageTracker interface {
MessageExists(pb.MessageHash) (bool, error)
}

type StorenodeRequestor interface {
GetMessagesByHash(ctx context.Context, peerID peer.ID, pageSize uint64, messageHashes []pb.MessageHash) (common.StoreRequestResult, error)
QueryWithCriteria(ctx context.Context, peerID peer.ID, pageSize uint64, pubsubTopic string, contentTopics []string, from *int64, to *int64) (common.StoreRequestResult, error)
}

// MissingMessageVerifier is used to periodically retrieve from store nodes any messages matching specific criteria that are missing locally
type MissingMessageVerifier struct {
ctx context.Context
params missingMessageVerifierParams

messageTracker MessageTracker
storenodeRequestor StorenodeRequestor
messageTracker MessageTracker

criteriaInterest map[string]criteriaInterest // Tracks message verification requests and when a pubsub topic was last verified for missing messages
criteriaInterestMu sync.RWMutex

C <-chan *protocol.Envelope

store *store.WakuStore
timesource timesource.Timesource
logger *zap.Logger
}

// NewMissingMessageVerifier creates an instance of a MissingMessageVerifier
func NewMissingMessageVerifier(store *store.WakuStore, messageTracker MessageTracker, timesource timesource.Timesource, logger *zap.Logger, options ...MissingMessageVerifierOption) *MissingMessageVerifier {
func NewMissingMessageVerifier(storenodeRequester StorenodeRequestor, messageTracker MessageTracker, timesource timesource.Timesource, logger *zap.Logger, options ...MissingMessageVerifierOption) *MissingMessageVerifier {
options = append(defaultMissingMessagesVerifierOptions, options...)
params := missingMessageVerifierParams{}
for _, opt := range options {
opt(&params)
}

return &MissingMessageVerifier{
store: store,
timesource: timesource,
messageTracker: messageTracker,
logger: logger.Named("missing-msg-verifier"),
params: params,
storenodeRequestor: storenodeRequester,
timesource: timesource,
messageTracker: messageTracker,
logger: logger.Named("missing-msg-verifier"),
params: params,
}
}

@@ -178,7 +184,7 @@ func (m *MissingMessageVerifier) fetchHistory(c chan<- *protocol.Envelope, inter
}
}

func (m *MissingMessageVerifier) storeQueryWithRetry(ctx context.Context, queryFunc func(ctx context.Context) (store.Result, error), logger *zap.Logger, logMsg string) (store.Result, error) {
func (m *MissingMessageVerifier) storeQueryWithRetry(ctx context.Context, queryFunc func(ctx context.Context) (common.StoreRequestResult, error), logger *zap.Logger, logMsg string) (common.StoreRequestResult, error) {
retry := true
count := 1
for retry && count <= m.params.maxAttemptsToRetrieveHistory {
@@ -212,12 +218,16 @@ func (m *MissingMessageVerifier) fetchMessagesBatch(c chan<- *protocol.Envelope,
logging.Epoch("to", now),
)

result, err := m.storeQueryWithRetry(interest.ctx, func(ctx context.Context) (store.Result, error) {
return m.store.Query(ctx, store.FilterCriteria{
ContentFilter: protocol.NewContentFilter(interest.contentFilter.PubsubTopic, contentTopics[batchFrom:batchTo]...),
TimeStart: proto.Int64(interest.lastChecked.Add(-m.params.delay).UnixNano()),
TimeEnd: proto.Int64(now.Add(-m.params.delay).UnixNano()),
}, store.WithPeer(interest.peerID), store.WithPaging(false, 100), store.IncludeData(false))
result, err := m.storeQueryWithRetry(interest.ctx, func(ctx context.Context) (common.StoreRequestResult, error) {
return m.storenodeRequestor.QueryWithCriteria(
ctx,
interest.peerID,
messageFetchPageSize,
interest.contentFilter.PubsubTopic,
contentTopics[batchFrom:batchTo],
proto.Int64(interest.lastChecked.Add(-m.params.delay).UnixNano()),
proto.Int64(now.Add(-m.params.delay).UnixNano()),
)
}, logger, "retrieving history to check for missing messages")
if err != nil {
if !errors.Is(err, context.Canceled) {
@@ -243,7 +253,7 @@ func (m *MissingMessageVerifier) fetchMessagesBatch(c chan<- *protocol.Envelope,
missingHashes = append(missingHashes, hash)
}

result, err = m.storeQueryWithRetry(interest.ctx, func(ctx context.Context) (store.Result, error) {
result, err = m.storeQueryWithRetry(interest.ctx, func(ctx context.Context) (common.StoreRequestResult, error) {
if err = result.Next(ctx); err != nil {
return nil, err
}
@@ -282,10 +292,10 @@ func (m *MissingMessageVerifier) fetchMessagesBatch(c chan<- *protocol.Envelope,
defer utils.LogOnPanic()
defer wg.Wait()

result, err := m.storeQueryWithRetry(interest.ctx, func(ctx context.Context) (store.Result, error) {
result, err := m.storeQueryWithRetry(interest.ctx, func(ctx context.Context) (common.StoreRequestResult, error) {
queryCtx, cancel := context.WithTimeout(ctx, m.params.storeQueryTimeout)
defer cancel()
return m.store.QueryByHash(queryCtx, messageHashes, store.WithPeer(interest.peerID), store.WithPaging(false, maxMsgHashesPerRequest))
return m.storenodeRequestor.GetMessagesByHash(queryCtx, interest.peerID, maxMsgHashesPerRequest, messageHashes)
}, logger, "retrieving missing messages")
if err != nil {
if !errors.Is(err, context.Canceled) {
@@ -303,7 +313,7 @@ func (m *MissingMessageVerifier) fetchMessagesBatch(c chan<- *protocol.Envelope,
}
}

result, err = m.storeQueryWithRetry(interest.ctx, func(ctx context.Context) (store.Result, error) {
result, err = m.storeQueryWithRetry(interest.ctx, func(ctx context.Context) (common.StoreRequestResult, error) {
if err = result.Next(ctx); err != nil {
return nil, err
}
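
For context, a hedged wiring sketch (not part of this commit) showing how the verifier can now be built from the default go-waku-backed requestor, or from any custom StorenodeRequestor. The inMemoryTracker type and the newVerifier helper are assumptions made for the example.

package example

import (
	"sync"

	"github.com/waku-org/go-waku/waku/v2/api/missing"
	"github.com/waku-org/go-waku/waku/v2/protocol/pb"
	"github.com/waku-org/go-waku/waku/v2/protocol/store"
	"github.com/waku-org/go-waku/waku/v2/timesource"
	"go.uber.org/zap"
)

// inMemoryTracker is a hypothetical MessageTracker backed by a map.
type inMemoryTracker struct {
	mu   sync.RWMutex
	seen map[pb.MessageHash]struct{}
}

func (t *inMemoryTracker) MessageExists(h pb.MessageHash) (bool, error) {
	t.mu.RLock()
	defer t.mu.RUnlock()
	_, ok := t.seen[h]
	return ok, nil
}

// newVerifier wires the verifier to the default requestor; any other
// StorenodeRequestor implementation could be injected instead.
func newVerifier(wakuStore *store.WakuStore, ts timesource.Timesource, logger *zap.Logger) *missing.MissingMessageVerifier {
	return missing.NewMissingMessageVerifier(
		missing.NewDefaultStorenodeRequestor(wakuStore),
		&inMemoryTracker{seen: make(map[pb.MessageHash]struct{})},
		ts,
		logger,
	)
}
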
50 changes: 50 additions & 0 deletions waku/v2/api/publish/default_publisher.go
@@ -0,0 +1,50 @@
package publish

import (
"context"
"errors"

"github.com/libp2p/go-libp2p/core/peer"
"github.com/waku-org/go-waku/waku/v2/protocol/lightpush"
"github.com/waku-org/go-waku/waku/v2/protocol/pb"
"github.com/waku-org/go-waku/waku/v2/protocol/relay"
)

var ErrRelayNotAvailable = errors.New("relay is not available")
var ErrLightpushNotAvailable = errors.New("lightpush is not available")

func NewDefaultPublisher(lightpush *lightpush.WakuLightPush, relay *relay.WakuRelay) Publisher {
return &defaultPublisher{
lightpush: lightpush,
relay: relay,
}
}

type defaultPublisher struct {
lightpush *lightpush.WakuLightPush
relay *relay.WakuRelay
}

func (d *defaultPublisher) RelayListPeers(pubsubTopic string) ([]peer.ID, error) {
if d.relay == nil {
return nil, ErrRelayNotAvailable
}

return d.relay.PubSub().ListPeers(pubsubTopic), nil
}

func (d *defaultPublisher) RelayPublish(ctx context.Context, message *pb.WakuMessage, pubsubTopic string) (pb.MessageHash, error) {
if d.relay == nil {
return pb.MessageHash{}, ErrRelayNotAvailable
}

return d.relay.Publish(ctx, message, relay.WithPubSubTopic(pubsubTopic))
}

func (d *defaultPublisher) LightpushPublish(ctx context.Context, message *pb.WakuMessage, pubsubTopic string, maxPeers int) (pb.MessageHash, error) {
if d.lightpush == nil {
return pb.MessageHash{}, ErrLightpushNotAvailable
}

return d.lightpush.Publish(ctx, message, lightpush.WithPubSubTopic(pubsubTopic), lightpush.WithMaxPeers(maxPeers))
}
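
A hedged usage sketch, not part of the diff: publish over relay and fall back to lightpush when relay is not mounted. It assumes the Publisher interface exposes the two methods shown here; the helper name, pubsub topic, and peer count are placeholder values.

package example

import (
	"context"
	"errors"

	"github.com/waku-org/go-waku/waku/v2/api/publish"
	"github.com/waku-org/go-waku/waku/v2/protocol/pb"
)

// publishWithFallback tries relay first and falls back to lightpush when the
// relay protocol is not available on this node.
func publishWithFallback(ctx context.Context, pub publish.Publisher, msg *pb.WakuMessage) (pb.MessageHash, error) {
	const pubsubTopic = "/waku/2/rs/16/32" // placeholder topic

	hash, err := pub.RelayPublish(ctx, msg, pubsubTopic)
	if errors.Is(err, publish.ErrRelayNotAvailable) {
		// Push through up to 2 lightpush peers instead.
		return pub.LightpushPublish(ctx, msg, pubsubTopic, 2)
	}
	return hash, err
}
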
39 changes: 39 additions & 0 deletions waku/v2/api/publish/default_verifier.go
@@ -0,0 +1,39 @@
package publish

import (
"context"

"github.com/libp2p/go-libp2p/core/peer"
"github.com/waku-org/go-waku/waku/v2/protocol/pb"
"github.com/waku-org/go-waku/waku/v2/protocol/store"
)

func NewDefaultStorenodeMessageVerifier(store *store.WakuStore) StorenodeMessageVerifier {
return &defaultStorenodeMessageVerifier{
store: store,
}
}

type defaultStorenodeMessageVerifier struct {
store *store.WakuStore
}

func (d *defaultStorenodeMessageVerifier) MessageHashesExist(ctx context.Context, requestID []byte, peerID peer.ID, pageSize uint64, messageHashes []pb.MessageHash) ([]pb.MessageHash, error) {
var opts []store.RequestOption
opts = append(opts, store.WithRequestID(requestID))
opts = append(opts, store.WithPeer(peerID))
opts = append(opts, store.WithPaging(false, pageSize))
opts = append(opts, store.IncludeData(false))

response, err := d.store.QueryByHash(ctx, messageHashes, opts...)
if err != nil {
return nil, err
}

result := make([]pb.MessageHash, len(response.Messages()))
for i, msg := range response.Messages() {
result[i] = msg.WakuMessageHash()
}

return result, nil
}
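
A hedged usage sketch, not part of the diff: checking which of a set of hashes a storenode already holds. The wakuStore, storenode, hashes, and page size are assumptions for the example.

package example

import (
	"context"

	"github.com/libp2p/go-libp2p/core/peer"
	"github.com/waku-org/go-waku/waku/v2/api/publish"
	"github.com/waku-org/go-waku/waku/v2/protocol"
	"github.com/waku-org/go-waku/waku/v2/protocol/pb"
	"github.com/waku-org/go-waku/waku/v2/protocol/store"
)

// storedSubset returns the subset of hashes the storenode reports as stored.
func storedSubset(ctx context.Context, wakuStore *store.WakuStore, storenode peer.ID, hashes []pb.MessageHash) ([]pb.MessageHash, error) {
	verifier := publish.NewDefaultStorenodeMessageVerifier(wakuStore)
	return verifier.MessageHashesExist(
		ctx,
		protocol.GenerateRequestID(),
		storenode,
		50, // page size
		hashes,
	)
}
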
26 changes: 13 additions & 13 deletions waku/v2/api/publish/message_check.go
@@ -8,11 +8,11 @@ import (

"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/libp2p/go-libp2p/core/peer"
apicommon "github.com/waku-org/go-waku/waku/v2/api/common"
"github.com/waku-org/go-waku/waku/v2/api/history"
"github.com/waku-org/go-waku/waku/v2/protocol"
"github.com/waku-org/go-waku/waku/v2/protocol/pb"
"github.com/waku-org/go-waku/waku/v2/protocol/store"
"github.com/waku-org/go-waku/waku/v2/timesource"
"github.com/waku-org/go-waku/waku/v2/utils"
"go.uber.org/zap"
@@ -31,6 +31,11 @@ type ISentCheck interface {
DeleteByMessageIDs(messageIDs []common.Hash)
}

type StorenodeMessageVerifier interface {
// MessageHashesExist returns the subset of the given message hashes that the storenode reports as stored
MessageHashesExist(ctx context.Context, requestID []byte, peerID peer.ID, pageSize uint64, messageHashes []pb.MessageHash) ([]pb.MessageHash, error)
}

// MessageSentCheck tracks outgoing messages and checks them against a store node
// if a message's sent time has passed `messageSentPeriod`, its message id will be included in the next query
// if a message is still missing after `messageExpiredPeriod`, its message id will be marked as expired
@@ -40,7 +45,7 @@ type MessageSentCheck struct {
messageStoredChan chan common.Hash
messageExpiredChan chan common.Hash
ctx context.Context
store *store.WakuStore
messageVerifier StorenodeMessageVerifier
storenodeCycle *history.StorenodeCycle
timesource timesource.Timesource
logger *zap.Logger
@@ -52,14 +57,14 @@ type MessageSentCheck struct {
}

// NewMessageSentCheck creates a new instance of MessageSentCheck with default parameters
func NewMessageSentCheck(ctx context.Context, store *store.WakuStore, cycle *history.StorenodeCycle, timesource timesource.Timesource, msgStoredChan chan common.Hash, msgExpiredChan chan common.Hash, logger *zap.Logger) *MessageSentCheck {
func NewMessageSentCheck(ctx context.Context, messageVerifier StorenodeMessageVerifier, cycle *history.StorenodeCycle, timesource timesource.Timesource, msgStoredChan chan common.Hash, msgExpiredChan chan common.Hash, logger *zap.Logger) *MessageSentCheck {
return &MessageSentCheck{
messageIDs: make(map[string]map[common.Hash]uint32),
messageIDsMu: sync.RWMutex{},
messageStoredChan: msgStoredChan,
messageExpiredChan: msgExpiredChan,
ctx: ctx,
store: store,
messageVerifier: messageVerifier,
storenodeCycle: cycle,
timesource: timesource,
logger: logger,
@@ -212,12 +217,7 @@ func (m *MessageSentCheck) messageHashBasedQuery(ctx context.Context, hashes []c
return []common.Hash{}
}

var opts []store.RequestOption
requestID := protocol.GenerateRequestID()
opts = append(opts, store.WithRequestID(requestID))
opts = append(opts, store.WithPeer(selectedPeer))
opts = append(opts, store.WithPaging(false, m.maxHashQueryLength))
opts = append(opts, store.IncludeData(false))

messageHashes := make([]pb.MessageHash, len(hashes))
for i, hash := range hashes {
@@ -228,20 +228,20 @@ func (m *MessageSentCheck) messageHashBasedQuery(ctx context.Context, hashes []c

queryCtx, cancel := context.WithTimeout(ctx, m.storeQueryTimeout)
defer cancel()
result, err := m.store.QueryByHash(queryCtx, messageHashes, opts...)
result, err := m.messageVerifier.MessageHashesExist(queryCtx, requestID, selectedPeer, m.maxHashQueryLength, messageHashes)
if err != nil {
m.logger.Error("store.queryByHash failed", zap.String("requestID", hexutil.Encode(requestID)), zap.Stringer("peerID", selectedPeer), zap.Error(err))
return []common.Hash{}
}

m.logger.Debug("store.queryByHash result", zap.String("requestID", hexutil.Encode(requestID)), zap.Int("messages", len(result.Messages())))
m.logger.Debug("store.queryByHash result", zap.String("requestID", hexutil.Encode(requestID)), zap.Int("messages", len(result)))

var ackHashes []common.Hash
var missedHashes []common.Hash
for i, hash := range hashes {
found := false
for _, msg := range result.Messages() {
if bytes.Equal(msg.GetMessageHash(), hash.Bytes()) {
for _, msgHash := range result {
if bytes.Equal(msgHash.Bytes(), hash.Bytes()) {
found = true
break
}
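
Finally, a hedged wiring sketch (not part of the diff) for the updated constructor. The cycle, timesource, and wakuStore values are assumed to come from the embedding application; in real use the two channels would normally be shared with whatever consumes the stored/expired notifications.

package example

import (
	"context"

	gethcommon "github.com/ethereum/go-ethereum/common"
	"github.com/waku-org/go-waku/waku/v2/api/history"
	"github.com/waku-org/go-waku/waku/v2/api/publish"
	"github.com/waku-org/go-waku/waku/v2/protocol/store"
	"github.com/waku-org/go-waku/waku/v2/timesource"
	"go.uber.org/zap"
)

// newSentCheck wires MessageSentCheck to the default go-waku backed verifier;
// any other StorenodeMessageVerifier could be injected instead.
func newSentCheck(ctx context.Context, wakuStore *store.WakuStore, cycle *history.StorenodeCycle, ts timesource.Timesource, logger *zap.Logger) *publish.MessageSentCheck {
	storedCh := make(chan gethcommon.Hash, 16)  // receives hashes confirmed by the storenode
	expiredCh := make(chan gethcommon.Hash, 16) // receives hashes that never showed up

	return publish.NewMessageSentCheck(
		ctx,
		publish.NewDefaultStorenodeMessageVerifier(wakuStore),
		cycle,
		ts,
		storedCh,
		expiredCh,
		logger,
	)
}
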