@@ -3,23 +3,27 @@ package provider
 import (
 	"context"
 	"errors"
-	"fmt"

 	"github.com/ipfs/go-cid"
+	logging "github.com/ipfs/go-log/v2"
 	dht "github.com/libp2p/go-libp2p-kad-dht"
 	"github.com/libp2p/go-libp2p-kad-dht/dual"
+	"github.com/libp2p/go-libp2p-kad-dht/internal/batch"
 	"github.com/libp2p/go-libp2p-kad-dht/provider"
 	"github.com/libp2p/go-libp2p-kad-dht/provider/datastore"
 	mh "github.com/multiformats/go-multihash"
 )

+var logger = logging.Logger(provider.LoggerName)
+
 // SweepingProvider manages provides and reprovides for both DHT swarms (LAN
 // and WAN) in the dual DHT setup.
 type SweepingProvider struct {
-	dht      *dual.DHT
-	LAN      *provider.SweepingProvider
-	WAN      *provider.SweepingProvider
-	keyStore datastore.KeyStore
+	dht           *dual.DHT
+	lan           *provider.SweepingProvider
+	wan           *provider.SweepingProvider
+	keyStore      datastore.KeyStore
+	keyStoreQueue *batch.Processor
 }

 // New creates a new SweepingProvider that manages provides and reprovides for
@@ -70,32 +74,44 @@ func New(d *dual.DHT, opts ...Option) (*SweepingProvider, error) {
 		}
 	}

-	return &SweepingProvider{
+	s := &SweepingProvider{
 		dht:      d,
-		LAN:      sweepingProviders[0],
-		WAN:      sweepingProviders[1],
+		lan:      sweepingProviders[0],
+		wan:      sweepingProviders[1],
 		keyStore: cfg.keyStore,
-	}, nil
+	}
+	s.keyStoreQueue = batch.NewProcessor(func(keys []mh.Multihash, force bool) {
+		newKeys, err := s.keyStore.Put(context.Background(), keys...)
+		if err != nil {
+			logger.Errorf("failed to store multihashes: %v", err)
+			return
+		}
+		if !force {
+			if len(newKeys) == 0 {
+				return
+			}
+			keys = newKeys
+		}
+
+		// Add to schedule and to provide queue of both systems.
+		s.runOnBoth(func(p *provider.SweepingProvider) {
+			p.AddToScheduleAndProvide(keys...)
+		})
+	})
+
+	return s, nil
 }

 // runOnBoth runs the provided function on both the LAN and WAN providers in
 // parallel and waits for both to complete.
-func (s *SweepingProvider) runOnBoth(f func(*provider.SweepingProvider) error) error {
-	var errs [2]error
+func (s *SweepingProvider) runOnBoth(f func(*provider.SweepingProvider)) {
 	done := make(chan struct{})
 	go func() {
 		defer close(done)
-		err := f(s.LAN)
-		if err != nil {
-			errs[0] = fmt.Errorf("LAN provider: %w", err)
-		}
+		f(s.lan)
 	}()
-	err := f(s.WAN)
-	if err != nil {
-		errs[1] = fmt.Errorf("WAN provider: %w", err)
-	}
+	f(s.wan)
 	<-done
-	return errors.Join(errs[:]...)
 }

 // ProvideOnce sends provider records for the specified keys to both DHT swarms
@@ -109,9 +125,9 @@ func (s *SweepingProvider) runOnBoth(f func(*provider.SweepingProvider) error) error {
 // (either never bootstrapped, or disconnected since more than `OfflineDelay`).
 // The schedule and provide queue depend on the network size, hence recent
 // network connectivity is essential.
-func (s *SweepingProvider) ProvideOnce(keys ...mh.Multihash) error {
-	return s.runOnBoth(func(p *provider.SweepingProvider) error {
-		return p.ProvideOnce(keys...)
+func (s *SweepingProvider) ProvideOnce(keys ...mh.Multihash) {
+	s.runOnBoth(func(p *provider.SweepingProvider) {
+		p.ProvideOnce(keys...)
 	})
 }

@@ -132,22 +148,8 @@ func (s *SweepingProvider) ProvideOnce(keys ...mh.Multihash) error {
 // (either never bootstrapped, or disconnected since more than `OfflineDelay`).
 // The schedule and provide queue depend on the network size, hence recent
 // network connectivity is essential.
-func (s *SweepingProvider) StartProviding(force bool, keys ...mh.Multihash) error {
-	ctx := context.Background()
-	newKeys, err := s.keyStore.Put(ctx, keys...)
-	if err != nil {
-		return fmt.Errorf("failed to store multihashes: %w", err)
-	}
-
-	s.runOnBoth(func(p *provider.SweepingProvider) error {
-		return p.AddToSchedule(newKeys...)
-	})
-
-	if !force {
-		keys = newKeys
-	}
-
-	return s.ProvideOnce(keys...)
+func (s *SweepingProvider) StartProviding(force bool, keys ...mh.Multihash) {
+	s.keyStoreQueue.Enqueue(force, keys...)
 }

 // StopProviding stops reproviding the given keys to both DHT swarms. The node
@@ -157,12 +159,12 @@ func (s *SweepingProvider) StartProviding(force bool, keys ...mh.Multihash) error {
 // Remove the `keys` from the schedule and return immediately. Valid records
 // can remain in the DHT swarms up to the provider record TTL after calling
 // `StopProviding`.
-func (s *SweepingProvider) StopProviding(keys ...mh.Multihash) error {
+func (s *SweepingProvider) StopProviding(keys ...mh.Multihash) {
+	// TODO: batch deletes
 	err := s.keyStore.Delete(context.Background(), keys...)
 	if err != nil {
-		return fmt.Errorf("failed to stop providing keys: %w", err)
+		logger.Errorf("failed to stop providing keys: %v", err)
 	}
-	return nil
 }

 // Clear clears all the keys from the provide queues of both DHTs and
@@ -171,7 +173,7 @@ func (s *SweepingProvider) StopProviding(keys ...mh.Multihash) error {
 // The keys are not deleted from the keystore, so they will continue to be
 // reprovided as scheduled.
 func (s *SweepingProvider) Clear() int {
-	return s.LAN.Clear() + s.WAN.Clear()
+	return s.lan.Clear() + s.wan.Clear()
 }

 // RefreshSchedule scans the KeyStore for any keys that are not currently
@@ -186,10 +188,17 @@ func (s *SweepingProvider) Clear() int {
 // Offline (either never bootstrapped, or disconnected since more than
 // `OfflineDelay`). The schedule depends on the network size, hence recent
 // network connectivity is essential.
-func (s *SweepingProvider) RefreshSchedule() error {
-	return s.runOnBoth(func(p *provider.SweepingProvider) error {
-		return p.RefreshSchedule()
+func (s *SweepingProvider) RefreshSchedule() {
+	s.runOnBoth(func(p *provider.SweepingProvider) {
+		p.RefreshSchedule()
+	})
+}
+
+func (s *SweepingProvider) Close() error {
+	s.runOnBoth(func(p *provider.SweepingProvider) {
+		p.Close()
 	})
+	return nil
 }

 var (
@@ -200,9 +209,10 @@
 // dhtProvider is the interface to ensure that SweepingProvider and
 // provider.SweepingProvider share the same interface.
 type dhtProvider interface {
-	StartProviding(force bool, keys ...mh.Multihash) error
-	StopProviding(keys ...mh.Multihash) error
-	ProvideOnce(keys ...mh.Multihash) error
+	StartProviding(force bool, keys ...mh.Multihash)
+	StopProviding(keys ...mh.Multihash)
+	ProvideOnce(keys ...mh.Multihash)
 	Clear() int
-	RefreshSchedule() error
+	RefreshSchedule()
+	Close() error
 }
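
The new `internal/batch` package is not part of this diff, so the exact semantics of `batch.NewProcessor` and `Processor.Enqueue` are not visible here. As a reading aid only, here is a minimal sketch of a processor with the shape the new code assumes: `Enqueue` is non-blocking, keys are coalesced per `force` flag, and the callback runs on a background goroutine. The real implementation's batching window, deduplication, and shutdown handling may differ.

// Illustrative sketch only: not the actual internal/batch implementation.
package batch

import (
	"sync"
	"time"

	mh "github.com/multiformats/go-multihash"
)

// Processor coalesces keys passed to Enqueue and hands them to a callback
// from a background goroutine, so callers never block on datastore writes.
type Processor struct {
	mu       sync.Mutex
	callback func(keys []mh.Multihash, force bool)
	pending  map[bool][]mh.Multihash // pending keys, grouped by the force flag
	flushing bool
}

// NewProcessor returns a Processor that feeds batched keys to callback.
func NewProcessor(callback func(keys []mh.Multihash, force bool)) *Processor {
	return &Processor{
		callback: callback,
		pending:  make(map[bool][]mh.Multihash),
	}
}

// Enqueue adds keys to the pending batch and schedules an asynchronous flush.
func (p *Processor) Enqueue(force bool, keys ...mh.Multihash) {
	p.mu.Lock()
	p.pending[force] = append(p.pending[force], keys...)
	if !p.flushing {
		p.flushing = true
		go p.flushLoop()
	}
	p.mu.Unlock()
}

// flushLoop drains pending batches until none remain, waiting briefly so that
// rapid successive Enqueue calls merge into a single callback invocation.
func (p *Processor) flushLoop() {
	for {
		time.Sleep(10 * time.Millisecond) // small window to coalesce bursts
		p.mu.Lock()
		if len(p.pending[false]) == 0 && len(p.pending[true]) == 0 {
			p.flushing = false
			p.mu.Unlock()
			return
		}
		batches := p.pending
		p.pending = make(map[bool][]mh.Multihash)
		p.mu.Unlock()
		for force, keys := range batches {
			if len(keys) > 0 {
				p.callback(keys, force)
			}
		}
	}
}

Under that reading, StartProviding becomes fire-and-forget: the KeyStore write and the AddToScheduleAndProvide calls happen on the processor's goroutine, which is why the method no longer returns an error.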
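From a caller's perspective, the visible change is that StartProviding, StopProviding, ProvideOnce, and RefreshSchedule no longer return errors: failures are logged through the new package logger instead, while New and Close keep their error returns. A hypothetical caller could look like the sketch below; the dual/provider import path and the newPinProvider helper are assumptions for illustration.

// Hypothetical caller of the reworked dual-DHT SweepingProvider API.
package example

import (
	"github.com/libp2p/go-libp2p-kad-dht/dual"
	dualprovider "github.com/libp2p/go-libp2p-kad-dht/dual/provider" // assumed import path
	mh "github.com/multiformats/go-multihash"
)

func newPinProvider(dualDHT *dual.DHT, keys []mh.Multihash) (*dualprovider.SweepingProvider, error) {
	prov, err := dualprovider.New(dualDHT) // New still reports setup errors
	if err != nil {
		return nil, err
	}
	// Enqueues the keys for storage, scheduling and (re)providing on both
	// swarms, then returns immediately; failures are logged by the provider
	// rather than returned to the caller.
	prov.StartProviding(false, keys...)
	return prov, nil // callers should Close() the provider when done
}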