Skip to content

Commit e3f95dc

Browse files
committed
htlcswitch: remove batchReplayBkt
This commit removes the `batchReplayBkt` as its only effect is to allow reforwarding htlcs during startup. Normally, for every incoming htlc added, its shared secret is used as the key saved into the `sharedHashBucket`, which is then used to check for replays. In addition, the fwdPkg's ID, which is SCID+height, is also saved to the bucket `batchReplayBkt`. Since replays of HTLCs cannot happen at the same commitment height, when a replay happens, `batchReplayBkt` simply doesn't have this info, and we again rely on `sharedHashBucket` to detect it. This means most of the time the `batchReplayBkt` is a list of SCID+height keys with empty values. The `batchReplayBkt` was previously used as a mechanism to check for reforwardings during startup - when reforwarding htlcs, it queries this bucket and finds an empty map, knowing this is a reforwarding, and skips the check in `sharedHashBucket`. Given that we now use a bool flag to explicitly skip the replay check, this bucket is no longer useful.
1 parent 3b9c4eb commit e3f95dc

File tree

2 files changed

+5
-46
lines changed

2 files changed

+5
-46
lines changed

htlcswitch/decayedlog.go

Lines changed: 3 additions & 46 deletions
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,6 @@
11
package htlcswitch
22

33
import (
4-
"bytes"
54
"encoding/binary"
65
"errors"
76
"fmt"
@@ -24,11 +23,6 @@ var (
2423
// bytes of a received HTLC's hashed shared secret as the key and the HTLC's
2524
// CLTV expiry as the value.
2625
sharedHashBucket = []byte("shared-hash")
27-
28-
// batchReplayBucket is a bucket that maps batch identifiers to
29-
// serialized ReplaySets. This is used to give idempotency in the event
30-
// that a batch is processed more than once.
31-
batchReplayBucket = []byte("batch-replay")
3226
)
3327

3428
var (
@@ -138,11 +132,6 @@ func (d *DecayedLog) initBuckets() error {
138132
return ErrDecayedLogInit
139133
}
140134

141-
_, err = tx.CreateTopLevelBucket(batchReplayBucket)
142-
if err != nil {
143-
return ErrDecayedLogInit
144-
}
145-
146135
return nil
147136
}, func() {})
148137
}
@@ -329,11 +318,8 @@ func (d *DecayedLog) Put(hash *sphinx.HashPrefix, cltv uint32) error {
329318
// PutBatch accepts a pending batch of hashed secret entries to write to disk.
330319
// Each hashed secret is inserted with a corresponding time value, dictating
331320
// when the entry will be evicted from the log.
332-
// NOTE: This method enforces idempotency by writing the replay set obtained
333-
// from the first attempt for a particular batch ID, and decoding the return
334-
// value to subsequent calls. For the indices of the replay set to be aligned
335-
// properly, the batch MUST be constructed identically to the first attempt,
336-
// pruning will cause the indices to become invalid.
321+
//
322+
// TODO(yy): remove this method and use `Put` instead.
337323
func (d *DecayedLog) PutBatch(b *sphinx.Batch) (*sphinx.ReplaySet, error) {
338324
// Since batched boltdb txns may be executed multiple times before
339325
// succeeding, we will create a new replay set for each invocation to
@@ -348,25 +334,6 @@ func (d *DecayedLog) PutBatch(b *sphinx.Batch) (*sphinx.ReplaySet, error) {
348334
return ErrDecayedLogCorrupted
349335
}
350336

351-
// Load the batch replay bucket, which will be used to either
352-
// retrieve the result of previously processing this batch, or
353-
// to write the result of this operation.
354-
batchReplayBkt := tx.ReadWriteBucket(batchReplayBucket)
355-
if batchReplayBkt == nil {
356-
return ErrDecayedLogCorrupted
357-
}
358-
359-
// Check for the existence of this batch's id in the replay
360-
// bucket. If a non-nil value is found, this indicates that we
361-
// have already processed this batch before. We deserialize the
362-
// resulting and return it to ensure calls to put batch are
363-
// idempotent.
364-
replayBytes := batchReplayBkt.Get(b.ID)
365-
if replayBytes != nil {
366-
replays = sphinx.NewReplaySet()
367-
return replays.Decode(bytes.NewReader(replayBytes))
368-
}
369-
370337
// The CLTV will be stored into scratch and then stored into the
371338
// sharedHashBucket.
372339
var scratch [4]byte
@@ -394,17 +361,7 @@ func (d *DecayedLog) PutBatch(b *sphinx.Batch) (*sphinx.ReplaySet, error) {
394361
// batch's construction.
395362
replays.Merge(b.ReplaySet)
396363

397-
// Write the replay set under the batch identifier to the batch
398-
// replays bucket. This can be used during recovery to test (1)
399-
// that a particular batch was successfully processed and (2)
400-
// recover the indexes of the adds that were rejected as
401-
// replays.
402-
var replayBuf bytes.Buffer
403-
if err := replays.Encode(&replayBuf); err != nil {
404-
return err
405-
}
406-
407-
return batchReplayBkt.Put(b.ID, replayBuf.Bytes())
364+
return nil
408365
}); err != nil {
409366
return nil, err
410367
}

htlcswitch/hop/iterator.go

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -784,6 +784,8 @@ func (p *OnionProcessor) DecodeHopIterators(id []byte,
784784
b.Val,
785785
))
786786
})
787+
788+
// TODO(yy): use `p.router.ProcessOnionPacket` instead.
787789
err = tx.ProcessOnionPacket(
788790
seqNum, onionPkt, req.RHash, req.IncomingCltv, opts...,
789791
)

0 commit comments

Comments
 (0)