
Commit b12f0af

taprpc+rpcserver: add group key to DecodeAssetPayReq
Since we support group keys in all other payment-related commands, we now add the same argument to the invoice decoding RPC.
1 parent 1b181d5 commit b12f0af
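
For illustration, a minimal client-side sketch of the new request shape, assuming an established *grpc.ClientConn with appropriate credentials and the generated TaprootAssetChannels client from the tchrpc package (the helper name and the client constructor are assumptions, not part of this commit; exactly one of AssetId or GroupKey may be set):

package example

import (
    "context"
    "encoding/hex"
    "fmt"

    tchrpc "github.com/lightninglabs/taproot-assets/taprpc/tapchannelrpc"
    "google.golang.org/grpc"
)

// decodeByGroupKey is an illustrative helper (not part of the commit): it
// decodes an asset invoice by group key rather than by asset ID.
func decodeByGroupKey(ctx context.Context, conn *grpc.ClientConn,
    groupKeyHex, invoice string) error {

    groupKey, err := hex.DecodeString(groupKeyHex)
    if err != nil {
        return err
    }

    // Assumption: the generated client constructor for the tapchannelrpc
    // service. The GroupKey field may be a 32-byte x-only or a 33-byte
    // compressed key.
    client := tchrpc.NewTaprootAssetChannelsClient(conn)
    resp, err := client.DecodeAssetPayReq(ctx, &tchrpc.AssetPayReq{
        GroupKey:     groupKey,
        PayReqString: invoice,
    })
    if err != nil {
        return err
    }

    fmt.Printf("decoded invoice: %v\n", resp.PayReq)
    return nil
}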

4 files changed, +239 −70 lines

rpcserver.go

Lines changed: 157 additions & 15 deletions
@@ -8584,29 +8584,73 @@ func (r *rpcServer) assetInvoiceAmt(ctx context.Context,
 func (r *rpcServer) DecodeAssetPayReq(ctx context.Context,
     payReq *tchrpc.AssetPayReq) (*tchrpc.AssetPayReqResponse, error) {

+    tapdLog.Debugf("Decoding asset pay req, asset_id=%x, group_key=%x,"+
+        "pay_req=%v", payReq.AssetId, payReq.GroupKey,
+        payReq.PayReqString)
+
     if r.cfg.PriceOracle == nil {
         return nil, fmt.Errorf("price oracle is not set")
     }

     // First, we'll perform some basic input validation.
+    var assetID asset.ID
     switch {
-    case len(payReq.AssetId) == 0:
-        return nil, fmt.Errorf("asset ID must be specified")
+    case len(payReq.AssetId) == 0 && len(payReq.GroupKey) == 0:
+        return nil, fmt.Errorf("either asset ID or group key must be " +
+            "specified")

-    case len(payReq.AssetId) != 32:
+    case len(payReq.AssetId) != 0 && len(payReq.GroupKey) != 0:
+        return nil, fmt.Errorf("cannot set both asset ID and group key")
+
+    case len(payReq.AssetId) != 0 && len(payReq.AssetId) != 32:
         return nil, fmt.Errorf("asset ID must be 32 bytes, "+
             "was %d", len(payReq.AssetId))

+    case len(payReq.GroupKey) != 0 && len(payReq.GroupKey) != 33 &&
+        len(payReq.GroupKey) != 32:
+
+        return nil, fmt.Errorf("group key must be 32 or 33 bytes, "+
+            "was %d", len(payReq.GroupKey))
+
     case len(payReq.PayReqString) == 0:
         return nil, fmt.Errorf("payment request must be specified")
     }

-    var (
-        resp    tchrpc.AssetPayReqResponse
-        assetID asset.ID
-    )
+    // We made sure that only one is set, so let's now use the asset ID
+    // or group key.
+    switch {
+    // The asset ID is easy, we can just copy the bytes.
+    case len(payReq.AssetId) != 0:
+        copy(assetID[:], payReq.AssetId)
+
+    // The group key is a bit more involved. We first need to sync the asset
+    // group, then fetch the leaves that are associated with this group key.
+    // From there, we can look up the asset ID of one of the group's
+    // tranches.
+    case len(payReq.GroupKey) != 0:
+        var (
+            groupKey *btcec.PublicKey
+            err      error
+        )
+        if len(payReq.GroupKey) == 32 {
+            groupKey, err = schnorr.ParsePubKey(payReq.GroupKey)
+        } else {
+            groupKey, err = btcec.ParsePubKey(payReq.GroupKey)
+        }
+        if err != nil {
+            return nil, fmt.Errorf("error parsing group "+
+                "key: %w", err)
+        }
+
+        assetID, err = r.syncAssetGroup(ctx, *groupKey)
+        if err != nil {
+            return nil, fmt.Errorf("error syncing asset group: %w",
+                err)
+        }

-    copy(assetID[:], payReq.AssetId)
+        tapdLog.Debugf("Resolved asset ID %v for group key %x",
+            assetID.String(), groupKey.SerializeCompressed())
+    }

     // With the inputs validated, we'll first call out to lnd to decode the
     // payment request.
@@ -8618,7 +8662,9 @@ func (r *rpcServer) DecodeAssetPayReq(ctx context.Context,
         return nil, fmt.Errorf("unable to fetch channel: %w", err)
     }

-    resp.PayReq = payReqInfo
+    resp := tchrpc.AssetPayReqResponse{
+        PayReq: payReqInfo,
+    }

     // Next, we'll fetch the information for this asset ID through the addr
     // book. This'll automatically fetch the asset if needed.
@@ -8628,12 +8674,17 @@ func (r *rpcServer) DecodeAssetPayReq(ctx context.Context,
             "asset_id=%x: %w", assetID[:], err)
     }

-    resp.GenesisInfo = &taprpc.GenesisInfo{
-        GenesisPoint: assetGroup.FirstPrevOut.String(),
-        AssetType:    taprpc.AssetType(assetGroup.Type),
-        Name:         assetGroup.Tag,
-        MetaHash:     assetGroup.MetaHash[:],
-        AssetId:      assetID[:],
+    // The genesis info makes no sense in the case where we have decoded the
+    // invoice for a group key, since we just pick the first asset ID we
+    // found for a group key.
+    if len(payReq.GroupKey) == 0 {
+        resp.GenesisInfo = &taprpc.GenesisInfo{
+            GenesisPoint: assetGroup.FirstPrevOut.String(),
+            AssetType:    taprpc.AssetType(assetGroup.Type),
+            Name:         assetGroup.Tag,
+            MetaHash:     assetGroup.MetaHash[:],
+            AssetId:      assetID[:],
+        }
     }

     // If this asset ID belongs to an asset group, then we'll display that
@@ -8693,6 +8744,97 @@ func (r *rpcServer) DecodeAssetPayReq(ctx context.Context,
     return &resp, nil
 }

+// syncAssetGroup checks if we already know any asset leaves associated with
+// this group key. If not, it will sync the asset group and return the asset
+// ID of the first leaf found. If there are no leaves found after syncing, an
+// error is returned.
+func (r *rpcServer) syncAssetGroup(ctx context.Context,
+    groupKey btcec.PublicKey) (asset.ID, error) {
+
+    // We first check if we already know any asset leaves associated with
+    // this group key.
+    leaf := fn.NewRight[asset.ID](groupKey)
+    leaves, err := r.cfg.Multiverse.FetchLeaves(
+        ctx, []universe.MultiverseLeafDesc{leaf},
+        universe.ProofTypeIssuance,
+    )
+    if err != nil {
+        return asset.ID{}, fmt.Errorf("error fetching leaves: %w", err)
+    }
+
+    tapdLog.Tracef("Found %d leaves for group key %x",
+        len(leaves), groupKey.SerializeCompressed())
+
+    // If there are no leaves, then we need to sync the asset group.
+    if len(leaves) == 0 {
+        tapdLog.Debugf("No leaves found for group key %x, "+
+            "syncing asset group", groupKey.SerializeCompressed())
+        err = r.cfg.AddrBook.SyncAssetGroup(ctx, groupKey)
+        if err != nil {
+            return asset.ID{}, fmt.Errorf("error syncing asset "+
+                "group: %w", err)
+        }
+
+        // Now we can try again.
+        leaves, err = r.cfg.Multiverse.FetchLeaves(
+            ctx, []universe.MultiverseLeafDesc{leaf},
+            universe.ProofTypeIssuance,
+        )
+        if err != nil {
+            return asset.ID{}, fmt.Errorf("error fetching leaves: "+
+                "%w", err)
+        }
+
+        tapdLog.Tracef("Found %d leaves for group key %x",
+            len(leaves), groupKey.SerializeCompressed())
+
+        if len(leaves) == 0 {
+            return asset.ID{}, fmt.Errorf("no asset leaves found "+
+                "for group %x after sync",
+                groupKey.SerializeCompressed())
+        }
+    }
+
+    // Since we know we have at least one leaf, we can just take leaf ID and
+    // query the keys with it. We just need one so we can fetch the actual
+    // proof to find out the asset ID. We don't really care about the order,
+    // so we just use the natural database order.
+    leafID := leaves[0]
+    leafKeys, err := r.cfg.Multiverse.UniverseLeafKeys(
+        ctx, universe.UniverseLeafKeysQuery{
+            Id:    leafID.ID,
+            Limit: 1,
+        },
+    )
+    if err != nil {
+        return asset.ID{}, fmt.Errorf("error fetching leaf keys: %w",
+            err)
+    }
+
+    // We know we have a leaf, so this shouldn't happen.
+    if len(leafKeys) != 1 {
+        return asset.ID{}, fmt.Errorf("expected 1 leaf key, got %d",
+            len(leafKeys))
+    }
+
+    proofs, err := r.cfg.Multiverse.FetchProofLeaf(
+        ctx, leafID.ID, leafKeys[0],
+    )
+    if err != nil {
+        return asset.ID{}, fmt.Errorf("error fetching proof leaf: %w",
+            err)
+    }
+
+    // We should have a proof for the asset ID now.
+    if len(proofs) != 1 {
+        return asset.ID{}, fmt.Errorf("expected 1 proof, got %d",
+            len(proofs))
+    }
+
+    // We can now extract the asset ID from the proof.
+    return proofs[0].Leaf.ID(), nil
+}
+
 // RegisterTransfer informs the daemon about a new inbound transfer that has
 // happened. This is used for interactive transfers where no TAP address is
 // involved and the recipient is aware of the transfer through an out-of-band
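
One detail worth noting from the validation above: the new group_key field accepts either a 32-byte x-only (BIP-340) key or a 33-byte compressed key, parsed with the same btcec/schnorr helpers the diff uses. A standalone sketch of that dual parse path (the parseGroupKey helper is illustrative, not part of tapd):

package example

import (
    "fmt"

    "github.com/btcsuite/btcd/btcec/v2"
    "github.com/btcsuite/btcd/btcec/v2/schnorr"
)

// parseGroupKey mirrors the server-side length check: a 32-byte input is
// treated as an x-only key (even-Y assumed), a 33-byte input as a compressed
// key. Illustrative helper only.
func parseGroupKey(raw []byte) (*btcec.PublicKey, error) {
    switch len(raw) {
    case 32:
        return schnorr.ParsePubKey(raw)
    case 33:
        return btcec.ParsePubKey(raw)
    default:
        return nil, fmt.Errorf("group key must be 32 or 33 bytes, "+
            "was %d", len(raw))
    }
}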

taprpc/tapchannelrpc/tapchannel.pb.go

Lines changed: 67 additions & 52 deletions
Some generated files are not rendered by default.
