Skip to content

add vector dimension option and update tests to support both HNSW and… #9471

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 2 commits into from
Jul 24, 2025
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
79 changes: 48 additions & 31 deletions posting/index.go
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,7 @@ import (
"fmt"
"math"
"os"
"strconv"
"strings"
"sync/atomic"
"time"
Expand Down Expand Up @@ -1373,37 +1374,42 @@ func rebuildVectorIndex(ctx context.Context, factorySpecs []*tok.FactoryCreateSp
return err
}

numVectorsToCheck := 100
lenFreq := make(map[int]int, numVectorsToCheck)
maxFreq := 0
dimension := 0
MemLayerInstance.IterateDisk(ctx, IterateDiskArgs{
Prefix: pk.DataPrefix(),
ReadTs: rb.StartTs,
AllVersions: false,
Reverse: false,
CheckInclusion: func(uid uint64) error {
return nil
},
Function: func(l *List, pk x.ParsedKey) error {
val, err := l.Value(rb.StartTs)
if err != nil {
return err
}
inVec := types.BytesAsFloatArray(val.Value.([]byte))
lenFreq[len(inVec)] += 1
if lenFreq[len(inVec)] > maxFreq {
maxFreq = lenFreq[len(inVec)]
dimension = len(inVec)
}
numVectorsToCheck -= 1
if numVectorsToCheck <= 0 {
return ErrStopIteration
}
return nil
},
StartKey: x.DataKey(rb.Attr, 0),
})
dimension := indexer.Dimension()
if dimension == 0 {
numVectorsToCheck := 100
lenFreq := make(map[int]int, numVectorsToCheck)
maxFreq := 0
MemLayerInstance.IterateDisk(ctx, IterateDiskArgs{
Prefix: pk.DataPrefix(),
ReadTs: rb.StartTs,
AllVersions: false,
Reverse: false,
CheckInclusion: func(uid uint64) error {
return nil
},
Function: func(l *List, pk x.ParsedKey) error {
val, err := l.Value(rb.StartTs)
if err != nil {
return err
}
inVec := types.BytesAsFloatArray(val.Value.([]byte))
lenFreq[len(inVec)] += 1
if lenFreq[len(inVec)] > maxFreq {
maxFreq = lenFreq[len(inVec)]
dimension = len(inVec)
}
numVectorsToCheck -= 1
if numVectorsToCheck <= 0 {
return ErrStopIteration
}
return nil
},
StartKey: x.DataKey(rb.Attr, 0),
})

indexer.SetDimension(dimension)
addDimensionOptionInSchema(rb.CurrentSchema, dimension)
}

fmt.Println("Selecting vector dimension to be:", dimension)

Expand Down Expand Up @@ -1648,6 +1654,17 @@ func rebuildVectorIndex(ctx context.Context, factorySpecs []*tok.FactoryCreateSp
// return nil
}

// addDimensionOptionInSchema records the detected vector dimension as a
// "dimension" option on every "partionedhnsw" index spec present in the
// given schema update. Specs for other index factories are left untouched.
// NOTE(review): this appends unconditionally — presumably callers invoke it
// only when no dimension option exists yet; confirm against rebuildVectorIndex.
func addDimensionOptionInSchema(schema *pb.SchemaUpdate, dimension int) {
	for _, spec := range schema.IndexSpecs {
		if spec.Name != "partionedhnsw" {
			continue
		}
		spec.Options = append(spec.Options, &pb.OptionPair{
			Key:   "dimension",
			Value: strconv.Itoa(dimension),
		})
	}
}

// rebuildTokIndex rebuilds index for a given attribute.
// We commit mutations with startTs and ignore the errors.
func rebuildTokIndex(ctx context.Context, rb *IndexRebuild) error {
Expand Down
2 changes: 1 addition & 1 deletion schema/parse.go
Original file line number Diff line number Diff line change
Expand Up @@ -306,7 +306,7 @@ func parseTokenOrVectorIndexSpec(
tokenizer, has := tok.GetTokenizer(tokenOrFactoryName)
if !has {
return tokenOrFactoryName, nil, false,
next.Errorf("Invalid tokenizer 1 %s", next.Val)
next.Errorf("Invalid tokenizer %s", next.Val)
}
tokenizerType, ok := types.TypeForName(tokenizer.Type())
x.AssertTrue(ok) // Type is validated during tokenizer loading.
Expand Down
25 changes: 14 additions & 11 deletions systest/vector/backup_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,6 @@ import (
"fmt"
"slices"
"strings"
"testing"
"time"

"github.com/stretchr/testify/require"
Expand All @@ -23,7 +22,8 @@ import (
"github.com/hypermodeinc/dgraph/v25/x"
)

func TestVectorIncrBackupRestore(t *testing.T) {
func (vsuite *VectorTestSuite) TestVectorIncrBackupRestore() {
t := vsuite.T()
conf := dgraphtest.NewClusterConfig().WithNumAlphas(1).WithNumZeros(1).WithReplicas(1).WithACL(time.Hour)
c, err := dgraphtest.NewLocalCluster(conf)
require.NoError(t, err)
Expand All @@ -41,7 +41,7 @@ func TestVectorIncrBackupRestore(t *testing.T) {
require.NoError(t, hc.LoginIntoNamespace(dgraphapi.DefaultUser,
dgraphapi.DefaultPassword, x.RootNamespace))

require.NoError(t, gc.SetupSchema(testSchema))
require.NoError(t, gc.SetupSchema(vsuite.schema))

numVectors := 500
pred := "project_description_v"
Expand Down Expand Up @@ -100,7 +100,8 @@ func TestVectorIncrBackupRestore(t *testing.T) {
}
}

func TestVectorBackupRestore(t *testing.T) {
func (vsuite *VectorTestSuite) TestVectorBackupRestore() {
t := vsuite.T()
conf := dgraphtest.NewClusterConfig().WithNumAlphas(1).WithNumZeros(1).WithReplicas(1).WithACL(time.Hour)
c, err := dgraphtest.NewLocalCluster(conf)
require.NoError(t, err)
Expand All @@ -118,7 +119,7 @@ func TestVectorBackupRestore(t *testing.T) {
require.NoError(t, hc.LoginIntoNamespace(dgraphapi.DefaultUser,
dgraphapi.DefaultPassword, x.RootNamespace))

require.NoError(t, gc.SetupSchema(testSchema))
require.NoError(t, gc.SetupSchema(vsuite.schema))

numVectors := 1000
pred := "project_description_v"
Expand All @@ -138,7 +139,8 @@ func TestVectorBackupRestore(t *testing.T) {
testVectorQuery(t, gc, vectors, rdfs, pred, numVectors)
}

func TestVectorBackupRestoreDropIndex(t *testing.T) {
func (vsuite *VectorTestSuite) TestVectorBackupRestoreDropIndex() {
t := vsuite.T()
// setup cluster
conf := dgraphtest.NewClusterConfig().WithNumAlphas(1).WithNumZeros(1).WithReplicas(1).WithACL(time.Hour)
c, err := dgraphtest.NewLocalCluster(conf)
Expand All @@ -158,7 +160,7 @@ func TestVectorBackupRestoreDropIndex(t *testing.T) {
dgraphapi.DefaultPassword, x.RootNamespace))

// add vector predicate + index
require.NoError(t, gc.SetupSchema(testSchema))
require.NoError(t, gc.SetupSchema(vsuite.schema))
// add data to the vector predicate
numVectors := 3
pred := "project_description_v"
Expand Down Expand Up @@ -195,7 +197,7 @@ func TestVectorBackupRestoreDropIndex(t *testing.T) {
require.NoError(t, hc.Backup(c, false, dgraphtest.DefaultBackupDir))

// add index
require.NoError(t, gc.SetupSchema(testSchema))
require.NoError(t, gc.SetupSchema(vsuite.schema))

t.Log("taking second incr backup \n")
require.NoError(t, hc.Backup(c, false, dgraphtest.DefaultBackupDir))
Expand Down Expand Up @@ -227,7 +229,8 @@ func TestVectorBackupRestoreDropIndex(t *testing.T) {
}
}

func TestVectorBackupRestoreReIndexing(t *testing.T) {
func (vsuite *VectorTestSuite) TestVectorBackupRestoreReIndexing() {
t := vsuite.T()
conf := dgraphtest.NewClusterConfig().WithNumAlphas(1).WithNumZeros(1).WithReplicas(1).WithACL(time.Hour)
c, err := dgraphtest.NewLocalCluster(conf)
require.NoError(t, err)
Expand All @@ -245,7 +248,7 @@ func TestVectorBackupRestoreReIndexing(t *testing.T) {
require.NoError(t, hc.LoginIntoNamespace(dgraphapi.DefaultUser,
dgraphapi.DefaultPassword, x.RootNamespace))

require.NoError(t, gc.SetupSchema(testSchema))
require.NoError(t, gc.SetupSchema(vsuite.schema))

numVectors := 1000
pred := "project_description_v"
Expand All @@ -271,7 +274,7 @@ func TestVectorBackupRestoreReIndexing(t *testing.T) {
// drop index
require.NoError(t, gc.SetupSchema(testSchemaWithoutIndex))
// add index
require.NoError(t, gc.SetupSchema(testSchema))
require.NoError(t, gc.SetupSchema(vsuite.schema))
}
vectors = append(vectors, vectors2...)
rdfs = rdfs + rdfs2
Expand Down
9 changes: 5 additions & 4 deletions systest/vector/load_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -27,17 +27,18 @@ type Node struct {
Vtest []float32 `json:"vtest"`
}

func TestLiveLoadAndExportRDFFormat(t *testing.T) {
func (vsuite *VectorTestSuite) TestLiveLoadAndExportRDFFormat() {
t := vsuite.T()
conf := dgraphtest.NewClusterConfig().WithNumAlphas(1).WithNumZeros(1).WithReplicas(1).WithACL(time.Hour)
c, err := dgraphtest.NewLocalCluster(conf)
require.NoError(t, err)
defer func() { c.Cleanup(t.Failed()) }()
require.NoError(t, c.Start())

testExportAndLiveLoad(t, c, "rdf")
testExportAndLiveLoad(t, c, "rdf", vsuite.schema)
}

func testExportAndLiveLoad(t *testing.T, c *dgraphtest.LocalCluster, exportFormat string) {
func testExportAndLiveLoad(t *testing.T, c *dgraphtest.LocalCluster, exportFormat string, schema string) {
gc, cleanup, err := c.Client()
require.NoError(t, err)
defer cleanup()
Expand All @@ -49,7 +50,7 @@ func testExportAndLiveLoad(t *testing.T, c *dgraphtest.LocalCluster, exportForma
require.NoError(t, hc.LoginIntoNamespace(dgraphapi.DefaultUser,
dgraphapi.DefaultPassword, x.RootNamespace))

require.NoError(t, gc.SetupSchema(testSchema))
require.NoError(t, gc.SetupSchema(schema))

numVectors := 100
pred := "project_description_v"
Expand Down
Loading