From 7e2ccf6da0bc1b463fc06f89ce9e2ca561ab9047 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?V=C3=ADctor=20Rold=C3=A1n=20Betancort?= Date: Tue, 20 Feb 2024 12:36:16 +0000 Subject: [PATCH 1/4] reproduce integer overflow in ForEachChunk This may cause 2 issues: - server panic due to slice bounds out of range - incorrect slicing, which leads to incorrect usages across the server, like missing elements to be dispatched --- pkg/genutil/slicez/chunking_test.go | 37 +++++++++++++++++++++++++++++ 1 file changed, 37 insertions(+) diff --git a/pkg/genutil/slicez/chunking_test.go b/pkg/genutil/slicez/chunking_test.go index ee3cf775e7..bb7b150b53 100644 --- a/pkg/genutil/slicez/chunking_test.go +++ b/pkg/genutil/slicez/chunking_test.go @@ -2,6 +2,7 @@ package slicez import ( "fmt" + "math" "testing" "github.com/stretchr/testify/require" @@ -29,3 +30,39 @@ func TestForEachChunk(t *testing.T) { } } } + +func TestForEachChunkOverflowPanic(t *testing.T) { + datasize := math.MaxUint16 + chunksize := uint16(50) + data := make([]int, 0, datasize) + for i := 0; i < datasize; i++ { + data = append(data, i) + } + + found := make([]int, 0, datasize) + ForEachChunk(data, chunksize, func(items []int) { + found = append(found, items...) + require.True(t, len(items) <= int(chunksize)) + require.True(t, len(items) > 0) + }) + + require.Equal(t, data, found) +} + +func TestForEachChunkOverflowIncorrect(t *testing.T) { + chunksize := uint16(50) + datasize := math.MaxUint16 + int(chunksize) + data := make([]int, 0, datasize) + for i := 0; i < datasize; i++ { + data = append(data, i) + } + + found := make([]int, 0, datasize) + ForEachChunk(data, chunksize, func(items []int) { + found = append(found, items...) + require.True(t, len(items) <= int(chunksize)) + require.True(t, len(items) > 0) + }) + + require.Equal(t, data, found) +} From 72dc9b1b915f76dff6bec34a4d8f15657da60eab Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?V=C3=ADctor=20Rold=C3=A1n=20Betancort?= Date: Tue, 20 Feb 2024 19:18:28 +0000 Subject: [PATCH 2/4] chunking correctness: add repro for DispatchCheck adds a test that exercises wide relations that would trigger the integer overflow in the chunking utility --- internal/dispatch/graph/check_test.go | 86 +++++++++++++++++++++++++++ 1 file changed, 86 insertions(+) diff --git a/internal/dispatch/graph/check_test.go b/internal/dispatch/graph/check_test.go index 98e4f9b8ca..46bfd80910 100644 --- a/internal/dispatch/graph/check_test.go +++ b/internal/dispatch/graph/check_test.go @@ -3,6 +3,7 @@ package graph import ( "context" "fmt" + "math" "strings" "testing" @@ -410,6 +411,91 @@ func TestCheckDebugging(t *testing.T) { } } +func TestDispatchChunking(t *testing.T) { + schema := ` + definition user { + relation self: user + } + + definition res { + relation owner : user + permission view = owner->self + } + ` + + resources := make([]*core.RelationTuple, 0, math.MaxUint16+1) + enabled := make([]*core.RelationTuple, 0, math.MaxUint16+1) + for i := 0; i < math.MaxUint16+1; i++ { + resources = append(resources, tuple.Parse(fmt.Sprintf("res:res1#owner@user:user%d", i))) + enabled = append(enabled, tuple.Parse(fmt.Sprintf("user:user%d#self@user:user%d", i, i))) + } + + ctx, dispatcher, revision := newLocalDispatcherWithSchemaAndRels(t, schema, append(enabled, resources...)) + + t.Run("check", func(t *testing.T) { + for _, tpl := range resources[:1] { + checkResult, err := dispatcher.DispatchCheck(ctx, &v1.DispatchCheckRequest{ + ResourceRelation: RR(tpl.ResourceAndRelation.Namespace, "view"), + ResourceIds: 
[]string{tpl.ResourceAndRelation.ObjectId},
+				ResultsSetting:   v1.DispatchCheckRequest_ALLOW_SINGLE_RESULT,
+				Subject:          ONR(tpl.Subject.Namespace, tpl.Subject.ObjectId, graph.Ellipsis),
+				Metadata: &v1.ResolverMeta{
+					AtRevision:     revision.String(),
+					DepthRemaining: 50,
+				},
+			})
+
+			require.NoError(t, err)
+			require.NotNil(t, checkResult)
+			require.NotEmpty(t, checkResult.ResultsByResourceId, "expected membership for resource %s", tpl.ResourceAndRelation.ObjectId)
+			require.Equal(t, v1.ResourceCheckResult_MEMBER, checkResult.ResultsByResourceId[tpl.ResourceAndRelation.ObjectId].Membership)
+		}
+	})
+
+	t.Run("lookup-resources", func(t *testing.T) {
+		for _, tpl := range resources[:1] {
+			stream := dispatch.NewCollectingDispatchStream[*v1.DispatchLookupResourcesResponse](ctx)
+			err := dispatcher.DispatchLookupResources(&v1.DispatchLookupResourcesRequest{
+				ObjectRelation: RR(tpl.ResourceAndRelation.Namespace, "view"),
+				Subject:        ONR(tpl.Subject.Namespace, tpl.Subject.ObjectId, graph.Ellipsis),
+				Metadata: &v1.ResolverMeta{
+					AtRevision:     revision.String(),
+					DepthRemaining: 50,
+				},
+				OptionalLimit: veryLargeLimit,
+			}, stream)
+
+			require.NoError(t, err)
+
+			foundResources, _, _, _ := processResults(stream)
+			require.Len(t, foundResources, 1)
+		}
+	})
+
+	t.Run("lookup-subjects", func(t *testing.T) {
+		for _, tpl := range resources[:1] {
+			stream := dispatch.NewCollectingDispatchStream[*v1.DispatchLookupSubjectsResponse](ctx)
+
+			err := dispatcher.DispatchLookupSubjects(&v1.DispatchLookupSubjectsRequest{
+				ResourceRelation: RR(tpl.ResourceAndRelation.Namespace, "view"),
+				ResourceIds:      []string{tpl.ResourceAndRelation.ObjectId},
+				SubjectRelation:  RR(tpl.Subject.Namespace, graph.Ellipsis),
+				Metadata: &v1.ResolverMeta{
+					AtRevision:     revision.String(),
+					DepthRemaining: 50,
+				},
+			}, stream)
+
+			require.NoError(t, err)
+			res := stream.Results()
+			require.Len(t, res, 1)
+			require.Len(t, res[0].FoundSubjectsByResourceId, 1)
+			require.NotNil(t, res[0].FoundSubjectsByResourceId["res1"])
+			require.Len(t, res[0].FoundSubjectsByResourceId["res1"].FoundSubjects, math.MaxUint16+1)
+		}
+	})
+}
+
 func newLocalDispatcherWithConcurrencyLimit(t testing.TB, concurrencyLimit uint16) (context.Context, dispatch.Dispatcher, datastore.Revision) {
 	rawDS, err := memdb.NewMemdbDatastore(0, 0, memdb.DisableGC)
 	require.NoError(t, err)

From 93331d06f47e0bb012c3c900966d979311c9c5aa Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?V=C3=ADctor=20Rold=C3=A1n=20Betancort?=
Date: Tue, 20 Feb 2024 15:21:40 +0000
Subject: [PATCH 3/4] fixes integer overflow in ForEachChunk

the calculation of the length of the input data is now done with a
64-bit unsigned integer. A 64-bit unsigned integer overflow is still
possible, but if that ever happens, we have bigger problems.
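
For illustration only (not part of the patch itself): a minimal standalone sketch of how the previous uint16 arithmetic fails in the two cases reproduced by the tests above. The variable names here are hypothetical; the wrap-around mirrors the old chunkEnd and dataLength computations.

    package main

    import (
        "fmt"
        "math"
    )

    func main() {
        // Panic case: 65535 elements with chunk size 50 reach chunkIndex 1310,
        // and (1310+1)*50 = 65550 wraps in uint16 arithmetic to 14, so the old
        // code sliced data[65500:14] and panicked with slice bounds out of range.
        chunkIndex, chunkSize := uint16(1310), uint16(50)
        fmt.Println((chunkIndex + 1) * chunkSize) // 14

        // Silent-truncation case: a length above MaxUint16 loses its high bits
        // when converted to uint16, so a 65585-element slice was treated as
        // having only 49 elements and the rest were never handed to the callback.
        dataLen := math.MaxUint16 + 50
        fmt.Println(uint16(dataLen)) // 49
    }
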
--- pkg/genutil/slicez/chunking.go | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/pkg/genutil/slicez/chunking.go b/pkg/genutil/slicez/chunking.go index 158b4ee2c6..2e59bf3c08 100644 --- a/pkg/genutil/slicez/chunking.go +++ b/pkg/genutil/slicez/chunking.go @@ -18,11 +18,12 @@ func ForEachChunkUntil[T any](data []T, chunkSize uint16, handler func(items []T chunkSize = 1 } - dataLength := uint16(len(data)) - chunkCount := (dataLength / chunkSize) + 1 - for chunkIndex := uint16(0); chunkIndex < chunkCount; chunkIndex++ { - chunkStart := chunkIndex * chunkSize - chunkEnd := (chunkIndex + 1) * chunkSize + dataLength := uint64(len(data)) + chunkSize64 := uint64(chunkSize) + chunkCount := (dataLength / chunkSize64) + 1 + for chunkIndex := uint64(0); chunkIndex < chunkCount; chunkIndex++ { + chunkStart := chunkIndex * chunkSize64 + chunkEnd := (chunkIndex + 1) * chunkSize64 if chunkEnd > dataLength { chunkEnd = dataLength } @@ -38,5 +39,6 @@ func ForEachChunkUntil[T any](data []T, chunkSize uint16, handler func(items []T } } } + return true, nil } From ab7b53030ad810d0811a11a16d4919423fcf80cc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?V=C3=ADctor=20Rold=C3=A1n=20Betancort?= Date: Wed, 21 Feb 2024 09:11:24 +0000 Subject: [PATCH 4/4] address PR feedback - check_test.go: move dispatch chunking in its own test file - chunking_test.go: test bigger number than MaxUInt16 - check_test.go: indentation of test schema --- internal/dispatch/graph/check_test.go | 86 -------------------- internal/dispatch/graph/dispatch_test.go | 99 ++++++++++++++++++++++++ pkg/genutil/slicez/chunking_test.go | 42 ++++++---- 3 files changed, 127 insertions(+), 100 deletions(-) create mode 100644 internal/dispatch/graph/dispatch_test.go diff --git a/internal/dispatch/graph/check_test.go b/internal/dispatch/graph/check_test.go index 46bfd80910..98e4f9b8ca 100644 --- a/internal/dispatch/graph/check_test.go +++ b/internal/dispatch/graph/check_test.go @@ -3,7 +3,6 @@ package graph import ( "context" "fmt" - "math" "strings" "testing" @@ -411,91 +410,6 @@ func TestCheckDebugging(t *testing.T) { } } -func TestDispatchChunking(t *testing.T) { - schema := ` - definition user { - relation self: user - } - - definition res { - relation owner : user - permission view = owner->self - } - ` - - resources := make([]*core.RelationTuple, 0, math.MaxUint16+1) - enabled := make([]*core.RelationTuple, 0, math.MaxUint16+1) - for i := 0; i < math.MaxUint16+1; i++ { - resources = append(resources, tuple.Parse(fmt.Sprintf("res:res1#owner@user:user%d", i))) - enabled = append(enabled, tuple.Parse(fmt.Sprintf("user:user%d#self@user:user%d", i, i))) - } - - ctx, dispatcher, revision := newLocalDispatcherWithSchemaAndRels(t, schema, append(enabled, resources...)) - - t.Run("check", func(t *testing.T) { - for _, tpl := range resources[:1] { - checkResult, err := dispatcher.DispatchCheck(ctx, &v1.DispatchCheckRequest{ - ResourceRelation: RR(tpl.ResourceAndRelation.Namespace, "view"), - ResourceIds: []string{tpl.ResourceAndRelation.ObjectId}, - ResultsSetting: v1.DispatchCheckRequest_ALLOW_SINGLE_RESULT, - Subject: ONR(tpl.Subject.Namespace, tpl.Subject.ObjectId, graph.Ellipsis), - Metadata: &v1.ResolverMeta{ - AtRevision: revision.String(), - DepthRemaining: 50, - }, - }) - - require.NoError(t, err) - require.NotNil(t, checkResult) - require.NotEmpty(t, checkResult.ResultsByResourceId, "expected membership for resource %s", tpl.ResourceAndRelation.ObjectId) - require.Equal(t, v1.ResourceCheckResult_MEMBER, 
checkResult.ResultsByResourceId[tpl.ResourceAndRelation.ObjectId].Membership) - } - }) - - t.Run("lookup-resources", func(t *testing.T) { - for _, tpl := range resources[:1] { - stream := dispatch.NewCollectingDispatchStream[*v1.DispatchLookupResourcesResponse](ctx) - err := dispatcher.DispatchLookupResources(&v1.DispatchLookupResourcesRequest{ - ObjectRelation: RR(tpl.ResourceAndRelation.Namespace, "view"), - Subject: ONR(tpl.Subject.Namespace, tpl.Subject.ObjectId, graph.Ellipsis), - Metadata: &v1.ResolverMeta{ - AtRevision: revision.String(), - DepthRemaining: 50, - }, - OptionalLimit: veryLargeLimit, - }, stream) - - require.NoError(t, err) - - foundResources, _, _, _ := processResults(stream) - require.Len(t, foundResources, 1) - } - }) - - t.Run("lookup-subjects", func(t *testing.T) { - for _, tpl := range resources[:1] { - stream := dispatch.NewCollectingDispatchStream[*v1.DispatchLookupSubjectsResponse](ctx) - - err := dispatcher.DispatchLookupSubjects(&v1.DispatchLookupSubjectsRequest{ - ResourceRelation: RR(tpl.ResourceAndRelation.Namespace, "view"), - ResourceIds: []string{tpl.ResourceAndRelation.ObjectId}, - SubjectRelation: RR(tpl.Subject.Namespace, graph.Ellipsis), - Metadata: &v1.ResolverMeta{ - AtRevision: revision.String(), - DepthRemaining: 50, - }, - }, stream) - - require.NoError(t, err) - res := stream.Results() - require.Len(t, res, 1) - require.Len(t, res[0].FoundSubjectsByResourceId, 1) - require.NotNil(t, res[0].FoundSubjectsByResourceId["res1"]) - require.Len(t, res[0].FoundSubjectsByResourceId["res1"].FoundSubjects, math.MaxUint16+1) - } - }) -} - func newLocalDispatcherWithConcurrencyLimit(t testing.TB, concurrencyLimit uint16) (context.Context, dispatch.Dispatcher, datastore.Revision) { rawDS, err := memdb.NewMemdbDatastore(0, 0, memdb.DisableGC) require.NoError(t, err) diff --git a/internal/dispatch/graph/dispatch_test.go b/internal/dispatch/graph/dispatch_test.go new file mode 100644 index 0000000000..5d428e7662 --- /dev/null +++ b/internal/dispatch/graph/dispatch_test.go @@ -0,0 +1,99 @@ +package graph + +import ( + "fmt" + "math" + "testing" + + "github.com/authzed/spicedb/internal/dispatch" + "github.com/authzed/spicedb/internal/graph" + core "github.com/authzed/spicedb/pkg/proto/core/v1" + v1 "github.com/authzed/spicedb/pkg/proto/dispatch/v1" + "github.com/authzed/spicedb/pkg/tuple" + + "github.com/stretchr/testify/require" +) + +func TestDispatchChunking(t *testing.T) { + schema := ` + definition user { + relation self: user + } + + definition res { + relation owner : user + permission view = owner->self + }` + + resources := make([]*core.RelationTuple, 0, math.MaxUint16+1) + enabled := make([]*core.RelationTuple, 0, math.MaxUint16+1) + for i := 0; i < math.MaxUint16+1; i++ { + resources = append(resources, tuple.Parse(fmt.Sprintf("res:res1#owner@user:user%d", i))) + enabled = append(enabled, tuple.Parse(fmt.Sprintf("user:user%d#self@user:user%d", i, i))) + } + + ctx, dispatcher, revision := newLocalDispatcherWithSchemaAndRels(t, schema, append(enabled, resources...)) + + t.Run("check", func(t *testing.T) { + for _, tpl := range resources[:1] { + checkResult, err := dispatcher.DispatchCheck(ctx, &v1.DispatchCheckRequest{ + ResourceRelation: RR(tpl.ResourceAndRelation.Namespace, "view"), + ResourceIds: []string{tpl.ResourceAndRelation.ObjectId}, + ResultsSetting: v1.DispatchCheckRequest_ALLOW_SINGLE_RESULT, + Subject: ONR(tpl.Subject.Namespace, tpl.Subject.ObjectId, graph.Ellipsis), + Metadata: &v1.ResolverMeta{ + AtRevision: revision.String(), + 
DepthRemaining: 50, + }, + }) + + require.NoError(t, err) + require.NotNil(t, checkResult) + require.NotEmpty(t, checkResult.ResultsByResourceId, "expected membership for resource %s", tpl.ResourceAndRelation.ObjectId) + require.Equal(t, v1.ResourceCheckResult_MEMBER, checkResult.ResultsByResourceId[tpl.ResourceAndRelation.ObjectId].Membership) + } + }) + + t.Run("lookup-resources", func(t *testing.T) { + for _, tpl := range resources[:1] { + stream := dispatch.NewCollectingDispatchStream[*v1.DispatchLookupResourcesResponse](ctx) + err := dispatcher.DispatchLookupResources(&v1.DispatchLookupResourcesRequest{ + ObjectRelation: RR(tpl.ResourceAndRelation.Namespace, "view"), + Subject: ONR(tpl.Subject.Namespace, tpl.Subject.ObjectId, graph.Ellipsis), + Metadata: &v1.ResolverMeta{ + AtRevision: revision.String(), + DepthRemaining: 50, + }, + OptionalLimit: veryLargeLimit, + }, stream) + + require.NoError(t, err) + + foundResources, _, _, _ := processResults(stream) + require.Len(t, foundResources, 1) + } + }) + + t.Run("lookup-subjects", func(t *testing.T) { + for _, tpl := range resources[:1] { + stream := dispatch.NewCollectingDispatchStream[*v1.DispatchLookupSubjectsResponse](ctx) + + err := dispatcher.DispatchLookupSubjects(&v1.DispatchLookupSubjectsRequest{ + ResourceRelation: RR(tpl.ResourceAndRelation.Namespace, "view"), + ResourceIds: []string{tpl.ResourceAndRelation.ObjectId}, + SubjectRelation: RR(tpl.Subject.Namespace, graph.Ellipsis), + Metadata: &v1.ResolverMeta{ + AtRevision: revision.String(), + DepthRemaining: 50, + }, + }, stream) + + require.NoError(t, err) + res := stream.Results() + require.Len(t, res, 1) + require.Len(t, res[0].FoundSubjectsByResourceId, 1) + require.NotNil(t, res[0].FoundSubjectsByResourceId["res1"]) + require.Len(t, res[0].FoundSubjectsByResourceId["res1"].FoundSubjects, math.MaxUint16+1) + } + }) +} diff --git a/pkg/genutil/slicez/chunking_test.go b/pkg/genutil/slicez/chunking_test.go index bb7b150b53..78f5366498 100644 --- a/pkg/genutil/slicez/chunking_test.go +++ b/pkg/genutil/slicez/chunking_test.go @@ -9,17 +9,21 @@ import ( ) func TestForEachChunk(t *testing.T) { + t.Parallel() + for _, datasize := range []int{0, 1, 5, 10, 50, 100, 250} { datasize := datasize for _, chunksize := range []uint16{1, 2, 3, 5, 10, 50} { chunksize := chunksize t.Run(fmt.Sprintf("test-%d-%d", datasize, chunksize), func(t *testing.T) { - data := []int{} + t.Parallel() + + data := make([]int, 0, datasize) for i := 0; i < datasize; i++ { data = append(data, i) } - found := []int{} + found := make([]int, 0, datasize) ForEachChunk(data, chunksize, func(items []int) { found = append(found, items...) require.True(t, len(items) <= int(chunksize)) @@ -32,6 +36,8 @@ func TestForEachChunk(t *testing.T) { } func TestForEachChunkOverflowPanic(t *testing.T) { + t.Parallel() + datasize := math.MaxUint16 chunksize := uint16(50) data := make([]int, 0, datasize) @@ -50,19 +56,27 @@ func TestForEachChunkOverflowPanic(t *testing.T) { } func TestForEachChunkOverflowIncorrect(t *testing.T) { + t.Parallel() + chunksize := uint16(50) - datasize := math.MaxUint16 + int(chunksize) - data := make([]int, 0, datasize) - for i := 0; i < datasize; i++ { - data = append(data, i) - } + for _, datasize := range []int{math.MaxUint16 + int(chunksize), 10_000_000} { + datasize := datasize + t.Run(fmt.Sprintf("test-%d-%d", datasize, chunksize), func(t *testing.T) { + t.Parallel() - found := make([]int, 0, datasize) - ForEachChunk(data, chunksize, func(items []int) { - found = append(found, items...) 
- require.True(t, len(items) <= int(chunksize)) - require.True(t, len(items) > 0) - }) + data := make([]int, 0, datasize) + for i := 0; i < datasize; i++ { + data = append(data, i) + } - require.Equal(t, data, found) + found := make([]int, 0, datasize) + ForEachChunk(data, chunksize, func(items []int) { + found = append(found, items...) + require.True(t, len(items) <= int(chunksize)) + require.True(t, len(items) > 0) + }) + + require.Equal(t, data, found) + }) + } }
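
As a closing illustration (not part of the patch series): a minimal usage sketch of the fixed helper on a slice larger than MaxUint16, assuming the import path matches the pkg/genutil/slicez layout shown in the diffs.

    package main

    import (
        "fmt"
        "math"

        "github.com/authzed/spicedb/pkg/genutil/slicez"
    )

    func main() {
        // A slice larger than MaxUint16, the size that used to overflow the
        // uint16-based chunk arithmetic.
        ids := make([]int, math.MaxUint16+50)
        for i := range ids {
            ids[i] = i
        }

        visited := 0
        slicez.ForEachChunk(ids, 50, func(chunk []int) {
            // Each chunk holds at most 50 elements; with the uint64 arithmetic
            // every element is visited exactly once.
            visited += len(chunk)
        })
        fmt.Println(visited == len(ids)) // true
    }
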