
Commit

Merge pull request #1764 from josephschorr/remove-stale-todos
Remove stale TODOs
josephschorr authored Feb 26, 2024
2 parents 5a562e2 + 6390b78 commit acd4a68
Showing 15 changed files with 5 additions and 50 deletions.
3 changes: 0 additions & 3 deletions internal/datasets/subjectsetbytype.go
@@ -6,9 +6,6 @@ import (
"github.com/authzed/spicedb/pkg/tuple"
)

// TODO(jschorr): See if there is a nice way we can combine this with ONRByTypeSet and the multimap
// used in Check to allow for a simple implementation.

// SubjectByTypeSet is a set of SubjectSet's, grouped by their subject types.
type SubjectByTypeSet struct {
byType map[string]SubjectSet
2 changes: 0 additions & 2 deletions internal/datastore/common/sql.go
@@ -278,7 +278,6 @@ func (sqf SchemaQueryFilterer) MustFilterToResourceIDs(resourceIds []string) Sch
// FilterToResourceIDs returns a new SchemaQueryFilterer that is limited to resources with any of the
// specified IDs.
func (sqf SchemaQueryFilterer) FilterToResourceIDs(resourceIds []string) (SchemaQueryFilterer, error) {
// TODO(jschorr): Change this panic into an automatic query split, if we find it necessary.
if len(resourceIds) > int(datastore.FilterMaximumIDCount) {
return sqf, spiceerrors.MustBugf("cannot have more than %d resources IDs in a single filter", datastore.FilterMaximumIDCount)
}
@@ -380,7 +379,6 @@ func (sqf SchemaQueryFilterer) FilterWithSubjectsSelectors(selectors ...datastor
}

if len(selector.OptionalSubjectIds) > 0 {
// TODO(jschorr): Change this panic into an automatic query split, if we find it necessary.
if len(selector.OptionalSubjectIds) > int(datastore.FilterMaximumIDCount) {
return sqf, spiceerrors.MustBugf("cannot have more than %d subject IDs in a single filter", datastore.FilterMaximumIDCount)
}
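
Both removed TODOs in this file suggested turning these size checks into an automatic query split; as the code stands, callers must keep each filter under datastore.FilterMaximumIDCount themselves. A minimal sketch of caller-side chunking (the helper name is hypothetical, not part of this change):

// chunkIDs splits ids into slices of at most max entries, so each slice can be
// passed to FilterToResourceIDs or FilterWithSubjectsSelectors without tripping
// the size checks above.
func chunkIDs(ids []string, max int) [][]string {
	chunks := make([][]string, 0, (len(ids)+max-1)/max)
	for start := 0; start < len(ids); start += max {
		end := start + max
		if end > len(ids) {
			end = len(ids)
		}
		chunks = append(chunks, ids[start:end])
	}
	return chunks
}

Each chunk would then be issued as its own filtered query, e.g. chunkIDs(resourceIds, int(datastore.FilterMaximumIDCount)).
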
1 change: 0 additions & 1 deletion internal/datastore/memdb/memdb.go
@@ -304,7 +304,6 @@ func (mdb *memdbDatastore) Close() error {
mdb.Lock()
defer mdb.Unlock()

// TODO Make this nil once we have removed all access to closed datastores
if db := mdb.db; db != nil {
mdb.revisions = []snapshot{
{
5 changes: 0 additions & 5 deletions internal/datastore/mysql/datastore.go
@@ -252,7 +252,6 @@ func newMySQLDatastore(ctx context.Context, uri string, options ...Option) (*Dat
return store, nil
}

// TODO (@vroldanbet) dupe from postgres datastore - need to refactor
func (mds *Datastore) SnapshotReader(rev datastore.Revision) datastore.Reader {
createTxFunc := func(ctx context.Context) (*sql.Tx, txCleanupFunc, error) {
tx, err := mds.db.BeginTx(ctx, mds.readTxOptions)
@@ -277,7 +276,6 @@ func (mds *Datastore) SnapshotReader(rev datastore.Revision) datastore.Reader {

func noCleanup() error { return nil }

// TODO (@vroldanbet) dupe from postgres datastore - need to refactor
// ReadWriteTx starts a read/write transaction, which will be committed if no error is
// returned and rolled back if an error is returned.
func (mds *Datastore) ReadWriteTx(
@@ -457,7 +455,6 @@ type Datastore struct {

// Close closes the data store.
func (mds *Datastore) Close() error {
// TODO (@vroldanbet) dupe from postgres datastore - need to refactor
mds.cancelGc()
if mds.gcGroup != nil {
if err := mds.gcGroup.Wait(); err != nil && !errors.Is(err, context.Canceled) {
@@ -597,7 +594,6 @@ func (mds *Datastore) seedDatabase(ctx context.Context) error {
})
}

// TODO (@vroldanbet) dupe from postgres datastore - need to refactor
func buildLivingObjectFilterForRevision(revision datastore.Revision) queryFilterer {
return func(original sq.SelectBuilder) sq.SelectBuilder {
return original.Where(sq.LtOrEq{colCreatedTxn: revision.(revisions.TransactionIDRevision).TransactionID()}).
@@ -608,7 +604,6 @@ }
}
}

// TODO (@vroldanbet) dupe from postgres datastore - need to refactor
func currentlyLivingObjects(original sq.SelectBuilder) sq.SelectBuilder {
return original.Where(sq.Eq{colDeletedTxn: liveDeletedTxnID})
}
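
Both helpers above are queryFilterer values: small wrappers that add WHERE clauses to a squirrel SelectBuilder. A brief sketch of how such a filterer is applied (sq is Masterminds/squirrel; the table and column names in the base query are illustrative, not taken from this diff):

// Sketch: applying the currentlyLivingObjects filterer above to a base query.
func exampleLivingTuplesQuery() (string, []interface{}, error) {
	base := sq.Select("namespace", "object_id", "relation").From("relation_tuple")
	// The filterer appends the sq.Eq{colDeletedTxn: liveDeletedTxnID} clause,
	// restricting results to rows that have not been deleted by any transaction.
	return currentlyLivingObjects(base).ToSql()
}
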
4 changes: 0 additions & 4 deletions internal/datastore/mysql/gc.go
@@ -27,7 +27,6 @@ func (mds *Datastore) ResetGCCompleted() {
mds.gcHasRun.Store(false)
}

// TODO (@vroldanbet) dupe from postgres datastore - need to refactor
func (mds *Datastore) Now(ctx context.Context) (time.Time, error) {
// Retrieve the `now` time from the database.
nowSQL, nowArgs, err := getNow.ToSql()
@@ -46,7 +45,6 @@ func (mds *Datastore) Now(ctx context.Context) (time.Time, error) {
return now.UTC(), nil
}

// TODO (@vroldanbet) dupe from postgres datastore - need to refactor
// - main difference is how the PSQL driver handles null values
func (mds *Datastore) TxIDBefore(ctx context.Context, before time.Time) (datastore.Revision, error) {
// Find the highest transaction ID before the GC window.
@@ -69,7 +67,6 @@ func (mds *Datastore) TxIDBefore(ctx context.Context, before time.Time) (datasto
return revisions.NewForTransactionID(uint64(value.Int64)), nil
}

// TODO (@vroldanbet) dupe from postgres datastore - need to refactor
// - implementation misses metrics
func (mds *Datastore) DeleteBeforeTx(
ctx context.Context,
@@ -95,7 +92,6 @@ func (mds *Datastore) DeleteBeforeTx(
return
}

// TODO (@vroldanbet) dupe from postgres datastore - need to refactor
// - query was reworked to make it compatible with Vitess
// - API differences with PSQL driver
func (mds *Datastore) batchDelete(ctx context.Context, tableName string, filter sqlFilter) (int64, error) {
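
batchDelete, whose signature closes the hunk above, is the helper the removed TODOs describe as reworked for Vitess compatibility. A rough sketch of a LIMIT-bounded delete loop of that general shape (an assumed illustration, not the verbatim code):

// Sketch: run a LIMIT-bounded DELETE repeatedly until a batch comes back short.
// The query text and batch size are illustrative.
const gcBatchSize = 1000

func batchDeleteSketch(ctx context.Context, db *sql.DB, query string, args ...any) (int64, error) {
	var total int64
	for {
		res, err := db.ExecContext(ctx, query, args...)
		if err != nil {
			return total, err
		}
		deleted, err := res.RowsAffected()
		if err != nil {
			return total, err
		}
		total += deleted
		if deleted < gcBatchSize {
			return total, nil
		}
	}
}

The query passed in would be something along the lines of DELETE FROM <table> WHERE <filter> LIMIT 1000, so no single statement grows unbounded.
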
8 changes: 0 additions & 8 deletions internal/datastore/mysql/reader.go
@@ -35,7 +35,6 @@ const (
errUnableToQueryTuples = "unable to query tuples: %w"
)

// TODO (@vroldanbet) dupe from postgres datastore - need to refactor
var schema = common.NewSchemaInformation(
colNamespace,
colObjectID,
@@ -52,7 +51,6 @@ func (mr *mysqlReader) QueryRelationships(
filter datastore.RelationshipsFilter,
opts ...options.QueryOptionsOption,
) (iter datastore.RelationshipIterator, err error) {
// TODO (@vroldanbet) dupe from postgres datastore - need to refactor
qBuilder, err := common.NewSchemaQueryFilterer(schema, mr.filterer(mr.QueryTuplesQuery)).FilterWithRelationshipsFilter(filter)
if err != nil {
return nil, err
@@ -66,7 +64,6 @@ func (mr *mysqlReader) ReverseQueryRelationships(
subjectsFilter datastore.SubjectsFilter,
opts ...options.ReverseQueryOptionsOption,
) (iter datastore.RelationshipIterator, err error) {
// TODO (@vroldanbet) dupe from postgres datastore - need to refactor
qBuilder, err := common.NewSchemaQueryFilterer(schema, mr.filterer(mr.QueryTuplesQuery)).
FilterWithSubjectsSelectors(subjectsFilter.AsSelector())
if err != nil {
@@ -91,7 +88,6 @@ func (mr *mysqlReader) ReverseQueryRelationships(
}

func (mr *mysqlReader) ReadNamespaceByName(ctx context.Context, nsName string) (*core.NamespaceDefinition, datastore.Revision, error) {
// TODO (@vroldanbet) dupe from postgres datastore - need to refactor
tx, txCleanup, err := mr.txSource(ctx)
if err != nil {
return nil, datastore.NoRevision, fmt.Errorf(errUnableToReadConfig, err)
@@ -110,7 +106,6 @@ func (mr *mysqlReader) ReadNamespaceByName(ctx context.Context, nsName string) (
}

func loadNamespace(ctx context.Context, namespace string, tx *sql.Tx, baseQuery sq.SelectBuilder) (*core.NamespaceDefinition, datastore.Revision, error) {
// TODO (@vroldanbet) dupe from postgres datastore - need to refactor
ctx, span := tracer.Start(ctx, "loadNamespace")
defer span.End()

@@ -138,7 +133,6 @@ func loadNamespace(ctx context.Context, namespace string, tx *sql.Tx, baseQuery
}

func (mr *mysqlReader) ListAllNamespaces(ctx context.Context) ([]datastore.RevisionedNamespace, error) {
// TODO (@vroldanbet) dupe from postgres datastore - need to refactor
tx, txCleanup, err := mr.txSource(ctx)
if err != nil {
return nil, err
@@ -160,7 +154,6 @@ func (mr *mysqlReader) LookupNamespacesWithNames(ctx context.Context, nsNames []
return nil, nil
}

// TODO (@vroldanbet) dupe from postgres datastore - need to refactor
tx, txCleanup, err := mr.txSource(ctx)
if err != nil {
return nil, err
@@ -183,7 +176,6 @@ func (mr *mysqlReader) LookupNamespacesWithNames(ctx context.Context, nsNames []
}

func loadAllNamespaces(ctx context.Context, tx *sql.Tx, queryBuilder sq.SelectBuilder) ([]datastore.RevisionedNamespace, error) {
// TODO (@vroldanbet) dupe from postgres datastore - need to refactor
query, args, err := queryBuilder.ToSql()
if err != nil {
return nil, err
16 changes: 2 additions & 14 deletions internal/datastore/mysql/readwrite.go
@@ -68,10 +68,6 @@ func (cc *caveatContextWrapper) Value() (driver.Value, error) {
func (rwt *mysqlReadWriteTXN) WriteRelationships(ctx context.Context, mutations []*core.RelationTupleUpdate) error {
// TODO(jschorr): Determine if we can do this in a more efficient manner using ON CONFLICT UPDATE
// rather than SELECT FOR UPDATE as we've been doing.
//
// NOTE: There are some fundamental changes introduced to prevent a deadlock in MySQL vs the initial
// Postgres implementation from which this was copied.

bulkWrite := rwt.WriteTupleQuery
bulkWriteHasValues := false
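
The TODO retained above contrasts the current SELECT FOR UPDATE approach with an upsert; in MySQL that upsert form is INSERT ... ON DUPLICATE KEY UPDATE. A rough sketch of the alternative it refers to (table and column names are placeholders, not the datastore's actual schema):

// Illustrative upsert: touch a relationship row in a single statement instead
// of SELECT FOR UPDATE followed by a separate write.
const touchTupleSQL = `
INSERT INTO relation_tuple (namespace, object_id, relation, subject, created_txn)
VALUES (?, ?, ?, ?, ?)
ON DUPLICATE KEY UPDATE deleted_txn = ?`

func touchSketch(ctx context.Context, tx *sql.Tx, ns, objectID, relation, subject string, createdTxn, liveTxn uint64) error {
	_, err := tx.ExecContext(ctx, touchTupleSQL, ns, objectID, relation, subject, createdTxn, liveTxn)
	return err
}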

@@ -218,7 +214,6 @@ }
}

func (rwt *mysqlReadWriteTXN) DeleteRelationships(ctx context.Context, filter *v1.RelationshipFilter) error {
// TODO (@vroldanbet) dupe from postgres datastore - need to refactor
// Add clauses for the ResourceFilter
query := rwt.DeleteTupleQuery.Where(sq.Eq{colNamespace: filter.ResourceType})
if filter.OptionalResourceId != "" {
@@ -254,8 +249,6 @@ }
}

func (rwt *mysqlReadWriteTXN) WriteNamespaces(ctx context.Context, newNamespaces ...*core.NamespaceDefinition) error {
// TODO (@vroldanbet) dupe from postgres datastore - need to refactor

deletedNamespaceClause := sq.Or{}
writeQuery := rwt.WriteNamespaceQuery

@@ -296,8 +289,6 @@ }
}

func (rwt *mysqlReadWriteTXN) DeleteNamespaces(ctx context.Context, nsNames ...string) error {
// TODO (@vroldanbet) dupe from postgres datastore - need to refactor

// For each namespace, check they exist and collect predicates for the
// "WHERE" clause to delete the namespaces and associated tuples.
nsClauses := make([]sq.Sqlizer, 0, len(nsNames))
@@ -311,13 +302,11 @@ func (rwt *mysqlReadWriteTXN) DeleteNamespaces(ctx context.Context, nsNames ...s
// TODO(jzelinskie): return the name of the missing namespace
return err
case err == nil:
break
nsClauses = append(nsClauses, sq.Eq{colNamespace: nsName, colCreatedTxn: createdAt})
tplClauses = append(tplClauses, sq.Eq{colNamespace: nsName})
default:
return fmt.Errorf(errUnableToDeleteConfig, err)
}

nsClauses = append(nsClauses, sq.Eq{colNamespace: nsName, colCreatedTxn: createdAt})
tplClauses = append(tplClauses, sq.Eq{colNamespace: nsName})
}

delSQL, delArgs, err := rwt.DeleteNamespaceQuery.
@@ -459,7 +448,6 @@ func convertToWriteConstraintError(err error) error {
return nil
}

// TODO (@vroldanbet) dupe from postgres datastore - need to refactor
func exactRelationshipClause(r *core.RelationTuple) sq.Eq {
return sq.Eq{
colNamespace: r.ResourceAndRelation.Namespace,
1 change: 0 additions & 1 deletion internal/datastore/mysql/revisions.go
@@ -114,7 +114,6 @@ func (mds *Datastore) CheckRevision(ctx context.Context, revision datastore.Revi
}

func (mds *Datastore) loadRevision(ctx context.Context) (uint64, error) {
// TODO (@vroldanbet) dupe from postgres datastore - need to refactor
// slightly changed to support no revisions at all, needed for runtime seeding of first transaction
ctx, span := tracer.Start(ctx, "loadRevision")
defer span.End()
1 change: 0 additions & 1 deletion internal/datastore/mysql/watch.go
@@ -111,7 +111,6 @@ func (mds *Datastore) Watch(ctx context.Context, afterRevisionRaw datastore.Revi
return updates, errs
}

// TODO (@vroldanbet) dupe from postgres datastore - need to refactor
func (mds *Datastore) loadChanges(
ctx context.Context,
afterRevision uint64,
3 changes: 1 addition & 2 deletions internal/datastore/postgres/revisions.go
@@ -220,8 +220,7 @@ func parseRevisionDecimal(revisionStr string) (datastore.Revision, error) {
return nil, fmt.Errorf("received revision delta in excess of that expected; are you sure you're not passing a ZedToken from an incompatible datastore?")
}

// TODO(jschorr): Remove this deprecated code path at some point and maybe look into
// a more memory-efficient encoding of the XIP list if necessary.
// TODO(jschorr): Remove this deprecated code path once we have per-datastore-marked ZedTokens.
xipList = make([]uint64, 0, xmax-xmin)
for i := xmin; i < xid; i++ {
xipList = append(xipList, i)
@@ -28,7 +28,6 @@ const (
version_num STRING(1024) NOT NULL
) PRIMARY KEY (version_num)`

// TODO see if we can make the operation smaller
createChangelog = `CREATE TABLE changelog (
timestamp TIMESTAMP NOT NULL OPTIONS (allow_commit_timestamp=true),
uuid STRING(36) NOT NULL,
1 change: 0 additions & 1 deletion internal/datastore/spanner/options.go
@@ -72,7 +72,6 @@ func generateConfig(options []Option) (spannerOptions, error) {
}

// Run any checks on the config that need to be done
// TODO set a limit to revision quantization?
if computed.revisionQuantization >= maxRevisionQuantization {
return computed, fmt.Errorf(
errQuantizationTooLarge,
2 changes: 0 additions & 2 deletions internal/dispatch/keys/keys.go
@@ -118,8 +118,6 @@ func (c *CanonicalKeyHandler) CheckCacheKey(ctx context.Context, req *v1.Dispatc
return emptyDispatchCacheKey, err
}

// TODO(jschorr): Remove this conditional once we have a verified migration ordering system that ensures a backfill migration has
// run after the namespace annotation code has been fully deployed by users.
if relation.CanonicalCacheKey != "" {
return checkRequestToKeyWithCanonical(req, relation.CanonicalCacheKey)
}
3 changes: 0 additions & 3 deletions internal/graph/context.go
@@ -12,9 +12,6 @@ import (
// branchContext returns a context disconnected from the parent context, but populated with the datastore.
// Also returns a function for canceling the newly created context, without canceling the parent context.
func branchContext(ctx context.Context) (context.Context, func(cancelErr error)) {
// TODO(jschorr): Replace with https://pkg.go.dev/context@master#WithoutCancel once
// Go 1.21 lands.

// Add tracing to the context.
span := trace.SpanFromContext(ctx)
detachedContext := trace.ContextWithSpan(context.Background(), span)
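
The removed TODO pointed at context.WithoutCancel, which has since landed in Go 1.21. A sketch of what branchContext could collapse to with that API (the datastore propagation mentioned in the doc comment is elided here):

// Sketch only, assuming Go 1.21+. context.WithoutCancel keeps the parent's
// values (including the tracing span) while dropping its cancelation and
// deadline, so the manual trace.ContextWithSpan step above becomes unnecessary.
func branchContextSketch(ctx context.Context) (context.Context, func(cancelErr error)) {
	detached := context.WithoutCancel(ctx)
	detached, cancel := context.WithCancelCause(detached)
	return detached, func(cancelErr error) { cancel(cancelErr) }
}
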
4 changes: 2 additions & 2 deletions pkg/datastore/test/tuples.go
@@ -384,8 +384,8 @@ func DeleteRelationshipsTest(t *testing.T, tester DatastoreTester) {

tRequire := testfixtures.TupleChecker{Require: require, DS: ds}

// TODO temporarily store tuples in multiple calls to ReadWriteTransaction since no Datastore
// handles correctly duplicate tuples
// NOTE: we write tuples in multiple calls to ReadWriteTransaction because it is not allowed to change
// the same tuple in the same transaction.
_, err = ds.ReadWriteTx(ctx, func(ctx context.Context, rwt datastore.ReadWriteTransaction) error {
for _, tpl := range tt.inputTuples {
update := tuple.Touch(tpl)

