Skip to content

Commit

Permalink
CNDB-11189: Fix usages of waitForTableIndexesQueryable
Browse files Browse the repository at this point in the history
  • Loading branch information
adelapena committed Oct 8, 2024
1 parent 0c24b28 commit 69bbf62
Show file tree
Hide file tree
Showing 7 changed files with 15 additions and 30 deletions.
9 changes: 3 additions & 6 deletions test/long/index/sai/cql/AnalyzerQueryLongTest.java
Original file line number Diff line number Diff line change
Expand Up @@ -27,13 +27,12 @@
public class AnalyzerQueryLongTest extends CQLTester
{
@Test
public void manyWritesTest() throws Throwable
public void manyWritesTest()
{
createTable("CREATE TABLE %s (pk int PRIMARY KEY, not_analyzed int, val text)");
createIndex("CREATE CUSTOM INDEX ON %s(val) " +
"USING 'org.apache.cassandra.index.sai.StorageAttachedIndex' " +
"WITH OPTIONS = { 'index_analyzer': 'standard' }");
waitForIndexQueryable(KEYSPACE,"val");
var iterations = 15000;
for (int i = 0; i < iterations; i++)
{
Expand Down Expand Up @@ -63,13 +62,12 @@ else if (i % 2 == 0)
}

@Test
public void manyWritesAndUpsertsTest() throws Throwable
public void manyWritesAndUpsertsTest()
{
createTable("CREATE TABLE %s (pk int PRIMARY KEY, val text)");
createIndex("CREATE CUSTOM INDEX ON %s(val) " +
"USING 'org.apache.cassandra.index.sai.StorageAttachedIndex' " +
"WITH OPTIONS = { 'index_analyzer': 'standard' }");
waitForIndexQueryable(KEYSPACE,"val");
var iterations = 15000;
for (int i = 0; i < iterations; i++)
{
Expand All @@ -94,13 +92,12 @@ public void manyWritesAndUpsertsTest() throws Throwable
assertThat(result).hasSize(iterations / 2 + 1);
}
@Test
public void manyWritesUpsertsAndDeletesForSamePKTest() throws Throwable
public void manyWritesUpsertsAndDeletesForSamePKTest()
{
createTable("CREATE TABLE %s (pk int PRIMARY KEY, val text)");
createIndex("CREATE CUSTOM INDEX ON %s(val) " +
"USING 'org.apache.cassandra.index.sai.StorageAttachedIndex' " +
"WITH OPTIONS = { 'index_analyzer': 'standard' }");
waitForIndexQueryable(KEYSPACE, "val");
var iterations = 15000;
for (int i = 0; i < iterations; i++)
{
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -44,7 +44,7 @@ public void setup()
@Test
public void testDropAndRecreate() throws Throwable
{
String tableName = createTable("CREATE TABLE %s (pk text, value text, PRIMARY KEY (pk))");
createTable("CREATE TABLE %s (pk text, value text, PRIMARY KEY (pk))");
populateOneSSTable();

ColumnFamilyStore cfs = getCurrentColumnFamilyStore();
Expand All @@ -53,7 +53,6 @@ public void testDropAndRecreate() throws Throwable

// create index and drop it: StorageAttachedIndexGroup should be removed
createIndex("CREATE CUSTOM INDEX sai ON %s(value) USING 'StorageAttachedIndex'");
waitForIndexQueryable("sai");

StorageAttachedIndexGroup group = (StorageAttachedIndexGroup) cfs.indexManager.getIndexGroup(StorageAttachedIndexGroup.GROUP_KEY);
assertTrue(tracker.contains(group));
Expand All @@ -70,7 +69,6 @@ public void testDropAndRecreate() throws Throwable

// create index again: expect a new StorageAttachedIndexGroup to be registered into tracker
createIndex("CREATE CUSTOM INDEX sai ON %s(value) USING 'StorageAttachedIndex'");
waitForIndexQueryable("sai");

StorageAttachedIndexGroup newGroup = (StorageAttachedIndexGroup) cfs.indexManager.getIndexGroup(StorageAttachedIndexGroup.GROUP_KEY);
assertNotSame(group, newGroup);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -88,7 +88,6 @@ public void testParallelIndexBuild() throws Throwable

// create indexes
String index = createIndex(String.format(CREATE_INDEX_TEMPLATE, "v1"));
waitForIndexQueryable(index);
assertTrue(getCurrentColumnFamilyStore().getLiveSSTables().isEmpty());

Injections.Counter parallelBuildCounter = Injections.newCounter("IndexParallelBuildCounter")
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -71,9 +71,7 @@ private void runRandomTest() throws Throwable

createTable(schema.toTableDefinition());

List<String> indexes = schema.generateIndexStrings().stream().map(this::createIndex).collect(Collectors.toList());

indexes.forEach(this::waitForIndexQueryable);
schema.generateIndexStrings().forEach(this::createIndex);

List<RandomRow> data = schema.generateDataset();

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -38,8 +38,7 @@ public void setup()
public void testTokenRangeRead() throws Throwable
{
createTable("CREATE TABLE %s (k1 int, v1 text, PRIMARY KEY (k1))");
String index = createIndex(format("CREATE CUSTOM INDEX ON %%s(v1) USING '%s'", StorageAttachedIndex.class.getName()));
waitForIndexQueryable(index);
createIndex(format("CREATE CUSTOM INDEX ON %%s(v1) USING '%s'", StorageAttachedIndex.class.getName()));

execute("INSERT INTO %S(k1, v1) values(1, '1')");

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -45,7 +45,7 @@ public void setup() throws Exception
}

@Test
public void verifyIndexGroupMetrics() throws Throwable
public void verifyIndexGroupMetrics()
{
// create first index
createTable(CREATE_TABLE_TEMPLATE);
Expand Down Expand Up @@ -75,7 +75,6 @@ public void verifyIndexGroupMetrics() throws Throwable
// create second index
String v2IndexName = createIndex(String.format(CREATE_INDEX_TEMPLATE, "v2"));
IndexContext v2IndexContext = createIndexContext(v2IndexName, UTF8Type.instance);
waitForIndexQueryable(v2IndexName);

// same number of sstables, but more string index files.
int stringIndexOpenFileCount = sstables * V1OnDiskFormat.instance.openFilesPerIndex(v2IndexContext);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -88,7 +88,7 @@ public void testSameIndexNameAcrossKeyspaces()
}

@Test
public void testMetricRelease() throws Throwable
public void testMetricRelease()
{
String table = "test_metric_release";
String index = "test_metric_release_index";
Expand All @@ -97,7 +97,6 @@ public void testMetricRelease() throws Throwable

createTable(String.format(CREATE_TABLE_TEMPLATE, keyspace, table));
createIndex(String.format(CREATE_INDEX_TEMPLATE, index, keyspace, table, "v1"));
waitForTableIndexesQueryable(keyspace, table);

execute("INSERT INTO " + keyspace + "." + table + " (id1, v1, v2) VALUES ('0', 0, '0')");

Expand All @@ -116,7 +115,7 @@ public void testMetricRelease() throws Throwable
}

@Test
public void testIndexQueryWithPartitionKey() throws Throwable
public void testIndexQueryWithPartitionKey()
{
String table = "test_range_key_type_with_index";
String index = "test_range_key_type_with_index_index";
Expand All @@ -140,8 +139,6 @@ public void testIndexQueryWithPartitionKey() throws Throwable
flush(keyspace, table);
}

waitForIndexQueryable(keyspace, table);

ResultSet rows2 = executeNet("SELECT id1 FROM " + keyspace + "." + table + " WHERE id1 = '36' and v1 < 51");
assertEquals(1, rows2.all().size());

Expand All @@ -164,7 +161,7 @@ public void testIndexQueryWithPartitionKey() throws Throwable
}

@Test
public void testKDTreeQueryMetricsWithSingleIndex() throws Throwable
public void testKDTreeQueryMetricsWithSingleIndex()
{
String table = "test_metrics_through_write_lifecycle";
String index = "test_metrics_through_write_lifecycle_index";
Expand Down Expand Up @@ -228,7 +225,7 @@ public void testKDTreeQueryMetricsWithSingleIndex() throws Throwable
}

@Test
public void testKDTreePostingsQueryMetricsWithSingleIndex() throws Throwable
public void testKDTreePostingsQueryMetricsWithSingleIndex()
{
String table = "test_kdtree_postings_metrics_through_write_lifecycle";
String v1Index = "test_kdtree_postings_metrics_through_write_lifecycle_v1_index";
Expand Down Expand Up @@ -271,7 +268,7 @@ public void testKDTreePostingsQueryMetricsWithSingleIndex() throws Throwable
}

@Test
public void testInvertedIndexQueryMetricsWithSingleIndex() throws Throwable
public void testInvertedIndexQueryMetricsWithSingleIndex()
{
String table = "test_invertedindex_metrics_through_write_lifecycle";
String index = "test_invertedindex_metrics_through_write_lifecycle_index";
Expand Down Expand Up @@ -334,7 +331,7 @@ public void testInvertedIndexQueryMetricsWithSingleIndex() throws Throwable
}

@Test
public void testKDTreePartitionsReadAndRowsFiltered() throws Throwable
public void testKDTreePartitionsReadAndRowsFiltered()
{
String table = "test_rows_filtered_large_partition";
String index = "test_rows_filtered_large_partition_index";
Expand All @@ -345,7 +342,6 @@ public void testKDTreePartitionsReadAndRowsFiltered() throws Throwable
"WITH compaction = {'class' : 'SizeTieredCompactionStrategy', 'enabled' : false }", keyspace, table));

createIndex(String.format(CREATE_INDEX_TEMPLATE, index, keyspace, table, "v1"));
waitForTableIndexesQueryable(keyspace, table);

execute("INSERT INTO " + keyspace + "." + table + "(pk, ck, v1) VALUES (0, 0, 0)");
execute("INSERT INTO " + keyspace + "." + table + "(pk, ck, v1) VALUES (1, 1, 1)");
Expand All @@ -366,7 +362,7 @@ public void testKDTreePartitionsReadAndRowsFiltered() throws Throwable
}

@Test
public void testKDTreeQueryEarlyExit() throws Throwable
public void testKDTreeQueryEarlyExit()
{
String table = "test_queries_exited_early";
String index = "test_queries_exited_early_index";
Expand All @@ -377,7 +373,6 @@ public void testKDTreeQueryEarlyExit() throws Throwable
"WITH compaction = {'class' : 'SizeTieredCompactionStrategy', 'enabled' : false }", keyspace, table));

createIndex(String.format(CREATE_INDEX_TEMPLATE, index, keyspace, table, "v1"));
waitForTableIndexesQueryable(keyspace, table);

execute("INSERT INTO " + keyspace + "." + table + "(pk, ck, v1) VALUES (0, 0, 0)");
execute("INSERT INTO " + keyspace + "." + table + "(pk, ck, v1) VALUES (1, 1, 1)");
Expand All @@ -402,7 +397,7 @@ public void testKDTreeQueryEarlyExit() throws Throwable
waitForEquals(objectName("KDTreeIntersectionEarlyExits", keyspace, table, index, GLOBAL_METRIC_TYPE), 2L);
}

private long getPerQueryMetrics(String keyspace, String table, String metricsName) throws Exception
private long getPerQueryMetrics(String keyspace, String table, String metricsName)
{
return (long) getMetricValue(objectNameNoIndex(metricsName, keyspace, table, PER_QUERY_METRIC_TYPE));
}
Expand Down

1 comment on commit 69bbf62

@cassci-bot
Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Build rejected: 19 NEW test failure(s) in 1 build. Build 1 ran 20903 tests with 28 failures and 321 skipped.
Butler analysis done on ds-cassandra-pr-gate/CNDB-11189-main-5.0 vs last 16 runs of ds-cassandra-build-nightly/main-5.0.
Showing only the first 13 NEW test failures.
org.apache.cassandra.io.DiskSpaceMetricsTest.testFlushSize: test is constantly failing. No results on upstream;
branch story: [F] vs upstream: []; [NEW]
org.apache.cassandra.io.compress.CompressionMetadataTest.testMemoryIsFreed: test is constantly failing. No results on upstream;
branch story: [F] vs upstream: []; [NEW]
org.apache.cassandra.index.sai.cql.TinySegmentQueryWriteLifecycleTest.testWriteLifecycle[ca_CompositePartitionKeyDataModel{primaryKey=p1, p2}]: test is constantly failing. No failures on upstream;
branch story: [F] vs upstream: [++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++]; [NEW]
org.apache.cassandra.distributed.test.DropUDTWithRestartTest.loadCommitLogAndSSTablesWithDroppedColumnTestCC50: test is constantly failing. No results on upstream;
branch story: [F] vs upstream: []; [NEW]
org.apache.cassandra.distributed.test.DropUDTWithRestartTest.loadCommitLogAndSSTablesWithDroppedColumnTestCC40: test is constantly failing. No results on upstream;
branch story: [F] vs upstream: []; [NEW]
org.apache.cassandra.io.compress.CompressionMetadataTest.testMemoryIsShared: test is constantly failing. No results on upstream;
branch story: [F] vs upstream: []; [NEW]
org.apache.cassandra.utils.binlog.BinLogTest.testTruncationReleasesLogSpace: test is constantly failing. No failures on upstream;
branch story: [F] vs upstream: [++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++]; [NEW]
org.apache.cassandra.index.sai.plan.PlanTest.testLazyAccessPropagation: test is constantly failing. No failures on upstream;
branch story: [F] vs upstream: [++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++]; [NEW]
org.apache.cassandra.index.sai.virtual.IndexesSystemViewTest.testVirtualTableThroughIndexLifeCycle: test is constantly failing. No failures on upstream;
branch story: [F] vs upstream: [++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++]; [NEW]
org.apache.cassandra.tools.TopPartitionsTest.testServiceTopPartitionsSingleTable: test is constantly failing. No failures on upstream;
branch story: [F] vs upstream: [++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++]; [NEW]
org.apache.cassandra.sensors.SensorsIndexWriteTest.testSingleRowMutationWithSecondaryIndex: test is constantly failing. No failures on upstream;
branch story: [F] vs upstream: [++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++]; [NEW]
org.apache.cassandra.distributed.test.DropUDTWithRestartTest.testReadingValuesOfDroppedColumns: test is constantly failing. No failures on upstream;
branch story: [F] vs upstream: [+++++++++++++++]; [NEW]
org.apache.cassandra.cql3.validation.operations.TTLTest.testRecoverOverflowedExpirationWithScrub: test is constantly failing. No failures on upstream;
branch story: [F] vs upstream: [++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++]; [NEW]
butler comparison

Please sign in to comment.