Use of org.apache.cassandra.cql3.UntypedResultSet.Row in the Apache Cassandra project: class RowTest, method testHashCode().
@Test
public void testHashCode() {
    ColumnMetadata colA = metadata.getColumn(new ColumnIdentifier("a", true));
    ColumnMetadata colB = metadata.getColumn(new ColumnIdentifier("b", true));

    // Build one row with cells for both columns; column "a" is written twice,
    // so the builder must reconcile the two writes at build time.
    Row.Builder rowBuilder = BTreeRow.unsortedBuilder(nowInSeconds);
    rowBuilder.newRow(metadata.comparator.make("c1"));
    writeSimpleCellValue(rowBuilder, colA, "a1", 0);
    writeSimpleCellValue(rowBuilder, colA, "a2", 1);
    writeSimpleCellValue(rowBuilder, colB, "b1", 1);
    Row built = rowBuilder.build();

    // A Row should be usable as a HashMap key: storing and looking up the same
    // instance must round-trip, exercising the hashCode/equals contract.
    Map<Row, Integer> byRow = new HashMap<>();
    byRow.put(built, 1);
    assertEquals(Integer.valueOf(1), byRow.get(built));
}
Use of org.apache.cassandra.cql3.UntypedResultSet.Row in the Apache Cassandra project: class RowTest, method testResolve().
@Test
public void testResolve() {
    ColumnMetadata colA = metadata.getColumn(new ColumnIdentifier("a", true));
    ColumnMetadata colB = metadata.getColumn(new ColumnIdentifier("b", true));

    // Write column "a" twice with increasing timestamps and column "b" once;
    // the unsorted builder must resolve the duplicate writes to "a".
    Row.Builder rowBuilder = BTreeRow.unsortedBuilder(nowInSeconds);
    rowBuilder.newRow(metadata.comparator.make("c1"));
    writeSimpleCellValue(rowBuilder, colA, "a1", 0);
    writeSimpleCellValue(rowBuilder, colA, "a2", 1);
    writeSimpleCellValue(rowBuilder, colB, "b1", 1);

    // Round-trip the row through a single-row partition update.
    PartitionUpdate update = PartitionUpdate.singleRowUpdate(metadata, dk, rowBuilder.build());
    Unfiltered unfiltered = update.unfilteredIterator().next();
    assertTrue(unfiltered.kind() == Unfiltered.Kind.ROW);

    // The later write ("a2", ts=1) must win for column "a", and the row must
    // end up with exactly the two columns that were written.
    Row resolved = (Row) unfiltered;
    assertEquals("a2", colA.cellValueType().getString(resolved.getCell(colA).value()));
    assertEquals("b1", colB.cellValueType().getString(resolved.getCell(colB).value()));
    assertEquals(2, resolved.columns().size());
}
Use of org.apache.cassandra.cql3.UntypedResultSet.Row in the Apache Cassandra project: class TimeSortTest, method testTimeSort().
@Test
public void testTimeSort() throws Throwable {
    String tableName = createTable("CREATE TABLE %s (a int, b int, c int, PRIMARY KEY (a, b))");
    ColumnFamilyStore cfs = Keyspace.open(KEYSPACE).getColumnFamilyStore(tableName);

    // Seed 100 partitions (a = 900..999), each with 8 rows at even clustering
    // values b = 0,2,...,14, using a write timestamp equal to b.
    for (int i = 900; i < 1000; ++i)
        for (int j = 0; j < 8; ++j)
            execute("INSERT INTO %s (a, b, c) VALUES (?, ?, ?) USING TIMESTAMP ?", i, j * 2, 0, (long) j * 2);

    validateTimeSort();       // memtable-only read path
    cfs.forceBlockingFlush();
    validateTimeSort();       // sstable-only read path

    // Interleave new rows at odd b values into partition 900 so subsequent
    // reads have to merge memtable and sstable data.
    // (Removed an unused `DecoratedKey key = Util.dk("900")` local that was
    // dead code here.)
    for (int j = 0; j < 4; ++j)
        execute("INSERT INTO %s (a, b, c) VALUES (?, ?, ?) USING TIMESTAMP ?", 900, j * 2 + 1, 1, (long) j * 2 + 1);

    // And some overwrites at a higher timestamp (100), which must win on read.
    execute("INSERT INTO %s (a, b, c) VALUES (?, ?, ?) USING TIMESTAMP ?", 900, 0, 2, 100L);
    execute("INSERT INTO %s (a, b, c) VALUES (?, ?, ?) USING TIMESTAMP ?", 900, 10, 2, 100L);

    // Verify: partition 900 now holds 12 rows (8 even + 4 odd b values),
    // returned in clustering order b = 0,1,...,7,8,10,12,14.
    UntypedResultSet results = execute("SELECT * FROM %s WHERE a = ? AND b >= ? LIMIT 1000", 900, 0);
    assertEquals(12, results.size());
    Iterator<UntypedResultSet.Row> iter = results.iterator();
    for (int j = 0; j < 8; j++) {
        UntypedResultSet.Row row = iter.next();
        assertEquals(j, row.getInt("b"));
    }

    // The overwrites must surface the newer value c = 2.
    assertRows(execute("SELECT * FROM %s WHERE a = ? AND b IN (?, ?)", 900, 0, 10), row(900, 0, 2), row(900, 10, 2));
}
Use of org.apache.cassandra.cql3.UntypedResultSet.Row in the Apache Cassandra project: class CompactionsPurgeTest, method testRowTombstoneObservedBeforePurging().
@Test
public void testRowTombstoneObservedBeforePurging() {
    final String keyspace = "cql_keyspace";
    final String table = "table1";

    // Every round below targets the same key (k = 1), so format the three
    // statements once up front instead of repeating the format calls inline.
    final String insert = String.format("INSERT INTO %s.%s (k, v1, v2) VALUES (%d, '%s', %d)", keyspace, table, 1, "foo", 1);
    final String delete = String.format("DELETE FROM %s.%s WHERE k = %d", keyspace, table, 1);
    final String select = String.format("SELECT * FROM %s.%s WHERE k = %d", keyspace, table, 1);

    ColumnFamilyStore cfs = Keyspace.open(keyspace).getColumnFamilyStore(table);
    cfs.disableAutoCompaction();

    // Round 1: one sstable with the row, one with its tombstone.
    QueryProcessor.executeInternal(insert);
    cfs.forceBlockingFlush();
    assertEquals(1, QueryProcessor.executeInternal(select).size());

    QueryProcessor.executeInternal(delete);
    cfs.forceBlockingFlush();

    // Basic check that the row is considered deleted.
    assertEquals(2, cfs.getLiveSSTables().size());
    assertEquals(0, QueryProcessor.executeInternal(select).size());

    // Compact with a gcBefore in the past, which does *not* allow the row
    // tombstone to be purged.
    int gcBeforePast = (int) (System.currentTimeMillis() / 1000) - 10000;
    FBUtilities.waitOnFutures(CompactionManager.instance.submitMaximal(cfs, gcBeforePast, false));

    // The data should be gone, but the tombstone should still exist.
    assertEquals(1, cfs.getLiveSSTables().size());
    assertEquals(0, QueryProcessor.executeInternal(select).size());

    // Round 2: write the row again and tombstone it in a different sstable.
    QueryProcessor.executeInternal(insert);
    cfs.forceBlockingFlush();
    assertEquals(2, cfs.getLiveSSTables().size());
    assertEquals(1, QueryProcessor.executeInternal(select).size());

    QueryProcessor.executeInternal(delete);
    cfs.forceBlockingFlush();

    // Compact with a gcBefore in the future, which *does* allow the row
    // tombstone to be purged.
    int gcBeforeFuture = (int) (System.currentTimeMillis() / 1000) + 10000;
    FBUtilities.waitOnFutures(CompactionManager.instance.submitMaximal(cfs, gcBeforeFuture, false));

    // Both the data and the tombstone should be gone this time.
    assertEquals(0, cfs.getLiveSSTables().size());
    assertEquals(0, QueryProcessor.executeInternal(select).size());
}
Use of org.apache.cassandra.cql3.UntypedResultSet.Row in the Apache Cassandra project: class PartitionImplementationTest, method makeRow().
/**
 * Builds a single-cell row at the given clustering, storing {@code colValue}
 * in the "col" column as a live cell written at the fixed TIMESTAMP.
 */
Row makeRow(Clustering clustering, String colValue) {
    ColumnMetadata col = metadata.getColumn(new ColumnIdentifier("col", true));
    Row.Builder builder = BTreeRow.unsortedBuilder(TIMESTAMP);
    builder.newRow(clustering);
    builder.addCell(BufferCell.live(col, TIMESTAMP, ByteBufferUtil.bytes(colValue)));
    return builder.build();
}
Aggregations