Use of org.apache.cassandra.db.RowUpdateBuilder in project cassandra by apache.
From the class BatchlogManagerTest, method testTruncatedReplay.
@Test
public void testTruncatedReplay() throws InterruptedException, ExecutionException {
    TableMetadata cf2 = Schema.instance.getTableMetadata(KEYSPACE1, CF_STANDARD2);
    TableMetadata cf3 = Schema.instance.getTableMetadata(KEYSPACE1, CF_STANDARD3);
    for (int i = 0; i < 1000; i++) {
        Mutation mutation1 = new RowUpdateBuilder(cf2, FBUtilities.timestampMicros(), ByteBufferUtil.bytes(i))
                             .clustering("name" + i)
                             .add("val", "val" + i)
                             .build();
        Mutation mutation2 = new RowUpdateBuilder(cf3, FBUtilities.timestampMicros(), ByteBufferUtil.bytes(i))
                             .clustering("name" + i)
                             .add("val", "val" + i)
                             .build();
        List<Mutation> mutations = Lists.newArrayList(mutation1, mutation2);
        // Make sure the batch is ready to be replayed by backdating its timestamp past the batchlog timeout.
        long timestamp = System.currentTimeMillis() - BatchlogManager.getBatchlogTimeout();
        // In the middle of the process, 'truncate' Standard2: batches at or before the
        // truncation timestamp will have their Standard2 mutations skipped during replay.
        if (i == 500)
            SystemKeyspace.saveTruncationRecord(Keyspace.open(KEYSPACE1).getColumnFamilyStore(CF_STANDARD2), timestamp, CommitLogPosition.NONE);
        // Adjust the timestamp (slightly) to make the test deterministic: batches before the
        // truncation point sort strictly earlier than it, those after sort strictly later.
        if (i >= 500)
            timestamp++;
        else
            timestamp--;
        BatchlogManager.store(Batch.createLocal(UUIDGen.getTimeUUID(timestamp, i), FBUtilities.timestampMicros(), mutations));
    }
    // Flush the batchlog to disk (see CASSANDRA-6822).
    Keyspace.open(SchemaConstants.SYSTEM_KEYSPACE_NAME).getColumnFamilyStore(SystemKeyspace.BATCHES).forceBlockingFlush();
    // Force batchlog replay and wait for it to complete.
    BatchlogManager.instance.startBatchlogReplay().get();
    // After the replay we should see only the second half of the Standard2-targeted mutations
    // (those written after the truncation record), but all of the Standard3 mutations.
    for (int i = 0; i < 1000; i++) {
        UntypedResultSet result = executeInternal(String.format("SELECT * FROM \"%s\".\"%s\" WHERE key = intAsBlob(%d)", KEYSPACE1, CF_STANDARD2, i));
        assertNotNull(result);
        if (i >= 500) {
            assertEquals(ByteBufferUtil.bytes(i), result.one().getBytes("key"));
            assertEquals("name" + i, result.one().getString("name"));
            assertEquals("val" + i, result.one().getString("val"));
        } else {
            assertTrue(result.isEmpty());
        }
    }
    for (int i = 0; i < 1000; i++) {
        UntypedResultSet result = executeInternal(String.format("SELECT * FROM \"%s\".\"%s\" WHERE key = intAsBlob(%d)", KEYSPACE1, CF_STANDARD3, i));
        assertNotNull(result);
        assertEquals(ByteBufferUtil.bytes(i), result.one().getBytes("key"));
        assertEquals("name" + i, result.one().getString("name"));
        assertEquals("val" + i, result.one().getString("val"));
    }
}
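Note that every example on this page shares the same core call chain: construct a RowUpdateBuilder from table metadata, a write timestamp in microseconds, and a partition key, then chain clustering and regular column values before building the Mutation. A minimal sketch, assuming the same imports as the test above; "ks", "tbl", "key1", "name1", and "val" are placeholder names:

// Minimal RowUpdateBuilder call chain; table and column names are hypothetical.
TableMetadata metadata = Schema.instance.getTableMetadata("ks", "tbl");
Mutation mutation = new RowUpdateBuilder(metadata,
                                         FBUtilities.timestampMicros(),  // write timestamp (microseconds)
                                         ByteBufferUtil.bytes("key1"))   // partition key
                    .clustering("name1")                                 // clustering column value
                    .add("val", "val1")                                  // regular column value
                    .build();
mutation.apply(); // tests often call applyUnsafe() instead to bypass commit-log durability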
Use of org.apache.cassandra.db.RowUpdateBuilder in project cassandra by apache.
From the class BatchlogManagerTest, method testReplay.
@Test
@SuppressWarnings("deprecation")
public void testReplay() throws Exception {
    long initialAllBatches = BatchlogManager.instance.countAllBatches();
    long initialReplayedBatches = BatchlogManager.instance.getTotalBatchesReplayed();
    TableMetadata cfm = Keyspace.open(KEYSPACE1).getColumnFamilyStore(CF_STANDARD1).metadata();
    // Half of the batches (50) are ready to be replayed, the other half are not.
    for (int i = 0; i < 100; i++) {
        List<Mutation> mutations = new ArrayList<>(10);
        for (int j = 0; j < 10; j++) {
            mutations.add(new RowUpdateBuilder(cfm, FBUtilities.timestampMicros(), ByteBufferUtil.bytes(i))
                          .clustering("name" + j)
                          .add("val", "val" + j)
                          .build());
        }
        // Batches with i < 50 are backdated past the batchlog timeout (replayable);
        // the rest are dated in the future (not yet replayable).
        long timestamp = i < 50
                       ? (System.currentTimeMillis() - BatchlogManager.getBatchlogTimeout())
                       : (System.currentTimeMillis() + BatchlogManager.getBatchlogTimeout());
        // timestamp * 1000: the batch's written-at time is expressed in microseconds.
        BatchlogManager.store(Batch.createLocal(UUIDGen.getTimeUUID(timestamp, i), timestamp * 1000, mutations));
    }
    // Flush the batchlog to disk (see CASSANDRA-6822).
    Keyspace.open(SchemaConstants.SYSTEM_KEYSPACE_NAME).getColumnFamilyStore(SystemKeyspace.BATCHES).forceBlockingFlush();
    assertEquals(100, BatchlogManager.instance.countAllBatches() - initialAllBatches);
    assertEquals(0, BatchlogManager.instance.getTotalBatchesReplayed() - initialReplayedBatches);
    // Force batchlog replay and wait for it to complete.
    BatchlogManager.instance.startBatchlogReplay().get();
    // Ensure that the first half, and only the first half, got replayed.
    assertEquals(50, BatchlogManager.instance.countAllBatches() - initialAllBatches);
    assertEquals(50, BatchlogManager.instance.getTotalBatchesReplayed() - initialReplayedBatches);
    for (int i = 0; i < 100; i++) {
        String query = String.format("SELECT * FROM \"%s\".\"%s\" WHERE key = intAsBlob(%d)", KEYSPACE1, CF_STANDARD1, i);
        UntypedResultSet result = executeInternal(query);
        assertNotNull(result);
        if (i < 50) {
            Iterator<UntypedResultSet.Row> it = result.iterator();
            assertNotNull(it);
            for (int j = 0; j < 10; j++) {
                assertTrue(it.hasNext());
                UntypedResultSet.Row row = it.next();
                assertEquals(ByteBufferUtil.bytes(i), row.getBytes("key"));
                assertEquals("name" + j, row.getString("name"));
                assertEquals("val" + j, row.getString("val"));
            }
            assertFalse(it.hasNext());
        } else {
            assertTrue(result.isEmpty());
        }
    }
    // Ensure that no stray mutations got somehow applied: 50 replayed batches x 10 rows each.
    UntypedResultSet result = executeInternal(String.format("SELECT count(*) FROM \"%s\".\"%s\"", KEYSPACE1, CF_STANDARD1));
    assertNotNull(result);
    assertEquals(500, result.one().getLong("count"));
}
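The eligibility check in this test hinges on the time component of the batch's UUID: replay only considers batches whose id timestamp is older than now minus the batchlog timeout. A small sketch of steering a batch to either side of that cutoff, distilled from the loop above (mutations is assumed to be prepared as in the test):

// A batch becomes replayable only once the timestamp embedded in its time-UUID
// is older than now minus the batchlog timeout.
long replayable    = System.currentTimeMillis() - BatchlogManager.getBatchlogTimeout();
long notReplayable = System.currentTimeMillis() + BatchlogManager.getBatchlogTimeout();
// The second argument to getTimeUUID disambiguates ids created in the same millisecond;
// the written-at time passed to createLocal is in microseconds, hence * 1000.
BatchlogManager.store(Batch.createLocal(UUIDGen.getTimeUUID(replayable, 0), replayable * 1000, mutations));
BatchlogManager.store(Batch.createLocal(UUIDGen.getTimeUUID(notReplayable, 1), notReplayable * 1000, mutations));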
Use of org.apache.cassandra.db.RowUpdateBuilder in project cassandra by apache.
From the class BatchlogManagerTest, method testDelete.
@Test
public void testDelete() {
    ColumnFamilyStore cfs = Keyspace.open(KEYSPACE1).getColumnFamilyStore(CF_STANDARD1);
    TableMetadata cfm = cfs.metadata();
    // Write a single row, bypassing the commit log.
    new RowUpdateBuilder(cfm, FBUtilities.timestampMicros(), ByteBufferUtil.bytes("1234"))
        .clustering("c")
        .add("val", "val" + 1234)
        .build()
        .applyUnsafe();
    // Confirm the row is readable.
    DecoratedKey dk = cfs.decorateKey(ByteBufferUtil.bytes("1234"));
    ImmutableBTreePartition results = Util.getOnlyPartitionUnfiltered(Util.cmd(cfs, dk).build());
    Iterator<Row> iter = results.iterator();
    assertTrue(iter.hasNext());
    // Delete the entire partition and verify nothing remains.
    Mutation mutation = new Mutation(PartitionUpdate.fullPartitionDelete(cfm, dk, FBUtilities.timestampMicros(), FBUtilities.nowInSeconds()));
    mutation.applyUnsafe();
    Util.assertEmpty(Util.cmd(cfs, dk).build());
}
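For deleting a single row rather than the whole partition, RowUpdateBuilder also carries a static deleteRow helper. A sketch, under the assumption that its signature matches the one used elsewhere in Cassandra's test code (metadata, timestamp, partition key, then clustering values):

// Row-level delete (contrast with the partition-level delete above).
// Signature assumed from Cassandra's test utilities.
Mutation rowDelete = RowUpdateBuilder.deleteRow(cfm, FBUtilities.timestampMicros(), ByteBufferUtil.bytes("1234"), "c");
rowDelete.applyUnsafe();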
Use of org.apache.cassandra.db.RowUpdateBuilder in project cassandra by apache.
From the class SchemaLoader, method insertData.
public static void insertData(String keyspace, String columnFamily, int offset, int numberOfRows) {
    TableMetadata cfm = Schema.instance.getTableMetadata(keyspace, columnFamily);
    for (int i = offset; i < offset + numberOfRows; i++) {
        RowUpdateBuilder builder = new RowUpdateBuilder(cfm, FBUtilities.timestampMicros(), ByteBufferUtil.bytes("key" + i));
        // Only set a clustering value if the table actually defines clustering columns.
        if (cfm.clusteringColumns() != null && !cfm.clusteringColumns().isEmpty())
            builder.clustering(ByteBufferUtil.bytes("col" + i)).add("val", ByteBufferUtil.bytes("val" + i));
        else
            builder.add("val", ByteBufferUtil.bytes("val" + i));
        builder.build().apply();
    }
}
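A hypothetical call site for this helper, writing rows "key0" through "key99" (keyspace and table names are placeholders):

// Inserts 100 rows; the clustering branch above is taken automatically
// when the target table defines clustering columns.
SchemaLoader.insertData("Keyspace1", "Standard1", 0, 100);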
Use of org.apache.cassandra.db.RowUpdateBuilder in project cassandra by apache.
From the class CommitLogSegmentManagerCDCTest, method testSegmentFlaggingOnCreation.
@Test
public void testSegmentFlaggingOnCreation() throws Throwable {
    CommitLogSegmentManagerCDC cdcMgr = (CommitLogSegmentManagerCDC) CommitLog.instance.segmentManager;
    String ct = createTable("CREATE TABLE %s (idx int, data text, primary key(idx)) WITH cdc=true;");
    int origSize = DatabaseDescriptor.getCDCSpaceInMB();
    try {
        DatabaseDescriptor.setCDCSpaceInMB(16);
        TableMetadata ccfm = Keyspace.open(keyspace()).getColumnFamilyStore(ct).metadata();
        // Spin until we hit CDC capacity and make sure we get a WriteTimeoutException.
        try {
            for (int i = 0; i < 1000; i++) {
                new RowUpdateBuilder(ccfm, 0, i)
                    .add("data", randomizeBuffer(DatabaseDescriptor.getCommitLogSegmentSize() / 3))
                    .build()
                    .apply();
            }
            Assert.fail("Expected WriteTimeoutException from full CDC but did not receive it.");
        } catch (WriteTimeoutException e) {
            // Expected: CDC space is exhausted, so the write is rejected.
        }
        expectCurrentCDCState(CDCState.FORBIDDEN);
        CommitLog.instance.forceRecycleAllSegments();
        cdcMgr.awaitManagementTasksCompletion();
        // Simulate a CDC consumer reading and deleting one of the flushed CDC files,
        // then have the manager re-check the total CDC size on disk.
        new File(DatabaseDescriptor.getCDCLogLocation()).listFiles()[0].delete();
        cdcMgr.updateCDCTotalSize();
        // Confirm the CDC size update flips the flag on the active segment back.
        expectCurrentCDCState(CDCState.PERMITTED);
        // Clear out archived CDC files.
        for (File f : new File(DatabaseDescriptor.getCDCLogLocation()).listFiles()) {
            FileUtils.deleteWithConfirm(f);
        }
        // Set space to 0 and confirm newly allocated segments are FORBIDDEN.
        DatabaseDescriptor.setCDCSpaceInMB(0);
        CommitLog.instance.forceRecycleAllSegments();
        CommitLog.instance.segmentManager.awaitManagementTasksCompletion();
        expectCurrentCDCState(CDCState.FORBIDDEN);
    } finally {
        // Restore the configured CDC space so other tests are unaffected.
        DatabaseDescriptor.setCDCSpaceInMB(origSize);
    }
}
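The quick-fill trick in this test is sizing each payload at a third of a commit-log segment, so only a few writes exhaust the 16 MB CDC allowance. A condensed sketch of the oversized write (randomizeBuffer is the test class's own helper, assumed to return a buffer of the requested size):

// Each mutation carries ~1/3 of a commit-log segment, so CDC space fills after
// a handful of writes and allocation on CDC-enabled segments flips to FORBIDDEN.
int payloadSize = DatabaseDescriptor.getCommitLogSegmentSize() / 3;
new RowUpdateBuilder(ccfm, 0, 1)                // timestamp 0, partition key idx = 1
    .add("data", randomizeBuffer(payloadSize))  // oversized payload from the test helper
    .build()
    .apply();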