Use of org.apache.cassandra.db.Mutation in project cassandra by apache.
The class MigrationTask, method runMayThrow().
public void runMayThrow() throws Exception {
// Skip the pull if the endpoint is now running a higher major version; its schema would not merge cleanly here.
if (!MigrationManager.shouldPullSchemaFrom(endpoint)) {
logger.info("Skipped sending a migration request: node {} has a higher major version now.", endpoint);
return;
}
if (!FailureDetector.instance.isAlive(endpoint)) {
logger.debug("Can't send schema pull request: node {} is down.", endpoint);
return;
}
MessageOut message = new MessageOut<>(MessagingService.Verb.MIGRATION_REQUEST, null, MigrationManager.MigrationsSerializer.instance);
final CountDownLatch completionLatch = new CountDownLatch(1);
IAsyncCallback<Collection<Mutation>> cb = new IAsyncCallback<Collection<Mutation>>() {
@Override
public void response(MessageIn<Collection<Mutation>> message) {
try {
Schema.instance.mergeAndAnnounceVersion(message.payload);
} catch (ConfigurationException e) {
logger.error("Configuration exception merging remote schema", e);
} finally {
completionLatch.countDown();
}
}
@Override
public boolean isLatencyForSnitch() {
return false;
}
};
// Only save the latches if we need bootstrap or are bootstrapping
if (monitoringBootstrapStates.contains(SystemKeyspace.getBootstrapState()))
inflightTasks.offer(completionLatch);
MessagingService.instance().sendRR(message, endpoint, cb);
}
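The latch queued into inflightTasks above is what lets the bootstrap path wait for outstanding schema pulls before proceeding. A minimal sketch of that waiting side, assuming a hypothetical awaitInflightMigrations helper and a caller-chosen timeout (neither name is the actual Cassandra API):
import java.util.Queue;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;

final class MigrationWaitSketch {
    // Drain every queued latch under a single shared deadline.
    static void awaitInflightMigrations(Queue<CountDownLatch> inflightTasks, long timeoutMillis) throws InterruptedException {
        long deadline = System.currentTimeMillis() + timeoutMillis;
        CountDownLatch latch;
        while ((latch = inflightTasks.poll()) != null) {
            long remaining = deadline - System.currentTimeMillis();
            if (remaining <= 0 || !latch.await(remaining, TimeUnit.MILLISECONDS))
                return; // deadline hit; the caller decides whether to proceed anyway
        }
    }
}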
Use of org.apache.cassandra.db.Mutation in project cassandra by apache.
The class CommitLogReader, method readMutation().
/**
 * Deserializes a Mutation and passes it to the requested ICommitLogReadHandler.
 *
 * @param handler Handler that will take action based on deserialized Mutations
 * @param inputBuffer raw byte array holding the Mutation data
 * @param size serialized size of the mutation data in inputBuffer
 * @param minPosition mutations at or before this position are suppressed rather than replayed
 * @param entryLocation filePointer offset of the mutation within the CommitLogSegment
 * @param desc the CommitLogDescriptor being worked on
 */
@VisibleForTesting
protected void readMutation(CommitLogReadHandler handler, byte[] inputBuffer, int size, CommitLogPosition minPosition, final int entryLocation, final CommitLogDescriptor desc) throws IOException {
// For now, we need to go through the motions of deserializing the mutation to determine its size and move
// the file pointer forward accordingly, even if we're behind the requested minPosition within this SyncSegment.
boolean shouldReplay = entryLocation > minPosition.position;
final Mutation mutation;
try (RebufferingInputStream bufIn = new DataInputBuffer(inputBuffer, 0, size)) {
mutation = Mutation.serializer.deserialize(bufIn, desc.getMessagingVersion(), SerializationHelper.Flag.LOCAL);
// double-check that what we read is still valid for the current schema
for (PartitionUpdate upd : mutation.getPartitionUpdates()) upd.validate();
} catch (UnknownTableException ex) {
if (ex.id == null)
return;
AtomicInteger i = invalidMutations.get(ex.id);
if (i == null) {
i = new AtomicInteger(1);
invalidMutations.put(ex.id, i);
} else
i.incrementAndGet();
return;
} catch (Throwable t) {
JVMStabilityInspector.inspectThrowable(t);
File f = File.createTempFile("mutation", "dat");
try (DataOutputStream out = new DataOutputStream(new FileOutputStream(f))) {
out.write(inputBuffer, 0, size);
}
// The checksum passed, so this error cannot be treated as permissible corruption.
handler.handleUnrecoverableError(new CommitLogReadException(String.format("Unexpected error deserializing mutation; saved to %s. " + "This may be caused by replaying a mutation against a table with the same name but incompatible schema. " + "Exception follows: %s", f.getAbsolutePath(), t), CommitLogReadErrorReason.MUTATION_ERROR, false));
return;
}
if (logger.isTraceEnabled())
logger.trace("Read mutation for {}.{}: {}", mutation.getKeyspaceName(), mutation.key(), "{" + StringUtils.join(mutation.getPartitionUpdates().iterator(), ", ") + "}");
if (shouldReplay)
handler.handleMutation(mutation, size, entryLocation, desc);
}
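readMutation only hands work off; what happens to a deserialized Mutation is up to the handler. A minimal sketch of a CommitLogReadHandler that merely tallies the mutations offered for replay (signatures follow the interface as used above, but verify them against your branch):
import java.io.IOException;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.cassandra.db.Mutation;
import org.apache.cassandra.db.commitlog.CommitLogDescriptor;
import org.apache.cassandra.db.commitlog.CommitLogReadHandler;

class CountingReadHandler implements CommitLogReadHandler {
    final AtomicLong replayed = new AtomicLong();

    public boolean shouldSkipSegmentOnError(CommitLogReadException e) throws IOException {
        return false; // surface errors rather than silently skipping the segment
    }

    public void handleUnrecoverableError(CommitLogReadException e) throws IOException {
        throw e; // fail fast; a production handler might record the error and halt replay
    }

    public void handleMutation(Mutation m, int size, int entryLocation, CommitLogDescriptor desc) {
        replayed.incrementAndGet(); // a real handler would apply or buffer the mutation
    }
}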
Use of org.apache.cassandra.db.Mutation in project cassandra by apache.
The class BatchlogManagerTest, method testTruncatedReplay().
@Test
public void testTruncatedReplay() throws InterruptedException, ExecutionException {
TableMetadata cf2 = Schema.instance.getTableMetadata(KEYSPACE1, CF_STANDARD2);
TableMetadata cf3 = Schema.instance.getTableMetadata(KEYSPACE1, CF_STANDARD3);
// In the middle of the process, 'truncate' Standard2.
for (int i = 0; i < 1000; i++) {
Mutation mutation1 = new RowUpdateBuilder(cf2, FBUtilities.timestampMicros(), ByteBufferUtil.bytes(i)).clustering("name" + i).add("val", "val" + i).build();
Mutation mutation2 = new RowUpdateBuilder(cf3, FBUtilities.timestampMicros(), ByteBufferUtil.bytes(i)).clustering("name" + i).add("val", "val" + i).build();
List<Mutation> mutations = Lists.newArrayList(mutation1, mutation2);
// Make sure it's ready to be replayed, so adjust the timestamp.
long timestamp = System.currentTimeMillis() - BatchlogManager.getBatchlogTimeout();
if (i == 500)
SystemKeyspace.saveTruncationRecord(Keyspace.open(KEYSPACE1).getColumnFamilyStore(CF_STANDARD2), timestamp, CommitLogPosition.NONE);
// Adjust the timestamp (slightly) to make the test deterministic.
if (i >= 500)
timestamp++;
else
timestamp--;
BatchlogManager.store(Batch.createLocal(UUIDGen.getTimeUUID(timestamp, i), FBUtilities.timestampMicros(), mutations));
}
// Flush the batchlog to disk (see CASSANDRA-6822).
Keyspace.open(SchemaConstants.SYSTEM_KEYSPACE_NAME).getColumnFamilyStore(SystemKeyspace.BATCHES).forceBlockingFlush();
// Force batchlog replay and wait for it to complete.
BatchlogManager.instance.startBatchlogReplay().get();
// We should see half of Standard2-targeted mutations written after the replay and all of Standard3 mutations applied.
for (int i = 0; i < 1000; i++) {
UntypedResultSet result = executeInternal(String.format("SELECT * FROM \"%s\".\"%s\" WHERE key = intAsBlob(%d)", KEYSPACE1, CF_STANDARD2, i));
assertNotNull(result);
if (i >= 500) {
assertEquals(ByteBufferUtil.bytes(i), result.one().getBytes("key"));
assertEquals("name" + i, result.one().getString("name"));
assertEquals("val" + i, result.one().getString("val"));
} else {
assertTrue(result.isEmpty());
}
}
for (int i = 0; i < 1000; i++) {
UntypedResultSet result = executeInternal(String.format("SELECT * FROM \"%s\".\"%s\" WHERE key = intAsBlob(%d)", KEYSPACE1, CF_STANDARD3, i));
assertNotNull(result);
assertEquals(ByteBufferUtil.bytes(i), result.one().getBytes("key"));
assertEquals("name" + i, result.one().getString("name"));
assertEquals("val" + i, result.one().getString("val"));
}
}
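The 500/1000 split works because replay consults the truncation record saved at i == 500: a batched mutation targeting a truncated table survives only if it was written after the truncation. A hedged sketch of that predicate (illustrative helper, not the BatchlogManager internals):
final class TruncationFilterSketch {
    // A mutation written at or before the recorded truncation time is dropped on replay.
    static boolean survivesTruncation(long writtenAtMillis, long truncatedAtMillis) {
        return writtenAtMillis > truncatedAtMillis;
    }
}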
Use of org.apache.cassandra.db.Mutation in project cassandra by apache.
The class BatchlogManagerTest, method testReplay().
@Test
@SuppressWarnings("deprecation")
public void testReplay() throws Exception {
long initialAllBatches = BatchlogManager.instance.countAllBatches();
long initialReplayedBatches = BatchlogManager.instance.getTotalBatchesReplayed();
TableMetadata cfm = Keyspace.open(KEYSPACE1).getColumnFamilyStore(CF_STANDARD1).metadata();
// Half batches (50) ready to be replayed, half not.
for (int i = 0; i < 100; i++) {
List<Mutation> mutations = new ArrayList<>(10);
for (int j = 0; j < 10; j++) {
mutations.add(new RowUpdateBuilder(cfm, FBUtilities.timestampMicros(), ByteBufferUtil.bytes(i)).clustering("name" + j).add("val", "val" + j).build());
}
long timestamp = i < 50 ? (System.currentTimeMillis() - BatchlogManager.getBatchlogTimeout()) : (System.currentTimeMillis() + BatchlogManager.getBatchlogTimeout());
BatchlogManager.store(Batch.createLocal(UUIDGen.getTimeUUID(timestamp, i), timestamp * 1000, mutations));
}
// Flush the batchlog to disk (see CASSANDRA-6822).
Keyspace.open(SchemaConstants.SYSTEM_KEYSPACE_NAME).getColumnFamilyStore(SystemKeyspace.BATCHES).forceBlockingFlush();
assertEquals(100, BatchlogManager.instance.countAllBatches() - initialAllBatches);
assertEquals(0, BatchlogManager.instance.getTotalBatchesReplayed() - initialReplayedBatches);
// Force batchlog replay and wait for it to complete.
BatchlogManager.instance.startBatchlogReplay().get();
// Ensure that the first half, and only the first half, got replayed.
assertEquals(50, BatchlogManager.instance.countAllBatches() - initialAllBatches);
assertEquals(50, BatchlogManager.instance.getTotalBatchesReplayed() - initialReplayedBatches);
for (int i = 0; i < 100; i++) {
String query = String.format("SELECT * FROM \"%s\".\"%s\" WHERE key = intAsBlob(%d)", KEYSPACE1, CF_STANDARD1, i);
UntypedResultSet result = executeInternal(query);
assertNotNull(result);
if (i < 50) {
Iterator<UntypedResultSet.Row> it = result.iterator();
assertNotNull(it);
for (int j = 0; j < 10; j++) {
assertTrue(it.hasNext());
UntypedResultSet.Row row = it.next();
assertEquals(ByteBufferUtil.bytes(i), row.getBytes("key"));
assertEquals("name" + j, row.getString("name"));
assertEquals("val" + j, row.getString("val"));
}
assertFalse(it.hasNext());
} else {
assertTrue(result.isEmpty());
}
}
// Ensure that no stray mutations got somehow applied.
UntypedResultSet result = executeInternal(String.format("SELECT count(*) FROM \"%s\".\"%s\"", KEYSPACE1, CF_STANDARD1));
assertNotNull(result);
assertEquals(500, result.one().getLong("count"));
}
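The i < 50 split relies on the batchlog eligibility rule: a batch is replayed only once its time-UUID timestamp is at least the batchlog timeout in the past. A minimal sketch of that rule (illustrative helper, not the BatchlogManager API):
final class ReplayEligibilitySketch {
    // Batches created at "now - timeout" are eligible immediately; batches created at
    // "now + timeout" stay pending, which is what produces the test's 50/50 split.
    static boolean isEligibleForReplay(long batchCreatedAtMillis, long batchlogTimeoutMillis) {
        return batchCreatedAtMillis <= System.currentTimeMillis() - batchlogTimeoutMillis;
    }
}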
Use of org.apache.cassandra.db.Mutation in project cassandra by apache.
The class BatchlogManagerTest, method testDelete().
@Test
public void testDelete() {
ColumnFamilyStore cfs = Keyspace.open(KEYSPACE1).getColumnFamilyStore(CF_STANDARD1);
TableMetadata cfm = cfs.metadata();
new RowUpdateBuilder(cfm, FBUtilities.timestampMicros(), ByteBufferUtil.bytes("1234")).clustering("c").add("val", "val" + 1234).build().applyUnsafe();
DecoratedKey dk = cfs.decorateKey(ByteBufferUtil.bytes("1234"));
ImmutableBTreePartition results = Util.getOnlyPartitionUnfiltered(Util.cmd(cfs, dk).build());
Iterator<Row> iter = results.iterator();
assertTrue(iter.hasNext());
Mutation mutation = new Mutation(PartitionUpdate.fullPartitionDelete(cfm, dk, FBUtilities.timestampMicros(), FBUtilities.nowInSeconds()));
mutation.applyUnsafe();
Util.assertEmpty(Util.cmd(cfs, dk).build());
}
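For contrast with the full-partition delete above, a single clustering row can be removed through the same builder that created it. RowUpdateBuilder.deleteRow is the helper used elsewhere in the tree; treat its exact signature as an assumption for your branch. In the context of the test above:
// Assumed signature: deleteRow(metadata, timestamp, partitionKey, clusteringValues...)
Mutation rowDelete = RowUpdateBuilder.deleteRow(cfm, FBUtilities.timestampMicros(), ByteBufferUtil.bytes("1234"), "c");
rowDelete.applyUnsafe();
Util.assertEmpty(Util.cmd(cfs, dk).build());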