Use of org.apache.cassandra.db.partitions.PartitionUpdate in the Apache Cassandra project.
Example from class SSTableSimpleWriter, method getUpdateFor.
PartitionUpdate getUpdateFor(DecoratedKey key) throws IOException {
    assert key != null;
    boolean samePartition = key.equals(currentKey);
    if (!samePartition) {
        // Moving on to a different partition: flush whatever was buffered
        // for the previous key before starting a fresh update.
        if (update != null) {
            writePartition(update);
        }
        currentKey = key;
        update = new PartitionUpdate(metadata.get(), currentKey, columns, 4);
    }
    // Either we just created an update for this key, or one already existed.
    assert update != null;
    return update;
}
Use of org.apache.cassandra.db.partitions.PartitionUpdate in the Apache Cassandra project.
Example from class TriggerExecutor, method validateForSinglePartition.
private List<PartitionUpdate> validateForSinglePartition(TableId tableId, DecoratedKey key, Collection<Mutation> tmutations) throws InvalidRequestException {
    validate(tmutations);
    // Fast path: a single mutation may carry at most one partition update,
    // and that update must target the expected partition.
    if (tmutations.size() == 1) {
        Mutation only = Iterables.getOnlyElement(tmutations);
        List<PartitionUpdate> single = Lists.newArrayList(only.getPartitionUpdates());
        if (single.size() > 1) {
            throw new InvalidRequestException("The updates generated by triggers are not all for the same partition");
        }
        validateSamePartition(tableId, key, Iterables.getOnlyElement(single));
        return single;
    }
    // General path: flatten every mutation's updates, checking each one
    // against the expected table/partition as we go.
    List<PartitionUpdate> collected = new ArrayList<>(tmutations.size());
    for (Mutation m : tmutations) {
        for (PartitionUpdate pu : m.getPartitionUpdates()) {
            validateSamePartition(tableId, key, pu);
            collected.add(pu);
        }
    }
    return collected;
}
Use of org.apache.cassandra.db.partitions.PartitionUpdate in the Apache Cassandra project.
Example from class CommitLogReaderTest, method confirmReadOrder.
/**
 * Confirms that the mutations seen by the handler were replayed in order.
 * Since we have both tables mixed into the CL, we ignore updates that aren't for the table
 * the test handler is configured to check.
 * @param handler test handler that accumulated the mutations replayed from the commit log
 * @param offset integer offset of count we expect to see in record
 */
private void confirmReadOrder(TestCLRHandler handler, int offset) {
    ColumnMetadata cd = currentTableMetadata().getColumn(new ColumnIdentifier("data", false));
    int i = 0; // index among mutations belonging to the table under test
    int j = 0; // count of skipped mutations (other tables)
    while (i + j < handler.seenMutationCount()) {
        PartitionUpdate pu = handler.seenMutations.get(i + j).get(currentTableMetadata());
        if (pu == null) {
            // Mutation is for a different table; skip it without advancing the expected counter.
            j++;
            continue;
        }
        for (Row r : pu) {
            String expected = Integer.toString(i + offset);
            // Decode with an explicit charset: new String(byte[]) uses the platform default,
            // which would make this assertion environment-dependent.
            // NOTE(review): value().array() assumes the ByteBuffer exactly spans its
            // backing array (position 0, limit == capacity) — TODO confirm.
            String seen = new String(r.getCell(cd).value().array(), java.nio.charset.StandardCharsets.UTF_8);
            if (!expected.equals(seen))
                Assert.fail("Mismatch at index: " + i + ". Offset: " + offset + " Expected: " + expected + " Seen: " + seen);
        }
        i++;
    }
}
Use of org.apache.cassandra.db.partitions.PartitionUpdate in the Apache Cassandra project.
Example from class RowTest, method testResolve.
@Test
public void testResolve() {
    ColumnMetadata colA = metadata.getColumn(new ColumnIdentifier("a", true));
    ColumnMetadata colB = metadata.getColumn(new ColumnIdentifier("b", true));

    // Write column "a" twice (timestamps 0 then 1) and "b" once, all on the
    // same clustering row; the build/resolution step is expected to keep the
    // "a2" value, which carries the higher timestamp.
    Row.Builder builder = BTreeRow.unsortedBuilder(nowInSeconds);
    builder.newRow(metadata.comparator.make("c1"));
    writeSimpleCellValue(builder, colA, "a1", 0);
    writeSimpleCellValue(builder, colA, "a2", 1);
    writeSimpleCellValue(builder, colB, "b1", 1);

    PartitionUpdate update = PartitionUpdate.singleRowUpdate(metadata, dk, builder.build());
    Unfiltered unfiltered = update.unfilteredIterator().next();
    assertTrue(unfiltered.kind() == Unfiltered.Kind.ROW);

    Row resolved = (Row) unfiltered;
    assertEquals("a2", colA.cellValueType().getString(resolved.getCell(colA).value()));
    assertEquals("b1", colB.cellValueType().getString(resolved.getCell(colB).value()));
    assertEquals(2, resolved.columns().size());
}
Use of org.apache.cassandra.db.partitions.PartitionUpdate in the Apache Cassandra project.
Example from class ScrubTest, method testScrubOutOfOrder.
/**
 * Writes an SSTable whose partitions are deliberately out of order (under
 * ByteOrderedPartitioner), verifies that normal open-time validation rejects
 * it, then scrubs it and checks all 7 partitions survive in order.
 */
@Test
public void testScrubOutOfOrder() throws Exception {
    // This test assumes ByteOrderPartitioner to create out-of-order SSTable
    IPartitioner oldPartitioner = DatabaseDescriptor.getPartitioner();
    DatabaseDescriptor.setPartitionerUnsafe(new ByteOrderedPartitioner());
    // Create out-of-order SSTable
    File tempDir = File.createTempFile("ScrubTest.testScrubOutOfOrder", "").getParentFile();
    // create ks/cf directory
    File tempDataDir = new File(tempDir, String.join(File.separator, KEYSPACE, CF3));
    tempDataDir.mkdirs();
    try {
        CompactionManager.instance.disableAutoCompaction();
        Keyspace keyspace = Keyspace.open(KEYSPACE);
        String columnFamily = CF3;
        ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(columnFamily);
        cfs.clearUnsafe();
        // Keys are appended in this (non-sorted) order; under BOP the byte
        // order of the key IS the token order, so the SSTable is out of order.
        List<String> keys = Arrays.asList("t", "a", "b", "z", "c", "y", "d");
        Descriptor desc = cfs.newSSTableDescriptor(tempDataDir);
        LifecycleTransaction txn = LifecycleTransaction.offline(OperationType.WRITE);
        // createTestWriter presumably bypasses the order check so the bad
        // SSTable can actually be written — confirm against the test helper.
        try (SSTableTxnWriter writer = new SSTableTxnWriter(txn, createTestWriter(desc, (long) keys.size(), cfs.metadata, txn))) {
            for (String k : keys) {
                PartitionUpdate update = UpdateBuilder.create(cfs.metadata(), Util.dk(k)).newRow("someName").add("val", "someValue").build();
                writer.append(update.unfilteredIterator());
            }
            writer.finish(false);
        }
        // A validating open must reject the out-of-order file.
        try {
            SSTableReader.open(desc, cfs.metadata);
            fail("SSTR validation should have caught the out-of-order rows");
        } catch (IllegalStateException ise) {
            /* this is expected */
        }
        // open without validation for scrubbing
        Set<Component> components = new HashSet<>();
        if (new File(desc.filenameFor(Component.COMPRESSION_INFO)).exists())
            components.add(Component.COMPRESSION_INFO);
        components.add(Component.DATA);
        components.add(Component.PRIMARY_INDEX);
        components.add(Component.FILTER);
        components.add(Component.STATS);
        components.add(Component.SUMMARY);
        components.add(Component.TOC);
        SSTableReader sstable = SSTableReader.openNoValidation(desc, components, cfs);
        // Repair the metadata invariant first <= last so the scrubber can run.
        if (sstable.last.compareTo(sstable.first) < 0)
            sstable.last = sstable.first;
        try (LifecycleTransaction scrubTxn = LifecycleTransaction.offline(OperationType.SCRUB, sstable);
            Scrubber scrubber = new Scrubber(cfs, scrubTxn, false, true)) {
            scrubber.scrub();
        }
        LifecycleTransaction.waitForDeletions();
        cfs.loadNewSSTables();
        // All 7 partitions written above should be present after the scrub.
        assertOrderedAll(cfs, 7);
    } finally {
        FileUtils.deleteRecursive(tempDataDir);
        // reset partitioner
        DatabaseDescriptor.setPartitionerUnsafe(oldPartitioner);
    }
}
Aggregations