Example 16 with PartitionUpdate

Use of org.apache.cassandra.db.partitions.PartitionUpdate in project cassandra by apache.

The class WriteCallbackInfoTest, method testShouldHint.

private void testShouldHint(Verb verb, ConsistencyLevel cl, boolean allowHints, boolean expectHint) throws Exception {
    Object payload = verb == Verb.PAXOS_COMMIT
                     ? new Commit(UUID.randomUUID(), new PartitionUpdate(MockSchema.newTableMetadata("", ""), ByteBufferUtil.EMPTY_BYTE_BUFFER, RegularAndStaticColumns.NONE, 1))
                     : new Mutation("", new BufferDecoratedKey(new Murmur3Partitioner.LongToken(0), ByteBufferUtil.EMPTY_BYTE_BUFFER));
    WriteCallbackInfo wcbi = new WriteCallbackInfo(InetAddress.getByName("192.168.1.1"), null, new MessageOut(verb, payload, null), null, cl, allowHints);
    Assert.assertEquals(expectHint, wcbi.shouldHint());
    if (expectHint) {
        Assert.assertNotNull(wcbi.mutation());
    } else {
        boolean fail = false;
        try {
            wcbi.mutation();
        } catch (Throwable t) {
            fail = true;
        }
        Assert.assertTrue(fail);
    }
}
Also used: Commit (org.apache.cassandra.service.paxos.Commit), BufferDecoratedKey (org.apache.cassandra.db.BufferDecoratedKey), Mutation (org.apache.cassandra.db.Mutation), Murmur3Partitioner (org.apache.cassandra.dht.Murmur3Partitioner), PartitionUpdate (org.apache.cassandra.db.partitions.PartitionUpdate)
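
A minimal driver sketch, not part of the original snippet, showing how the private helper might be exercised. The expected outcomes are assumptions based on shouldHint() only hinting when hints are allowed and the consistency level is not ANY:

@Test
public void testShouldHintSketch() throws Exception {
    // Hints disallowed for the request: never hint (assumption about shouldHint()).
    testShouldHint(Verb.MUTATION, ConsistencyLevel.ALL, false, false);
    // ConsistencyLevel.ANY writes are assumed not to be hinted through this path.
    testShouldHint(Verb.MUTATION, ConsistencyLevel.ANY, true, false);
    // A regular mutation with hints allowed and CL != ANY should hint.
    testShouldHint(Verb.MUTATION, ConsistencyLevel.ALL, true, true);
}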

Example 17 with PartitionUpdate

Use of org.apache.cassandra.db.partitions.PartitionUpdate in project cassandra by apache.

The class PaxosStateTest, method testCommittingAfterTruncation.

@Test
public void testCommittingAfterTruncation() throws Exception {
    ColumnFamilyStore cfs = Keyspace.open("PaxosStateTestKeyspace1").getColumnFamilyStore("Standard1");
    String key = "key" + System.nanoTime();
    ByteBuffer value = ByteBufferUtil.bytes(0);
    RowUpdateBuilder builder = new RowUpdateBuilder(cfs.metadata(), FBUtilities.timestampMicros(), key);
    builder.clustering("a").add("val", value);
    PartitionUpdate update = Iterables.getOnlyElement(builder.build().getPartitionUpdates());
    // CFS should be empty initially
    assertNoDataPresent(cfs, Util.dk(key));
    // Commit the proposal & verify the data is present
    Commit beforeTruncate = newProposal(0, update);
    PaxosState.commit(beforeTruncate);
    assertDataPresent(cfs, Util.dk(key), "val", value);
    // Truncate then attempt to commit again, mutation should
    // be ignored as the proposal predates the truncation
    cfs.truncateBlocking();
    PaxosState.commit(beforeTruncate);
    assertNoDataPresent(cfs, Util.dk(key));
    // Now try again with a ballot created after the truncation
    long timestamp = SystemKeyspace.getTruncatedAt(update.metadata().id) + 1;
    Commit afterTruncate = newProposal(timestamp, update);
    PaxosState.commit(afterTruncate);
    assertDataPresent(cfs, Util.dk(key), "val", value);
}
Also used: Commit (org.apache.cassandra.service.paxos.Commit), ByteBuffer (java.nio.ByteBuffer), PartitionUpdate (org.apache.cassandra.db.partitions.PartitionUpdate), Test (org.junit.Test)
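
The test relies on a newProposal helper that is not shown above. A plausible sketch, assuming it simply wraps Commit.newProposal with a time-based ballot built from the given millisecond timestamp via UUIDGen.getTimeUUID:

private Commit newProposal(long ballotMillis, PartitionUpdate update) {
    // Assumed helper: the ballot UUID encodes the supplied wall-clock time,
    // so a proposal can be ordered against the table's truncation timestamp.
    return Commit.newProposal(UUIDGen.getTimeUUID(ballotMillis), update);
}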

Example 18 with PartitionUpdate

Use of org.apache.cassandra.db.partitions.PartitionUpdate in project cassandra by apache.

The class SchemaKeyspaceTest, method checkInverses.

private static void checkInverses(TableMetadata metadata) throws Exception {
    KeyspaceMetadata keyspace = Schema.instance.getKeyspaceMetadata(metadata.keyspace);
    // Test schema conversion
    Mutation rm = SchemaKeyspace.makeCreateTableMutation(keyspace, metadata, FBUtilities.timestampMicros()).build();
    PartitionUpdate serializedCf = rm.getPartitionUpdate(Schema.instance.getTableMetadata(SchemaConstants.SCHEMA_KEYSPACE_NAME, SchemaKeyspace.TABLES));
    PartitionUpdate serializedCD = rm.getPartitionUpdate(Schema.instance.getTableMetadata(SchemaConstants.SCHEMA_KEYSPACE_NAME, SchemaKeyspace.COLUMNS));
    UntypedResultSet.Row tableRow = QueryProcessor.resultify(String.format("SELECT * FROM %s.%s", SchemaConstants.SCHEMA_KEYSPACE_NAME, SchemaKeyspace.TABLES),
                                                             UnfilteredRowIterators.filter(serializedCf.unfilteredIterator(), FBUtilities.nowInSeconds())).one();
    TableParams params = SchemaKeyspace.createTableParamsFromRow(tableRow);
    UntypedResultSet columnsRows = QueryProcessor.resultify(String.format("SELECT * FROM %s.%s", SchemaConstants.SCHEMA_KEYSPACE_NAME, SchemaKeyspace.COLUMNS),
                                                            UnfilteredRowIterators.filter(serializedCD.unfilteredIterator(), FBUtilities.nowInSeconds()));
    Set<ColumnMetadata> columns = new HashSet<>();
    for (UntypedResultSet.Row row : columnsRows) columns.add(SchemaKeyspace.createColumnFromRow(row, Types.none()));
    assertEquals(metadata.params, params);
    assertEquals(new HashSet<>(metadata.columns()), columns);
}
Also used: UntypedResultSet (org.apache.cassandra.cql3.UntypedResultSet), Mutation (org.apache.cassandra.db.Mutation), PartitionUpdate (org.apache.cassandra.db.partitions.PartitionUpdate), HashSet (java.util.HashSet)
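
A hypothetical caller for the helper above; the keyspace and table names are placeholders and not part of the original test:

@Test
public void testTableMetadataRoundTrip() throws Exception {
    // Round-trip an existing table's metadata through its schema-keyspace
    // representation and back, verifying params and columns survive intact.
    TableMetadata metadata = Schema.instance.getTableMetadata("some_keyspace", "some_table");
    checkInverses(metadata);
}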

Example 19 with PartitionUpdate

Use of org.apache.cassandra.db.partitions.PartitionUpdate in project cassandra by apache.

The class RowTest, method testExpiringColumnExpiration.

@Test
public void testExpiringColumnExpiration() throws IOException {
    int ttl = 1;
    ColumnMetadata def = metadata.getColumn(new ColumnIdentifier("a", true));
    Cell cell = BufferCell.expiring(def, 0, ttl, nowInSeconds, ((AbstractType) def.cellValueType()).decompose("a1"));
    PartitionUpdate update = PartitionUpdate.singleRowUpdate(metadata, dk, BTreeRow.singleCellRow(metadata.comparator.make("c1"), cell));
    new Mutation(update).applyUnsafe();
    // when we read with a nowInSeconds before the cell has expired,
    // the PartitionIterator includes the row we just wrote
    Row row = Util.getOnlyRow(Util.cmd(cfs, dk).includeRow("c1").withNowInSeconds(nowInSeconds).build());
    assertEquals("a1", ByteBufferUtil.string(row.getCell(def).value()));
    // when we read with a nowInSeconds after the cell has expired, the row is filtered
    // so the PartitionIterator is empty
    Util.assertEmpty(Util.cmd(cfs, dk).includeRow("c1").withNowInSeconds(nowInSeconds + ttl + 1).build());
}
Also used: ColumnMetadata (org.apache.cassandra.schema.ColumnMetadata), ColumnIdentifier (org.apache.cassandra.cql3.ColumnIdentifier), PartitionUpdate (org.apache.cassandra.db.partitions.PartitionUpdate), Test (org.junit.Test)
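
For contrast, a short sketch using the same metadata, dk, def and cfs fixtures assumed by the test above, writing a non-expiring cell that should stay visible however far nowInSeconds advances. It assumes BufferCell.live takes the column, timestamp and value:

// Assumed contrast case: a live (non-TTL) cell in clustering "c2".
Cell liveCell = BufferCell.live(def, 0, ((AbstractType) def.cellValueType()).decompose("a2"));
PartitionUpdate liveUpdate = PartitionUpdate.singleRowUpdate(metadata, dk,
                                                             BTreeRow.singleCellRow(metadata.comparator.make("c2"), liveCell));
new Mutation(liveUpdate).applyUnsafe();
// No TTL, so the row remains readable even well past the original nowInSeconds.
Row liveRow = Util.getOnlyRow(Util.cmd(cfs, dk).includeRow("c2").withNowInSeconds(nowInSeconds + 1000).build());
assertEquals("a2", ByteBufferUtil.string(liveRow.getCell(def).value()));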

Example 20 with PartitionUpdate

Use of org.apache.cassandra.db.partitions.PartitionUpdate in project cassandra by apache.

The class RowTest, method testMergeRangeTombstones.

@Test
public void testMergeRangeTombstones() throws InterruptedException {
    PartitionUpdate update1 = new PartitionUpdate(metadata, dk, metadata.regularAndStaticColumns(), 1);
    writeRangeTombstone(update1, "1", "11", 123, 123);
    writeRangeTombstone(update1, "2", "22", 123, 123);
    writeRangeTombstone(update1, "3", "31", 123, 123);
    writeRangeTombstone(update1, "4", "41", 123, 123);
    PartitionUpdate update2 = new PartitionUpdate(metadata, dk, metadata.regularAndStaticColumns(), 1);
    writeRangeTombstone(update2, "1", "11", 123, 123);
    writeRangeTombstone(update2, "111", "112", 1230, 123);
    writeRangeTombstone(update2, "2", "24", 123, 123);
    writeRangeTombstone(update2, "3", "31", 1230, 123);
    writeRangeTombstone(update2, "4", "41", 123, 1230);
    writeRangeTombstone(update2, "5", "51", 123, 1230);
    try (UnfilteredRowIterator merged = UnfilteredRowIterators.merge(ImmutableList.of(update1.unfilteredIterator(), update2.unfilteredIterator()), nowInSeconds)) {
        Object[][] expected = new Object[][] { { "1", "11", 123L, 123 },
                                               { "111", "112", 1230L, 123 },
                                               { "2", "24", 123L, 123 },
                                               { "3", "31", 1230L, 123 },
                                               { "4", "41", 123L, 1230 },
                                               { "5", "51", 123L, 1230 } };
        int i = 0;
        while (merged.hasNext()) {
            RangeTombstoneBoundMarker openMarker = (RangeTombstoneBoundMarker) merged.next();
            ClusteringBound openBound = openMarker.clustering();
            DeletionTime openDeletion = new DeletionTime(openMarker.deletionTime().markedForDeleteAt(), openMarker.deletionTime().localDeletionTime());
            RangeTombstoneBoundMarker closeMarker = (RangeTombstoneBoundMarker) merged.next();
            ClusteringBound closeBound = closeMarker.clustering();
            DeletionTime closeDeletion = new DeletionTime(closeMarker.deletionTime().markedForDeleteAt(), closeMarker.deletionTime().localDeletionTime());
            assertEquals(openDeletion, closeDeletion);
            assertRangeTombstoneMarkers(openBound, closeBound, openDeletion, expected[i++]);
        }
    }
}
Also used: PartitionUpdate (org.apache.cassandra.db.partitions.PartitionUpdate), Test (org.junit.Test)
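
The writeRangeTombstone helper is not shown in the snippet. A plausible sketch, assuming it adds a RangeTombstone spanning the two clustering values to the update; the cfs fixture supplying the clustering comparator is also an assumption:

private void writeRangeTombstone(PartitionUpdate update, Object start, Object end,
                                 long markedForDeleteAt, int localDeletionTime) {
    ClusteringComparator comparator = cfs.getComparator();
    // Assumed helper: delete the clustering range [start, end] with the given
    // deletion timestamp and local deletion time.
    update.add(new RangeTombstone(Slice.make(comparator.make(start), comparator.make(end)),
                                  new DeletionTime(markedForDeleteAt, localDeletionTime)));
}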

Aggregations

PartitionUpdate (org.apache.cassandra.db.partitions.PartitionUpdate): 40
Test (org.junit.Test): 14
TableMetadata (org.apache.cassandra.schema.TableMetadata): 7
ColumnIdentifier (org.apache.cassandra.cql3.ColumnIdentifier): 3
Mutation (org.apache.cassandra.db.Mutation): 3
ColumnMetadata (org.apache.cassandra.schema.ColumnMetadata): 3
Commit (org.apache.cassandra.service.paxos.Commit): 3
UntypedResultSet (org.apache.cassandra.cql3.UntypedResultSet): 2
InvalidRequestException (org.apache.cassandra.exceptions.InvalidRequestException): 2
TableId (org.apache.cassandra.schema.TableId): 2
VisibleForTesting (com.google.common.annotations.VisibleForTesting): 1
ImmutableMap (com.google.common.collect.ImmutableMap): 1
InetAddress (java.net.InetAddress): 1
ByteBuffer (java.nio.ByteBuffer): 1
Collections.emptyMap (java.util.Collections.emptyMap): 1
Collections.singletonMap (java.util.Collections.singletonMap): 1
HashSet (java.util.HashSet): 1
UUID (java.util.UUID): 1
AtomicInteger (java.util.concurrent.atomic.AtomicInteger): 1
AtomicLong (java.util.concurrent.atomic.AtomicLong): 1