
Example 6 with ReadCommand

Use of org.apache.cassandra.db.ReadCommand in project cassandra by apache.

The class AbstractReadExecutor, method makeRequests: sends the data or digest read request to each remote replica and defers the (potentially blocking) local read until the remote requests have been dispatched.

private void makeRequests(ReadCommand readCommand, Iterable<Replica> replicas) {
    boolean hasLocalEndpoint = false;
    Message<ReadCommand> message = null;
    for (Replica replica : replicas) {
        assert replica.isFull() || readCommand.acceptsTransient();
        InetAddressAndPort endpoint = replica.endpoint();
        if (replica.isSelf()) {
            hasLocalEndpoint = true;
            continue;
        }
        if (traceState != null)
            traceState.trace("reading {} from {}", readCommand.isDigestQuery() ? "digest" : "data", endpoint);
        if (null == message)
            message = readCommand.createMessage(false);
        MessagingService.instance().sendWithCallback(message, endpoint, handler);
    }
    // We delay the local (potentially blocking) read till the end to avoid stalling remote requests.
    if (hasLocalEndpoint) {
        logger.trace("reading {} locally", readCommand.isDigestQuery() ? "digest" : "data");
        Stage.READ.maybeExecuteImmediately(new LocalReadRunnable(readCommand, handler));
    }
}
Also used: InetAddressAndPort(org.apache.cassandra.locator.InetAddressAndPort), ReadCommand(org.apache.cassandra.db.ReadCommand), SinglePartitionReadCommand(org.apache.cassandra.db.SinglePartitionReadCommand), LocalReadRunnable(org.apache.cassandra.service.StorageProxy.LocalReadRunnable), Replica(org.apache.cassandra.locator.Replica)

Example 7 with ReadCommand

Use of org.apache.cassandra.db.ReadCommand in project cassandra by apache.

The class AbstractReadRepair, method sendReadCommand: executes the read locally when the target replica is this node, copies the command as a transient query for transient replicas, and otherwise sends it over messaging with the supplied read callback.

void sendReadCommand(Replica to, ReadCallback<E, P> readCallback, boolean speculative, boolean trackRepairedStatus) {
    ReadCommand command = this.command;
    if (to.isSelf()) {
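        // Run the read on the local READ stage rather than sending it over messaging.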
        Stage.READ.maybeExecuteImmediately(new StorageProxy.LocalReadRunnable(command, readCallback, trackRepairedStatus));
        return;
    }
    if (to.isTransient()) {
        // It's OK to send queries to transient nodes during RR, as we may have contacted them for their data request initially
        // So long as we don't use these to generate repair mutations, we're fine, and this is enforced by requiring
        // ReadOnlyReadRepair for transient keyspaces.
        command = command.copyAsTransientQuery(to);
    }
    if (Tracing.isTracing()) {
        String type;
        if (speculative)
            type = to.isFull() ? "speculative full" : "speculative transient";
        else
            type = to.isFull() ? "full" : "transient";
        Tracing.trace("Enqueuing {} data read to {}", type, to);
    }
    Message<ReadCommand> message = command.createMessage(trackRepairedStatus && to.isFull());
    MessagingService.instance().sendWithCallback(message, to.endpoint(), readCallback);
}
Also used: StorageProxy(org.apache.cassandra.service.StorageProxy), ReadCommand(org.apache.cassandra.db.ReadCommand), SinglePartitionReadCommand(org.apache.cassandra.db.SinglePartitionReadCommand)

Example 8 with ReadCommand

Use of org.apache.cassandra.db.ReadCommand in project cassandra by apache.

The class CoordinatorWarnings, method done: publishes the aggregated per-command tombstone, local read size, and row index size warnings and aborts to the table's metrics, then clears the coordinator state so they are not published twice.

public static void done() {
    Map<ReadCommand, WarningsSnapshot> map = readonly();
    logger.trace("CoordinatorTrackWarnings.done() with state {}", map);
    map.forEach((command, merged) -> {
        ColumnFamilyStore cfs = Schema.instance.getColumnFamilyStoreInstance(command.metadata().id);
        // race condition when dropping tables, also happens in unit tests as Schema may be bypassed
        if (cfs == null)
            return;
        String cql = command.toCQLString();
        String loggableTokens = command.loggableTokens();
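        // Publish aborts and warnings to the table's client metrics for tombstones, local read size, and row index size.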
        recordAborts(merged.tombstones, cql, loggableTokens, cfs.metric.clientTombstoneAborts, WarningsSnapshot::tombstoneAbortMessage);
        recordWarnings(merged.tombstones, cql, loggableTokens, cfs.metric.clientTombstoneWarnings, WarningsSnapshot::tombstoneWarnMessage);
        recordAborts(merged.localReadSize, cql, loggableTokens, cfs.metric.localReadSizeAborts, WarningsSnapshot::localReadSizeAbortMessage);
        recordWarnings(merged.localReadSize, cql, loggableTokens, cfs.metric.localReadSizeWarnings, WarningsSnapshot::localReadSizeWarnMessage);
        recordAborts(merged.rowIndexTooSize, cql, loggableTokens, cfs.metric.rowIndexSizeAborts, WarningsSnapshot::rowIndexSizeAbortMessage);
        recordWarnings(merged.rowIndexTooSize, cql, loggableTokens, cfs.metric.rowIndexSizeWarnings, WarningsSnapshot::rowIndexSizeWarnMessage);
    });
    // reset the state to block from double publishing
    clearState();
}
Also used: ColumnFamilyStore(org.apache.cassandra.db.ColumnFamilyStore), ReadCommand(org.apache.cassandra.db.ReadCommand)

Example 9 with ReadCommand

Use of org.apache.cassandra.db.ReadCommand in project cassandra by apache.

The class DataResolverTest, method testResolveComplexDelete: checks that the newer complex (collection) deletion and map cell win resolution and that a repair mutation is sent only to the replica that returned the stale data.

@Test
public void testResolveComplexDelete() {
    EndpointsForRange replicas = makeReplicas(2);
    ReadCommand cmd = Util.cmd(cfs2, dk).withNowInSeconds(nowInSec).build();
    TestableReadRepair readRepair = new TestableReadRepair(cmd);
    DataResolver resolver = new DataResolver(cmd, plan(replicas, ALL), readRepair, nanoTime());
    long[] ts = { 100, 200 };
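    // peer1's response: complex deletion at ts[0] - 1 plus a map cell written at ts[0]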
    Row.Builder builder = BTreeRow.unsortedBuilder();
    builder.newRow(Clustering.EMPTY);
    builder.addComplexDeletion(m, new DeletionTime(ts[0] - 1, nowInSec));
    builder.addCell(mapCell(0, 0, ts[0]));
    InetAddressAndPort peer1 = replicas.get(0).endpoint();
    resolver.preprocess(response(cmd, peer1, iter(PartitionUpdate.singleRowUpdate(cfm2, dk, builder.build()))));
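    // peer2's response: newer complex deletion and map cell, which should win resolution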
    builder.newRow(Clustering.EMPTY);
    DeletionTime expectedCmplxDelete = new DeletionTime(ts[1] - 1, nowInSec);
    builder.addComplexDeletion(m, expectedCmplxDelete);
    Cell<?> expectedCell = mapCell(1, 1, ts[1]);
    builder.addCell(expectedCell);
    InetAddressAndPort peer2 = replicas.get(1).endpoint();
    resolver.preprocess(response(cmd, peer2, iter(PartitionUpdate.singleRowUpdate(cfm2, dk, builder.build()))));
    try (PartitionIterator data = resolver.resolve()) {
        try (RowIterator rows = Iterators.getOnlyElement(data)) {
            Row row = Iterators.getOnlyElement(rows);
            assertColumns(row, "m");
            Assert.assertNull(row.getCell(m, CellPath.create(bb(0))));
            Assert.assertNotNull(row.getCell(m, CellPath.create(bb(1))));
        }
    }
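    // Read repair should send the winning cell and complex deletion back to peer1; peer2 already has them.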
    Mutation mutation = readRepair.getForEndpoint(peer1);
    Iterator<Row> rowIter = mutation.getPartitionUpdate(cfm2).iterator();
    assertTrue(rowIter.hasNext());
    Row row = rowIter.next();
    assertFalse(rowIter.hasNext());
    ComplexColumnData cd = row.getComplexColumnData(m);
    assertEquals(Collections.singleton(expectedCell), Sets.newHashSet(cd));
    assertEquals(expectedCmplxDelete, cd.complexDeletion());
    Assert.assertNull(readRepair.sent.get(peer2));
}
Also used: InetAddressAndPort(org.apache.cassandra.locator.InetAddressAndPort), TestableReadRepair(org.apache.cassandra.service.reads.repair.TestableReadRepair), DeletionTime(org.apache.cassandra.db.DeletionTime), ReadCommand(org.apache.cassandra.db.ReadCommand), ComplexColumnData(org.apache.cassandra.db.rows.ComplexColumnData), UnfilteredPartitionIterator(org.apache.cassandra.db.partitions.UnfilteredPartitionIterator), PartitionIterator(org.apache.cassandra.db.partitions.PartitionIterator), RowIterator(org.apache.cassandra.db.rows.RowIterator), EndpointsForRange(org.apache.cassandra.locator.EndpointsForRange), BTreeRow(org.apache.cassandra.db.rows.BTreeRow), Row(org.apache.cassandra.db.rows.Row), Mutation(org.apache.cassandra.db.Mutation), Test(org.junit.Test)

Example 10 with ReadCommand

Use of org.apache.cassandra.db.ReadCommand in project cassandra by apache.

The class DataResolverTest, method testResolveNewCollectionOverwritingDeleted: checks that a newer map column overwrites a previously cleared (deleted) collection during resolution and that only the stale replica is repaired.

@Test
public void testResolveNewCollectionOverwritingDeleted() {
    EndpointsForRange replicas = makeReplicas(2);
    ReadCommand cmd = Util.cmd(cfs2, dk).withNowInSeconds(nowInSec).build();
    TestableReadRepair readRepair = new TestableReadRepair(cmd);
    DataResolver resolver = new DataResolver(cmd, plan(replicas, ALL), readRepair, nanoTime());
    long[] ts = { 100, 200 };
    // cleared map column
    Row.Builder builder = BTreeRow.unsortedBuilder();
    builder.newRow(Clustering.EMPTY);
    builder.addComplexDeletion(m, new DeletionTime(ts[0] - 1, nowInSec));
    InetAddressAndPort peer1 = replicas.get(0).endpoint();
    resolver.preprocess(response(cmd, peer1, iter(PartitionUpdate.singleRowUpdate(cfm2, dk, builder.build()))));
    // newer, overwritten map column
    builder.newRow(Clustering.EMPTY);
    DeletionTime expectedCmplxDelete = new DeletionTime(ts[1] - 1, nowInSec);
    builder.addComplexDeletion(m, expectedCmplxDelete);
    Cell<?> expectedCell = mapCell(1, 1, ts[1]);
    builder.addCell(expectedCell);
    InetAddressAndPort peer2 = replicas.get(1).endpoint();
    resolver.preprocess(response(cmd, peer2, iter(PartitionUpdate.singleRowUpdate(cfm2, dk, builder.build()))));
    try (PartitionIterator data = resolver.resolve()) {
        try (RowIterator rows = Iterators.getOnlyElement(data)) {
            Row row = Iterators.getOnlyElement(rows);
            assertColumns(row, "m");
            ComplexColumnData cd = row.getComplexColumnData(m);
            assertEquals(Collections.singleton(expectedCell), Sets.newHashSet(cd));
        }
    }
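    // peer1 should be repaired with the newer cell and complex deletion; peer2 needs no repair.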
    Row row = Iterators.getOnlyElement(readRepair.getForEndpoint(peer1).getPartitionUpdate(cfm2).iterator());
    ComplexColumnData cd = row.getComplexColumnData(m);
    assertEquals(Collections.singleton(expectedCell), Sets.newHashSet(cd));
    assertEquals(expectedCmplxDelete, cd.complexDeletion());
    Assert.assertNull(readRepair.sent.get(peer2));
}
Also used: InetAddressAndPort(org.apache.cassandra.locator.InetAddressAndPort), TestableReadRepair(org.apache.cassandra.service.reads.repair.TestableReadRepair), DeletionTime(org.apache.cassandra.db.DeletionTime), ReadCommand(org.apache.cassandra.db.ReadCommand), ComplexColumnData(org.apache.cassandra.db.rows.ComplexColumnData), UnfilteredPartitionIterator(org.apache.cassandra.db.partitions.UnfilteredPartitionIterator), PartitionIterator(org.apache.cassandra.db.partitions.PartitionIterator), RowIterator(org.apache.cassandra.db.rows.RowIterator), EndpointsForRange(org.apache.cassandra.locator.EndpointsForRange), BTreeRow(org.apache.cassandra.db.rows.BTreeRow), Row(org.apache.cassandra.db.rows.Row), Test(org.junit.Test)

Aggregations

ReadCommand (org.apache.cassandra.db.ReadCommand): 13
UnfilteredPartitionIterator (org.apache.cassandra.db.partitions.UnfilteredPartitionIterator): 6
PartitionIterator (org.apache.cassandra.db.partitions.PartitionIterator): 5
EndpointsForRange (org.apache.cassandra.locator.EndpointsForRange): 5
InetAddressAndPort (org.apache.cassandra.locator.InetAddressAndPort): 5
Test (org.junit.Test): 5
DeletionTime (org.apache.cassandra.db.DeletionTime): 4
SinglePartitionReadCommand (org.apache.cassandra.db.SinglePartitionReadCommand): 4
BTreeRow (org.apache.cassandra.db.rows.BTreeRow): 4
ComplexColumnData (org.apache.cassandra.db.rows.ComplexColumnData): 4
Row (org.apache.cassandra.db.rows.Row): 4
TestableReadRepair (org.apache.cassandra.service.reads.repair.TestableReadRepair): 4
Mutation (org.apache.cassandra.db.Mutation): 3
RowIterator (org.apache.cassandra.db.rows.RowIterator): 3
ColumnFamilyStore (org.apache.cassandra.db.ColumnFamilyStore): 2
PartitionRangeReadCommand (org.apache.cassandra.db.PartitionRangeReadCommand): 2
Replica (org.apache.cassandra.locator.Replica): 2
StorageProxy (org.apache.cassandra.service.StorageProxy): 2
LocalReadRunnable (org.apache.cassandra.service.StorageProxy.LocalReadRunnable): 2
InetAddress (java.net.InetAddress): 1