
Example 1 with DataInputPlus

Use of org.apache.cassandra.io.util.DataInputPlus in project cassandra by apache.

From the class IncomingTcpConnection, method receiveMessages():

// Not closing constructed DataInputPlus's as the stream needs to remain open.
@SuppressWarnings("resource")
private void receiveMessages() throws IOException {
    // handshake (true) endpoint versions
    DataOutputStream out = new DataOutputStream(socket.getOutputStream());
    // if this version is < the MS version the other node is trying
    // to connect with, the other node will disconnect
    out.writeInt(MessagingService.current_version);
    out.flush();
    DataInputPlus in = new DataInputStreamPlus(socket.getInputStream());
    int maxVersion = in.readInt();
    // outbound side will reconnect if necessary to upgrade version
    assert version <= MessagingService.current_version;
    from = CompactEndpointSerializationHelper.deserialize(in);
    // record the (true) version of the endpoint
    MessagingService.instance().setVersion(from, maxVersion);
    logger.trace("Set version for {} to {} (will use {})", from, maxVersion, MessagingService.instance().getVersion(from));
    if (compressed) {
        logger.trace("Upgrading incoming connection to be compressed");
        LZ4FastDecompressor decompressor = LZ4Factory.fastestInstance().fastDecompressor();
        Checksum checksum = XXHashFactory.fastestInstance().newStreamingHash32(OutboundTcpConnection.LZ4_HASH_SEED).asChecksum();
        in = new DataInputStreamPlus(new LZ4BlockInputStream(socket.getInputStream(), decompressor, checksum));
    } else {
        ReadableByteChannel channel = socket.getChannel();
        in = new NIODataInputStream(channel != null ? channel : Channels.newChannel(socket.getInputStream()), BUFFER_SIZE);
    }
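    // loop until the peer closes the connection; readInt() will then throw IOException, ending the method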
    while (true) {
        MessagingService.validateMagic(in.readInt());
        receiveMessage(in, version);
    }
}
Also used: ReadableByteChannel(java.nio.channels.ReadableByteChannel) LZ4FastDecompressor(net.jpountz.lz4.LZ4FastDecompressor) Checksum(java.util.zip.Checksum) DataInputStreamPlus(org.apache.cassandra.io.util.DataInputPlus.DataInputStreamPlus) DataInputPlus(org.apache.cassandra.io.util.DataInputPlus) LZ4BlockInputStream(net.jpountz.lz4.LZ4BlockInputStream) NIODataInputStream(org.apache.cassandra.io.util.NIODataInputStream)
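
The wrapping pattern above can be exercised without a socket: DataInputStreamPlus adapts any InputStream to the DataInputPlus interface. A minimal sketch, assuming only the DataInputStreamPlus(InputStream) constructor already used above; the byte-array streams are hypothetical stand-ins for the socket streams.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import org.apache.cassandra.io.util.DataInputPlus;
import org.apache.cassandra.io.util.DataInputPlus.DataInputStreamPlus;

public class HandshakeSketch {
    public static void main(String[] args) throws IOException {
        // "outbound" side: write a messaging version, as receiveMessages() does
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        DataOutputStream out = new DataOutputStream(bytes);
        out.writeInt(12); // stand-in for MessagingService.current_version
        out.flush();
        // "inbound" side: wrap the raw stream and read the peer's version back
        DataInputPlus in = new DataInputStreamPlus(new ByteArrayInputStream(bytes.toByteArray()));
        int maxVersion = in.readInt();
        assert maxVersion == 12;
    }
}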

Example 2 with DataInputPlus

Use of org.apache.cassandra.io.util.DataInputPlus in project cassandra by apache.

From the class EmptyTypeTest, method read():

@Test
public void read() {
    DataInputPlus input = Mockito.mock(DataInputPlus.class);
    ByteBuffer buffer = EmptyType.instance.readBuffer(input);
    assertThat(buffer).isNotNull().matches(b -> !b.hasRemaining());
    buffer = EmptyType.instance.readBuffer(input, 42);
    assertThat(buffer).isNotNull().matches(b -> !b.hasRemaining());
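    // EmptyType values are zero-length, so deserialization must never touch the input stream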
    Mockito.verifyNoInteractions(input);
}
Also used: DataInputPlus(org.apache.cassandra.io.util.DataInputPlus) ByteBuffer(java.nio.ByteBuffer) Test(org.junit.Test)
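
The same pattern in isolation: mock the heavyweight input, call the code under test, then verify zero interactions. A minimal sketch; the readNothing helper is a hypothetical stand-in for EmptyType's readBuffer.

import java.nio.ByteBuffer;
import org.apache.cassandra.io.util.DataInputPlus;
import org.mockito.Mockito;

public class NoReadSketch {
    // hypothetical deserializer that, like EmptyType, never reads from its input
    static ByteBuffer readNothing(DataInputPlus in) {
        return ByteBuffer.allocate(0);
    }

    public static void main(String[] args) {
        DataInputPlus input = Mockito.mock(DataInputPlus.class);
        ByteBuffer buffer = readNothing(input);
        assert !buffer.hasRemaining();
        // fails if readNothing ever called a method on input
        Mockito.verifyNoInteractions(input);
    }
}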

Example 3 with DataInputPlus

Use of org.apache.cassandra.io.util.DataInputPlus in project cassandra by apache.

From the class SinglePartitionSliceCommandTest, method staticColumnsAreReturned():

@Test
public void staticColumnsAreReturned() throws IOException {
    DecoratedKey key = metadata.partitioner.decorateKey(ByteBufferUtil.bytes("k1"));
    QueryProcessor.executeInternal("INSERT INTO ks.tbl (k, s) VALUES ('k1', 's')");
    Assert.assertFalse(QueryProcessor.executeInternal("SELECT s FROM ks.tbl WHERE k='k1'").isEmpty());
    ColumnFilter columnFilter = ColumnFilter.selection(RegularAndStaticColumns.of(s));
    ClusteringIndexSliceFilter sliceFilter = new ClusteringIndexSliceFilter(Slices.NONE, false);
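    // Slices.NONE selects no clustering rows, so any data returned must come from the static row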
    ReadCommand cmd = SinglePartitionReadCommand.create(metadata, FBUtilities.nowInSeconds(), columnFilter, RowFilter.NONE, DataLimits.NONE, key, sliceFilter);
    // check raw iterator for static cell
    try (ReadExecutionController executionController = cmd.executionController();
        UnfilteredPartitionIterator pi = cmd.executeLocally(executionController)) {
        checkForS(pi);
    }
    ReadResponse response;
    DataOutputBuffer out;
    DataInputPlus in;
    ReadResponse dst;
    // check (de)serialized iterator for memtable static cell
    try (ReadExecutionController executionController = cmd.executionController();
        UnfilteredPartitionIterator pi = cmd.executeLocally(executionController)) {
        response = ReadResponse.createDataResponse(pi, cmd, executionController.getRepairedDataInfo());
    }
    out = new DataOutputBuffer((int) ReadResponse.serializer.serializedSize(response, MessagingService.VERSION_30));
    ReadResponse.serializer.serialize(response, out, MessagingService.VERSION_30);
    in = new DataInputBuffer(out.buffer(), true);
    dst = ReadResponse.serializer.deserialize(in, MessagingService.VERSION_30);
    try (UnfilteredPartitionIterator pi = dst.makeIterator(cmd)) {
        checkForS(pi);
    }
    // check (de)serialized iterator for sstable static cell
    Schema.instance.getColumnFamilyStoreInstance(metadata.id).forceBlockingFlush();
    try (ReadExecutionController executionController = cmd.executionController();
        UnfilteredPartitionIterator pi = cmd.executeLocally(executionController)) {
        response = ReadResponse.createDataResponse(pi, cmd, executionController.getRepairedDataInfo());
    }
    out = new DataOutputBuffer((int) ReadResponse.serializer.serializedSize(response, MessagingService.VERSION_30));
    ReadResponse.serializer.serialize(response, out, MessagingService.VERSION_30);
    in = new DataInputBuffer(out.buffer(), true);
    dst = ReadResponse.serializer.deserialize(in, MessagingService.VERSION_30);
    try (UnfilteredPartitionIterator pi = dst.makeIterator(cmd)) {
        checkForS(pi);
    }
}
Also used: ClusteringIndexSliceFilter(org.apache.cassandra.db.filter.ClusteringIndexSliceFilter) DataInputBuffer(org.apache.cassandra.io.util.DataInputBuffer) DataOutputBuffer(org.apache.cassandra.io.util.DataOutputBuffer) UnfilteredPartitionIterator(org.apache.cassandra.db.partitions.UnfilteredPartitionIterator) DataInputPlus(org.apache.cassandra.io.util.DataInputPlus) ColumnFilter(org.apache.cassandra.db.filter.ColumnFilter) Test(org.junit.Test)
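
The test repeats one idiom twice: size a DataOutputBuffer via serializedSize, serialize, re-wrap the bytes in a DataInputBuffer, and deserialize. A sketch of that round trip factored into a helper, assuming Cassandra's IVersionedSerializer interface; the roundTrip helper itself is hypothetical.

import java.io.IOException;
import org.apache.cassandra.io.IVersionedSerializer;
import org.apache.cassandra.io.util.DataInputBuffer;
import org.apache.cassandra.io.util.DataInputPlus;
import org.apache.cassandra.io.util.DataOutputBuffer;

public final class RoundTrip {
    // serialize value at the given messaging version, then deserialize it back
    static <T> T roundTrip(IVersionedSerializer<T> serializer, T value, int version) throws IOException {
        DataOutputBuffer out = new DataOutputBuffer((int) serializer.serializedSize(value, version));
        serializer.serialize(value, out, version);
        DataInputPlus in = new DataInputBuffer(out.buffer(), true);
        return serializer.deserialize(in, version);
    }
}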

Example 4 with DataInputPlus

Use of org.apache.cassandra.io.util.DataInputPlus in project cassandra by apache.

From the class CassandraEntireSSTableStreamReader, method read():

/**
 * @param in where this reads data from
 * @return SSTable transferred
 * @throws IOException if reading the remote sstable fails. Will throw a RuntimeException if the local write fails.
 */
// input needs to remain open, streams on top of it can't be closed
@SuppressWarnings("resource")
@Override
public SSTableMultiWriter read(DataInputPlus in) throws Throwable {
    ColumnFamilyStore cfs = ColumnFamilyStore.getIfExists(tableId);
    if (cfs == null) {
        // schema was dropped during streaming
        throw new IOException("Table " + tableId + " was dropped during streaming");
    }
    ComponentManifest manifest = header.componentManifest;
    long totalSize = manifest.totalSize();
    logger.debug("[Stream #{}] Started receiving sstable #{} from {}, size = {}, table = {}", session.planId(), fileSequenceNumber, session.peer, prettyPrintMemory(totalSize), cfs.metadata());
    BigTableZeroCopyWriter writer = null;
    try {
        writer = createWriter(cfs, totalSize, manifest.components());
        long bytesRead = 0;
        for (Component component : manifest.components()) {
            long length = manifest.sizeOf(component);
            logger.debug("[Stream #{}] Started receiving {} component from {}, componentSize = {}, readBytes = {}, totalSize = {}", session.planId(), component, session.peer, prettyPrintMemory(length), prettyPrintMemory(bytesRead), prettyPrintMemory(totalSize));
            writer.writeComponent(component.type, in, length);
            session.progress(writer.descriptor.filenameFor(component), ProgressInfo.Direction.IN, length, length);
            bytesRead += length;
            logger.debug("[Stream #{}] Finished receiving {} component from {}, componentSize = {}, readBytes = {}, totalSize = {}", session.planId(), component, session.peer, prettyPrintMemory(length), prettyPrintMemory(bytesRead), prettyPrintMemory(totalSize));
        }
        UnaryOperator<StatsMetadata> transform = stats -> stats.mutateLevel(header.sstableLevel).mutateRepairedMetadata(messageHeader.repairedAt, messageHeader.pendingRepair, false);
        String description = String.format("level %s and repairedAt time %s and pendingRepair %s", header.sstableLevel, messageHeader.repairedAt, messageHeader.pendingRepair);
        writer.descriptor.getMetadataSerializer().mutate(writer.descriptor, description, transform);
        return writer;
    } catch (Throwable e) {
        logger.error("[Stream {}] Error while reading sstable from stream for table = {}", session.planId(), cfs.metadata(), e);
        if (writer != null)
            e = writer.abort(e);
        throw e;
    }
}
Also used: StatsMetadata(org.apache.cassandra.io.sstable.metadata.StatsMetadata) BigTableZeroCopyWriter(org.apache.cassandra.io.sstable.format.big.BigTableZeroCopyWriter) Logger(org.slf4j.Logger) ProgressInfo(org.apache.cassandra.streaming.ProgressInfo) TableId(org.apache.cassandra.schema.TableId) StreamReceiver(org.apache.cassandra.streaming.StreamReceiver) Collection(java.util.Collection) LoggerFactory(org.slf4j.LoggerFactory) File(org.apache.cassandra.io.util.File) IOException(java.io.IOException) UnaryOperator(java.util.function.UnaryOperator) FBUtilities.prettyPrintMemory(org.apache.cassandra.utils.FBUtilities.prettyPrintMemory) String.format(java.lang.String.format) StreamMessageHeader(org.apache.cassandra.streaming.messages.StreamMessageHeader) SSTableMultiWriter(org.apache.cassandra.io.sstable.SSTableMultiWriter) StreamSession(org.apache.cassandra.streaming.StreamSession) ColumnFamilyStore(org.apache.cassandra.db.ColumnFamilyStore) LifecycleNewTracker(org.apache.cassandra.db.lifecycle.LifecycleNewTracker) Component(org.apache.cassandra.io.sstable.Component) Descriptor(org.apache.cassandra.io.sstable.Descriptor) SSTableFormat(org.apache.cassandra.io.sstable.format.SSTableFormat) DataInputPlus(org.apache.cassandra.io.util.DataInputPlus) Directories(org.apache.cassandra.db.Directories)
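
The component loop above has a generic shape: one shared input stream, one fixed-length section per manifest entry, and running byte accounting. A plain-Java sketch of that shape; the manifest map and readSections helper are hypothetical stand-ins for ComponentManifest and the writer.

import java.io.DataInputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.util.Map;

public class SectionedStreamSketch {
    static void readSections(DataInputStream in, Map<String, Long> manifest, OutputStream dst) throws IOException {
        // mirrors the bytesRead accounting used for progress reporting above
        long bytesRead = 0;
        byte[] buf = new byte[64 * 1024];
        for (Map.Entry<String, Long> entry : manifest.entrySet()) {
            long remaining = entry.getValue(); // section size from the manifest
            while (remaining > 0) {
                int n = in.read(buf, 0, (int) Math.min(buf.length, remaining));
                if (n < 0)
                    throw new IOException("stream ended inside " + entry.getKey() + " after " + bytesRead + " bytes");
                dst.write(buf, 0, n);
                remaining -= n;
                bytesRead += n;
            }
        }
    }
}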

Example 5 with DataInputPlus

Use of org.apache.cassandra.io.util.DataInputPlus in project cassandra by apache.

From the class BytesReadTrackerTest, method internalTestSkipBytesAndReadFully():

public void internalTestSkipBytesAndReadFully(boolean inputStream) throws Exception {
    String testStr = "1234567890";
    byte[] testData = testStr.getBytes();
    DataInputPlus.DataInputStreamPlus in = new DataInputPlus.DataInputStreamPlus(new ByteArrayInputStream(testData));
    BytesReadTracker tracker = inputStream ? new TrackedInputStream(in) : new TrackedDataInputPlus(in);
    DataInputPlus reader = inputStream ? new DataInputPlus.DataInputStreamPlus((TrackedInputStream) tracker) : (DataInputPlus) tracker;
    try {
        // read first 5 bytes
        byte[] out = new byte[5];
        reader.readFully(out, 0, 5);
        assertEquals("12345", new String(out));
        assertEquals(5, tracker.getBytesRead());
        // then skip 2 bytes
        reader.skipBytes(2);
        assertEquals(7, tracker.getBytesRead());
        // and read the rest
        out = new byte[3];
        reader.readFully(out);
        assertEquals("890", new String(out));
        assertEquals(10, tracker.getBytesRead());
        assertEquals(testData.length, tracker.getBytesRead());
    } finally {
        in.close();
    }
}
Also used: BytesReadTracker(org.apache.cassandra.io.util.BytesReadTracker) ByteArrayInputStream(java.io.ByteArrayInputStream) TrackedInputStream(org.apache.cassandra.io.util.TrackedInputStream) TrackedDataInputPlus(org.apache.cassandra.io.util.TrackedDataInputPlus) DataInputPlus(org.apache.cassandra.io.util.DataInputPlus)
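
The byte accounting on its own, as a minimal sketch assuming only the TrackedDataInputPlus(DataInput) constructor used above:

import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.IOException;
import org.apache.cassandra.io.util.TrackedDataInputPlus;

public class TrackerSketch {
    public static void main(String[] args) throws IOException {
        byte[] data = "1234567890".getBytes();
        TrackedDataInputPlus in = new TrackedDataInputPlus(new DataInputStream(new ByteArrayInputStream(data)));
        in.readFully(new byte[4]); // consumes 4 bytes
        in.skipBytes(3); // skipped bytes count as read
        assert in.getBytesRead() == 7;
    }
}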

Aggregations

DataInputPlus (org.apache.cassandra.io.util.DataInputPlus): 28
Test (org.junit.Test): 15
DataInputBuffer (org.apache.cassandra.io.util.DataInputBuffer): 13
DataOutputBuffer (org.apache.cassandra.io.util.DataOutputBuffer): 11
IOException (java.io.IOException): 9
DataOutputPlus (org.apache.cassandra.io.util.DataOutputPlus): 8
ByteBuffer (java.nio.ByteBuffer): 6
RowUpdateBuilder (org.apache.cassandra.db.RowUpdateBuilder): 5
ByteArrayInputStream (java.io.ByteArrayInputStream): 4
ArrayList (java.util.ArrayList): 4
UUID (java.util.UUID): 4
CountDownLatch (java.util.concurrent.CountDownLatch): 4
Mutation (org.apache.cassandra.db.Mutation): 4
BytesReadTracker (org.apache.cassandra.io.util.BytesReadTracker): 4
TrackedDataInputPlus (org.apache.cassandra.io.util.TrackedDataInputPlus): 4
TrackedInputStream (org.apache.cassandra.io.util.TrackedInputStream): 4
AtomicInteger (java.util.concurrent.atomic.AtomicInteger): 3
TableMetadata (org.apache.cassandra.schema.TableMetadata): 3
ByteBuf (io.netty.buffer.ByteBuf): 2
ChannelHandlerContext (io.netty.channel.ChannelHandlerContext): 2