Example 1 with ByteBufferOutputStream

Use of com.github.ambry.utils.ByteBufferOutputStream in project ambry by LinkedIn.

From the class HardDeleteRecoveryMetadata, method getBlobPropertiesRecord:

private BlobProperties getBlobPropertiesRecord(MessageReadSet readSet, int readSetIndex, long relativeOffset, long blobPropertiesSize) throws MessageFormatException, IOException {
    /* Read the field from the channel */
    ByteBuffer blobProperties = ByteBuffer.allocate((int) blobPropertiesSize);
    readSet.writeTo(readSetIndex, Channels.newChannel(new ByteBufferOutputStream(blobProperties)), relativeOffset, blobPropertiesSize);
    blobProperties.flip();
    return deserializeBlobProperties(new ByteBufferInputStream(blobProperties));
}
Also used: ByteBufferOutputStream (com.github.ambry.utils.ByteBufferOutputStream), ByteBufferInputStream (com.github.ambry.utils.ByteBufferInputStream), ByteBuffer (java.nio.ByteBuffer)
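
The same three-step pattern recurs in every example below: wrap a pre-sized ByteBuffer in a ByteBufferOutputStream, fill it through a WritableByteChannel, then flip() before reading. A minimal, self-contained sketch of the pattern, assuming only the ByteBufferOutputStream(ByteBuffer) constructor already shown above:

import com.github.ambry.utils.ByteBufferOutputStream;

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.Channels;
import java.nio.channels.WritableByteChannel;
import java.nio.charset.StandardCharsets;

public class ByteBufferOutputStreamSketch {
    public static void main(String[] args) throws IOException {
        byte[] payload = "hello ambry".getBytes(StandardCharsets.UTF_8);
        // Size the buffer to exactly the record being read, as getBlobPropertiesRecord does.
        ByteBuffer buf = ByteBuffer.allocate(payload.length);
        // Channels.newChannel adapts the stream so channel-based APIs (like readSet.writeTo) can fill it.
        WritableByteChannel channel = Channels.newChannel(new ByteBufferOutputStream(buf));
        channel.write(ByteBuffer.wrap(payload)); // stands in for readSet.writeTo(...)
        buf.flip(); // switch the buffer from writing to reading before deserializing
        System.out.println(new String(buf.array(), 0, buf.limit(), StandardCharsets.UTF_8));
    }
}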

Example 2 with ByteBufferOutputStream

Use of com.github.ambry.utils.ByteBufferOutputStream in project ambry by LinkedIn.

From the class BlobStoreTest, method checkStoreInfo:

/**
 * Verifies the provided {@code storeInfo} for correctness of the {@link MessageInfo}. Also reads the blob as
 * described by the {@link MessageReadSet} inside {@code storeInfo} to verify that it matches the reference.
 * @param storeInfo the {@link StoreInfo} to verify.
 * @param expectedKeys all the {@link MockId}s that are expected to be found in {@code storeInfo}.
 * @throws IOException
 */
private void checkStoreInfo(StoreInfo storeInfo, Set<MockId> expectedKeys) throws IOException {
    List<MessageInfo> messageInfos = storeInfo.getMessageReadSetInfo();
    MessageReadSet readSet = storeInfo.getMessageReadSet();
    assertEquals("ReadSet contains an unexpected number of messages", expectedKeys.size(), readSet.count());
    Set<MockId> examinedKeys = new HashSet<>();
    for (int i = 0; i < messageInfos.size(); i++) {
        MessageInfo messageInfo = messageInfos.get(i);
        MockId id = (MockId) messageInfo.getStoreKey();
        MessageInfo expectedInfo = allKeys.get(id).getFirst();
        assertEquals("Unexpected size in MessageInfo", expectedInfo.getSize(), messageInfo.getSize());
        assertEquals("AccountId mismatch", expectedInfo.getAccountId(), messageInfo.getAccountId());
        assertEquals("ContainerId mismatch", expectedInfo.getContainerId(), messageInfo.getContainerId());
        assertEquals("OperationTime mismatch", expectedInfo.getOperationTimeMs(), messageInfo.getOperationTimeMs());
        assertEquals("Unexpected expiresAtMs in MessageInfo", (expectedInfo.getExpirationTimeInMs() != Utils.Infinite_Time ? (expectedInfo.getExpirationTimeInMs() / Time.MsPerSec) * Time.MsPerSec : Utils.Infinite_Time), messageInfo.getExpirationTimeInMs());
        assertEquals("Unexpected key in readSet", id, readSet.getKeyAt(i));
        assertEquals("Unexpected size in ReadSet", expectedInfo.getSize(), readSet.sizeInBytes(i));
        ByteBuffer readBuf = ByteBuffer.allocate((int) expectedInfo.getSize());
        ByteBufferOutputStream stream = new ByteBufferOutputStream(readBuf);
        WritableByteChannel channel = Channels.newChannel(stream);
        readSet.writeTo(i, channel, 0, expectedInfo.getSize());
        ByteBuffer expectedData = allKeys.get(id).getSecond();
        assertArrayEquals("Data obtained from readSet does not match original", expectedData.array(), readBuf.array());
        examinedKeys.add(id);
    }
    assertEquals("Expected and examined keys do not match", expectedKeys, examinedKeys);
}
Also used: ByteBufferOutputStream (com.github.ambry.utils.ByteBufferOutputStream), WritableByteChannel (java.nio.channels.WritableByteChannel), ByteBuffer (java.nio.ByteBuffer), HashSet (java.util.HashSet)
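
The expiresAtMs assertion above floors finite expiration times to whole seconds because the store persists them at second granularity. A small sketch of just that rounding, with hypothetical MS_PER_SEC and INFINITE_TIME constants standing in for Time.MsPerSec and Utils.Infinite_Time:

public class ExpirationRounding {
    // Hypothetical stand-ins for com.github.ambry.utils.Time.MsPerSec and Utils.Infinite_Time.
    static final long MS_PER_SEC = 1000L;
    static final long INFINITE_TIME = -1L;

    // Mirrors the expected-value expression in checkStoreInfo: pass infinite expiry
    // through unchanged, floor everything else to second precision.
    static long expectedExpiresAtMs(long expirationTimeMs) {
        return expirationTimeMs != INFINITE_TIME ? (expirationTimeMs / MS_PER_SEC) * MS_PER_SEC : INFINITE_TIME;
    }

    public static void main(String[] args) {
        System.out.println(expectedExpiresAtMs(1234L));          // 1000: floored to the second
        System.out.println(expectedExpiresAtMs(INFINITE_TIME));  // -1: infinite expiry unchanged
    }
}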

Example 3 with ByteBufferOutputStream

Use of com.github.ambry.utils.ByteBufferOutputStream in project ambry by LinkedIn.

From the class ServerReadPerformance, method main:

public static void main(String[] args) {
    ConnectionPool connectionPool = null;
    FileWriter writer = null;
    try {
        OptionParser parser = new OptionParser();
        ArgumentAcceptingOptionSpec<String> logToReadOpt = parser.accepts("logToRead", "The log that needs to be replayed for traffic").withRequiredArg().describedAs("log_to_read").ofType(String.class);
        ArgumentAcceptingOptionSpec<String> hardwareLayoutOpt = parser.accepts("hardwareLayout", "The path of the hardware layout file").withRequiredArg().describedAs("hardware_layout").ofType(String.class);
        ArgumentAcceptingOptionSpec<String> partitionLayoutOpt = parser.accepts("partitionLayout", "The path of the partition layout file").withRequiredArg().describedAs("partition_layout").ofType(String.class);
        ArgumentAcceptingOptionSpec<Integer> readsPerSecondOpt = parser.accepts("readsPerSecond", "The rate at which reads need to be performed").withRequiredArg().describedAs("The number of reads per second").ofType(Integer.class).defaultsTo(1000);
        ArgumentAcceptingOptionSpec<Long> measurementIntervalOpt = parser.accepts("measurementInterval", "The interval in second to report performance result").withOptionalArg().describedAs("The CPU time spent for getting blobs, not wall time").ofType(Long.class).defaultsTo(300L);
        ArgumentAcceptingOptionSpec<Boolean> verboseLoggingOpt = parser.accepts("enableVerboseLogging", "Enables verbose logging").withOptionalArg().describedAs("Enable verbose logging").ofType(Boolean.class).defaultsTo(false);
        ArgumentAcceptingOptionSpec<String> sslEnabledDatacentersOpt = parser.accepts("sslEnabledDatacenters", "Datacenters to which ssl should be enabled").withOptionalArg().describedAs("Comma separated list").ofType(String.class).defaultsTo("");
        ArgumentAcceptingOptionSpec<String> sslKeystorePathOpt = parser.accepts("sslKeystorePath", "SSL key store path").withOptionalArg().describedAs("The file path of SSL key store").defaultsTo("").ofType(String.class);
        ArgumentAcceptingOptionSpec<String> sslKeystoreTypeOpt = parser.accepts("sslKeystoreType", "SSL key store type").withOptionalArg().describedAs("The type of SSL key store").defaultsTo("").ofType(String.class);
        ArgumentAcceptingOptionSpec<String> sslTruststorePathOpt = parser.accepts("sslTruststorePath", "SSL trust store path").withOptionalArg().describedAs("The file path of SSL trust store").defaultsTo("").ofType(String.class);
        ArgumentAcceptingOptionSpec<String> sslKeystorePasswordOpt = parser.accepts("sslKeystorePassword", "SSL key store password").withOptionalArg().describedAs("The password of SSL key store").defaultsTo("").ofType(String.class);
        ArgumentAcceptingOptionSpec<String> sslKeyPasswordOpt = parser.accepts("sslKeyPassword", "SSL key password").withOptionalArg().describedAs("The password of SSL private key").defaultsTo("").ofType(String.class);
        ArgumentAcceptingOptionSpec<String> sslTruststorePasswordOpt = parser.accepts("sslTruststorePassword", "SSL trust store password").withOptionalArg().describedAs("The password of SSL trust store").defaultsTo("").ofType(String.class);
        ArgumentAcceptingOptionSpec<String> sslCipherSuitesOpt = parser.accepts("sslCipherSuites", "SSL enabled cipher suites").withOptionalArg().describedAs("Comma separated list").defaultsTo("TLS_RSA_WITH_AES_128_CBC_SHA").ofType(String.class);
        OptionSet options = parser.parse(args);
        ArrayList<OptionSpec> listOpt = new ArrayList<>();
        listOpt.add(logToReadOpt);
        listOpt.add(hardwareLayoutOpt);
        listOpt.add(partitionLayoutOpt);
        ToolUtils.ensureOrExit(listOpt, options, parser);
        long measurementIntervalNs = options.valueOf(measurementIntervalOpt) * SystemTime.NsPerSec;
        ToolUtils.validateSSLOptions(options, parser, sslEnabledDatacentersOpt, sslKeystorePathOpt, sslKeystoreTypeOpt, sslTruststorePathOpt, sslKeystorePasswordOpt, sslKeyPasswordOpt, sslTruststorePasswordOpt);
        String sslEnabledDatacenters = options.valueOf(sslEnabledDatacentersOpt);
        Properties sslProperties;
        if (sslEnabledDatacenters.length() != 0) {
            sslProperties = ToolUtils.createSSLProperties(sslEnabledDatacenters, options.valueOf(sslKeystorePathOpt), options.valueOf(sslKeystoreTypeOpt), options.valueOf(sslKeystorePasswordOpt), options.valueOf(sslKeyPasswordOpt), options.valueOf(sslTruststorePathOpt), options.valueOf(sslTruststorePasswordOpt), options.valueOf(sslCipherSuitesOpt));
        } else {
            sslProperties = new Properties();
        }
        ToolUtils.addClusterMapProperties(sslProperties);
        String logToRead = options.valueOf(logToReadOpt);
        int readsPerSecond = options.valueOf(readsPerSecondOpt);
        boolean enableVerboseLogging = options.has(verboseLoggingOpt);
        if (enableVerboseLogging) {
            System.out.println("Enabled verbose logging");
        }
        File logFile = new File(System.getProperty("user.dir"), "readperfresult");
        writer = new FileWriter(logFile);
        String hardwareLayoutPath = options.valueOf(hardwareLayoutOpt);
        String partitionLayoutPath = options.valueOf(partitionLayoutOpt);
        ClusterMapConfig clusterMapConfig = new ClusterMapConfig(new VerifiableProperties(sslProperties));
        ClusterMap map = ((ClusterAgentsFactory) Utils.getObj(clusterMapConfig.clusterMapClusterAgentsFactory, clusterMapConfig, hardwareLayoutPath, partitionLayoutPath)).getClusterMap();
        final AtomicLong totalTimeTaken = new AtomicLong(0);
        final AtomicLong totalReads = new AtomicLong(0);
        final AtomicBoolean shutdown = new AtomicBoolean(false);
        // attach shutdown handler to catch control-c
        Runtime.getRuntime().addShutdownHook(new Thread() {

            public void run() {
                try {
                    System.out.println("Shutdown invoked");
                    shutdown.set(true);
                    String message = "Total reads : " + totalReads.get() + "  Total time taken : " + totalTimeTaken.get() + " Nano Seconds  Average time taken per read " + ((double) totalTimeTaken.get()) / SystemTime.NsPerSec / totalReads.get() + " Seconds";
                    System.out.println(message);
                } catch (Exception e) {
                    System.out.println("Error while shutting down " + e);
                }
            }
        });
        final BufferedReader br = new BufferedReader(new FileReader(logToRead));
        Throttler throttler = new Throttler(readsPerSecond, 100, true, SystemTime.getInstance());
        String line;
        ConnectedChannel channel = null;
        ConnectionPoolConfig connectionPoolConfig = new ConnectionPoolConfig(new VerifiableProperties(new Properties()));
        VerifiableProperties vProps = new VerifiableProperties(sslProperties);
        SSLConfig sslConfig = new SSLConfig(vProps);
        clusterMapConfig = new ClusterMapConfig(vProps);
        connectionPool = new BlockingChannelConnectionPool(connectionPoolConfig, sslConfig, clusterMapConfig, new MetricRegistry());
        long totalNumberOfGetBlobs = 0;
        long totalLatencyForGetBlobs = 0;
        ArrayList<Long> latenciesForGetBlobs = new ArrayList<Long>();
        long maxLatencyForGetBlobs = 0;
        long minLatencyForGetBlobs = Long.MAX_VALUE;
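        // For each blob id replayed from the log, exercise every replica of its partition:
        // get the blob, its properties, and its user metadata, then delete it and throttle.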
        while ((line = br.readLine()) != null) {
            String[] id = line.split("-");
            BlobData blobData = null;
            BlobId blobId = new BlobId(id[1], map);
            ArrayList<BlobId> blobIds = new ArrayList<BlobId>();
            blobIds.add(blobId);
            for (ReplicaId replicaId : blobId.getPartition().getReplicaIds()) {
                long startTimeGetBlob = 0;
                ArrayList<PartitionRequestInfo> partitionRequestInfoList = new ArrayList<PartitionRequestInfo>();
                try {
                    partitionRequestInfoList.clear();
                    PartitionRequestInfo partitionRequestInfo = new PartitionRequestInfo(blobId.getPartition(), blobIds);
                    partitionRequestInfoList.add(partitionRequestInfo);
                    GetRequest getRequest = new GetRequest(1, "getperf", MessageFormatFlags.Blob, partitionRequestInfoList, GetOption.None);
                    Port port = replicaId.getDataNodeId().getPortToConnectTo();
                    channel = connectionPool.checkOutConnection(replicaId.getDataNodeId().getHostname(), port, 10000);
                    startTimeGetBlob = SystemTime.getInstance().nanoseconds();
                    channel.send(getRequest);
                    DataInputStream receiveStream = channel.receive().getInputStream();
                    GetResponse getResponse = GetResponse.readFrom(receiveStream, map);
                    blobData = MessageFormatRecord.deserializeBlob(getResponse.getInputStream());
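                    // Drain the blob content into a local buffer so the measured latency covers the full transfer.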
                    long sizeRead = 0;
                    byte[] outputBuffer = new byte[(int) blobData.getSize()];
                    ByteBufferOutputStream streamOut = new ByteBufferOutputStream(ByteBuffer.wrap(outputBuffer));
                    ByteBuf buffer = blobData.content();
                    try {
                        buffer.readBytes(streamOut, (int) blobData.getSize());
                    } finally {
                        buffer.release();
                    }
                    long latencyPerBlob = SystemTime.getInstance().nanoseconds() - startTimeGetBlob;
                    totalTimeTaken.addAndGet(latencyPerBlob);
                    latenciesForGetBlobs.add(latencyPerBlob);
                    totalReads.incrementAndGet();
                    totalNumberOfGetBlobs++;
                    totalLatencyForGetBlobs += latencyPerBlob;
                    if (enableVerboseLogging) {
                        System.out.println("Time taken to get blob id " + blobId + " in ms " + latencyPerBlob / SystemTime.NsPerMs);
                    }
                    if (latencyPerBlob > maxLatencyForGetBlobs) {
                        maxLatencyForGetBlobs = latencyPerBlob;
                    }
                    if (latencyPerBlob < minLatencyForGetBlobs) {
                        minLatencyForGetBlobs = latencyPerBlob;
                    }
                    if (totalLatencyForGetBlobs >= measurementIntervalNs) {
                        Collections.sort(latenciesForGetBlobs);
                        int index99 = (int) (latenciesForGetBlobs.size() * 0.99) - 1;
                        int index95 = (int) (latenciesForGetBlobs.size() * 0.95) - 1;
                        String message = totalNumberOfGetBlobs + "," + (double) latenciesForGetBlobs.get(index99) / SystemTime.NsPerSec + "," + (double) latenciesForGetBlobs.get(index95) / SystemTime.NsPerSec + "," + ((double) totalLatencyForGetBlobs / SystemTime.NsPerSec / totalNumberOfGetBlobs);
                        System.out.println(message);
                        writer.write(message + "\n");
                        totalLatencyForGetBlobs = 0;
                        latenciesForGetBlobs.clear();
                        totalNumberOfGetBlobs = 0;
                        maxLatencyForGetBlobs = 0;
                        minLatencyForGetBlobs = Long.MAX_VALUE;
                    }
                    partitionRequestInfoList.clear();
                    partitionRequestInfo = new PartitionRequestInfo(blobId.getPartition(), blobIds);
                    partitionRequestInfoList.add(partitionRequestInfo);
                    GetRequest getRequestProperties = new GetRequest(1, "getperf", MessageFormatFlags.BlobProperties, partitionRequestInfoList, GetOption.None);
                    long startTimeGetBlobProperties = SystemTime.getInstance().nanoseconds();
                    channel.send(getRequestProperties);
                    DataInputStream receivePropertyStream = channel.receive().getInputStream();
                    GetResponse getResponseProperty = GetResponse.readFrom(receivePropertyStream, map);
                    BlobProperties blobProperties = MessageFormatRecord.deserializeBlobProperties(getResponseProperty.getInputStream());
                    long endTimeGetBlobProperties = SystemTime.getInstance().nanoseconds() - startTimeGetBlobProperties;
                    partitionRequestInfoList.clear();
                    partitionRequestInfo = new PartitionRequestInfo(blobId.getPartition(), blobIds);
                    partitionRequestInfoList.add(partitionRequestInfo);
                    GetRequest getRequestUserMetadata = new GetRequest(1, "getperf", MessageFormatFlags.BlobUserMetadata, partitionRequestInfoList, GetOption.None);
                    long startTimeGetBlobUserMetadata = SystemTime.getInstance().nanoseconds();
                    channel.send(getRequestUserMetadata);
                    DataInputStream receiveUserMetadataStream = channel.receive().getInputStream();
                    GetResponse getResponseUserMetadata = GetResponse.readFrom(receiveUserMetadataStream, map);
                    ByteBuffer userMetadata = MessageFormatRecord.deserializeUserMetadata(getResponseUserMetadata.getInputStream());
                    long endTimeGetBlobUserMetadata = SystemTime.getInstance().nanoseconds() - startTimeGetBlobUserMetadata;
                    // delete the blob
                    DeleteRequest deleteRequest = new DeleteRequest(0, "perf", blobId, System.currentTimeMillis());
                    channel.send(deleteRequest);
                    DeleteResponse deleteResponse = DeleteResponse.readFrom(channel.receive().getInputStream());
                    if (deleteResponse.getError() != ServerErrorCode.No_Error) {
                        throw new UnexpectedException("error " + deleteResponse.getError());
                    }
                    throttler.maybeThrottle(1);
                } finally {
                    if (channel != null) {
                        connectionPool.checkInConnection(channel);
                        channel = null;
                    }
                }
            }
        }
    } catch (Exception e) {
        e.printStackTrace();
        System.out.println("Error in server read performance " + e);
    } finally {
        if (writer != null) {
            try {
                writer.close();
            } catch (Exception e) {
                System.out.println("Error when closing writer");
            }
        }
        if (connectionPool != null) {
            connectionPool.shutdown();
        }
    }
}
Also used: ClusterMap (com.github.ambry.clustermap.ClusterMap), Port (com.github.ambry.network.Port), ArrayList (java.util.ArrayList), GetRequest (com.github.ambry.protocol.GetRequest), BlobData (com.github.ambry.messageformat.BlobData), ClusterAgentsFactory (com.github.ambry.clustermap.ClusterAgentsFactory), AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean), Throttler (com.github.ambry.utils.Throttler), UnexpectedException (java.rmi.UnexpectedException), VerifiableProperties (com.github.ambry.config.VerifiableProperties), MetricRegistry (com.codahale.metrics.MetricRegistry), ConnectedChannel (com.github.ambry.network.ConnectedChannel), ClusterMapConfig (com.github.ambry.config.ClusterMapConfig), BlockingChannelConnectionPool (com.github.ambry.network.BlockingChannelConnectionPool), DeleteResponse (com.github.ambry.protocol.DeleteResponse), AtomicLong (java.util.concurrent.atomic.AtomicLong), File (java.io.File), ConnectionPool (com.github.ambry.network.ConnectionPool), OptionSpec (joptsimple.OptionSpec), ArgumentAcceptingOptionSpec (joptsimple.ArgumentAcceptingOptionSpec), ConnectionPoolConfig (com.github.ambry.config.ConnectionPoolConfig), FileWriter (java.io.FileWriter), BlobProperties (com.github.ambry.messageformat.BlobProperties), Properties (java.util.Properties), ByteBuf (io.netty.buffer.ByteBuf), OptionParser (joptsimple.OptionParser), FileReader (java.io.FileReader), SSLConfig (com.github.ambry.config.SSLConfig), PartitionRequestInfo (com.github.ambry.protocol.PartitionRequestInfo), DataInputStream (java.io.DataInputStream), GetResponse (com.github.ambry.protocol.GetResponse), ByteBuffer (java.nio.ByteBuffer), ReplicaId (com.github.ambry.clustermap.ReplicaId), ByteBufferOutputStream (com.github.ambry.utils.ByteBufferOutputStream), BufferedReader (java.io.BufferedReader), OptionSet (joptsimple.OptionSet), BlobId (com.github.ambry.commons.BlobId), DeleteRequest (com.github.ambry.protocol.DeleteRequest)
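
The p99/p95 report above indexes the sorted latency list with (int) (size * 0.99) - 1, which evaluates to -1 for very small samples (a single latency gives (int) (1 * 0.99) - 1 == -1). A sketch of the same computation with a clamped index, using only java.util; not the tool's code, just an illustration of the guard:

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

public class LatencyPercentiles {
    // Returns the latency at the given fraction (e.g. 0.99) of a sample, clamping the
    // index so small samples cannot underflow as the raw computation above can.
    static long percentileNs(List<Long> latenciesNs, double fraction) {
        List<Long> sorted = new ArrayList<>(latenciesNs);
        Collections.sort(sorted);
        int index = Math.max(0, (int) Math.ceil(sorted.size() * fraction) - 1);
        return sorted.get(index);
    }

    public static void main(String[] args) {
        List<Long> sample = List.of(5L, 9L, 12L, 30L, 100L);
        System.out.println("p95 = " + percentileNs(sample, 0.95)); // 100
        System.out.println("p99 = " + percentileNs(sample, 0.99)); // 100
    }
}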

Example 4 with ByteBufferOutputStream

Use of com.github.ambry.utils.ByteBufferOutputStream in project ambry by LinkedIn.

From the class CloudBlobStoreTest, method testGetForExistingBlobs:

/**
 * Test cloud store get with a list of blobs that are all valid and were previously uploaded to the store.
 * @param blobIds list of blob ids to get
 * @param blobIdToUploadedDataMap map of expected blobid to data buffers
 * @throws Exception
 */
private void testGetForExistingBlobs(List<BlobId> blobIds, Map<BlobId, ByteBuffer> blobIdToUploadedDataMap) throws Exception {
    StoreInfo storeInfo = store.get(blobIds, EnumSet.noneOf(StoreGetOptions.class));
    assertEquals("Number of records returned by get should be same as uploaded", storeInfo.getMessageReadSetInfo().size(), blobIds.size());
    for (int i = 0; i < storeInfo.getMessageReadSetInfo().size(); i++) {
        MessageInfo messageInfo = storeInfo.getMessageReadSetInfo().get(i);
        if (blobIdToUploadedDataMap.containsKey(messageInfo.getStoreKey())) {
            ByteBuffer uploadedData = blobIdToUploadedDataMap.get(messageInfo.getStoreKey());
            ByteBuffer downloadedData = ByteBuffer.allocate((int) messageInfo.getSize());
            WritableByteChannel writableByteChannel = Channels.newChannel(new ByteBufferOutputStream(downloadedData));
            storeInfo.getMessageReadSet().writeTo(i, writableByteChannel, 0, messageInfo.getSize());
            downloadedData.flip();
            assertEquals(uploadedData, downloadedData);
            break;
        }
    }
}
Also used: ByteBufferOutputStream (com.github.ambry.utils.ByteBufferOutputStream), StoreGetOptions (com.github.ambry.store.StoreGetOptions), WritableByteChannel (java.nio.channels.WritableByteChannel), StoreInfo (com.github.ambry.store.StoreInfo), ByteBuffer (java.nio.ByteBuffer), MessageInfo (com.github.ambry.store.MessageInfo)
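
The downloadedData.flip() before the final assertEquals matters because ByteBuffer.equals compares only the bytes remaining between position and limit. A minimal demonstration:

import java.nio.ByteBuffer;

public class BufferEqualsSketch {
    public static void main(String[] args) {
        ByteBuffer uploaded = ByteBuffer.wrap(new byte[] { 1, 2, 3 });
        ByteBuffer downloaded = ByteBuffer.allocate(3);
        downloaded.put(new byte[] { 1, 2, 3 });
        // Before flip(), downloaded's position equals its limit, so zero bytes "remain"
        // and the buffers compare unequal despite identical contents.
        System.out.println(uploaded.equals(downloaded)); // false
        downloaded.flip(); // position = 0, limit = 3: equals now compares the payload
        System.out.println(uploaded.equals(downloaded)); // true
    }
}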

Example 5 with ByteBufferOutputStream

Use of com.github.ambry.utils.ByteBufferOutputStream in project ambry by LinkedIn.

From the class HardDeleteRecoveryMetadata, method getHardDeleteInfo:

/**
 * For the message at readSetIndex, does the following:
 *   1. Reads the whole blob and does a crc check. If the crc check fails, returns null - this means that the record
 *   is not retrievable anyway.
 *   2. Adds to a hard delete replacement write set.
 *   3. Returns the hard delete info.
 */
private HardDeleteInfo getHardDeleteInfo(int readSetIndex) {
    HardDeleteInfo hardDeleteInfo = null;
    try {
        /* Read the version field in the header */
        ByteBuffer headerVersionBuf = ByteBuffer.allocate(Version_Field_Size_In_Bytes);
        readSet.writeTo(readSetIndex, Channels.newChannel(new ByteBufferOutputStream(headerVersionBuf)), 0, Version_Field_Size_In_Bytes);
        headerVersionBuf.flip();
        short headerVersion = headerVersionBuf.getShort();
        if (!isValidHeaderVersion(headerVersion)) {
            throw new MessageFormatException("Unknown header version during hard delete " + headerVersion + " storeKey " + readSet.getKeyAt(readSetIndex), MessageFormatErrorCodes.Unknown_Format_Version);
        }
        ByteBuffer header = ByteBuffer.allocate(getHeaderSizeForVersion(headerVersion));
        /* Read the rest of the header */
        header.putShort(headerVersion);
        readSet.writeTo(readSetIndex, Channels.newChannel(new ByteBufferOutputStream(header)), Version_Field_Size_In_Bytes, header.capacity() - Version_Field_Size_In_Bytes);
        header.flip();
        MessageHeader_Format headerFormat = getMessageHeader(headerVersion, header);
        headerFormat.verifyHeader();
        StoreKey storeKey = storeKeyFactory.getStoreKey(new DataInputStream(new MessageReadSetIndexInputStream(readSet, readSetIndex, header.capacity())));
        if (storeKey.compareTo(readSet.getKeyAt(readSetIndex)) != 0) {
            throw new MessageFormatException("Id mismatch between metadata and store - metadataId " + readSet.getKeyAt(readSetIndex) + " storeId " + storeKey, MessageFormatErrorCodes.Store_Key_Id_MisMatch);
        }
        if (!headerFormat.isPutRecord()) {
            throw new MessageFormatException("Cleanup operation for a non-PUT record is unsupported", MessageFormatErrorCodes.IO_Error);
        } else {
            HardDeleteRecoveryMetadata hardDeleteRecoveryMetadata = recoveryInfoMap.get(storeKey);
            int userMetadataRelativeOffset = headerFormat.getUserMetadataRecordRelativeOffset();
            short userMetadataVersion;
            int userMetadataSize;
            short blobRecordVersion;
            BlobType blobType;
            long blobStreamSize;
            DeserializedUserMetadata userMetadataInfo;
            DeserializedBlob blobRecordInfo;
            if (hardDeleteRecoveryMetadata == null) {
                userMetadataInfo = getUserMetadataInfo(readSet, readSetIndex, headerFormat.getUserMetadataRecordRelativeOffset(), headerFormat.getUserMetadataRecordSize());
                userMetadataSize = userMetadataInfo.getUserMetadata().capacity();
                userMetadataVersion = userMetadataInfo.getVersion();
                blobRecordInfo = getBlobRecordInfo(readSet, readSetIndex, headerFormat.getBlobRecordRelativeOffset(), headerFormat.getBlobRecordSize());
                blobStreamSize = blobRecordInfo.getBlobData().getSize();
                blobRecordVersion = blobRecordInfo.getVersion();
                blobType = blobRecordInfo.getBlobData().getBlobType();
                hardDeleteRecoveryMetadata = new HardDeleteRecoveryMetadata(headerVersion, userMetadataVersion, userMetadataSize, blobRecordVersion, blobType, blobStreamSize, storeKey);
            } else {
                logger.trace("Skipping crc check for user metadata and blob stream fields for key {}", storeKey);
                userMetadataVersion = hardDeleteRecoveryMetadata.getUserMetadataVersion();
                blobRecordVersion = hardDeleteRecoveryMetadata.getBlobRecordVersion();
                blobType = hardDeleteRecoveryMetadata.getBlobType();
                userMetadataSize = hardDeleteRecoveryMetadata.getUserMetadataSize();
                blobStreamSize = hardDeleteRecoveryMetadata.getBlobStreamSize();
            }
            HardDeleteMessageFormatInputStream hardDeleteStream = new HardDeleteMessageFormatInputStream(userMetadataRelativeOffset, userMetadataVersion, userMetadataSize, blobRecordVersion, blobType, blobStreamSize);
            hardDeleteInfo = new HardDeleteInfo(Channels.newChannel(hardDeleteStream), hardDeleteStream.getSize(), hardDeleteStream.getHardDeleteStreamRelativeOffset(), hardDeleteRecoveryMetadata.toBytes());
        }
    } catch (Exception e) {
        logger.error("Exception when reading blob: ", e);
    }
    return hardDeleteInfo;
}
Also used: DataInputStream (java.io.DataInputStream), ByteBuffer (java.nio.ByteBuffer), StoreKey (com.github.ambry.store.StoreKey), IOException (java.io.IOException), NoSuchElementException (java.util.NoSuchElementException), ByteBufferOutputStream (com.github.ambry.utils.ByteBufferOutputStream), HardDeleteInfo (com.github.ambry.store.HardDeleteInfo)
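
getHardDeleteInfo reads the header in two passes: first just the version field, so the versioned header size can be determined, then the remainder at an offset, replaying the version into the full buffer. A self-contained sketch of that pattern; RecordSource is a hypothetical stand-in for readSet.writeTo, and the size function stands in for getHeaderSizeForVersion:

import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.function.IntUnaryOperator;

public class TwoPassHeaderRead {
    static final int VERSION_FIELD_SIZE_IN_BYTES = Short.BYTES;

    // Hypothetical stand-in for readSet.writeTo(index, channel, offset, length).
    interface RecordSource {
        void read(ByteBuffer dst, long offset, long length) throws IOException;
    }

    static ByteBuffer readHeader(RecordSource source, IntUnaryOperator headerSizeForVersion) throws IOException {
        // Pass 1: read only the version field so the full header size can be chosen.
        ByteBuffer versionBuf = ByteBuffer.allocate(VERSION_FIELD_SIZE_IN_BYTES);
        source.read(versionBuf, 0, VERSION_FIELD_SIZE_IN_BYTES);
        versionBuf.flip();
        short version = versionBuf.getShort();
        // Pass 2: allocate the versioned header, replay the version bytes already consumed,
        // then read the rest of the header starting just past the version field.
        ByteBuffer header = ByteBuffer.allocate(headerSizeForVersion.applyAsInt(version));
        header.putShort(version);
        source.read(header, VERSION_FIELD_SIZE_IN_BYTES, header.capacity() - VERSION_FIELD_SIZE_IN_BYTES);
        header.flip();
        return header;
    }

    public static void main(String[] args) throws IOException {
        // Back the source with an in-memory record: version 1 followed by four payload bytes.
        ByteBuffer record = ByteBuffer.wrap(new byte[] { 0, 1, 9, 9, 9, 9 });
        RecordSource source = (dst, offset, length) -> {
            for (long i = 0; i < length; i++) {
                dst.put(record.get((int) (offset + i)));
            }
        };
        ByteBuffer header = readHeader(source, version -> 6); // pretend version-1 headers are 6 bytes
        System.out.println("version = " + header.getShort() + ", size = " + header.limit());
    }
}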

Aggregations

ByteBufferOutputStream (com.github.ambry.utils.ByteBufferOutputStream): 21
ByteBuffer (java.nio.ByteBuffer): 21
WritableByteChannel (java.nio.channels.WritableByteChannel): 10
ByteBufferInputStream (com.github.ambry.utils.ByteBufferInputStream): 8
ArrayList (java.util.ArrayList): 7
MetricRegistry (com.codahale.metrics.MetricRegistry): 4
StoreKey (com.github.ambry.store.StoreKey): 4
DataInputStream (java.io.DataInputStream): 4
MockIdFactory (com.github.ambry.store.MockIdFactory): 3
Test (org.junit.Test): 3
MessageReadSet (com.github.ambry.store.MessageReadSet): 2
MockId (com.github.ambry.store.MockId): 2
IOException (java.io.IOException): 2
Random (java.util.Random): 2
ClusterAgentsFactory (com.github.ambry.clustermap.ClusterAgentsFactory): 1
ClusterMap (com.github.ambry.clustermap.ClusterMap): 1
ReplicaId (com.github.ambry.clustermap.ReplicaId): 1
BlobId (com.github.ambry.commons.BlobId): 1
ClusterMapConfig (com.github.ambry.config.ClusterMapConfig): 1
ConnectionPoolConfig (com.github.ambry.config.ConnectionPoolConfig): 1