Example 31 with BlobProperties

Use of com.github.ambry.messageformat.BlobProperties in project ambry by linkedin.

The class InMemoryRouter, method updateBlobTtl.

@Override
public Future<Void> updateBlobTtl(String blobId, String serviceId, long expiresAtMs, Callback<Void> callback, QuotaChargeCallback quotaChargeCallback) {
    FutureResult<Void> futureResult = new FutureResult<>();
    if (!handlePrechecks(futureResult, callback)) {
        return futureResult;
    }
    Exception exception = null;
    try {
        // Validate the blob ID before looking it up.
        checkBlobId(blobId);
        if (!deletedBlobs.contains(blobId) && blobs.containsKey(blobId)) {
            InMemoryBlob blob = blobs.get(blobId);
            BlobProperties currentProps = blob.blobProperties;
            long newTtlSecs = Utils.getTtlInSecsFromExpiryMs(expiresAtMs, currentProps.getCreationTimeInMs());
            blob.blobProperties.setTimeToLiveInSeconds(newTtlSecs);
            if (notificationSystem != null) {
                notificationSystem.onBlobTtlUpdated(blobId, serviceId, expiresAtMs, null, null);
            }
        } else if (deletedBlobs.contains(blobId)) {
            exception = new RouterException("Blob has been deleted", RouterErrorCode.BlobDeleted);
        } else {
            exception = new RouterException("Blob not found", RouterErrorCode.BlobDoesNotExist);
        }
    } catch (RouterException e) {
        exception = e;
    } catch (Exception e) {
        exception = new RouterException(e, RouterErrorCode.UnexpectedInternalError);
    } finally {
        completeOperation(futureResult, callback, null, exception);
    }
    return futureResult;
}
Also used: BlobProperties (com.github.ambry.messageformat.BlobProperties)
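A minimal usage sketch for the method above, assuming an already-constructed InMemoryRouter named router and a blob ID returned by an earlier putBlob call; the service ID and the seven-day TTL are illustrative values, and both callbacks are omitted in favor of the returned future. Beyond the import listed above, this uses java.util.concurrent.Future and java.util.concurrent.TimeUnit.

public void updateTtlExample(InMemoryRouter router, String blobId) throws Exception {
    // Extend the blob's life to seven days from now (hypothetical policy).
    long expiresAtMs = System.currentTimeMillis() + TimeUnit.DAYS.toMillis(7);
    // Pass null for both callbacks; completion is observed through the future instead.
    Future<Void> future = router.updateBlobTtl(blobId, "sample-service", expiresAtMs, null, null);
    // get() surfaces RouterException failures (e.g. BlobDeleted) as an ExecutionException.
    future.get();
}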

Example 32 with BlobProperties

Use of com.github.ambry.messageformat.BlobProperties in project ambry by linkedin.

The class HardDeleteVerifier, method deserializeBlobProperties.

boolean deserializeBlobProperties(InputStream streamlog, InputStream oldStreamlog, boolean isDeleted) throws ContinueException {
    boolean caughtException = false;
    boolean caughtExceptionInOld = false;
    BlobProperties props = null;
    BlobProperties oldProps = null;
    try {
        props = MessageFormatRecord.deserializeBlobProperties(streamlog);
    } catch (MessageFormatException | IOException e) {
        caughtException = true;
    }
    try {
        oldProps = MessageFormatRecord.deserializeBlobProperties(oldStreamlog);
    } catch (MessageFormatException | IOException e) {
        caughtExceptionInOld = true;
    }
    if (!caughtException && !caughtExceptionInOld) {
        // Both logs deserialized successfully; the two records must match exactly.
        if (!props.toString().equals(oldProps.toString())) {
            System.out.println("Blob properties mismatch!");
            return false;
        }
    } else if (caughtException && !caughtExceptionInOld) {
        // Only the new log is corrupt; count it and skip this record.
        if (isDeleted) {
            corruptDeleted++;
        } else {
            corruptNonDeleted++;
        }
        throw new ContinueException("blob properties could not be deserialized.");
    } else if (caughtException) {
        throw new ContinueException("blob properties could not be deserialized in either log");
    } else {
        // Only the old log failed; there is nothing to compare the new record against.
        throw new ContinueException("blob properties could not be deserialized from the old log");
    }
    return true;
}
Also used: MessageFormatException (com.github.ambry.messageformat.MessageFormatException), BlobProperties (com.github.ambry.messageformat.BlobProperties), IOException (java.io.IOException)
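A hedged sketch of a caller driving this verifier over a pair of log streams; the HardDeleteVerifier instance, the file paths, and the caller living in the same package (the method and ContinueException are not public) are all assumptions for illustration. It additionally uses java.io.FileInputStream and java.io.InputStream.

void verifyBlobPropertiesExample(HardDeleteVerifier verifier) throws IOException {
    // Hypothetical log paths; the real tool derives these from its command line options.
    try (InputStream newLog = new FileInputStream("/tmp/current.log");
         InputStream oldLog = new FileInputStream("/tmp/original.log")) {
        boolean match = verifier.deserializeBlobProperties(newLog, oldLog, false);
        System.out.println(match ? "blob properties match" : "blob properties differ");
    } catch (ContinueException e) {
        // A record was corrupt; a scan loop would log this and advance to the next record.
        System.out.println("skipping record: " + e.getMessage());
    }
}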

Example 33 with BlobProperties

Use of com.github.ambry.messageformat.BlobProperties in project ambry by linkedin.

The class ServerAdminTool, method main.

/**
 * Runs the server admin tool.
 * @param args the command line arguments.
 * @throws Exception if the requested operation or the underlying setup fails.
 */
public static void main(String[] args) throws Exception {
    VerifiableProperties verifiableProperties = ToolUtils.getVerifiableProperties(args);
    ServerAdminToolConfig config = new ServerAdminToolConfig(verifiableProperties);
    ClusterMapConfig clusterMapConfig = new ClusterMapConfig(verifiableProperties);
    ClusterMap clusterMap = ((ClusterAgentsFactory) Utils.getObj(clusterMapConfig.clusterMapClusterAgentsFactory, clusterMapConfig, config.hardwareLayoutFilePath, config.partitionLayoutFilePath)).getClusterMap();
    SSLFactory sslFactory = !clusterMapConfig.clusterMapSslEnabledDatacenters.isEmpty() ? SSLFactory.getNewInstance(new SSLConfig(verifiableProperties)) : null;
    ServerAdminTool serverAdminTool = new ServerAdminTool(clusterMap, sslFactory, verifiableProperties);
    File file = new File(config.dataOutputFilePath);
    if (!file.exists() && !file.createNewFile()) {
        throw new IllegalStateException("Could not create " + file);
    }
    FileOutputStream outputFileStream = new FileOutputStream(config.dataOutputFilePath);
    DataNodeId dataNodeId = clusterMap.getDataNodeId(config.hostname, config.port);
    if (dataNodeId == null) {
        throw new IllegalArgumentException("Could not find a data node corresponding to " + config.hostname + ":" + config.port);
    }
    switch(config.typeOfOperation) {
        case GetBlobProperties:
            BlobId blobId = new BlobId(config.blobId, clusterMap);
            Pair<ServerErrorCode, BlobProperties> bpResponse = serverAdminTool.getBlobProperties(dataNodeId, blobId, config.getOption, clusterMap);
            if (bpResponse.getFirst() == ServerErrorCode.No_Error) {
                LOGGER.info("Blob properties for {} from {}: {}", blobId, dataNodeId, bpResponse.getSecond());
            } else {
                LOGGER.error("Failed to get blob properties for {} from {} with option {}. Error code is {}", blobId, dataNodeId, config.getOption, bpResponse.getFirst());
            }
            break;
        case GetUserMetadata:
            blobId = new BlobId(config.blobId, clusterMap);
            Pair<ServerErrorCode, ByteBuffer> umResponse = serverAdminTool.getUserMetadata(dataNodeId, blobId, config.getOption, clusterMap);
            if (umResponse.getFirst() == ServerErrorCode.No_Error) {
                writeBufferToFile(umResponse.getSecond(), outputFileStream);
                LOGGER.info("User metadata for {} from {} written to {}", blobId, dataNodeId, config.dataOutputFilePath);
            } else {
                LOGGER.error("Failed to get user metadata for {} from {} with option {}. Error code is {}", blobId, dataNodeId, config.getOption, umResponse.getFirst());
            }
            break;
        case GetBlob:
            blobId = new BlobId(config.blobId, clusterMap);
            Pair<ServerErrorCode, BlobData> bResponse = serverAdminTool.getBlob(dataNodeId, blobId, config.getOption, clusterMap);
            if (bResponse.getFirst() == ServerErrorCode.No_Error) {
                LOGGER.info("Blob type of {} from {} is {}", blobId, dataNodeId, bResponse.getSecond().getBlobType());
                ByteBuf buffer = bResponse.getSecond().content();
                try {
                    writeByteBufToFile(buffer, outputFileStream);
                } finally {
                    buffer.release();
                }
                LOGGER.info("Blob data for {} from {} written to {}", blobId, dataNodeId, config.dataOutputFilePath);
            } else {
                LOGGER.error("Failed to get blob data for {} from {} with option {}. Error code is {}", blobId, dataNodeId, config.getOption, bResponse.getFirst());
            }
            break;
        case TriggerCompaction:
            if (config.partitionIds.length > 0 && !config.partitionIds[0].isEmpty()) {
                for (String partitionIdStr : config.partitionIds) {
                    PartitionId partitionId = getPartitionIdFromStr(partitionIdStr, clusterMap);
                    ServerErrorCode errorCode = serverAdminTool.triggerCompaction(dataNodeId, partitionId);
                    if (errorCode == ServerErrorCode.No_Error) {
                        LOGGER.info("Compaction has been triggered for {} on {}", partitionId, dataNodeId);
                    } else {
                        LOGGER.error("From {}, received server error code {} for trigger compaction request on {}", dataNodeId, errorCode, partitionId);
                    }
                }
            } else {
                LOGGER.error("There were no partitions provided to trigger compaction on");
            }
            break;
        case RequestControl:
            if (config.partitionIds.length > 0 && !config.partitionIds[0].isEmpty()) {
                for (String partitionIdStr : config.partitionIds) {
                    PartitionId partitionId = getPartitionIdFromStr(partitionIdStr, clusterMap);
                    sendRequestControlRequest(serverAdminTool, dataNodeId, partitionId, config.requestTypeToControl, config.enableState);
                }
            } else {
                LOGGER.info("No partition list provided. Requesting enable status of {} to be set to {} on all partitions", config.requestTypeToControl, config.enableState);
                sendRequestControlRequest(serverAdminTool, dataNodeId, null, config.requestTypeToControl, config.enableState);
            }
            break;
        case ReplicationControl:
            List<String> origins = Collections.emptyList();
            if (config.origins.length > 0 && !config.origins[0].isEmpty()) {
                origins = Arrays.asList(config.origins);
            }
            if (config.partitionIds.length > 0 && !config.partitionIds[0].isEmpty()) {
                for (String partitionIdStr : config.partitionIds) {
                    PartitionId partitionId = getPartitionIdFromStr(partitionIdStr, clusterMap);
                    sendReplicationControlRequest(serverAdminTool, dataNodeId, partitionId, origins, config.enableState);
                }
            } else {
                LOGGER.info("No partition list provided. Requesting enable status for replication from {} to be set to {} on " + "all partitions", origins.isEmpty() ? "all DCs" : origins, config.enableState);
                sendReplicationControlRequest(serverAdminTool, dataNodeId, null, origins, config.enableState);
            }
            break;
        case CatchupStatus:
            if (config.partitionIds.length > 0 && !config.partitionIds[0].isEmpty()) {
                for (String partitionIdStr : config.partitionIds) {
                    PartitionId partitionId = getPartitionIdFromStr(partitionIdStr, clusterMap);
                    Pair<ServerErrorCode, Boolean> response = serverAdminTool.isCaughtUp(dataNodeId, partitionId, config.acceptableLagInBytes, config.numReplicasCaughtUpPerPartition);
                    if (response.getFirst() == ServerErrorCode.No_Error) {
                        LOGGER.info("Replicas are {} within {} bytes for {}", response.getSecond() ? "" : "NOT", config.acceptableLagInBytes, partitionId);
                    } else {
                        LOGGER.error("From {}, received server error code {} for request for catchup status of {}", dataNodeId, response.getFirst(), partitionId);
                    }
                }
            } else {
                Pair<ServerErrorCode, Boolean> response = serverAdminTool.isCaughtUp(dataNodeId, null, config.acceptableLagInBytes, config.numReplicasCaughtUpPerPartition);
                if (response.getFirst() == ServerErrorCode.No_Error) {
                    LOGGER.info("Replicas are {} within {} bytes for all partitions", response.getSecond() ? "" : "NOT", config.acceptableLagInBytes);
                } else {
                    LOGGER.error("From {}, received server error code {} for request for catchup status of all partitions", dataNodeId, response.getFirst());
                }
            }
            break;
        case BlobStoreControl:
            if (config.partitionIds.length > 0 && !config.partitionIds[0].isEmpty()) {
                for (String partitionIdStr : config.partitionIds) {
                    PartitionId partitionId = getPartitionIdFromStr(partitionIdStr, clusterMap);
                    sendBlobStoreControlRequest(serverAdminTool, dataNodeId, partitionId, config.numReplicasCaughtUpPerPartition, config.storeControlRequestType);
                }
            } else {
                LOGGER.error("There were no partitions provided to be controlled (Start/Stop)");
            }
            break;
        default:
            throw new IllegalStateException("Recognized but unsupported operation: " + config.typeOfOperation);
    }
    serverAdminTool.close();
    outputFileStream.close();
    clusterMap.close();
    System.out.println("Server admin tool is safely closed");
    System.exit(0);
}
Also used: ClusterMap (com.github.ambry.clustermap.ClusterMap), SSLFactory (com.github.ambry.commons.SSLFactory), ByteBuf (io.netty.buffer.ByteBuf), BlobData (com.github.ambry.messageformat.BlobData), ClusterAgentsFactory (com.github.ambry.clustermap.ClusterAgentsFactory), SSLConfig (com.github.ambry.config.SSLConfig), VerifiableProperties (com.github.ambry.config.VerifiableProperties), PartitionId (com.github.ambry.clustermap.PartitionId), ByteBuffer (java.nio.ByteBuffer), ClusterMapConfig (com.github.ambry.config.ClusterMapConfig), ServerErrorCode (com.github.ambry.server.ServerErrorCode), FileOutputStream (java.io.FileOutputStream), BlobProperties (com.github.ambry.messageformat.BlobProperties), File (java.io.File), DataNodeId (com.github.ambry.clustermap.DataNodeId), BlobId (com.github.ambry.commons.BlobId)
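Since this is a command line entry point, an invocation sketch may help. This is a minimal sketch assuming ambry's ToolUtils convention of a --propsFilePath flag; the path is hypothetical. The properties file supplies the values read through ServerAdminToolConfig above (hostname, port, typeOfOperation, blobId, dataOutputFilePath, and so on), with the exact key names defined by that config class.

public static void runAdminToolExample() throws Exception {
    // Hypothetical properties file naming the operation (e.g. GetBlobProperties),
    // the target data node, and the output file path.
    String[] toolArgs = { "--propsFilePath", "/tmp/server-admin.props" };
    // Note: on success, main() terminates the JVM via System.exit(0).
    ServerAdminTool.main(toolArgs);
}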

Example 34 with BlobProperties

Use of com.github.ambry.messageformat.BlobProperties in project ambry by linkedin.

The class ServerReadPerformance, method main.

public static void main(String[] args) {
    ConnectionPool connectionPool = null;
    FileWriter writer = null;
    try {
        OptionParser parser = new OptionParser();
        ArgumentAcceptingOptionSpec<String> logToReadOpt = parser.accepts("logToRead", "The log that needs to be replayed for traffic").withRequiredArg().describedAs("log_to_read").ofType(String.class);
        ArgumentAcceptingOptionSpec<String> hardwareLayoutOpt = parser.accepts("hardwareLayout", "The path of the hardware layout file").withRequiredArg().describedAs("hardware_layout").ofType(String.class);
        ArgumentAcceptingOptionSpec<String> partitionLayoutOpt = parser.accepts("partitionLayout", "The path of the partition layout file").withRequiredArg().describedAs("partition_layout").ofType(String.class);
        ArgumentAcceptingOptionSpec<Integer> readsPerSecondOpt = parser.accepts("readsPerSecond", "The rate at which reads need to be performed").withRequiredArg().describedAs("The number of reads per second").ofType(Integer.class).defaultsTo(1000);
        ArgumentAcceptingOptionSpec<Long> measurementIntervalOpt = parser.accepts("measurementInterval", "The interval in second to report performance result").withOptionalArg().describedAs("The CPU time spent for getting blobs, not wall time").ofType(Long.class).defaultsTo(300L);
        ArgumentAcceptingOptionSpec<Boolean> verboseLoggingOpt = parser.accepts("enableVerboseLogging", "Enables verbose logging").withOptionalArg().describedAs("Enable verbose logging").ofType(Boolean.class).defaultsTo(false);
        ArgumentAcceptingOptionSpec<String> sslEnabledDatacentersOpt = parser.accepts("sslEnabledDatacenters", "Datacenters to which ssl should be enabled").withOptionalArg().describedAs("Comma separated list").ofType(String.class).defaultsTo("");
        ArgumentAcceptingOptionSpec<String> sslKeystorePathOpt = parser.accepts("sslKeystorePath", "SSL key store path").withOptionalArg().describedAs("The file path of SSL key store").defaultsTo("").ofType(String.class);
        ArgumentAcceptingOptionSpec<String> sslKeystoreTypeOpt = parser.accepts("sslKeystoreType", "SSL key store type").withOptionalArg().describedAs("The type of SSL key store").defaultsTo("").ofType(String.class);
        ArgumentAcceptingOptionSpec<String> sslTruststorePathOpt = parser.accepts("sslTruststorePath", "SSL trust store path").withOptionalArg().describedAs("The file path of SSL trust store").defaultsTo("").ofType(String.class);
        ArgumentAcceptingOptionSpec<String> sslKeystorePasswordOpt = parser.accepts("sslKeystorePassword", "SSL key store password").withOptionalArg().describedAs("The password of SSL key store").defaultsTo("").ofType(String.class);
        ArgumentAcceptingOptionSpec<String> sslKeyPasswordOpt = parser.accepts("sslKeyPassword", "SSL key password").withOptionalArg().describedAs("The password of SSL private key").defaultsTo("").ofType(String.class);
        ArgumentAcceptingOptionSpec<String> sslTruststorePasswordOpt = parser.accepts("sslTruststorePassword", "SSL trust store password").withOptionalArg().describedAs("The password of SSL trust store").defaultsTo("").ofType(String.class);
        ArgumentAcceptingOptionSpec<String> sslCipherSuitesOpt = parser.accepts("sslCipherSuites", "SSL enabled cipher suites").withOptionalArg().describedAs("Comma separated list").defaultsTo("TLS_RSA_WITH_AES_128_CBC_SHA").ofType(String.class);
        OptionSet options = parser.parse(args);
        ArrayList<OptionSpec> listOpt = new ArrayList<>();
        listOpt.add(logToReadOpt);
        listOpt.add(hardwareLayoutOpt);
        listOpt.add(partitionLayoutOpt);
        ToolUtils.ensureOrExit(listOpt, options, parser);
        long measurementIntervalNs = options.valueOf(measurementIntervalOpt) * SystemTime.NsPerSec;
        ToolUtils.validateSSLOptions(options, parser, sslEnabledDatacentersOpt, sslKeystorePathOpt, sslKeystoreTypeOpt, sslTruststorePathOpt, sslKeystorePasswordOpt, sslKeyPasswordOpt, sslTruststorePasswordOpt);
        String sslEnabledDatacenters = options.valueOf(sslEnabledDatacentersOpt);
        Properties sslProperties;
        if (sslEnabledDatacenters.length() != 0) {
            sslProperties = ToolUtils.createSSLProperties(sslEnabledDatacenters, options.valueOf(sslKeystorePathOpt), options.valueOf(sslKeystoreTypeOpt), options.valueOf(sslKeystorePasswordOpt), options.valueOf(sslKeyPasswordOpt), options.valueOf(sslTruststorePathOpt), options.valueOf(sslTruststorePasswordOpt), options.valueOf(sslCipherSuitesOpt));
        } else {
            sslProperties = new Properties();
        }
        ToolUtils.addClusterMapProperties(sslProperties);
        String logToRead = options.valueOf(logToReadOpt);
        int readsPerSecond = options.valueOf(readsPerSecondOpt);
        boolean enableVerboseLogging = options.has(verboseLoggingOpt);
        if (enableVerboseLogging) {
            System.out.println("Enabled verbose logging");
        }
        File logFile = new File(System.getProperty("user.dir"), "readperfresult");
        writer = new FileWriter(logFile);
        String hardwareLayoutPath = options.valueOf(hardwareLayoutOpt);
        String partitionLayoutPath = options.valueOf(partitionLayoutOpt);
        ClusterMapConfig clusterMapConfig = new ClusterMapConfig(new VerifiableProperties(sslProperties));
        ClusterMap map = ((ClusterAgentsFactory) Utils.getObj(clusterMapConfig.clusterMapClusterAgentsFactory, clusterMapConfig, hardwareLayoutPath, partitionLayoutPath)).getClusterMap();
        final AtomicLong totalTimeTaken = new AtomicLong(0);
        final AtomicLong totalReads = new AtomicLong(0);
        final AtomicBoolean shutdown = new AtomicBoolean(false);
        // attach shutdown handler to catch control-c
        Runtime.getRuntime().addShutdownHook(new Thread() {

            public void run() {
                try {
                    System.out.println("Shutdown invoked");
                    shutdown.set(true);
                    String message = "Total reads : " + totalReads.get() + "  Total time taken : " + totalTimeTaken.get() + " Nano Seconds  Average time taken per read " + ((double) totalTimeTaken.get()) / SystemTime.NsPerSec / totalReads.get() + " Seconds";
                    System.out.println(message);
                } catch (Exception e) {
                    System.out.println("Error while shutting down " + e);
                }
            }
        });
        final BufferedReader br = new BufferedReader(new FileReader(logToRead));
        Throttler throttler = new Throttler(readsPerSecond, 100, true, SystemTime.getInstance());
        String line;
        ConnectedChannel channel = null;
        ConnectionPoolConfig connectionPoolConfig = new ConnectionPoolConfig(new VerifiableProperties(new Properties()));
        VerifiableProperties vProps = new VerifiableProperties(sslProperties);
        SSLConfig sslConfig = new SSLConfig(vProps);
        clusterMapConfig = new ClusterMapConfig(vProps);
        connectionPool = new BlockingChannelConnectionPool(connectionPoolConfig, sslConfig, clusterMapConfig, new MetricRegistry());
        long totalNumberOfGetBlobs = 0;
        long totalLatencyForGetBlobs = 0;
        ArrayList<Long> latenciesForGetBlobs = new ArrayList<Long>();
        long maxLatencyForGetBlobs = 0;
        long minLatencyForGetBlobs = Long.MAX_VALUE;
        while ((line = br.readLine()) != null) {
            String[] id = line.split("-");
            BlobData blobData = null;
            BlobId blobId = new BlobId(id[1], map);
            ArrayList<BlobId> blobIds = new ArrayList<BlobId>();
            blobIds.add(blobId);
            for (ReplicaId replicaId : blobId.getPartition().getReplicaIds()) {
                long startTimeGetBlob = 0;
                ArrayList<PartitionRequestInfo> partitionRequestInfoList = new ArrayList<PartitionRequestInfo>();
                try {
                    partitionRequestInfoList.clear();
                    PartitionRequestInfo partitionRequestInfo = new PartitionRequestInfo(blobId.getPartition(), blobIds);
                    partitionRequestInfoList.add(partitionRequestInfo);
                    GetRequest getRequest = new GetRequest(1, "getperf", MessageFormatFlags.Blob, partitionRequestInfoList, GetOption.None);
                    Port port = replicaId.getDataNodeId().getPortToConnectTo();
                    channel = connectionPool.checkOutConnection(replicaId.getDataNodeId().getHostname(), port, 10000);
                    startTimeGetBlob = SystemTime.getInstance().nanoseconds();
                    channel.send(getRequest);
                    DataInputStream receiveStream = channel.receive().getInputStream();
                    GetResponse getResponse = GetResponse.readFrom(receiveStream, map);
                    blobData = MessageFormatRecord.deserializeBlob(getResponse.getInputStream());
                    long sizeRead = 0;
                    byte[] outputBuffer = new byte[(int) blobData.getSize()];
                    ByteBufferOutputStream streamOut = new ByteBufferOutputStream(ByteBuffer.wrap(outputBuffer));
                    ByteBuf buffer = blobData.content();
                    try {
                        buffer.readBytes(streamOut, (int) blobData.getSize());
                    } finally {
                        buffer.release();
                    }
                    long latencyPerBlob = SystemTime.getInstance().nanoseconds() - startTimeGetBlob;
                    totalTimeTaken.addAndGet(latencyPerBlob);
                    latenciesForGetBlobs.add(latencyPerBlob);
                    totalReads.incrementAndGet();
                    totalNumberOfGetBlobs++;
                    totalLatencyForGetBlobs += latencyPerBlob;
                    if (enableVerboseLogging) {
                        System.out.println("Time taken to get blob id " + blobId + " in ms " + latencyPerBlob / SystemTime.NsPerMs);
                    }
                    if (latencyPerBlob > maxLatencyForGetBlobs) {
                        maxLatencyForGetBlobs = latencyPerBlob;
                    }
                    if (latencyPerBlob < minLatencyForGetBlobs) {
                        minLatencyForGetBlobs = latencyPerBlob;
                    }
                    if (totalLatencyForGetBlobs >= measurementIntervalNs) {
                        Collections.sort(latenciesForGetBlobs);
                        int index99 = (int) (latenciesForGetBlobs.size() * 0.99) - 1;
                        int index95 = (int) (latenciesForGetBlobs.size() * 0.95) - 1;
                        String message = totalNumberOfGetBlobs + "," + (double) latenciesForGetBlobs.get(index99) / SystemTime.NsPerSec + "," + (double) latenciesForGetBlobs.get(index95) / SystemTime.NsPerSec + "," + ((double) totalLatencyForGetBlobs / SystemTime.NsPerSec / totalNumberOfGetBlobs);
                        System.out.println(message);
                        writer.write(message + "\n");
                        totalLatencyForGetBlobs = 0;
                        latenciesForGetBlobs.clear();
                        totalNumberOfGetBlobs = 0;
                        maxLatencyForGetBlobs = 0;
                        minLatencyForGetBlobs = Long.MAX_VALUE;
                    }
                    partitionRequestInfoList.clear();
                    partitionRequestInfo = new PartitionRequestInfo(blobId.getPartition(), blobIds);
                    partitionRequestInfoList.add(partitionRequestInfo);
                    GetRequest getRequestProperties = new GetRequest(1, "getperf", MessageFormatFlags.BlobProperties, partitionRequestInfoList, GetOption.None);
                    long startTimeGetBlobProperties = SystemTime.getInstance().nanoseconds();
                    channel.send(getRequestProperties);
                    DataInputStream receivePropertyStream = channel.receive().getInputStream();
                    GetResponse getResponseProperty = GetResponse.readFrom(receivePropertyStream, map);
                    BlobProperties blobProperties = MessageFormatRecord.deserializeBlobProperties(getResponseProperty.getInputStream());
                    long endTimeGetBlobProperties = SystemTime.getInstance().nanoseconds() - startTimeGetBlobProperties;
                    partitionRequestInfoList.clear();
                    partitionRequestInfo = new PartitionRequestInfo(blobId.getPartition(), blobIds);
                    partitionRequestInfoList.add(partitionRequestInfo);
                    GetRequest getRequestUserMetadata = new GetRequest(1, "getperf", MessageFormatFlags.BlobUserMetadata, partitionRequestInfoList, GetOption.None);
                    long startTimeGetBlobUserMetadata = SystemTime.getInstance().nanoseconds();
                    channel.send(getRequestUserMetadata);
                    DataInputStream receiveUserMetadataStream = channel.receive().getInputStream();
                    GetResponse getResponseUserMetadata = GetResponse.readFrom(receiveUserMetadataStream, map);
                    ByteBuffer userMetadata = MessageFormatRecord.deserializeUserMetadata(getResponseUserMetadata.getInputStream());
                    long endTimeGetBlobUserMetadata = SystemTime.getInstance().nanoseconds() - startTimeGetBlobUserMetadata;
                    // delete the blob
                    DeleteRequest deleteRequest = new DeleteRequest(0, "perf", blobId, System.currentTimeMillis());
                    channel.send(deleteRequest);
                    DeleteResponse deleteResponse = DeleteResponse.readFrom(channel.receive().getInputStream());
                    if (deleteResponse.getError() != ServerErrorCode.No_Error) {
                        throw new UnexpectedException("error " + deleteResponse.getError());
                    }
                    throttler.maybeThrottle(1);
                } finally {
                    if (channel != null) {
                        connectionPool.checkInConnection(channel);
                        channel = null;
                    }
                }
            }
        }
    } catch (Exception e) {
        e.printStackTrace();
        System.out.println("Error in server read performance " + e);
    } finally {
        if (writer != null) {
            try {
                writer.close();
            } catch (Exception e) {
                System.out.println("Error when closing writer");
            }
        }
        if (connectionPool != null) {
            connectionPool.shutdown();
        }
    }
}
Also used: ClusterMap (com.github.ambry.clustermap.ClusterMap), Port (com.github.ambry.network.Port), ArrayList (java.util.ArrayList), GetRequest (com.github.ambry.protocol.GetRequest), BlobData (com.github.ambry.messageformat.BlobData), ClusterAgentsFactory (com.github.ambry.clustermap.ClusterAgentsFactory), AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean), Throttler (com.github.ambry.utils.Throttler), UnexpectedException (java.rmi.UnexpectedException), VerifiableProperties (com.github.ambry.config.VerifiableProperties), MetricRegistry (com.codahale.metrics.MetricRegistry), ConnectedChannel (com.github.ambry.network.ConnectedChannel), ClusterMapConfig (com.github.ambry.config.ClusterMapConfig), BlockingChannelConnectionPool (com.github.ambry.network.BlockingChannelConnectionPool), DeleteResponse (com.github.ambry.protocol.DeleteResponse), AtomicLong (java.util.concurrent.atomic.AtomicLong), File (java.io.File), ConnectionPool (com.github.ambry.network.ConnectionPool), OptionSpec (joptsimple.OptionSpec), ArgumentAcceptingOptionSpec (joptsimple.ArgumentAcceptingOptionSpec), ConnectionPoolConfig (com.github.ambry.config.ConnectionPoolConfig), FileWriter (java.io.FileWriter), BlobProperties (com.github.ambry.messageformat.BlobProperties), Properties (java.util.Properties), ByteBuf (io.netty.buffer.ByteBuf), OptionParser (joptsimple.OptionParser), FileReader (java.io.FileReader), SSLConfig (com.github.ambry.config.SSLConfig), PartitionRequestInfo (com.github.ambry.protocol.PartitionRequestInfo), DataInputStream (java.io.DataInputStream), GetResponse (com.github.ambry.protocol.GetResponse), ByteBuffer (java.nio.ByteBuffer), ReplicaId (com.github.ambry.clustermap.ReplicaId), ByteBufferOutputStream (com.github.ambry.utils.ByteBufferOutputStream), BufferedReader (java.io.BufferedReader), OptionSet (joptsimple.OptionSet), BlobId (com.github.ambry.commons.BlobId), DeleteRequest (com.github.ambry.protocol.DeleteRequest)
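A hedged invocation sketch for this performance tool; every option name below appears in the parser defined above, while the file paths are hypothetical. Each input line is split on '-' and the second token is parsed as a BlobId, so the log to read must follow that shape. Note that the tool deletes each blob after reading it.

public static void runReadPerfExample() {
    String[] perfArgs = {
        // Hypothetical paths; the layout files must describe the target cluster.
        "--logToRead", "/tmp/put.log",
        "--hardwareLayout", "/tmp/HardwareLayout.json",
        "--partitionLayout", "/tmp/PartitionLayout.json",
        "--readsPerSecond", "50"
    };
    // Results are written to readperfresult in the working directory;
    // Ctrl-C triggers the shutdown hook that prints the summary statistics.
    ServerReadPerformance.main(perfArgs);
}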

Example 35 with BlobProperties

Use of com.github.ambry.messageformat.BlobProperties in project ambry by linkedin.

The class RouterServerTestFramework, method startOperationChain.

/**
 * Create an {@link OperationChain} from a queue of {@link OperationType}s.  Start the operation chain asynchronously.
 * @param blobSize the size of the blob generated for put operations.
 * @param chainId a number identifying the operation chain.
 * @param operations the queue of operations to perform in the chain.
 * @return an {@link OperationChain} object describing the started chain.
 */
OperationChain startOperationChain(int blobSize, int chainId, Queue<OperationType> operations) {
    byte[] userMetadata = new byte[1000];
    byte[] data = new byte[blobSize];
    TestUtils.RANDOM.nextBytes(userMetadata);
    TestUtils.RANDOM.nextBytes(data);
    short accountId = Utils.getRandomShort(TestUtils.RANDOM);
    short containerId = Utils.getRandomShort(TestUtils.RANDOM);
    BlobProperties properties = new BlobProperties(blobSize, "serviceid1", accountId, containerId, testEncryption);
    OperationChain opChain = new OperationChain(chainId, properties, userMetadata, data, operations);
    continueChain(opChain);
    return opChain;
}
Also used: BlobProperties (com.github.ambry.messageformat.BlobProperties)
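A hedged sketch of starting a chain with the method above; the framework instance and the OperationType constant names are illustrative assumptions (use whichever values the enum in RouterServerTestFramework actually defines). It additionally uses java.util.Queue and java.util.LinkedList.

OperationChain startChainExample(RouterServerTestFramework framework) {
    // Hypothetical chain: put a blob, read it back, then delete it.
    Queue<OperationType> ops = new LinkedList<>();
    ops.add(OperationType.PUT);
    ops.add(OperationType.GET);
    ops.add(OperationType.DELETE);
    // 1024-byte blob, chain id 7; the returned chain can be polled for completion.
    return framework.startOperationChain(1024, 7, ops);
}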

Aggregations

BlobProperties (com.github.ambry.messageformat.BlobProperties): 79
BlobId (com.github.ambry.commons.BlobId): 35
ArrayList (java.util.ArrayList): 35
DataInputStream (java.io.DataInputStream): 26
Test (org.junit.Test): 25
ByteBufferReadableStreamChannel (com.github.ambry.commons.ByteBufferReadableStreamChannel): 24
VerifiableProperties (com.github.ambry.config.VerifiableProperties): 24
ByteBuffer (java.nio.ByteBuffer): 24
MockClusterMap (com.github.ambry.clustermap.MockClusterMap): 20
GetResponse (com.github.ambry.protocol.GetResponse): 20
PutRequest (com.github.ambry.protocol.PutRequest): 20
IOException (java.io.IOException): 20
Properties (java.util.Properties): 20
InMemAccountService (com.github.ambry.account.InMemAccountService): 19
PartitionRequestInfo (com.github.ambry.protocol.PartitionRequestInfo): 19
LoggingNotificationSystem (com.github.ambry.commons.LoggingNotificationSystem): 18
ByteBuf (io.netty.buffer.ByteBuf): 18
GetRequest (com.github.ambry.protocol.GetRequest): 17
NettyByteBufDataInputStream (com.github.ambry.utils.NettyByteBufDataInputStream): 17
CountDownLatch (java.util.concurrent.CountDownLatch): 16