Example 11 with BlobId

Use of com.github.ambry.commons.BlobId in project ambry by LinkedIn.

From the class ServerAdminTool, method main.

/**
 * Runs the server admin tool.
 * @param args the arguments to the tool, parsed into verifiable properties.
 * @throws Exception if any step of the tool's execution fails.
 */
public static void main(String[] args) throws Exception {
    VerifiableProperties verifiableProperties = ToolUtils.getVerifiableProperties(args);
    ServerAdminToolConfig config = new ServerAdminToolConfig(verifiableProperties);
    ClusterMapConfig clusterMapConfig = new ClusterMapConfig(verifiableProperties);
    ClusterMap clusterMap = ((ClusterAgentsFactory) Utils.getObj(clusterMapConfig.clusterMapClusterAgentsFactory, clusterMapConfig, config.hardwareLayoutFilePath, config.partitionLayoutFilePath)).getClusterMap();
    SSLFactory sslFactory = !clusterMapConfig.clusterMapSslEnabledDatacenters.isEmpty() ? SSLFactory.getNewInstance(new SSLConfig(verifiableProperties)) : null;
    ServerAdminTool serverAdminTool = new ServerAdminTool(clusterMap, sslFactory, verifiableProperties);
    File file = new File(config.dataOutputFilePath);
    if (!file.exists() && !file.createNewFile()) {
        throw new IllegalStateException("Could not create " + file);
    }
    FileOutputStream outputFileStream = new FileOutputStream(config.dataOutputFilePath);
    DataNodeId dataNodeId = clusterMap.getDataNodeId(config.hostname, config.port);
    if (dataNodeId == null) {
        throw new IllegalArgumentException("Could not find a data node corresponding to " + config.hostname + ":" + config.port);
    }
    switch(config.typeOfOperation) {
        case GetBlobProperties:
            BlobId blobId = new BlobId(config.blobId, clusterMap);
            Pair<ServerErrorCode, BlobProperties> bpResponse = serverAdminTool.getBlobProperties(dataNodeId, blobId, config.getOption, clusterMap);
            if (bpResponse.getFirst() == ServerErrorCode.No_Error) {
                LOGGER.info("Blob properties for {} from {}: {}", blobId, dataNodeId, bpResponse.getSecond());
            } else {
                LOGGER.error("Failed to get blob properties for {} from {} with option {}. Error code is {}", blobId, dataNodeId, config.getOption, bpResponse.getFirst());
            }
            break;
        case GetUserMetadata:
            blobId = new BlobId(config.blobId, clusterMap);
            Pair<ServerErrorCode, ByteBuffer> umResponse = serverAdminTool.getUserMetadata(dataNodeId, blobId, config.getOption, clusterMap);
            if (umResponse.getFirst() == ServerErrorCode.No_Error) {
                writeBufferToFile(umResponse.getSecond(), outputFileStream);
                LOGGER.info("User metadata for {} from {} written to {}", blobId, dataNodeId, config.dataOutputFilePath);
            } else {
                LOGGER.error("Failed to get user metadata for {} from {} with option {}. Error code is {}", blobId, dataNodeId, config.getOption, umResponse.getFirst());
            }
            break;
        case GetBlob:
            blobId = new BlobId(config.blobId, clusterMap);
            Pair<ServerErrorCode, BlobData> bResponse = serverAdminTool.getBlob(dataNodeId, blobId, config.getOption, clusterMap);
            if (bResponse.getFirst() == ServerErrorCode.No_Error) {
                LOGGER.info("Blob type of {} from {} is {}", blobId, dataNodeId, bResponse.getSecond().getBlobType());
                ByteBuf buffer = bResponse.getSecond().content();
                try {
                    writeByteBufToFile(buffer, outputFileStream);
                } finally {
                    buffer.release();
                }
                LOGGER.info("Blob data for {} from {} written to {}", blobId, dataNodeId, config.dataOutputFilePath);
            } else {
                LOGGER.error("Failed to get blob data for {} from {} with option {}. Error code is {}", blobId, dataNodeId, config.getOption, bResponse.getFirst());
            }
            break;
        case TriggerCompaction:
            if (config.partitionIds.length > 0 && !config.partitionIds[0].isEmpty()) {
                for (String partitionIdStr : config.partitionIds) {
                    PartitionId partitionId = getPartitionIdFromStr(partitionIdStr, clusterMap);
                    ServerErrorCode errorCode = serverAdminTool.triggerCompaction(dataNodeId, partitionId);
                    if (errorCode == ServerErrorCode.No_Error) {
                        LOGGER.info("Compaction has been triggered for {} on {}", partitionId, dataNodeId);
                    } else {
                        LOGGER.error("From {}, received server error code {} for trigger compaction request on {}", dataNodeId, errorCode, partitionId);
                    }
                }
            } else {
                LOGGER.error("There were no partitions provided to trigger compaction on");
            }
            break;
        case RequestControl:
            if (config.partitionIds.length > 0 && !config.partitionIds[0].isEmpty()) {
                for (String partitionIdStr : config.partitionIds) {
                    PartitionId partitionId = getPartitionIdFromStr(partitionIdStr, clusterMap);
                    sendRequestControlRequest(serverAdminTool, dataNodeId, partitionId, config.requestTypeToControl, config.enableState);
                }
            } else {
                LOGGER.info("No partition list provided. Requesting enable status of {} to be set to {} on all partitions", config.requestTypeToControl, config.enableState);
                sendRequestControlRequest(serverAdminTool, dataNodeId, null, config.requestTypeToControl, config.enableState);
            }
            break;
        case ReplicationControl:
            List<String> origins = Collections.emptyList();
            if (config.origins.length > 0 && !config.origins[0].isEmpty()) {
                origins = Arrays.asList(config.origins);
            }
            if (config.partitionIds.length > 0 && !config.partitionIds[0].isEmpty()) {
                for (String partitionIdStr : config.partitionIds) {
                    PartitionId partitionId = getPartitionIdFromStr(partitionIdStr, clusterMap);
                    sendReplicationControlRequest(serverAdminTool, dataNodeId, partitionId, origins, config.enableState);
                }
            } else {
                LOGGER.info("No partition list provided. Requesting enable status for replication from {} to be set to {} on " + "all partitions", origins.isEmpty() ? "all DCs" : origins, config.enableState);
                sendReplicationControlRequest(serverAdminTool, dataNodeId, null, origins, config.enableState);
            }
            break;
        case CatchupStatus:
            if (config.partitionIds.length > 0 && !config.partitionIds[0].isEmpty()) {
                for (String partitionIdStr : config.partitionIds) {
                    PartitionId partitionId = getPartitionIdFromStr(partitionIdStr, clusterMap);
                    Pair<ServerErrorCode, Boolean> response = serverAdminTool.isCaughtUp(dataNodeId, partitionId, config.acceptableLagInBytes, config.numReplicasCaughtUpPerPartition);
                    if (response.getFirst() == ServerErrorCode.No_Error) {
                        LOGGER.info("Replicas are {} within {} bytes for {}", response.getSecond() ? "" : "NOT", config.acceptableLagInBytes, partitionId);
                    } else {
                        LOGGER.error("From {}, received server error code {} for request for catchup status of {}", dataNodeId, response.getFirst(), partitionId);
                    }
                }
            } else {
                Pair<ServerErrorCode, Boolean> response = serverAdminTool.isCaughtUp(dataNodeId, null, config.acceptableLagInBytes, config.numReplicasCaughtUpPerPartition);
                if (response.getFirst() == ServerErrorCode.No_Error) {
                    LOGGER.info("Replicas are {} within {} bytes for all partitions", response.getSecond() ? "" : "NOT", config.acceptableLagInBytes);
                } else {
                    LOGGER.error("From {}, received server error code {} for request for catchup status of all partitions", dataNodeId, response.getFirst());
                }
            }
            break;
        case BlobStoreControl:
            if (config.partitionIds.length > 0 && !config.partitionIds[0].isEmpty()) {
                for (String partitionIdStr : config.partitionIds) {
                    PartitionId partitionId = getPartitionIdFromStr(partitionIdStr, clusterMap);
                    sendBlobStoreControlRequest(serverAdminTool, dataNodeId, partitionId, config.numReplicasCaughtUpPerPartition, config.storeControlRequestType);
                }
            } else {
                LOGGER.error("There were no partitions provided to be controlled (Start/Stop)");
            }
            break;
        default:
            throw new IllegalStateException("Recognized but unsupported operation: " + config.typeOfOperation);
    }
    serverAdminTool.close();
    outputFileStream.close();
    clusterMap.close();
    System.out.println("Server admin tool is safely closed");
    System.exit(0);
}
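The helpers writeBufferToFile and writeByteBufToFile are called above but their bodies are not part of this snippet. A minimal sketch of what they might look like follows; the method bodies are assumptions for illustration (the actual implementations in ServerAdminTool may differ), though the signatures match the call sites above.

// Sketch only: assumed bodies for the helpers referenced in main() above.
private static void writeBufferToFile(ByteBuffer buffer, FileOutputStream outputFileStream) throws IOException {
    // FileChannel.write may write fewer bytes than remain, so loop until the buffer is drained.
    while (buffer.hasRemaining()) {
        outputFileStream.getChannel().write(buffer);
    }
}

private static void writeByteBufToFile(ByteBuf buf, FileOutputStream outputFileStream) throws IOException {
    // Drain the Netty buffer into the file; readBytes advances the reader index.
    buf.readBytes(outputFileStream, buf.readableBytes());
}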
Also used: ClusterMap(com.github.ambry.clustermap.ClusterMap) SSLFactory(com.github.ambry.commons.SSLFactory) ByteBuf(io.netty.buffer.ByteBuf) BlobData(com.github.ambry.messageformat.BlobData) ClusterAgentsFactory(com.github.ambry.clustermap.ClusterAgentsFactory) SSLConfig(com.github.ambry.config.SSLConfig) VerifiableProperties(com.github.ambry.config.VerifiableProperties) PartitionId(com.github.ambry.clustermap.PartitionId) ByteBuffer(java.nio.ByteBuffer) ClusterMapConfig(com.github.ambry.config.ClusterMapConfig) ServerErrorCode(com.github.ambry.server.ServerErrorCode) FileOutputStream(java.io.FileOutputStream) BlobProperties(com.github.ambry.messageformat.BlobProperties) File(java.io.File) DataNodeId(com.github.ambry.clustermap.DataNodeId) BlobId(com.github.ambry.commons.BlobId)

Example 12 with BlobId

Use of com.github.ambry.commons.BlobId in project ambry by LinkedIn.

From the class ServerReadPerformance, method main.

public static void main(String[] args) {
    ConnectionPool connectionPool = null;
    FileWriter writer = null;
    try {
        OptionParser parser = new OptionParser();
        ArgumentAcceptingOptionSpec<String> logToReadOpt = parser.accepts("logToRead", "The log that needs to be replayed for traffic").withRequiredArg().describedAs("log_to_read").ofType(String.class);
        ArgumentAcceptingOptionSpec<String> hardwareLayoutOpt = parser.accepts("hardwareLayout", "The path of the hardware layout file").withRequiredArg().describedAs("hardware_layout").ofType(String.class);
        ArgumentAcceptingOptionSpec<String> partitionLayoutOpt = parser.accepts("partitionLayout", "The path of the partition layout file").withRequiredArg().describedAs("partition_layout").ofType(String.class);
        ArgumentAcceptingOptionSpec<Integer> readsPerSecondOpt = parser.accepts("readsPerSecond", "The rate at which reads need to be performed").withRequiredArg().describedAs("The number of reads per second").ofType(Integer.class).defaultsTo(1000);
        ArgumentAcceptingOptionSpec<Long> measurementIntervalOpt = parser.accepts("measurementInterval", "The interval in second to report performance result").withOptionalArg().describedAs("The CPU time spent for getting blobs, not wall time").ofType(Long.class).defaultsTo(300L);
        ArgumentAcceptingOptionSpec<Boolean> verboseLoggingOpt = parser.accepts("enableVerboseLogging", "Enables verbose logging").withOptionalArg().describedAs("Enable verbose logging").ofType(Boolean.class).defaultsTo(false);
        ArgumentAcceptingOptionSpec<String> sslEnabledDatacentersOpt = parser.accepts("sslEnabledDatacenters", "Datacenters to which ssl should be enabled").withOptionalArg().describedAs("Comma separated list").ofType(String.class).defaultsTo("");
        ArgumentAcceptingOptionSpec<String> sslKeystorePathOpt = parser.accepts("sslKeystorePath", "SSL key store path").withOptionalArg().describedAs("The file path of SSL key store").defaultsTo("").ofType(String.class);
        ArgumentAcceptingOptionSpec<String> sslKeystoreTypeOpt = parser.accepts("sslKeystoreType", "SSL key store type").withOptionalArg().describedAs("The type of SSL key store").defaultsTo("").ofType(String.class);
        ArgumentAcceptingOptionSpec<String> sslTruststorePathOpt = parser.accepts("sslTruststorePath", "SSL trust store path").withOptionalArg().describedAs("The file path of SSL trust store").defaultsTo("").ofType(String.class);
        ArgumentAcceptingOptionSpec<String> sslKeystorePasswordOpt = parser.accepts("sslKeystorePassword", "SSL key store password").withOptionalArg().describedAs("The password of SSL key store").defaultsTo("").ofType(String.class);
        ArgumentAcceptingOptionSpec<String> sslKeyPasswordOpt = parser.accepts("sslKeyPassword", "SSL key password").withOptionalArg().describedAs("The password of SSL private key").defaultsTo("").ofType(String.class);
        ArgumentAcceptingOptionSpec<String> sslTruststorePasswordOpt = parser.accepts("sslTruststorePassword", "SSL trust store password").withOptionalArg().describedAs("The password of SSL trust store").defaultsTo("").ofType(String.class);
        ArgumentAcceptingOptionSpec<String> sslCipherSuitesOpt = parser.accepts("sslCipherSuites", "SSL enabled cipher suites").withOptionalArg().describedAs("Comma separated list").defaultsTo("TLS_RSA_WITH_AES_128_CBC_SHA").ofType(String.class);
        OptionSet options = parser.parse(args);
        ArrayList<OptionSpec> listOpt = new ArrayList<>();
        listOpt.add(logToReadOpt);
        listOpt.add(hardwareLayoutOpt);
        listOpt.add(partitionLayoutOpt);
        ToolUtils.ensureOrExit(listOpt, options, parser);
        long measurementIntervalNs = options.valueOf(measurementIntervalOpt) * SystemTime.NsPerSec;
        ToolUtils.validateSSLOptions(options, parser, sslEnabledDatacentersOpt, sslKeystorePathOpt, sslKeystoreTypeOpt, sslTruststorePathOpt, sslKeystorePasswordOpt, sslKeyPasswordOpt, sslTruststorePasswordOpt);
        String sslEnabledDatacenters = options.valueOf(sslEnabledDatacentersOpt);
        Properties sslProperties;
        if (sslEnabledDatacenters.length() != 0) {
            sslProperties = ToolUtils.createSSLProperties(sslEnabledDatacenters, options.valueOf(sslKeystorePathOpt), options.valueOf(sslKeystoreTypeOpt), options.valueOf(sslKeystorePasswordOpt), options.valueOf(sslKeyPasswordOpt), options.valueOf(sslTruststorePathOpt), options.valueOf(sslTruststorePasswordOpt), options.valueOf(sslCipherSuitesOpt));
        } else {
            sslProperties = new Properties();
        }
        ToolUtils.addClusterMapProperties(sslProperties);
        String logToRead = options.valueOf(logToReadOpt);
        int readsPerSecond = options.valueOf(readsPerSecondOpt);
        boolean enableVerboseLogging = options.has(verboseLoggingOpt);
        if (enableVerboseLogging) {
            System.out.println("Enabled verbose logging");
        }
        File logFile = new File(System.getProperty("user.dir"), "readperfresult");
        writer = new FileWriter(logFile);
        String hardwareLayoutPath = options.valueOf(hardwareLayoutOpt);
        String partitionLayoutPath = options.valueOf(partitionLayoutOpt);
        ClusterMapConfig clusterMapConfig = new ClusterMapConfig(new VerifiableProperties(sslProperties));
        ClusterMap map = ((ClusterAgentsFactory) Utils.getObj(clusterMapConfig.clusterMapClusterAgentsFactory, clusterMapConfig, hardwareLayoutPath, partitionLayoutPath)).getClusterMap();
        final AtomicLong totalTimeTaken = new AtomicLong(0);
        final AtomicLong totalReads = new AtomicLong(0);
        final AtomicBoolean shutdown = new AtomicBoolean(false);
        // attach shutdown handler to catch control-c
        Runtime.getRuntime().addShutdownHook(new Thread() {

            public void run() {
                try {
                    System.out.println("Shutdown invoked");
                    shutdown.set(true);
                    String message = "Total reads : " + totalReads.get() + "  Total time taken : " + totalTimeTaken.get() + " Nano Seconds  Average time taken per read " + ((double) totalTimeTaken.get()) / SystemTime.NsPerSec / totalReads.get() + " Seconds";
                    System.out.println(message);
                } catch (Exception e) {
                    System.out.println("Error while shutting down " + e);
                }
            }
        });
        final BufferedReader br = new BufferedReader(new FileReader(logToRead));
        Throttler throttler = new Throttler(readsPerSecond, 100, true, SystemTime.getInstance());
        String line;
        ConnectedChannel channel = null;
        ConnectionPoolConfig connectionPoolConfig = new ConnectionPoolConfig(new VerifiableProperties(new Properties()));
        VerifiableProperties vProps = new VerifiableProperties(sslProperties);
        SSLConfig sslConfig = new SSLConfig(vProps);
        clusterMapConfig = new ClusterMapConfig(vProps);
        connectionPool = new BlockingChannelConnectionPool(connectionPoolConfig, sslConfig, clusterMapConfig, new MetricRegistry());
        long totalNumberOfGetBlobs = 0;
        long totalLatencyForGetBlobs = 0;
        ArrayList<Long> latenciesForGetBlobs = new ArrayList<Long>();
        long maxLatencyForGetBlobs = 0;
        long minLatencyForGetBlobs = Long.MAX_VALUE;
        while ((line = br.readLine()) != null) {
            String[] id = line.split("-");
            BlobData blobData = null;
            BlobId blobId = new BlobId(id[1], map);
            ArrayList<BlobId> blobIds = new ArrayList<BlobId>();
            blobIds.add(blobId);
            for (ReplicaId replicaId : blobId.getPartition().getReplicaIds()) {
                long startTimeGetBlob = 0;
                ArrayList<PartitionRequestInfo> partitionRequestInfoList = new ArrayList<PartitionRequestInfo>();
                try {
                    partitionRequestInfoList.clear();
                    PartitionRequestInfo partitionRequestInfo = new PartitionRequestInfo(blobId.getPartition(), blobIds);
                    partitionRequestInfoList.add(partitionRequestInfo);
                    GetRequest getRequest = new GetRequest(1, "getperf", MessageFormatFlags.Blob, partitionRequestInfoList, GetOption.None);
                    Port port = replicaId.getDataNodeId().getPortToConnectTo();
                    channel = connectionPool.checkOutConnection(replicaId.getDataNodeId().getHostname(), port, 10000);
                    startTimeGetBlob = SystemTime.getInstance().nanoseconds();
                    channel.send(getRequest);
                    DataInputStream receiveStream = channel.receive().getInputStream();
                    GetResponse getResponse = GetResponse.readFrom(receiveStream, map);
                    blobData = MessageFormatRecord.deserializeBlob(getResponse.getInputStream());
                    long sizeRead = 0;
                    byte[] outputBuffer = new byte[(int) blobData.getSize()];
                    ByteBufferOutputStream streamOut = new ByteBufferOutputStream(ByteBuffer.wrap(outputBuffer));
                    ByteBuf buffer = blobData.content();
                    try {
                        buffer.readBytes(streamOut, (int) blobData.getSize());
                    } finally {
                        buffer.release();
                    }
                    long latencyPerBlob = SystemTime.getInstance().nanoseconds() - startTimeGetBlob;
                    totalTimeTaken.addAndGet(latencyPerBlob);
                    latenciesForGetBlobs.add(latencyPerBlob);
                    totalReads.incrementAndGet();
                    totalNumberOfGetBlobs++;
                    totalLatencyForGetBlobs += latencyPerBlob;
                    if (enableVerboseLogging) {
                        System.out.println("Time taken to get blob id " + blobId + " in ms " + latencyPerBlob / SystemTime.NsPerMs);
                    }
                    if (latencyPerBlob > maxLatencyForGetBlobs) {
                        maxLatencyForGetBlobs = latencyPerBlob;
                    }
                    if (latencyPerBlob < minLatencyForGetBlobs) {
                        minLatencyForGetBlobs = latencyPerBlob;
                    }
                    if (totalLatencyForGetBlobs >= measurementIntervalNs) {
                        Collections.sort(latenciesForGetBlobs);
                        // Guard against a negative index when very few latencies were recorded.
                        int index99 = Math.max(0, (int) (latenciesForGetBlobs.size() * 0.99) - 1);
                        int index95 = Math.max(0, (int) (latenciesForGetBlobs.size() * 0.95) - 1);
                        String message = totalNumberOfGetBlobs + "," + (double) latenciesForGetBlobs.get(index99) / SystemTime.NsPerSec + "," + (double) latenciesForGetBlobs.get(index95) / SystemTime.NsPerSec + "," + ((double) totalLatencyForGetBlobs / SystemTime.NsPerSec / totalNumberOfGetBlobs);
                        System.out.println(message);
                        writer.write(message + "\n");
                        totalLatencyForGetBlobs = 0;
                        latenciesForGetBlobs.clear();
                        totalNumberOfGetBlobs = 0;
                        maxLatencyForGetBlobs = 0;
                        minLatencyForGetBlobs = Long.MAX_VALUE;
                    }
                    partitionRequestInfoList.clear();
                    partitionRequestInfo = new PartitionRequestInfo(blobId.getPartition(), blobIds);
                    partitionRequestInfoList.add(partitionRequestInfo);
                    GetRequest getRequestProperties = new GetRequest(1, "getperf", MessageFormatFlags.BlobProperties, partitionRequestInfoList, GetOption.None);
                    long startTimeGetBlobProperties = SystemTime.getInstance().nanoseconds();
                    channel.send(getRequestProperties);
                    DataInputStream receivePropertyStream = channel.receive().getInputStream();
                    GetResponse getResponseProperty = GetResponse.readFrom(receivePropertyStream, map);
                    BlobProperties blobProperties = MessageFormatRecord.deserializeBlobProperties(getResponseProperty.getInputStream());
                    long endTimeGetBlobProperties = SystemTime.getInstance().nanoseconds() - startTimeGetBlobProperties;
                    partitionRequestInfoList.clear();
                    partitionRequestInfo = new PartitionRequestInfo(blobId.getPartition(), blobIds);
                    partitionRequestInfoList.add(partitionRequestInfo);
                    GetRequest getRequestUserMetadata = new GetRequest(1, "getperf", MessageFormatFlags.BlobUserMetadata, partitionRequestInfoList, GetOption.None);
                    long startTimeGetBlobUserMetadata = SystemTime.getInstance().nanoseconds();
                    channel.send(getRequestUserMetadata);
                    DataInputStream receiveUserMetadataStream = channel.receive().getInputStream();
                    GetResponse getResponseUserMetadata = GetResponse.readFrom(receiveUserMetadataStream, map);
                    ByteBuffer userMetadata = MessageFormatRecord.deserializeUserMetadata(getResponseUserMetadata.getInputStream());
                    long endTimeGetBlobUserMetadata = SystemTime.getInstance().nanoseconds() - startTimeGetBlobUserMetadata;
                    // delete the blob
                    DeleteRequest deleteRequest = new DeleteRequest(0, "perf", blobId, System.currentTimeMillis());
                    channel.send(deleteRequest);
                    DeleteResponse deleteResponse = DeleteResponse.readFrom(channel.receive().getInputStream());
                    if (deleteResponse.getError() != ServerErrorCode.No_Error) {
                        throw new UnexpectedException("error " + deleteResponse.getError());
                    }
                    throttler.maybeThrottle(1);
                } finally {
                    if (channel != null) {
                        connectionPool.checkInConnection(channel);
                        channel = null;
                    }
                }
            }
        }
        br.close();
    } catch (Exception e) {
        e.printStackTrace();
        System.out.println("Error in server read performance " + e);
    } finally {
        if (writer != null) {
            try {
                writer.close();
            } catch (Exception e) {
                System.out.println("Error when closing writer");
            }
        }
        if (connectionPool != null) {
            connectionPool.shutdown();
        }
    }
}
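The measurement block above sorts the accumulated latencies and picks the 99th and 95th percentile entries by index. The same nearest-rank computation, pulled into a helper for clarity, might look like the sketch below; this helper is an illustration, not part of the original tool, and the Math.max guard is an addition for lists with very few samples.

// Sketch only: nearest-rank percentile over an already-sorted list of latencies (in ns).
private static long percentile(ArrayList<Long> sortedLatencies, double quantile) {
    int index = Math.max(0, (int) (sortedLatencies.size() * quantile) - 1);
    return sortedLatencies.get(index);
}

// Usage against the loop above (latenciesForGetBlobs is sorted before reporting):
// long p99Ns = percentile(latenciesForGetBlobs, 0.99);
// long p95Ns = percentile(latenciesForGetBlobs, 0.95);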
Also used: ClusterMap(com.github.ambry.clustermap.ClusterMap) Port(com.github.ambry.network.Port) ArrayList(java.util.ArrayList) GetRequest(com.github.ambry.protocol.GetRequest) BlobData(com.github.ambry.messageformat.BlobData) ClusterAgentsFactory(com.github.ambry.clustermap.ClusterAgentsFactory) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) Throttler(com.github.ambry.utils.Throttler) UnexpectedException(java.rmi.UnexpectedException) VerifiableProperties(com.github.ambry.config.VerifiableProperties) MetricRegistry(com.codahale.metrics.MetricRegistry) ConnectedChannel(com.github.ambry.network.ConnectedChannel) ClusterMapConfig(com.github.ambry.config.ClusterMapConfig) BlockingChannelConnectionPool(com.github.ambry.network.BlockingChannelConnectionPool) DeleteResponse(com.github.ambry.protocol.DeleteResponse) AtomicLong(java.util.concurrent.atomic.AtomicLong) File(java.io.File) ConnectionPool(com.github.ambry.network.ConnectionPool) OptionSpec(joptsimple.OptionSpec) ArgumentAcceptingOptionSpec(joptsimple.ArgumentAcceptingOptionSpec) ConnectionPoolConfig(com.github.ambry.config.ConnectionPoolConfig) FileWriter(java.io.FileWriter) BlobProperties(com.github.ambry.messageformat.BlobProperties) Properties(java.util.Properties) ByteBuf(io.netty.buffer.ByteBuf) OptionParser(joptsimple.OptionParser) FileReader(java.io.FileReader) SSLConfig(com.github.ambry.config.SSLConfig) PartitionRequestInfo(com.github.ambry.protocol.PartitionRequestInfo) DataInputStream(java.io.DataInputStream) GetResponse(com.github.ambry.protocol.GetResponse) ByteBuffer(java.nio.ByteBuffer) ReplicaId(com.github.ambry.clustermap.ReplicaId) ByteBufferOutputStream(com.github.ambry.utils.ByteBufferOutputStream) BufferedReader(java.io.BufferedReader) OptionSet(joptsimple.OptionSet) BlobId(com.github.ambry.commons.BlobId) DeleteRequest(com.github.ambry.protocol.DeleteRequest)

Example 13 with BlobId

Use of com.github.ambry.commons.BlobId in project ambry by LinkedIn.

From the class AzureContainerCompactorIntegrationTest, method testCompactAssignedDeprecatedContainers.

@Test
public void testCompactAssignedDeprecatedContainers() throws CloudStorageException, DocumentClientException {
    // Create a deprecated container.
    Set<Container> containers = generateContainers(1);
    cloudDestination.deprecateContainers(containers);
    verifyCosmosData(containers);
    verifyCheckpoint(containers);
    Container testContainer = containers.iterator().next();
    // Create blobs in the deprecated container and test partition.
    int numBlobs = 100;
    PartitionId partitionId = new MockPartitionId(testPartitionId, MockClusterMap.DEFAULT_PARTITION_CLASS);
    long creationTime = System.currentTimeMillis();
    Map<BlobId, byte[]> blobIdtoDataMap = createUnencryptedPermanentBlobs(numBlobs, dataCenterId, testContainer.getParentAccountId(), testContainer.getId(), partitionId, blobSize, cloudRequestAgent, cloudDestination, creationTime);
    // Assert that blobs exist.
    Map<String, CloudBlobMetadata> metadataMap = getBlobMetadataWithRetry(new ArrayList<>(blobIdtoDataMap.keySet()), partitionId.toPathString(), cloudRequestAgent, cloudDestination);
    assertEquals("Unexpected size of returned metadata map", numBlobs, metadataMap.size());
    // compact the deprecated container.
    cloudDestination.getContainerCompactor().compactAssignedDeprecatedContainers(Collections.singletonList(partitionId));
    // Assert that deprecated container's blobs don't exist anymore.
    assertTrue("Expected empty set after container compaction", getBlobMetadataWithRetry(new ArrayList<>(blobIdtoDataMap.keySet()), partitionId.toPathString(), cloudRequestAgent, cloudDestination).isEmpty());
    cleanup();
}
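getBlobMetadataWithRetry re-issues the metadata query until the Cosmos view catches up with the writes; its body is not shown in this snippet. A generic retry loop along the following lines would serve the same purpose. This is a sketch under assumed names (withRetry, maxAttempts, and backoffMs are illustrative, not Ambry APIs).

// Sketch only: retry an action until it succeeds or the attempts run out.
private static <T> T withRetry(java.util.concurrent.Callable<T> action, int maxAttempts, long backoffMs)
        throws Exception {
    Exception last = null;
    for (int attempt = 1; attempt <= maxAttempts; attempt++) {
        try {
            return action.call();
        } catch (Exception e) {
            last = e;
            Thread.sleep(backoffMs);
        }
    }
    // maxAttempts is assumed to be >= 1, so last is non-null here.
    throw last;
}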
Also used: Container(com.github.ambry.account.Container) MockPartitionId(com.github.ambry.clustermap.MockPartitionId) CloudBlobMetadata(com.github.ambry.cloud.CloudBlobMetadata) PartitionId(com.github.ambry.clustermap.PartitionId) BlobId(com.github.ambry.commons.BlobId) Test(org.junit.Test)

Example 14 with BlobId

Use of com.github.ambry.commons.BlobId in project ambry by LinkedIn.

From the class AzureCloudDestinationTest, method testFindEntriesSinceWithUniqueUpdateTimes.

/**
 * Test findEntriesSince with all entries having unique updateTimes.
 * @throws Exception if the mocked query or the assertions fail.
 */
private void testFindEntriesSinceWithUniqueUpdateTimes(AzureCloudDestination azureDest) throws Exception {
    long chunkSize = 110000;
    // between 9 and 10 chunks
    long maxTotalSize = 1000000;
    long startTime = System.currentTimeMillis() - TimeUnit.DAYS.toMillis(1);
    int totalBlobs = 20;
    // create metadata list where total size > maxTotalSize
    List<Document> docList = new ArrayList<>();
    List<String> blobIdList = new ArrayList<>();
    // Hoist blobId out of the loop; it is used again after the loop to resolve the partition path.
    BlobId blobId = null;
    for (int j = 0; j < totalBlobs; j++) {
        blobId = generateBlobId();
        blobIdList.add(blobId.getID());
        CloudBlobMetadata inputMetadata = new CloudBlobMetadata(blobId, creationTime, Utils.Infinite_Time, chunkSize, CloudBlobMetadata.EncryptionOrigin.NONE);
        inputMetadata.setUploadTime(startTime + j);
        docList.add(AzureTestUtils.createDocumentFromCloudBlobMetadata(inputMetadata, startTime + j));
    }
    Observable<FeedResponse<Document>> mockResponse = mock(Observable.class);
    mockObservableForQuery(docList, mockResponse);
    when(mockumentClient.queryDocuments(anyString(), any(SqlQuerySpec.class), any(FeedOptions.class))).thenReturn(mockResponse);
    CosmosUpdateTimeFindToken findToken = new CosmosUpdateTimeFindToken();
    // Run the query
    FindResult findResult = azureDest.findEntriesSince(blobId.getPartition().toPathString(), findToken, maxTotalSize);
    List<CloudBlobMetadata> firstResult = findResult.getMetadataList();
    findToken = (CosmosUpdateTimeFindToken) findResult.getUpdatedFindToken();
    assertEquals("Did not get expected doc count", maxTotalSize / chunkSize, firstResult.size());
    docList = docList.subList(firstResult.size(), docList.size());
    assertEquals("Find token has wrong last update time", findToken.getLastUpdateTime(), firstResult.get(firstResult.size() - 1).getLastUpdateTime());
    assertEquals("Find token has wrong lastUpdateTimeReadBlobIds", findToken.getLastUpdateTimeReadBlobIds(), new HashSet<>(Collections.singletonList(firstResult.get(firstResult.size() - 1).getId())));
    mockObservableForQuery(docList, mockResponse);
    findResult = azureDest.findEntriesSince(blobId.getPartition().toPathString(), findToken, maxTotalSize);
    List<CloudBlobMetadata> secondResult = findResult.getMetadataList();
    findToken = (CosmosUpdateTimeFindToken) findResult.getUpdatedFindToken();
    assertEquals("Unexpected doc count", maxTotalSize / chunkSize, secondResult.size());
    assertEquals("Unexpected first blobId", blobIdList.get(firstResult.size()), secondResult.get(0).getId());
    mockObservableForQuery(docList, mockResponse);
    findResult = azureDest.findEntriesSince(blobId.getPartition().toPathString(), findToken, chunkSize / 2);
    // Rerun with max size below blob size, and make sure it returns one result
    assertEquals("Expected one result", 1, findResult.getMetadataList().size());
}
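The repeated findEntriesSince calls above demonstrate token-based paging: each call returns a batch bounded by maxTotalSize plus an updated CosmosUpdateTimeFindToken that resumes where the previous batch ended. A full paging loop built from the same calls might look like the sketch below; it uses only the APIs exercised by the test, partitionPath and maxTotalSize stand in for the values used above, and stopping on an empty batch is an assumption about how a caller would detect the end.

// Sketch only: drain all entries for a partition in size-bounded batches.
CosmosUpdateTimeFindToken token = new CosmosUpdateTimeFindToken();
List<CloudBlobMetadata> all = new ArrayList<>();
while (true) {
    FindResult result = azureDest.findEntriesSince(partitionPath, token, maxTotalSize);
    List<CloudBlobMetadata> batch = result.getMetadataList();
    if (batch.isEmpty()) {
        break; // nothing newer than the token's last update time
    }
    all.addAll(batch);
    token = (CosmosUpdateTimeFindToken) result.getUpdatedFindToken();
}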
Also used: CloudBlobMetadata(com.github.ambry.cloud.CloudBlobMetadata) ArrayList(java.util.ArrayList) FeedResponse(com.microsoft.azure.cosmosdb.FeedResponse) Document(com.microsoft.azure.cosmosdb.Document) SqlQuerySpec(com.microsoft.azure.cosmosdb.SqlQuerySpec) FeedOptions(com.microsoft.azure.cosmosdb.FeedOptions) BlobId(com.github.ambry.commons.BlobId) FindResult(com.github.ambry.cloud.FindResult)

Example 15 with BlobId

Use of com.github.ambry.commons.BlobId in project ambry by LinkedIn.

From the class AzureCloudDestinationTest, method testFindEntriesSinceWithNonUniqueUpdateTimes.

/**
 * Test findEntriesSince with entries having non-unique updateTimes.
 * @throws Exception if the mocked query or the assertions fail.
 */
private void testFindEntriesSinceWithNonUniqueUpdateTimes(AzureCloudDestination azureDest) throws Exception {
    long chunkSize = 110000;
    // between 9 and 10 chunks
    long maxTotalSize = 1000000;
    long startTime = System.currentTimeMillis() - TimeUnit.DAYS.toMillis(1);
    int totalBlobs = 20;
    // create metadata list where total size > maxTotalSize
    List<Document> docList = new ArrayList<>();
    List<String> blobIdList = new ArrayList<>();
    for (int j = 0; j < totalBlobs - 1; j++) {
        BlobId blobId = generateBlobId();
        blobIdList.add(blobId.getID());
        CloudBlobMetadata inputMetadata = new CloudBlobMetadata(blobId, creationTime, Utils.Infinite_Time, chunkSize, CloudBlobMetadata.EncryptionOrigin.NONE);
        docList.add(AzureTestUtils.createDocumentFromCloudBlobMetadata(inputMetadata, startTime));
    }
    BlobId blobId = generateBlobId();
    blobIdList.add(blobId.getID());
    CloudBlobMetadata inputMetadata = new CloudBlobMetadata(blobId, creationTime, Utils.Infinite_Time, chunkSize, CloudBlobMetadata.EncryptionOrigin.NONE);
    docList.add(AzureTestUtils.createDocumentFromCloudBlobMetadata(inputMetadata, startTime + 1));
    Observable<FeedResponse<Document>> mockResponse = mock(Observable.class);
    mockObservableForQuery(docList, mockResponse);
    when(mockumentClient.queryDocuments(anyString(), any(SqlQuerySpec.class), any(FeedOptions.class))).thenReturn(mockResponse);
    CosmosUpdateTimeFindToken findToken = new CosmosUpdateTimeFindToken();
    // Run the query
    FindResult findResult = azureDest.findEntriesSince(blobId.getPartition().toPathString(), findToken, maxTotalSize);
    List<CloudBlobMetadata> firstResult = findResult.getMetadataList();
    findToken = (CosmosUpdateTimeFindToken) findResult.getUpdatedFindToken();
    assertEquals("Did not get expected doc count", maxTotalSize / chunkSize, firstResult.size());
    assertEquals("Find token has wrong last update time", findToken.getLastUpdateTime(), firstResult.get(firstResult.size() - 1).getLastUpdateTime());
    Set<String> resultBlobIdSet = firstResult.stream().map(CloudBlobMetadata::getId).collect(Collectors.toSet());
    assertEquals("Find token has wrong lastUpdateTimeReadBlobIds", findToken.getLastUpdateTimeReadBlobIds(), resultBlobIdSet);
    mockObservableForQuery(docList, mockResponse);
    findResult = azureDest.findEntriesSince(blobId.getPartition().toPathString(), findToken, maxTotalSize);
    List<CloudBlobMetadata> secondResult = findResult.getMetadataList();
    CosmosUpdateTimeFindToken secondFindToken = (CosmosUpdateTimeFindToken) findResult.getUpdatedFindToken();
    assertEquals("Unexpected doc count", maxTotalSize / chunkSize, secondResult.size());
    assertEquals("Unexpected first blobId", blobIdList.get(firstResult.size()), secondResult.get(0).getId());
    assertEquals("Find token has wrong last update time", secondFindToken.getLastUpdateTime(), firstResult.get(firstResult.size() - 1).getLastUpdateTime());
    resultBlobIdSet.addAll(secondResult.stream().map(CloudBlobMetadata::getId).collect(Collectors.toSet()));
    assertEquals("Find token has wrong lastUpdateTimeReadBlobIds", secondFindToken.getLastUpdateTimeReadBlobIds(), resultBlobIdSet);
    mockObservableForQuery(docList, mockResponse);
    // Rerun with max size below blob size, and make sure it returns one result
    findResult = azureDest.findEntriesSince(blobId.getPartition().toPathString(), findToken, chunkSize / 2);
    List<CloudBlobMetadata> finalResult = findResult.getMetadataList();
    assertEquals("Expected one result", 1, finalResult.size());
    mockObservableForQuery(docList, mockResponse);
    // Rerun final time, and make sure that it returns all the remaining blobs
    findResult = azureDest.findEntriesSince(blobId.getPartition().toPathString(), secondFindToken, maxTotalSize);
    List<CloudBlobMetadata> thirdResult = findResult.getMetadataList();
    CosmosUpdateTimeFindToken thirdFindToken = (CosmosUpdateTimeFindToken) findResult.getUpdatedFindToken();
    assertEquals("Unexpected doc count", totalBlobs - (firstResult.size() + secondResult.size()), thirdResult.size());
    assertEquals("Unexpected first blobId", blobIdList.get(firstResult.size() + secondResult.size()), thirdResult.get(0).getId());
    assertEquals("Find token has wrong last update time", thirdFindToken.getLastUpdateTime(), startTime + 1);
    assertEquals("Find token has wrong lastUpdateTimeReadBlobIds", thirdFindToken.getLastUpdateTimeReadBlobIds(), new HashSet<>(Collections.singletonList(thirdResult.get(thirdResult.size() - 1).getId())));
}
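With non-unique update times, the timestamp alone cannot mark progress: entries that share the boundary timestamp would be re-returned on the next page. The assertions above verify that lastUpdateTimeReadBlobIds records the IDs already seen at that timestamp. Conceptually, a page can then be filtered like the sketch below; this illustrates the idea using only the accessors exercised by the test and is not the actual AzureCloudDestination code.

// Sketch only: drop boundary-timestamp entries that the token has already returned.
private static List<CloudBlobMetadata> filterAlreadySeen(List<CloudBlobMetadata> page,
        CosmosUpdateTimeFindToken token) {
    return page.stream()
        .filter(m -> m.getLastUpdateTime() != token.getLastUpdateTime()
            || !token.getLastUpdateTimeReadBlobIds().contains(m.getId()))
        .collect(Collectors.toList());
}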
Also used: CloudBlobMetadata(com.github.ambry.cloud.CloudBlobMetadata) ArrayList(java.util.ArrayList) FeedResponse(com.microsoft.azure.cosmosdb.FeedResponse) Document(com.microsoft.azure.cosmosdb.Document) SqlQuerySpec(com.microsoft.azure.cosmosdb.SqlQuerySpec) FeedOptions(com.microsoft.azure.cosmosdb.FeedOptions) BlobId(com.github.ambry.commons.BlobId) FindResult(com.github.ambry.cloud.FindResult)

Aggregations

BlobId (com.github.ambry.commons.BlobId): 192
ArrayList (java.util.ArrayList): 83
Test (org.junit.Test): 75
PartitionId (com.github.ambry.clustermap.PartitionId): 52
VerifiableProperties (com.github.ambry.config.VerifiableProperties): 45
BlobProperties (com.github.ambry.messageformat.BlobProperties): 45
IOException (java.io.IOException): 43
ByteBuffer (java.nio.ByteBuffer): 43
MockClusterMap (com.github.ambry.clustermap.MockClusterMap): 40
MockPartitionId (com.github.ambry.clustermap.MockPartitionId): 40
HashMap (java.util.HashMap): 40
DataInputStream (java.io.DataInputStream): 39
MessageInfo (com.github.ambry.store.MessageInfo): 36
List (java.util.List): 34
Properties (java.util.Properties): 34
CloudBlobMetadata (com.github.ambry.cloud.CloudBlobMetadata): 31
Map (java.util.Map): 30
GetResponse (com.github.ambry.protocol.GetResponse): 28
PartitionRequestInfo (com.github.ambry.protocol.PartitionRequestInfo): 28
StoreKey (com.github.ambry.store.StoreKey): 26