Example 96 with ClusterMapConfig

Use of com.github.ambry.config.ClusterMapConfig in project ambry by LinkedIn.

The class ServerWritePerformance, method main.

public static void main(String[] args) {
    FileWriter blobIdsWriter = null;
    FileWriter performanceWriter = null;
    ConnectionPool connectionPool = null;
    try {
        OptionParser parser = new OptionParser();
        ArgumentAcceptingOptionSpec<String> hardwareLayoutOpt = parser.accepts("hardwareLayout", "The path of the hardware layout file").withRequiredArg().describedAs("hardware_layout").ofType(String.class);
        ArgumentAcceptingOptionSpec<String> partitionLayoutOpt = parser.accepts("partitionLayout", "The path of the partition layout file").withRequiredArg().describedAs("partition_layout").ofType(String.class);
        ArgumentAcceptingOptionSpec<Integer> numberOfWritersOpt = parser.accepts("numberOfWriters", "The number of writers that issue put request").withRequiredArg().describedAs("The number of writers").ofType(Integer.class).defaultsTo(4);
        ArgumentAcceptingOptionSpec<Integer> minBlobSizeOpt = parser.accepts("minBlobSizeInBytes", "The minimum size of the blob that can be put").withRequiredArg().describedAs("The minimum blob size in bytes").ofType(Integer.class).defaultsTo(51200);
        ArgumentAcceptingOptionSpec<Integer> maxBlobSizeOpt = parser.accepts("maxBlobSizeInBytes", "The maximum size of the blob that can be put").withRequiredArg().describedAs("The maximum blob size in bytes").ofType(Integer.class).defaultsTo(4194304);
        ArgumentAcceptingOptionSpec<Integer> writesPerSecondOpt = parser.accepts("writesPerSecond", "The rate at which writes need to be performed").withRequiredArg().describedAs("The number of writes per second").ofType(Integer.class).defaultsTo(1000);
        ArgumentAcceptingOptionSpec<Long> measurementIntervalOpt = parser.accepts("measurementInterval", "The interval in seconds to report performance results").withOptionalArg().describedAs("The CPU time spent for putting blobs, not wall time").ofType(Long.class).defaultsTo(300L);
        ArgumentAcceptingOptionSpec<Boolean> verboseLoggingOpt = parser.accepts("enableVerboseLogging", "Enables verbose logging").withOptionalArg().describedAs("Enable verbose logging").ofType(Boolean.class).defaultsTo(false);
        ArgumentAcceptingOptionSpec<String> sslEnabledDatacentersOpt = parser.accepts("sslEnabledDatacenters", "Datacenters to which ssl should be enabled").withOptionalArg().describedAs("Comma separated list").ofType(String.class).defaultsTo("");
        ArgumentAcceptingOptionSpec<String> sslKeystorePathOpt = parser.accepts("sslKeystorePath", "SSL key store path").withOptionalArg().describedAs("The file path of SSL key store").defaultsTo("").ofType(String.class);
        ArgumentAcceptingOptionSpec<String> sslKeystoreTypeOpt = parser.accepts("sslKeystoreType", "SSL key store type").withOptionalArg().describedAs("The type of SSL key store").defaultsTo("").ofType(String.class);
        ArgumentAcceptingOptionSpec<String> sslTruststorePathOpt = parser.accepts("sslTruststorePath", "SSL trust store path").withOptionalArg().describedAs("The file path of SSL trust store").defaultsTo("").ofType(String.class);
        ArgumentAcceptingOptionSpec<String> sslKeystorePasswordOpt = parser.accepts("sslKeystorePassword", "SSL key store password").withOptionalArg().describedAs("The password of SSL key store").defaultsTo("").ofType(String.class);
        ArgumentAcceptingOptionSpec<String> sslKeyPasswordOpt = parser.accepts("sslKeyPassword", "SSL key password").withOptionalArg().describedAs("The password of SSL private key").defaultsTo("").ofType(String.class);
        ArgumentAcceptingOptionSpec<String> sslTruststorePasswordOpt = parser.accepts("sslTruststorePassword", "SSL trust store password").withOptionalArg().describedAs("The password of SSL trust store").defaultsTo("").ofType(String.class);
        ArgumentAcceptingOptionSpec<String> sslCipherSuitesOpt = parser.accepts("sslCipherSuites", "SSL enabled cipher suites").withOptionalArg().describedAs("Comma separated list").defaultsTo("TLS_RSA_WITH_AES_128_CBC_SHA").ofType(String.class);
        OptionSet options = parser.parse(args);
        ArrayList<OptionSpec> listOpt = new ArrayList<>();
        listOpt.add(hardwareLayoutOpt);
        listOpt.add(partitionLayoutOpt);
        ToolUtils.ensureOrExit(listOpt, options, parser);
        long measurementIntervalNs = options.valueOf(measurementIntervalOpt) * SystemTime.NsPerSec;
        ToolUtils.validateSSLOptions(options, parser, sslEnabledDatacentersOpt, sslKeystorePathOpt, sslKeystoreTypeOpt, sslTruststorePathOpt, sslKeystorePasswordOpt, sslKeyPasswordOpt, sslTruststorePasswordOpt);
        String sslEnabledDatacenters = options.valueOf(sslEnabledDatacentersOpt);
        Properties sslProperties;
        if (sslEnabledDatacenters.length() != 0) {
            sslProperties = ToolUtils.createSSLProperties(sslEnabledDatacenters, options.valueOf(sslKeystorePathOpt), options.valueOf(sslKeystoreTypeOpt), options.valueOf(sslKeystorePasswordOpt), options.valueOf(sslKeyPasswordOpt), options.valueOf(sslTruststorePathOpt), options.valueOf(sslTruststorePasswordOpt), options.valueOf(sslCipherSuitesOpt));
        } else {
            sslProperties = new Properties();
        }
        ToolUtils.addClusterMapProperties(sslProperties);
        int numberOfWriters = options.valueOf(numberOfWritersOpt);
        int writesPerSecond = options.valueOf(writesPerSecondOpt);
        boolean enableVerboseLogging = options.has(verboseLoggingOpt);
        int minBlobSize = options.valueOf(minBlobSizeOpt);
        int maxBlobSize = options.valueOf(maxBlobSizeOpt);
        if (enableVerboseLogging) {
            System.out.println("Enabled verbose logging");
        }
        final AtomicLong totalTimeTaken = new AtomicLong(0);
        final AtomicLong totalWrites = new AtomicLong(0);
        String hardwareLayoutPath = options.valueOf(hardwareLayoutOpt);
        String partitionLayoutPath = options.valueOf(partitionLayoutOpt);
        ClusterMapConfig clusterMapConfig = new ClusterMapConfig(new VerifiableProperties(sslProperties));
        ClusterMap map = ((ClusterAgentsFactory) Utils.getObj(clusterMapConfig.clusterMapClusterAgentsFactory, clusterMapConfig, hardwareLayoutPath, partitionLayoutPath)).getClusterMap();
        File logFile = new File(System.getProperty("user.dir"), "writeperflog");
        blobIdsWriter = new FileWriter(logFile);
        File performanceFile = new File(System.getProperty("user.dir"), "writeperfresult");
        performanceWriter = new FileWriter(performanceFile);
        final CountDownLatch latch = new CountDownLatch(numberOfWriters);
        final AtomicBoolean shutdown = new AtomicBoolean(false);
        // attach shutdown handler to catch control-c
        Runtime.getRuntime().addShutdownHook(new Thread() {

            public void run() {
                try {
                    System.out.println("Shutdown invoked");
                    shutdown.set(true);
                    latch.await();
                    System.out.println("Total writes : " + totalWrites.get() + "  Total time taken : " + totalTimeTaken.get() + " Nano Seconds  Average time taken per write " + ((double) totalTimeTaken.get()) / SystemTime.NsPerSec / totalWrites.get() + " Seconds");
                } catch (Exception e) {
                    System.out.println("Error while shutting down " + e);
                }
            }
        });
        Throttler throttler = new Throttler(writesPerSecond, 100, true, SystemTime.getInstance());
        Thread[] threadIndexPerf = new Thread[numberOfWriters];
        ConnectionPoolConfig connectionPoolConfig = new ConnectionPoolConfig(new VerifiableProperties(new Properties()));
        VerifiableProperties vProps = new VerifiableProperties(sslProperties);
        SSLConfig sslConfig = new SSLConfig(vProps);
        clusterMapConfig = new ClusterMapConfig(vProps);
        connectionPool = new BlockingChannelConnectionPool(connectionPoolConfig, sslConfig, clusterMapConfig, new MetricRegistry());
        connectionPool.start();
        for (int i = 0; i < numberOfWriters; i++) {
            threadIndexPerf[i] = new Thread(new ServerWritePerfRun(i, throttler, shutdown, latch, minBlobSize, maxBlobSize, blobIdsWriter, performanceWriter, totalTimeTaken, totalWrites, measurementIntervalNs, enableVerboseLogging, map, connectionPool));
            threadIndexPerf[i].start();
        }
        for (int i = 0; i < numberOfWriters; i++) {
            threadIndexPerf[i].join();
        }
    } catch (Exception e) {
        System.err.println("Error on exit " + e);
    } finally {
        if (blobIdsWriter != null) {
            try {
                blobIdsWriter.close();
            } catch (Exception e) {
                System.out.println("Error when closing the blob id writer");
            }
        }
        if (performanceWriter != null) {
            try {
                performanceWriter.close();
            } catch (Exception e) {
                System.out.println("Error when closing the performance writer");
            }
        }
        if (connectionPool != null) {
            connectionPool.shutdown();
        }
    }
}
Also used : BlockingChannelConnectionPool(com.github.ambry.network.BlockingChannelConnectionPool) ConnectionPool(com.github.ambry.network.ConnectionPool) OptionSpec(joptsimple.OptionSpec) ArgumentAcceptingOptionSpec(joptsimple.ArgumentAcceptingOptionSpec) ClusterMap(com.github.ambry.clustermap.ClusterMap) ConnectionPoolConfig(com.github.ambry.config.ConnectionPoolConfig) FileWriter(java.io.FileWriter) ArrayList(java.util.ArrayList) BlobProperties(com.github.ambry.messageformat.BlobProperties) Properties(java.util.Properties) VerifiableProperties(com.github.ambry.config.VerifiableProperties) OptionParser(joptsimple.OptionParser) ClusterAgentsFactory(com.github.ambry.clustermap.ClusterAgentsFactory) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) Throttler(com.github.ambry.utils.Throttler) SSLConfig(com.github.ambry.config.SSLConfig) MetricRegistry(com.codahale.metrics.MetricRegistry) CountDownLatch(java.util.concurrent.CountDownLatch) ClusterMapConfig(com.github.ambry.config.ClusterMapConfig) UnexpectedException(java.rmi.UnexpectedException) AtomicLong(java.util.concurrent.atomic.AtomicLong) OptionSet(joptsimple.OptionSet) File(java.io.File)
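All five examples in this set share the same bootstrap step: wrap a Properties object in VerifiableProperties, build a ClusterMapConfig from it, reflectively instantiate the configured ClusterAgentsFactory, and ask the factory for the ClusterMap. Below is a minimal sketch of just that step, distilled from the example above; the class name and layout file paths are hypothetical.

import com.github.ambry.clustermap.ClusterAgentsFactory;
import com.github.ambry.clustermap.ClusterMap;
import com.github.ambry.config.ClusterMapConfig;
import com.github.ambry.config.VerifiableProperties;
import com.github.ambry.utils.Utils;
import java.util.Properties;

public class ClusterMapBootstrap {

    public static void main(String[] args) throws Exception {
        // Hypothetical layout file paths, for illustration only.
        String hardwareLayoutPath = "config/HardwareLayout.json";
        String partitionLayoutPath = "config/PartitionLayout.json";
        // An empty Properties object falls back to ClusterMapConfig defaults,
        // including the default clusterMapClusterAgentsFactory class name
        // (Examples 99 and 100 below do exactly this).
        ClusterMapConfig clusterMapConfig = new ClusterMapConfig(new VerifiableProperties(new Properties()));
        // Utils.getObj reflectively constructs the factory with these arguments,
        // exactly as the tools above do.
        ClusterMap map = ((ClusterAgentsFactory) Utils.getObj(clusterMapConfig.clusterMapClusterAgentsFactory, clusterMapConfig, hardwareLayoutPath, partitionLayoutPath)).getClusterMap();
        System.out.println("Local datacenter id: " + map.getLocalDatacenterId());
        map.close();
    }
}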

Example 97 with ClusterMapConfig

Use of com.github.ambry.config.ClusterMapConfig in project ambry by LinkedIn.

The class BlobValidator, method main.

/**
 * Runs the BlobValidator
 * @param args associated arguments.
 * @throws Exception if an error occurs during validation.
 */
public static void main(String[] args) throws Exception {
    VerifiableProperties verifiableProperties = ToolUtils.getVerifiableProperties(args);
    BlobValidatorConfig config = new BlobValidatorConfig(verifiableProperties);
    ClusterMapConfig clusterMapConfig = new ClusterMapConfig(verifiableProperties);
    ClusterMap clusterMap = ((ClusterAgentsFactory) Utils.getObj(clusterMapConfig.clusterMapClusterAgentsFactory, clusterMapConfig, config.hardwareLayoutFilePath, config.partitionLayoutFilePath)).getClusterMap();
    List<BlobId> blobIds = getBlobIds(config, clusterMap);
    SSLFactory sslFactory = !clusterMapConfig.clusterMapSslEnabledDatacenters.isEmpty() ? SSLFactory.getNewInstance(new SSLConfig(verifiableProperties)) : null;
    StoreKeyFactory storeKeyFactory = new BlobIdFactory(clusterMap);
    BlobValidator validator = new BlobValidator(clusterMap, config.replicasToContactPerSec, sslFactory, verifiableProperties);
    LOGGER.info("Validation starting");
    switch(config.typeOfOperation) {
        case ValidateBlobOnAllReplicas:
            Map<BlobId, List<String>> mismatchDetailsMap = validator.validateBlobsOnAllReplicas(blobIds, config.getOption, clusterMap, storeKeyFactory);
            logMismatches(mismatchDetailsMap);
            break;
        case ValidateBlobOnDatacenter:
            if (config.datacenter.isEmpty() || !clusterMap.hasDatacenter(config.datacenter)) {
                throw new IllegalArgumentException("Please provide a valid datacenter");
            }
            mismatchDetailsMap = validator.validateBlobsOnDatacenter(config.datacenter, blobIds, config.getOption, clusterMap, storeKeyFactory);
            logMismatches(mismatchDetailsMap);
            break;
        case ValidateBlobOnReplica:
            DataNodeId dataNodeId = clusterMap.getDataNodeId(config.hostname, config.port);
            if (dataNodeId == null) {
                throw new IllegalArgumentException("Could not find a data node corresponding to " + config.hostname + ":" + config.port);
            }
            List<ServerErrorCode> validErrorCodes = Arrays.asList(ServerErrorCode.No_Error, ServerErrorCode.Blob_Deleted, ServerErrorCode.Blob_Expired);
            Map<BlobId, ServerErrorCode> blobIdToErrorCode = validator.validateBlobsOnReplica(dataNodeId, blobIds, config.getOption, clusterMap, storeKeyFactory);
            for (Map.Entry<BlobId, ServerErrorCode> entry : blobIdToErrorCode.entrySet()) {
                ServerErrorCode errorCode = entry.getValue();
                if (!validErrorCodes.contains(errorCode)) {
                    LOGGER.error("[{}] received error code: {}", entry.getKey(), errorCode);
                }
            }
            break;
        default:
            throw new IllegalStateException("Recognized but unsupported operation: " + config.typeOfOperation);
    }
    LOGGER.info("Validation complete");
    validator.close();
    clusterMap.close();
}
Also used : ClusterMap(com.github.ambry.clustermap.ClusterMap) SSLConfig(com.github.ambry.config.SSLConfig) SSLFactory(com.github.ambry.commons.SSLFactory) VerifiableProperties(com.github.ambry.config.VerifiableProperties) ClusterMapConfig(com.github.ambry.config.ClusterMapConfig) ServerErrorCode(com.github.ambry.server.ServerErrorCode) BlobIdFactory(com.github.ambry.commons.BlobIdFactory) StoreKeyFactory(com.github.ambry.store.StoreKeyFactory) ArrayList(java.util.ArrayList) List(java.util.List) ClusterAgentsFactory(com.github.ambry.clustermap.ClusterAgentsFactory) BlobId(com.github.ambry.commons.BlobId) DataNodeId(com.github.ambry.clustermap.DataNodeId) HashMap(java.util.HashMap) Map(java.util.Map)
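One detail worth isolating from this example: the SSLFactory is built only when the ClusterMapConfig reports SSL-enabled datacenters, and a null factory downstream means plaintext-only connections. Here is a minimal sketch of that guard; the property key is an assumption about how the clusterMapSslEnabledDatacenters field is configured, and a real run would also need the ssl.* keystore and truststore settings that Example 96 builds via ToolUtils.createSSLProperties.

import com.github.ambry.commons.SSLFactory;
import com.github.ambry.config.ClusterMapConfig;
import com.github.ambry.config.SSLConfig;
import com.github.ambry.config.VerifiableProperties;
import java.util.Properties;

public class SslFactoryGuard {

    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        // Assumed config key backing clusterMapSslEnabledDatacenters; verify against
        // the ClusterMapConfig version in use.
        props.setProperty("clustermap.ssl.enabled.datacenters", "DC1,DC2");
        VerifiableProperties verifiableProperties = new VerifiableProperties(props);
        ClusterMapConfig clusterMapConfig = new ClusterMapConfig(verifiableProperties);
        // Build an SSLFactory only when some datacenter actually needs SSL; a null
        // factory tells the calling tool to use plaintext channels.
        SSLFactory sslFactory = !clusterMapConfig.clusterMapSslEnabledDatacenters.isEmpty() ? SSLFactory.getNewInstance(new SSLConfig(verifiableProperties)) : null;
        System.out.println("SSL factory created: " + (sslFactory != null));
    }
}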

Example 98 with ClusterMapConfig

Use of com.github.ambry.config.ClusterMapConfig in project ambry by LinkedIn.

The class DirectoryUploader, method main.

public static void main(String[] args) {
    FileWriter writer = null;
    try {
        OptionParser parser = new OptionParser();
        ArgumentAcceptingOptionSpec<String> rootDirectoryOpt = parser.accepts("rootDirectory", "The root folder from which all the files will be migrated").withRequiredArg().describedAs("root_directory").ofType(String.class);
        ArgumentAcceptingOptionSpec<String> hardwareLayoutOpt = parser.accepts("hardwareLayout", "The path of the hardware layout file").withRequiredArg().describedAs("hardware_layout").ofType(String.class);
        ArgumentAcceptingOptionSpec<String> partitionLayoutOpt = parser.accepts("partitionLayout", "The path of the partition layout file").withRequiredArg().describedAs("partition_layout").ofType(String.class);
        ArgumentAcceptingOptionSpec<Boolean> verboseLoggingOpt = parser.accepts("enableVerboseLogging", "Enables verbose logging").withOptionalArg().describedAs("Enable verbose logging").ofType(Boolean.class).defaultsTo(false);
        ArgumentAcceptingOptionSpec<String> partitionOpt = parser.accepts("partition", "The partition to which the put calls to be made against").withRequiredArg().describedAs("partition").ofType(String.class);
        ArgumentAcceptingOptionSpec<String> datacenterOpt = parser.accepts("datacenter", "The datacenter to which the put calls to be made against").withRequiredArg().describedAs("datacenter").ofType(String.class);
        ArgumentAcceptingOptionSpec<String> outFileOpt = parser.accepts("outFile", "The file to which output should be redirected").withRequiredArg().describedAs("outFile").ofType(String.class);
        // Optional arguments for defining a specific node to write to.
        ArgumentAcceptingOptionSpec<String> nodeHostnameOpt = parser.accepts("nodeHostname", "The hostname of the node to put to (if specifying single node)").withOptionalArg().describedAs("nodeHostname").ofType(String.class);
        ArgumentAcceptingOptionSpec<Integer> nodePortOpt = parser.accepts("nodePort", "The port of the node to put to (if specifying single node)").withOptionalArg().describedAs("nodePort").ofType(Integer.class);
        OptionSet options = parser.parse(args);
        ArrayList<OptionSpec> listOpt = new ArrayList<>();
        listOpt.add(rootDirectoryOpt);
        listOpt.add(hardwareLayoutOpt);
        listOpt.add(partitionLayoutOpt);
        listOpt.add(partitionOpt);
        listOpt.add(datacenterOpt);
        listOpt.add(outFileOpt);
        ToolUtils.ensureOrExit(listOpt, options, parser);
        System.out.println("Starting to parse arguments");
        boolean enableVerboseLogging = options.has(verboseLoggingOpt);
        if (enableVerboseLogging) {
            System.out.println("Enabled verbose logging");
        }
        String rootDirectory = options.valueOf(rootDirectoryOpt);
        if (enableVerboseLogging) {
            System.out.println("Parsed rootdir " + rootDirectory);
        }
        String hardwareLayoutPath = options.valueOf(hardwareLayoutOpt);
        if (enableVerboseLogging) {
            System.out.println("Parsed Hardware layout " + hardwareLayoutPath);
        }
        String partitionLayoutPath = options.valueOf(partitionLayoutOpt);
        if (enableVerboseLogging) {
            System.out.println("Parsed partition layout " + partitionLayoutPath);
        }
        String partition = options.valueOf(partitionOpt);
        if (enableVerboseLogging) {
            System.out.println("Parsed partition " + partition);
        }
        partition = "Partition[" + partition + "]";
        String datacenter = options.valueOf(datacenterOpt);
        if (enableVerboseLogging) {
            System.out.println("Parsed datacenter " + datacenter);
        }
        String nodeHostname = options.valueOf(nodeHostnameOpt);
        if (enableVerboseLogging && nodeHostname != null) {
            System.out.println("Parsed node hostname " + nodeHostname);
        }
        Integer nodePort = options.valueOf(nodePortOpt);
        if (enableVerboseLogging && nodePort != null) {
            System.out.println("Parsed node port " + nodePort);
        }
        String outFile = options.valueOf(outFileOpt);
        if (enableVerboseLogging) {
            System.out.println("Parsed outFile " + outFile);
            System.out.println("Done parsing all args");
        }
        VerifiableProperties vprops = new VerifiableProperties(new Properties());
        ClusterMapConfig clusterMapConfig = new ClusterMapConfig(vprops);
        ClusterMap map = ((ClusterAgentsFactory) Utils.getObj(clusterMapConfig.clusterMapClusterAgentsFactory, clusterMapConfig, hardwareLayoutPath, partitionLayoutPath)).getClusterMap();
        File logFile = new File(outFile);
        writer = new FileWriter(logFile);
        DirectoryUploader directoryUploader = new DirectoryUploader();
        directoryUploader.setPartitionId(map, partition, enableVerboseLogging);
        if (nodeHostname != null && nodePort != null) {
            directoryUploader.setDataNodeId(map, nodeHostname, nodePort, enableVerboseLogging);
        }
        directoryUploader.walkDirectoryToCreateBlobs(rootDirectory, writer, datacenter, map.getLocalDatacenterId(), enableVerboseLogging);
    } catch (Exception e) {
        System.err.println("Error on exit " + e);
    } finally {
        if (writer != null) {
            try {
                writer.close();
            } catch (Exception e) {
                System.out.println("Error when closing the writer");
            }
        }
    }
}
Also used : OptionSpec(joptsimple.OptionSpec) ArgumentAcceptingOptionSpec(joptsimple.ArgumentAcceptingOptionSpec) ClusterMap(com.github.ambry.clustermap.ClusterMap) VerifiableProperties(com.github.ambry.config.VerifiableProperties) FileWriter(java.io.FileWriter) ArrayList(java.util.ArrayList) BlobProperties(com.github.ambry.messageformat.BlobProperties) Properties(java.util.Properties) OptionParser(joptsimple.OptionParser) ClusterMapConfig(com.github.ambry.config.ClusterMapConfig) IOException(java.io.IOException) FileNotFoundException(java.io.FileNotFoundException) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) ClusterAgentsFactory(com.github.ambry.clustermap.ClusterAgentsFactory) OptionSet(joptsimple.OptionSet) File(java.io.File)
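The option handling in these tools follows one joptsimple recipe: declare each option as an ArgumentAcceptingOptionSpec, parse the arguments, then verify the required specs (ToolUtils.ensureOrExit prints usage and exits when one is missing). Below is a stripped-down sketch of the recipe in plain joptsimple with hypothetical option names, replicating the ensure-or-exit check inline.

import java.util.ArrayList;
import joptsimple.ArgumentAcceptingOptionSpec;
import joptsimple.OptionParser;
import joptsimple.OptionSet;
import joptsimple.OptionSpec;

public class OptionRecipe {

    public static void main(String[] args) throws Exception {
        OptionParser parser = new OptionParser();
        // Hypothetical options for illustration.
        ArgumentAcceptingOptionSpec<String> rootDirectoryOpt = parser.accepts("rootDirectory", "The root folder to process").withRequiredArg().describedAs("root_directory").ofType(String.class);
        ArgumentAcceptingOptionSpec<Integer> threadsOpt = parser.accepts("threads", "Worker thread count").withOptionalArg().describedAs("threads").ofType(Integer.class).defaultsTo(4);
        OptionSet options = parser.parse(args);
        // Replicates what ToolUtils.ensureOrExit does in the tools above:
        // exit with usage output if a required option is absent.
        ArrayList<OptionSpec> requiredOpts = new ArrayList<>();
        requiredOpts.add(rootDirectoryOpt);
        for (OptionSpec opt : requiredOpts) {
            if (!options.has(opt)) {
                System.err.println("Missing required argument: " + opt);
                parser.printHelpOn(System.err);
                System.exit(1);
            }
        }
        System.out.println("rootDirectory=" + options.valueOf(rootDirectoryOpt) + ", threads=" + options.valueOf(threadsOpt));
    }
}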

Example 99 with ClusterMapConfig

Use of com.github.ambry.config.ClusterMapConfig in project ambry by LinkedIn.

The class IndexReadPerformance, method main.

public static void main(String[] args) {
    try {
        OptionParser parser = new OptionParser();
        ArgumentAcceptingOptionSpec<String> logToReadOpt = parser.accepts("logToRead", "The log that needs to be replayed for traffic").withRequiredArg().describedAs("log_to_read").ofType(String.class);
        ArgumentAcceptingOptionSpec<String> hardwareLayoutOpt = parser.accepts("hardwareLayout", "The path of the hardware layout file").withRequiredArg().describedAs("hardware_layout").ofType(String.class);
        ArgumentAcceptingOptionSpec<String> partitionLayoutOpt = parser.accepts("partitionLayout", "The path of the partition layout file").withRequiredArg().describedAs("partition_layout").ofType(String.class);
        ArgumentAcceptingOptionSpec<Integer> numberOfReadersOpt = parser.accepts("numberOfReaders", "The number of readers that read to a random index concurrently").withRequiredArg().describedAs("The number of readers").ofType(Integer.class).defaultsTo(4);
        ArgumentAcceptingOptionSpec<Integer> readsPerSecondOpt = parser.accepts("readsPerSecond", "The rate at which reads need to be performed").withRequiredArg().describedAs("The number of reads per second").ofType(Integer.class).defaultsTo(1000);
        ArgumentAcceptingOptionSpec<Boolean> verboseLoggingOpt = parser.accepts("enableVerboseLogging", "Enables verbose logging").withOptionalArg().describedAs("Enable verbose logging").ofType(Boolean.class).defaultsTo(false);
        OptionSet options = parser.parse(args);
        ArrayList<OptionSpec> listOpt = new ArrayList<>();
        listOpt.add(logToReadOpt);
        listOpt.add(hardwareLayoutOpt);
        listOpt.add(partitionLayoutOpt);
        ToolUtils.ensureOrExit(listOpt, options, parser);
        String logToRead = options.valueOf(logToReadOpt);
        int numberOfReaders = options.valueOf(numberOfReadersOpt);
        int readsPerSecond = options.valueOf(readsPerSecondOpt);
        boolean enableVerboseLogging = options.has(verboseLoggingOpt);
        if (enableVerboseLogging) {
            System.out.println("Enabled verbose logging");
        }
        String hardwareLayoutPath = options.valueOf(hardwareLayoutOpt);
        String partitionLayoutPath = options.valueOf(partitionLayoutOpt);
        ClusterMapConfig clusterMapConfig = new ClusterMapConfig(new VerifiableProperties(new Properties()));
        ClusterMap map = ((ClusterAgentsFactory) Utils.getObj(clusterMapConfig.clusterMapClusterAgentsFactory, clusterMapConfig, hardwareLayoutPath, partitionLayoutPath)).getClusterMap();
        StoreKeyFactory factory = new BlobIdFactory(map);
        // Read the log and get the index directories and create the indexes
        final BufferedReader br = new BufferedReader(new FileReader(logToRead));
        final HashMap<String, IndexPayload> hashes = new HashMap<String, IndexPayload>();
        String line;
        MetricRegistry metricRegistry = new MetricRegistry();
        StoreMetrics metrics = new StoreMetrics(metricRegistry);
        ScheduledExecutorService s = Utils.newScheduler(numberOfReaders, "index", true);
        DiskSpaceAllocator diskSpaceAllocator = new DiskSpaceAllocator(false, null, 0, new StorageManagerMetrics(metricRegistry));
        Properties props = new Properties();
        props.setProperty("store.index.memory.size.bytes", "1048576");
        props.setProperty("store.segment.size.in.bytes", "1000");
        StoreConfig config = new StoreConfig(new VerifiableProperties(props));
        Log log = new Log(System.getProperty("user.dir"), 1000, diskSpaceAllocator, config, metrics, null);
        final AtomicLong totalTimeTaken = new AtomicLong(0);
        final AtomicLong totalReads = new AtomicLong(0);
        final CountDownLatch latch = new CountDownLatch(numberOfReaders);
        final AtomicBoolean shutdown = new AtomicBoolean(false);
        // attach shutdown handler to catch control-c
        Runtime.getRuntime().addShutdownHook(new Thread() {

            public void run() {
                try {
                    System.out.println("Shutdown invoked");
                    shutdown.set(true);
                    latch.await();
                    System.out.println("Total reads : " + totalReads.get() + "  Total time taken : " + totalTimeTaken.get() + " Nano Seconds  Average time taken per read " + ((double) totalReads.get() / totalTimeTaken.get()) / SystemTime.NsPerSec + " Seconds");
                } catch (Exception e) {
                    System.out.println("Error while shutting down " + e);
                }
            }
        });
        ScheduledExecutorService scheduleReadLog = Utils.newScheduler(1, true);
        while ((line = br.readLine()) != null) {
            if (line.startsWith("logdir")) {
                String[] logdirs = line.split("-");
                BlobIndexMetrics metricIndex = new BlobIndexMetrics(logdirs[1], s, log, enableVerboseLogging, totalReads, totalTimeTaken, totalReads, config, null, factory);
                hashes.put(logdirs[1], new IndexPayload(metricIndex, new HashSet<String>()));
            } else {
                break;
            }
        }
        // read next batch of ids after 2 minutes
        scheduleReadLog.scheduleAtFixedRate(new Runnable() {

            @Override
            public void run() {
                populateIds(br, hashes);
            }
        }, 0, 120, TimeUnit.SECONDS);
        Throttler throttler = new Throttler(readsPerSecond, 100, true, SystemTime.getInstance());
        Thread[] threadIndexPerf = new Thread[numberOfReaders];
        for (int i = 0; i < numberOfReaders; i++) {
            threadIndexPerf[i] = new Thread(new IndexReadPerfRun(hashes, throttler, shutdown, latch, map));
            threadIndexPerf[i].start();
        }
        for (int i = 0; i < numberOfReaders; i++) {
            threadIndexPerf[i].join();
        }
        br.close();
    } catch (Exception e) {
        System.out.println("Exiting process with exception " + e);
    }
}
Also used : OptionSpec(joptsimple.OptionSpec) ArgumentAcceptingOptionSpec(joptsimple.ArgumentAcceptingOptionSpec) ClusterMap(com.github.ambry.clustermap.ClusterMap) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) Properties(java.util.Properties) VerifiableProperties(com.github.ambry.config.VerifiableProperties) OptionParser(joptsimple.OptionParser) FileReader(java.io.FileReader) ClusterAgentsFactory(com.github.ambry.clustermap.ClusterAgentsFactory) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) HashSet(java.util.HashSet) Throttler(com.github.ambry.utils.Throttler) ScheduledExecutorService(java.util.concurrent.ScheduledExecutorService) MetricRegistry(com.codahale.metrics.MetricRegistry) CountDownLatch(java.util.concurrent.CountDownLatch) ClusterMapConfig(com.github.ambry.config.ClusterMapConfig) BlobIdFactory(com.github.ambry.commons.BlobIdFactory) AtomicLong(java.util.concurrent.atomic.AtomicLong) BufferedReader(java.io.BufferedReader) StoreConfig(com.github.ambry.config.StoreConfig) OptionSet(joptsimple.OptionSet)
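Both performance tools pair a Throttler with a CountDownLatch-guarded shutdown hook: each worker calls the throttler once per operation to hold the target rate, and the control-c hook waits on the latch so the summary line prints only after every worker has exited. Below is a minimal sketch of the pattern; the Throttler constructor arguments mirror the examples above, while the maybeThrottle call follows typical usage in these tools and its exact signature should be treated as an assumption.

import com.github.ambry.utils.SystemTime;
import com.github.ambry.utils.Throttler;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicLong;

public class ThrottledWorkers {

    public static void main(String[] args) throws Exception {
        final int numWorkers = 4;
        final AtomicBoolean shutdown = new AtomicBoolean(false);
        final CountDownLatch latch = new CountDownLatch(numWorkers);
        final AtomicLong totalOps = new AtomicLong(0);
        // 1000 ops/sec overall, checked over 100 ms windows, throttling down only
        // (the same arguments the tools above pass).
        final Throttler throttler = new Throttler(1000, 100, true, SystemTime.getInstance());
        // Attach shutdown handler to catch control-c, as in the examples.
        Runtime.getRuntime().addShutdownHook(new Thread(() -> {
            shutdown.set(true);
            try {
                // Wait for every worker to finish before reporting.
                latch.await();
            } catch (InterruptedException ignored) {
            }
            System.out.println("Total ops : " + totalOps.get());
        }));
        for (int i = 0; i < numWorkers; i++) {
            new Thread(() -> {
                try {
                    while (!shutdown.get()) {
                        // Stand-in for the real put/read operation.
                        totalOps.incrementAndGet();
                        // Assumed signature: sleeps when the observed rate exceeds the target.
                        throttler.maybeThrottle(1);
                    }
                } catch (Exception e) {
                    System.out.println("Worker error " + e);
                } finally {
                    latch.countDown();
                }
            }).start();
        }
    }
}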

Example 100 with ClusterMapConfig

Use of com.github.ambry.config.ClusterMapConfig in project ambry by LinkedIn.

The class HardDeleteVerifier, method main.

public static void main(String[] args) {
    try {
        OptionParser parser = new OptionParser();
        ArgumentAcceptingOptionSpec<String> hardwareLayoutOpt = parser.accepts("hardwareLayout", "The path of the hardware layout file").withRequiredArg().describedAs("hardware_layout").ofType(String.class);
        ArgumentAcceptingOptionSpec<String> partitionLayoutOpt = parser.accepts("partitionLayout", "The path of the partition layout file").withRequiredArg().describedAs("partition_layout").ofType(String.class);
        ArgumentAcceptingOptionSpec<String> dataDirOpt = parser.accepts("dataDir", "The data directory of the partition/replica that needs to be verified for hard deletes.").withRequiredArg().describedAs("data_dir").ofType(String.class);
        ArgumentAcceptingOptionSpec<String> oldDataDirOpt = parser.accepts("oldDataDir", "[Optional] The data directory of the partition/replica before hard deletes are run for comparison").withOptionalArg().describedAs("old_data_dir").ofType(String.class);
        ArgumentAcceptingOptionSpec<String> outFileOpt = parser.accepts("outFile", "Output file to redirect to ").withRequiredArg().describedAs("outFile").ofType(String.class);
        OptionSet options = parser.parse(args);
        ArrayList<OptionSpec> requiredOpts = new ArrayList<>();
        requiredOpts.add(hardwareLayoutOpt);
        requiredOpts.add(partitionLayoutOpt);
        requiredOpts.add(dataDirOpt);
        requiredOpts.add(outFileOpt);
        ToolUtils.ensureOrExit(requiredOpts, options, parser);
        String hardwareLayoutPath = options.valueOf(hardwareLayoutOpt);
        String partitionLayoutPath = options.valueOf(partitionLayoutOpt);
        ClusterMapConfig clusterMapConfig = new ClusterMapConfig(new VerifiableProperties(new Properties()));
        ClusterMap map = ((ClusterAgentsFactory) Utils.getObj(clusterMapConfig.clusterMapClusterAgentsFactory, clusterMapConfig, hardwareLayoutPath, partitionLayoutPath)).getClusterMap();
        String dataDir = options.valueOf(dataDirOpt);
        String oldDataDir = options.has(oldDataDirOpt) ? options.valueOf(oldDataDirOpt) : null;
        String outFile = options.valueOf(outFileOpt);
        HardDeleteVerifier hardDeleteVerifier = new HardDeleteVerifier(map, dataDir, oldDataDir, outFile);
        hardDeleteVerifier.verifyHardDeletes();
    } catch (Exception e) {
        e.printStackTrace();
    }
}
Also used : OptionSpec(joptsimple.OptionSpec) ArgumentAcceptingOptionSpec(joptsimple.ArgumentAcceptingOptionSpec) ClusterMap(com.github.ambry.clustermap.ClusterMap) VerifiableProperties(com.github.ambry.config.VerifiableProperties) ArrayList(java.util.ArrayList) BlobProperties(com.github.ambry.messageformat.BlobProperties) Properties(java.util.Properties) OptionParser(joptsimple.OptionParser) ClusterMapConfig(com.github.ambry.config.ClusterMapConfig) IOException(java.io.IOException) MessageFormatException(com.github.ambry.messageformat.MessageFormatException) ClusterAgentsFactory(com.github.ambry.clustermap.ClusterAgentsFactory) OptionSet(joptsimple.OptionSet)

Aggregations

ClusterMapConfig (com.github.ambry.config.ClusterMapConfig): 100
VerifiableProperties (com.github.ambry.config.VerifiableProperties): 81
Test (org.junit.Test): 56
Properties (java.util.Properties): 52
MetricRegistry (com.codahale.metrics.MetricRegistry): 47
ArrayList (java.util.ArrayList): 31
IOException (java.io.IOException): 26
HashSet (java.util.HashSet): 25
JSONObject (org.json.JSONObject): 25
File (java.io.File): 24
ClusterMap (com.github.ambry.clustermap.ClusterMap): 23
HashMap (java.util.HashMap): 21
MockClusterMap (com.github.ambry.clustermap.MockClusterMap): 19
ClusterAgentsFactory (com.github.ambry.clustermap.ClusterAgentsFactory): 18
DataNodeId (com.github.ambry.clustermap.DataNodeId): 18
StoreConfig (com.github.ambry.config.StoreConfig): 18
ReplicaId (com.github.ambry.clustermap.ReplicaId): 16
List (java.util.List): 16
Map (java.util.Map): 16
CountDownLatch (java.util.concurrent.CountDownLatch): 16