Example 56 with StoreConfig

Use of com.github.ambry.config.StoreConfig in project ambry by linkedin.

The class StoreTestUtils, method createStoreConfig.

/**
 * Create store config with given segment size.
 * @param segmentSize the size of each log segment
 * @param setFilePermission {@code true} if setting file permission is enabled. {@code false} otherwise.
 * @return {@link StoreConfig}
 */
static StoreConfig createStoreConfig(long segmentSize, boolean setFilePermission) {
    Properties properties = new Properties();
    properties.setProperty("store.segment.size.in.bytes", Long.toString(segmentSize));
    properties.setProperty("store.set.file.permission.enabled", Boolean.toString(setFilePermission));
    return new StoreConfig(new VerifiableProperties(properties));
}
Also used : VerifiableProperties(com.github.ambry.config.VerifiableProperties) StoreConfig(com.github.ambry.config.StoreConfig) Properties(java.util.Properties)
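
A minimal usage sketch, assuming the StoreConfig field names storeSegmentSizeInBytes and storeSetFilePermissionEnabled (inferred from the property keys set above):

StoreConfig config = StoreTestUtils.createStoreConfig(16 * 1024 * 1024, true);
// Field names are assumptions that mirror the property keys above.
assert config.storeSegmentSizeInBytes == 16 * 1024 * 1024;
assert config.storeSetFilePermissionEnabled;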

Example 57 with StoreConfig

Use of com.github.ambry.config.StoreConfig in project ambry by linkedin.

The class CompactionVerifier, method main.

/**
 * Main function to trigger the verifier.
 * @param args CLI arguments
 * @throws Exception if the verifier encountered problems.
 */
public static void main(String[] args) throws Exception {
    VerifiableProperties verifiableProperties = ToolUtils.getVerifiableProperties(args);
    CompactionVerifierConfig verifierConfig = new CompactionVerifierConfig(verifiableProperties);
    StoreConfig storeConfig = new StoreConfig(verifiableProperties);
    assert !storeConfig.storeEnableHardDelete : "Hard delete cannot be enabled in the properties";
    ClusterMapConfig clusterMapConfig = new ClusterMapConfig(verifiableProperties);
    ClusterMap clusterMap = new StaticClusterAgentsFactory(clusterMapConfig, verifierConfig.hardwareLayoutFilePath, verifierConfig.partitionLayoutFilePath).getClusterMap();
    StoreKeyFactory storeKeyFactory = Utils.getObj(storeConfig.storeKeyFactory, clusterMap);
    try (CompactionVerifier compactionVerifier = new CompactionVerifier(verifierConfig, storeConfig, storeKeyFactory)) {
        compactionVerifier.verifyCompaction();
    }
    LOGGER.info("Verification completed successfully");
}
Also used : ClusterMap(com.github.ambry.clustermap.ClusterMap) StaticClusterAgentsFactory(com.github.ambry.clustermap.StaticClusterAgentsFactory) VerifiableProperties(com.github.ambry.config.VerifiableProperties) StoreConfig(com.github.ambry.config.StoreConfig) ClusterMapConfig(com.github.ambry.config.ClusterMapConfig)
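
The verifier refuses to run with hard delete enabled; a hedged sketch of building a compliant StoreConfig in code (the property key store.enable.hard.delete is an assumption inferred from the storeEnableHardDelete field checked above):

Properties props = new Properties();
// Keep hard delete disabled so the verifier's precondition holds
// (property key assumed from the field name).
props.setProperty("store.enable.hard.delete", "false");
StoreConfig storeConfig = new StoreConfig(new VerifiableProperties(props));
assert !storeConfig.storeEnableHardDelete;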

Example 58 with StoreConfig

Use of com.github.ambry.config.StoreConfig in project ambry by linkedin.

The class DiskReformatter, method main.

public static void main(String[] args) throws Exception {
    VerifiableProperties properties = ToolUtils.getVerifiableProperties(args);
    DiskReformatterConfig config = new DiskReformatterConfig(properties);
    StoreConfig storeConfig = new StoreConfig(properties);
    ClusterMapConfig clusterMapConfig = new ClusterMapConfig(properties);
    ServerConfig serverConfig = new ServerConfig(properties);
    ClusterAgentsFactory clusterAgentsFactory = Utils.getObj(clusterMapConfig.clusterMapClusterAgentsFactory, clusterMapConfig, config.hardwareLayoutFilePath, config.partitionLayoutFilePath);
    try (ClusterMap clusterMap = clusterAgentsFactory.getClusterMap()) {
        StoreKeyConverterFactory storeKeyConverterFactory = Utils.getObj(serverConfig.serverStoreKeyConverterFactory, properties, clusterMap.getMetricRegistry());
        StoreKeyFactory storeKeyFactory = Utils.getObj(storeConfig.storeKeyFactory, clusterMap);
        DataNodeId dataNodeId = clusterMap.getDataNodeId(config.datanodeHostname, config.datanodePort);
        if (dataNodeId == null) {
            throw new IllegalArgumentException("Did not find node in clustermap with hostname:port - " + config.datanodeHostname + ":" + config.datanodePort);
        }
        DiskReformatter reformatter = new DiskReformatter(dataNodeId, Collections.EMPTY_LIST, config.fetchSizeInBytes, storeConfig, storeKeyFactory, clusterMap, SystemTime.getInstance(), storeKeyConverterFactory.getStoreKeyConverter());
        AtomicInteger exitStatus = new AtomicInteger(0);
        CountDownLatch latch = new CountDownLatch(config.diskMountPaths.length);
        for (int i = 0; i < config.diskMountPaths.length; i++) {
            int finalI = i;
            Runnable runnable = () -> {
                try {
                    reformatter.reformat(config.diskMountPaths[finalI], new File(config.scratchPaths[finalI]));
                    latch.countDown();
                } catch (Exception e) {
                    throw new IllegalStateException(e);
                }
            };
            Thread thread = Utils.newThread(config.diskMountPaths[finalI] + "-reformatter", runnable, true);
            thread.setUncaughtExceptionHandler((t, e) -> {
                exitStatus.set(1);
                logger.error("Reformatting {} failed", config.diskMountPaths[finalI], e);
                latch.countDown();
            });
            thread.start();
        }
        latch.await();
        System.exit(exitStatus.get());
    }
}
Also used : ClusterMap(com.github.ambry.clustermap.ClusterMap) VerifiableProperties(com.github.ambry.config.VerifiableProperties) CountDownLatch(java.util.concurrent.CountDownLatch) ClusterMapConfig(com.github.ambry.config.ClusterMapConfig) ServerConfig(com.github.ambry.config.ServerConfig) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) StoreConfig(com.github.ambry.config.StoreConfig) ClusterAgentsFactory(com.github.ambry.clustermap.ClusterAgentsFactory) DataNodeId(com.github.ambry.clustermap.DataNodeId) File(java.io.File)
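
The one-thread-per-mount-path fan-out above is a reusable shape: workers count down the latch on success, and an uncaught-exception handler records the failure and counts down so await() still returns. A minimal self-contained sketch (doWork and the mount paths are hypothetical):

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicInteger;

public class FanOutSketch {
    public static void main(String[] args) throws InterruptedException {
        String[] mountPaths = { "/mnt/a", "/mnt/b" }; // hypothetical paths
        AtomicInteger exitStatus = new AtomicInteger(0);
        CountDownLatch latch = new CountDownLatch(mountPaths.length);
        for (String path : mountPaths) {
            Thread thread = new Thread(() -> {
                try {
                    doWork(path); // hypothetical per-disk work
                    latch.countDown();
                } catch (Exception e) {
                    // Rethrow so the uncaught-exception handler records the failure.
                    throw new IllegalStateException(e);
                }
            }, path + "-worker");
            thread.setUncaughtExceptionHandler((t, e) -> {
                exitStatus.set(1);
                latch.countDown(); // count down on failure too, so await() returns
            });
            thread.start();
        }
        latch.await();
        System.exit(exitStatus.get());
    }

    private static void doWork(String path) { /* hypothetical */ }
}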

Example 59 with StoreConfig

Use of com.github.ambry.config.StoreConfig in project ambry by linkedin.

The class DumpIndexTool, method main.

public static void main(String[] args) throws Exception {
    final AtomicInteger exitCode = new AtomicInteger(0);
    VerifiableProperties verifiableProperties = ToolUtils.getVerifiableProperties(args);
    DumpIndexToolConfig config = new DumpIndexToolConfig(verifiableProperties);
    ClusterMapConfig clusterMapConfig = new ClusterMapConfig(verifiableProperties);
    try (ClusterMap clusterMap = ((ClusterAgentsFactory) Utils.getObj(clusterMapConfig.clusterMapClusterAgentsFactory, clusterMapConfig, config.hardwareLayoutFilePath, config.partitionLayoutFilePath)).getClusterMap()) {
        StoreConfig storeConfig = new StoreConfig(verifiableProperties);
        // this tool supports only blob IDs. It can become generic if StoreKeyFactory provides a deserFromString method.
        BlobIdFactory blobIdFactory = new BlobIdFactory(clusterMap);
        StoreToolsMetrics metrics = new StoreToolsMetrics(clusterMap.getMetricRegistry());
        StoreMetrics storeMetrics = new StoreMetrics("DumpIndexTool", clusterMap.getMetricRegistry());
        ServerConfig serverConfig = new ServerConfig(verifiableProperties);
        Time time = SystemTime.getInstance();
        Throttler throttler = new Throttler(config.indexEntriesToProcessPerSec, 1000, true, time);
        StoreKeyConverterFactory storeKeyConverterFactory = Utils.getObj(serverConfig.serverStoreKeyConverterFactory, verifiableProperties, clusterMap.getMetricRegistry());
        DumpIndexTool dumpIndexTool = new DumpIndexTool(blobIdFactory, storeConfig, time, metrics, storeMetrics, throttler, storeKeyConverterFactory.getStoreKeyConverter());
        Set<StoreKey> filterKeySet = new HashSet<>();
        for (String key : config.filterSet) {
            filterKeySet.add(new BlobId(key, clusterMap));
        }
        switch(config.typeOfOperation) {
            case DumpIndex:
                dumpIndex(dumpIndexTool, config.pathOfInput, filterKeySet);
                break;
            case DumpIndexSegment:
                dumpIndexSegment(dumpIndexTool, config.pathOfInput, filterKeySet);
                break;
            case VerifyIndex:
                IndexProcessingResults results = dumpIndexTool.processIndex(config.pathOfInput, filterKeySet, time.milliseconds(), config.detectDuplicatesAcrossKeys);
                exitCode.set(reportVerificationResults(config.pathOfInput, results, config.failIfCraftedIdsPresent));
                break;
            case VerifyDataNode:
                DataNodeId dataNodeId = clusterMap.getDataNodeId(config.hostname, config.port);
                if (dataNodeId == null) {
                    logger.error("No data node corresponding to {}:{}", config.hostname, config.port);
                } else {
                    Set<File> replicaDirs = clusterMap.getReplicaIds(dataNodeId).stream().map(replicaId -> new File(replicaId.getMountPath())).collect(Collectors.toSet());
                    Map<File, IndexProcessingResults> resultsByReplica = dumpIndexTool.processIndex(replicaDirs, filterKeySet, config.parallelism, config.detectDuplicatesAcrossKeys);
                    replicaDirs.removeAll(resultsByReplica.keySet());
                    if (replicaDirs.size() != 0) {
                        logger.error("Results obtained missing {}", replicaDirs);
                        exitCode.set(5);
                    } else {
                        resultsByReplica.forEach((replicaDir, result) -> exitCode.set(Math.max(exitCode.get(), reportVerificationResults(replicaDir, result, config.failIfCraftedIdsPresent))));
                    }
                }
                break;
            default:
                throw new IllegalArgumentException("Unrecognized operation: " + config.typeOfOperation);
        }
    }
    System.exit(exitCode.get());
}
Also used : Arrays(java.util.Arrays) Default(com.github.ambry.config.Default) DataNodeId(com.github.ambry.clustermap.DataNodeId) LoggerFactory(org.slf4j.LoggerFactory) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) ConcurrentMap(java.util.concurrent.ConcurrentMap) HashSet(java.util.HashSet) Future(java.util.concurrent.Future) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) Map(java.util.Map) SystemTime(com.github.ambry.utils.SystemTime) Time(com.github.ambry.utils.Time) EnumSet(java.util.EnumSet) ExecutorService(java.util.concurrent.ExecutorService) StoreConfig(com.github.ambry.config.StoreConfig) Logger(org.slf4j.Logger) ClusterAgentsFactory(com.github.ambry.clustermap.ClusterAgentsFactory) VerifiableProperties(com.github.ambry.config.VerifiableProperties) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) Config(com.github.ambry.config.Config) Set(java.util.Set) ClusterMap(com.github.ambry.clustermap.ClusterMap) ServerConfig(com.github.ambry.config.ServerConfig) Utils(com.github.ambry.utils.Utils) IOException(java.io.IOException) BlobIdFactory(com.github.ambry.commons.BlobIdFactory) Collectors(java.util.stream.Collectors) File(java.io.File) Executors(java.util.concurrent.Executors) AtomicLong(java.util.concurrent.atomic.AtomicLong) ToolUtils(com.github.ambry.tools.util.ToolUtils) List(java.util.List) Throttler(com.github.ambry.utils.Throttler) TreeMap(java.util.TreeMap) ClusterMapConfig(com.github.ambry.config.ClusterMapConfig) Timer(com.codahale.metrics.Timer) Collections(java.util.Collections) BlobId(com.github.ambry.commons.BlobId) SortedMap(java.util.SortedMap)
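
In the VerifyDataNode branch, per-replica results are reduced to a single process exit code by keeping the maximum code seen. A minimal sketch of that reduction with hypothetical inputs (accumulateAndGet is an equivalent, slightly tidier form of the Math.max merge above):

import java.util.Map;
import java.util.concurrent.atomic.AtomicInteger;

public class ExitCodeMerge {
    public static void main(String[] args) {
        AtomicInteger exitCode = new AtomicInteger(0);
        // Hypothetical per-replica verification results (0 = clean).
        Map<String, Integer> perReplicaCodes = Map.of("replica-1", 0, "replica-2", 5);
        perReplicaCodes.forEach((replica, code) -> exitCode.accumulateAndGet(code, Math::max));
        System.exit(exitCode.get()); // exits with the worst code seen, here 5
    }
}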

Example 60 with StoreConfig

Use of com.github.ambry.config.StoreConfig in project ambry by linkedin.

The class IndexReadPerformance, method main.

public static void main(String[] args) {
    try {
        OptionParser parser = new OptionParser();
        ArgumentAcceptingOptionSpec<String> logToReadOpt = parser.accepts("logToRead", "The log that needs to be replayed for traffic").withRequiredArg().describedAs("log_to_read").ofType(String.class);
        ArgumentAcceptingOptionSpec<String> hardwareLayoutOpt = parser.accepts("hardwareLayout", "The path of the hardware layout file").withRequiredArg().describedAs("hardware_layout").ofType(String.class);
        ArgumentAcceptingOptionSpec<String> partitionLayoutOpt = parser.accepts("partitionLayout", "The path of the partition layout file").withRequiredArg().describedAs("partition_layout").ofType(String.class);
        ArgumentAcceptingOptionSpec<Integer> numberOfReadersOpt = parser.accepts("numberOfReaders", "The number of readers that read to a random index concurrently").withRequiredArg().describedAs("The number of readers").ofType(Integer.class).defaultsTo(4);
        ArgumentAcceptingOptionSpec<Integer> readsPerSecondOpt = parser.accepts("readsPerSecond", "The rate at which reads need to be performed").withRequiredArg().describedAs("The number of reads per second").ofType(Integer.class).defaultsTo(1000);
        ArgumentAcceptingOptionSpec<Boolean> verboseLoggingOpt = parser.accepts("enableVerboseLogging", "Enables verbose logging").withOptionalArg().describedAs("Enable verbose logging").ofType(Boolean.class).defaultsTo(false);
        OptionSet options = parser.parse(args);
        ArrayList<OptionSpec> listOpt = new ArrayList<>();
        listOpt.add(logToReadOpt);
        listOpt.add(hardwareLayoutOpt);
        listOpt.add(partitionLayoutOpt);
        ToolUtils.ensureOrExit(listOpt, options, parser);
        String logToRead = options.valueOf(logToReadOpt);
        int numberOfReaders = options.valueOf(numberOfReadersOpt);
        int readsPerSecond = options.valueOf(readsPerSecondOpt);
        boolean enableVerboseLogging = options.has(verboseLoggingOpt);
        if (enableVerboseLogging) {
            System.out.println("Enabled verbose logging");
        }
        String hardwareLayoutPath = options.valueOf(hardwareLayoutOpt);
        String partitionLayoutPath = options.valueOf(partitionLayoutOpt);
        ClusterMapConfig clusterMapConfig = new ClusterMapConfig(new VerifiableProperties(new Properties()));
        ClusterMap map = ((ClusterAgentsFactory) Utils.getObj(clusterMapConfig.clusterMapClusterAgentsFactory, clusterMapConfig, hardwareLayoutPath, partitionLayoutPath)).getClusterMap();
        StoreKeyFactory factory = new BlobIdFactory(map);
        // Read the log and get the index directories and create the indexes
        final BufferedReader br = new BufferedReader(new FileReader(logToRead));
        final HashMap<String, IndexPayload> hashes = new HashMap<String, IndexPayload>();
        String line;
        MetricRegistry metricRegistry = new MetricRegistry();
        StoreMetrics metrics = new StoreMetrics(metricRegistry);
        ScheduledExecutorService s = Utils.newScheduler(numberOfReaders, "index", true);
        DiskSpaceAllocator diskSpaceAllocator = new DiskSpaceAllocator(false, null, 0, new StorageManagerMetrics(metricRegistry));
        Properties props = new Properties();
        props.setProperty("store.index.memory.size.bytes", "1048576");
        props.setProperty("store.segment.size.in.bytes", "1000");
        StoreConfig config = new StoreConfig(new VerifiableProperties(props));
        Log log = new Log(System.getProperty("user.dir"), 1000, diskSpaceAllocator, config, metrics, null);
        final AtomicLong totalTimeTaken = new AtomicLong(0);
        final AtomicLong totalReads = new AtomicLong(0);
        final CountDownLatch latch = new CountDownLatch(numberOfReaders);
        final AtomicBoolean shutdown = new AtomicBoolean(false);
        // attach shutdown handler to catch control-c
        Runtime.getRuntime().addShutdownHook(new Thread() {

            public void run() {
                try {
                    System.out.println("Shutdown invoked");
                    shutdown.set(true);
                    latch.await();
                    System.out.println("Total reads : " + totalReads.get() + "  Total time taken : " + totalTimeTaken.get() + " Nano Seconds  Average time taken per read " + ((double) totalReads.get() / totalTimeTaken.get()) / SystemTime.NsPerSec + " Seconds");
                } catch (Exception e) {
                    System.out.println("Error while shutting down " + e);
                }
            }
        });
        ScheduledExecutorService scheduleReadLog = Utils.newScheduler(1, true);
        while ((line = br.readLine()) != null) {
            if (line.startsWith("logdir")) {
                String[] logdirs = line.split("-");
                BlobIndexMetrics metricIndex = new BlobIndexMetrics(logdirs[1], s, log, enableVerboseLogging, totalReads, totalTimeTaken, totalReads, config, null, factory);
                hashes.put(logdirs[1], new IndexPayload(metricIndex, new HashSet<String>()));
            } else {
                break;
            }
        }
        // periodically read the next batch of ids (every 2 minutes)
        scheduleReadLog.scheduleAtFixedRate(new Runnable() {

            @Override
            public void run() {
                populateIds(br, hashes);
            }
        }, 0, 120, TimeUnit.SECONDS);
        Throttler throttler = new Throttler(readsPerSecond, 100, true, SystemTime.getInstance());
        Thread[] threadIndexPerf = new Thread[numberOfReaders];
        for (int i = 0; i < numberOfReaders; i++) {
            threadIndexPerf[i] = new Thread(new IndexReadPerfRun(hashes, throttler, shutdown, latch, map));
            threadIndexPerf[i].start();
        }
        for (int i = 0; i < numberOfReaders; i++) {
            threadIndexPerf[i].join();
        }
        br.close();
    } catch (Exception e) {
        System.out.println("Exiting process with exception " + e);
    }
}
Also used : OptionSpec(joptsimple.OptionSpec) ArgumentAcceptingOptionSpec(joptsimple.ArgumentAcceptingOptionSpec) ClusterMap(com.github.ambry.clustermap.ClusterMap) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) Properties(java.util.Properties) VerifiableProperties(com.github.ambry.config.VerifiableProperties) OptionParser(joptsimple.OptionParser) FileReader(java.io.FileReader) ClusterAgentsFactory(com.github.ambry.clustermap.ClusterAgentsFactory) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) HashSet(java.util.HashSet) Throttler(com.github.ambry.utils.Throttler) ScheduledExecutorService(java.util.concurrent.ScheduledExecutorService) MetricRegistry(com.codahale.metrics.MetricRegistry) CountDownLatch(java.util.concurrent.CountDownLatch) ClusterMapConfig(com.github.ambry.config.ClusterMapConfig) BlobIdFactory(com.github.ambry.commons.BlobIdFactory) AtomicLong(java.util.concurrent.atomic.AtomicLong) BufferedReader(java.io.BufferedReader) StoreConfig(com.github.ambry.config.StoreConfig) OptionSet(joptsimple.OptionSet)
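
Each reader thread paces itself with the Throttler constructed above; a hedged sketch of one reader's loop (readOneIndexEntry is hypothetical, and maybeThrottle is assumed to be Throttler's blocking pacing call):

void readLoop(Throttler throttler, AtomicBoolean shutdown) throws InterruptedException {
    while (!shutdown.get()) {
        readOneIndexEntry();        // hypothetical unit of read work
        throttler.maybeThrottle(1); // assumed API: sleeps as needed to hold the target rate
    }
}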

Aggregations

StoreConfig (com.github.ambry.config.StoreConfig): 60 usages
VerifiableProperties (com.github.ambry.config.VerifiableProperties): 50 usages
MetricRegistry (com.codahale.metrics.MetricRegistry): 34 usages
Test (org.junit.Test): 29 usages
File (java.io.File): 18 usages
ClusterMapConfig (com.github.ambry.config.ClusterMapConfig): 17 usages
ArrayList (java.util.ArrayList): 15 usages
Properties (java.util.Properties): 15 usages
ClusterMap (com.github.ambry.clustermap.ClusterMap): 10 usages
BlobIdFactory (com.github.ambry.commons.BlobIdFactory): 9 usages
InMemAccountService (com.github.ambry.account.InMemAccountService): 8 usages
DataNodeId (com.github.ambry.clustermap.DataNodeId): 8 usages
CountDownLatch (java.util.concurrent.CountDownLatch): 8 usages
MockTime (com.github.ambry.utils.MockTime): 7 usages
HashSet (java.util.HashSet): 7 usages
ClusterAgentsFactory (com.github.ambry.clustermap.ClusterAgentsFactory): 6 usages
ReplicaId (com.github.ambry.clustermap.ReplicaId): 6 usages
ReplicaStatusDelegate (com.github.ambry.clustermap.ReplicaStatusDelegate): 6 usages
DiskManagerConfig (com.github.ambry.config.DiskManagerConfig): 6 usages
ReplicationConfig (com.github.ambry.config.ReplicationConfig): 6 usages