Example 91 with ClusterMapConfig

Use of com.github.ambry.config.ClusterMapConfig in project ambry by LinkedIn.

The class PartitionManager, method main.

public static void main(String[] args) {
    try {
        OptionParser parser = new OptionParser();
        ArgumentAcceptingOptionSpec<String> operationTypeOpt = parser.accepts("operationType", " REQUIRED: The type of operation to perform on the partition. Currently supported" + " operations are 'AddPartition', 'AddReplicas'").withRequiredArg().describedAs("operation_type").ofType(String.class);
        ArgumentAcceptingOptionSpec<String> hardwareLayoutPathOpt = parser.accepts("hardwareLayoutPath", " REQUIRED: The path to the hardware layout map").withRequiredArg().describedAs("hardware_layout_path").ofType(String.class);
        ArgumentAcceptingOptionSpec<String> partitionLayoutPathOpt = parser.accepts("partitionLayoutPath", "REQUIRED: The path to the partition layout map. If outputPartitionLayoutPath is not defined, " + "this file is updated with the new partitions").withRequiredArg().describedAs("partition_layout_path").ofType(String.class);
        ArgumentAcceptingOptionSpec<String> serverPropsFilePathOpt = parser.accepts("serverPropsFilePath", "REQUIRED: The path to server properties file").withRequiredArg().describedAs("server_props_file_path").ofType(String.class);
        ArgumentAcceptingOptionSpec<String> outputPartitionLayoutPathOpt = parser.accepts("outputPartitionLayoutPath", "The path to the output partition layout map. The file is updated with the new partitions").withOptionalArg().describedAs("output_partition_layout_path").ofType(String.class);
        ArgumentAcceptingOptionSpec<Integer> numberOfPartitionsOpt = parser.accepts("numberOfPartitionsToAdd", "The number of partitions to add").withOptionalArg().ofType(Integer.class);
        ArgumentAcceptingOptionSpec<Integer> numberOfReplicasPerDatacenterOpt = parser.accepts("numberOfReplicasPerDatacenter", "The number of replicas for the partition per datacenter when adding partitions").withOptionalArg().ofType(Integer.class);
        ArgumentAcceptingOptionSpec<Long> replicaCapacityInBytesOpt = parser.accepts("replicaCapacityInBytes", "The capacity of each replica in bytes for the partitions to add").withOptionalArg().ofType(Long.class);
        ArgumentAcceptingOptionSpec<String> partitionIdsToAddReplicasToOpt = parser.accepts("partitionIdToAddReplicasTo", "The partitionIds to add replicas to. This can either take a " + "comma separated list of partitions to add replicas to or '.' to add replicas to all partitions in " + "the partitionLayout ").withOptionalArg().ofType(String.class);
        ArgumentAcceptingOptionSpec<String> datacenterToAddReplicasToOpt = parser.accepts("datacenterToAddReplicasTo", "The data center to which replicas need to be added to").withOptionalArg().ofType(String.class);
        String attemptNonRackAwareOnFailureFlag = "attemptNonRackAwareOnFailure";
        parser.accepts(attemptNonRackAwareOnFailureFlag, "If a rack-aware partition allocation cannot be found, attempt a non rack-aware one");
        OptionSet options = parser.parse(args);
        ArrayList<OptionSpec> listOpt = new ArrayList<OptionSpec>();
        listOpt.add(hardwareLayoutPathOpt);
        listOpt.add(operationTypeOpt);
        listOpt.add(serverPropsFilePathOpt);
        ToolUtils.ensureOrExit(listOpt, options, parser);
        String hardwareLayoutPath = options.valueOf(hardwareLayoutPathOpt);
        String partitionLayoutPath = options.valueOf(partitionLayoutPathOpt);
        String serverPropsFilePath = options.valueOf(serverPropsFilePathOpt);
        String outputPartitionLayoutPath = options.has(outputPartitionLayoutPathOpt) ? options.valueOf(outputPartitionLayoutPathOpt) : partitionLayoutPath;
        String operationType = options.valueOf(operationTypeOpt);
        boolean attemptNonRackAwareOnFailure = options.has(attemptNonRackAwareOnFailureFlag);
        String fileString = null;
        try {
            fileString = Utils.readStringFromFile(partitionLayoutPath);
        } catch (FileNotFoundException e) {
            System.out.println("Partition layout path not found. Creating new file");
        }
        StaticClusterManager manager = null;
        Properties properties = Utils.loadProps(serverPropsFilePath);
        ClusterMapConfig clusterMapConfig = new ClusterMapConfig(new VerifiableProperties(properties));
        if (fileString == null) {
            manager = (new StaticClusterAgentsFactory(clusterMapConfig, new PartitionLayout(new HardwareLayout(new JSONObject(Utils.readStringFromFile(hardwareLayoutPath)), clusterMapConfig), clusterMapConfig))).getClusterMap();
        } else {
            manager = (new StaticClusterAgentsFactory(clusterMapConfig, hardwareLayoutPath, partitionLayoutPath)).getClusterMap();
        }
        if (operationType.compareToIgnoreCase("AddPartition") == 0) {
            listOpt.add(numberOfPartitionsOpt);
            listOpt.add(numberOfReplicasPerDatacenterOpt);
            listOpt.add(replicaCapacityInBytesOpt);
            ToolUtils.ensureOrExit(listOpt, options, parser);
            int numberOfPartitions = options.valueOf(numberOfPartitionsOpt);
            int numberOfReplicas = options.valueOf(numberOfReplicasPerDatacenterOpt);
            long replicaCapacityInBytes = options.valueOf(replicaCapacityInBytesOpt);
            manager.allocatePartitions(numberOfPartitions, clusterMapConfig.clusterMapDefaultPartitionClass, numberOfReplicas, replicaCapacityInBytes, attemptNonRackAwareOnFailure);
        } else if (operationType.compareToIgnoreCase("AddReplicas") == 0) {
            listOpt.add(partitionIdsToAddReplicasToOpt);
            listOpt.add(datacenterToAddReplicasToOpt);
            listOpt.add(partitionLayoutPathOpt);
            ToolUtils.ensureOrExit(listOpt, options, parser);
            String partitionIdsToAddReplicas = options.valueOf(partitionIdsToAddReplicasToOpt);
            String datacenterToAddReplicasTo = options.valueOf(datacenterToAddReplicasToOpt);
            if (partitionIdsToAddReplicas.compareToIgnoreCase(".") == 0) {
                for (PartitionId partitionId : manager.getAllPartitionIds(null)) {
                    manager.addReplicas(partitionId, datacenterToAddReplicasTo, attemptNonRackAwareOnFailure);
                }
            } else {
                String[] partitionIds = partitionIdsToAddReplicas.split(",");
                for (String partitionId : partitionIds) {
                    for (PartitionId partitionInCluster : manager.getAllPartitionIds(null)) {
                        if (partitionInCluster.isEqual(partitionId)) {
                            manager.addReplicas(partitionInCluster, datacenterToAddReplicasTo, attemptNonRackAwareOnFailure);
                        }
                    }
                }
            }
        }
        manager.persist(hardwareLayoutPath, outputPartitionLayoutPath);
    } catch (Exception e) {
        System.out.println("Error while executing partition command " + e);
    }
}
Also used : ArgumentAcceptingOptionSpec(joptsimple.ArgumentAcceptingOptionSpec) OptionSpec(joptsimple.OptionSpec) ArrayList(java.util.ArrayList) FileNotFoundException(java.io.FileNotFoundException) Properties(java.util.Properties) VerifiableProperties(com.github.ambry.config.VerifiableProperties) OptionParser(joptsimple.OptionParser) ClusterMapConfig(com.github.ambry.config.ClusterMapConfig) JSONObject(org.json.JSONObject) OptionSet(joptsimple.OptionSet)
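
As a point of reference, here is a minimal sketch of the server properties file this tool loads through serverPropsFilePath. ClusterMapConfig pulls its settings from "clustermap.*" keys in the wrapped VerifiableProperties; the key names below assume Ambry's usual naming, and the values are placeholders rather than a known-good configuration.

// Minimal sketch (not taken from the Ambry sources) of building the
// ClusterMapConfig used above. Key names assume Ambry's "clustermap.*"
// naming convention; values are placeholders.
Properties props = new Properties();
props.setProperty("clustermap.cluster.name", "test-cluster");
props.setProperty("clustermap.datacenter.name", "DC1");
props.setProperty("clustermap.host.name", "localhost");
ClusterMapConfig clusterMapConfig = new ClusterMapConfig(new VerifiableProperties(props));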

Example 92 with ClusterMapConfig

Use of com.github.ambry.config.ClusterMapConfig in project ambry by LinkedIn.

The class CompactionVerifier, method main.

/**
 * Main function to trigger the verifier.
 * @param args CLI arguments
 * @throws Exception if the verifier encountered problems.
 */
public static void main(String[] args) throws Exception {
    VerifiableProperties verifiableProperties = ToolUtils.getVerifiableProperties(args);
    CompactionVerifierConfig verifierConfig = new CompactionVerifierConfig(verifiableProperties);
    StoreConfig storeConfig = new StoreConfig(verifiableProperties);
    assert !storeConfig.storeEnableHardDelete : "Hard delete cannot be enabled in the properties";
    ClusterMapConfig clusterMapConfig = new ClusterMapConfig(verifiableProperties);
    ClusterMap clusterMap = new StaticClusterAgentsFactory(clusterMapConfig, verifierConfig.hardwareLayoutFilePath, verifierConfig.partitionLayoutFilePath).getClusterMap();
    StoreKeyFactory storeKeyFactory = Utils.getObj(storeConfig.storeKeyFactory, clusterMap);
    try (CompactionVerifier compactionVerifier = new CompactionVerifier(verifierConfig, storeConfig, storeKeyFactory)) {
        compactionVerifier.verifyCompaction();
    }
    LOGGER.info("Verification completed successfully");
}
Also used : ClusterMap(com.github.ambry.clustermap.ClusterMap) StaticClusterAgentsFactory(com.github.ambry.clustermap.StaticClusterAgentsFactory) VerifiableProperties(com.github.ambry.config.VerifiableProperties) StoreConfig(com.github.ambry.config.StoreConfig) ClusterMapConfig(com.github.ambry.config.ClusterMapConfig)
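
One caveat on the snippet above: the assert on storeConfig.storeEnableHardDelete only fires when the JVM runs with -ea. A sketch of an always-on guard, as an illustrative alternative rather than how the Ambry source writes it:

// Always-on replacement for the assert above (illustrative only).
if (storeConfig.storeEnableHardDelete) {
    throw new IllegalStateException("Hard delete cannot be enabled in the properties");
}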

Example 93 with ClusterMapConfig

Use of com.github.ambry.config.ClusterMapConfig in project ambry by LinkedIn.

The class DiskReformatter, method main.

public static void main(String[] args) throws Exception {
    VerifiableProperties properties = ToolUtils.getVerifiableProperties(args);
    DiskReformatterConfig config = new DiskReformatterConfig(properties);
    StoreConfig storeConfig = new StoreConfig(properties);
    ClusterMapConfig clusterMapConfig = new ClusterMapConfig(properties);
    ServerConfig serverConfig = new ServerConfig(properties);
    ClusterAgentsFactory clusterAgentsFactory = Utils.getObj(clusterMapConfig.clusterMapClusterAgentsFactory, clusterMapConfig, config.hardwareLayoutFilePath, config.partitionLayoutFilePath);
    try (ClusterMap clusterMap = clusterAgentsFactory.getClusterMap()) {
        StoreKeyConverterFactory storeKeyConverterFactory = Utils.getObj(serverConfig.serverStoreKeyConverterFactory, properties, clusterMap.getMetricRegistry());
        StoreKeyFactory storeKeyFactory = Utils.getObj(storeConfig.storeKeyFactory, clusterMap);
        DataNodeId dataNodeId = clusterMap.getDataNodeId(config.datanodeHostname, config.datanodePort);
        if (dataNodeId == null) {
            throw new IllegalArgumentException("Did not find node in clustermap with hostname:port - " + config.datanodeHostname + ":" + config.datanodePort);
        }
        DiskReformatter reformatter = new DiskReformatter(dataNodeId, Collections.EMPTY_LIST, config.fetchSizeInBytes, storeConfig, storeKeyFactory, clusterMap, SystemTime.getInstance(), storeKeyConverterFactory.getStoreKeyConverter());
        AtomicInteger exitStatus = new AtomicInteger(0);
        CountDownLatch latch = new CountDownLatch(config.diskMountPaths.length);
        for (int i = 0; i < config.diskMountPaths.length; i++) {
            int finalI = i;
            Runnable runnable = () -> {
                try {
                    reformatter.reformat(config.diskMountPaths[finalI], new File(config.scratchPaths[finalI]));
                    latch.countDown();
                } catch (Exception e) {
                    throw new IllegalStateException(e);
                }
            };
            Thread thread = Utils.newThread(config.diskMountPaths[finalI] + "-reformatter", runnable, true);
            thread.setUncaughtExceptionHandler((t, e) -> {
                exitStatus.set(1);
                logger.error("Reformatting {} failed", config.diskMountPaths[finalI], e);
                latch.countDown();
            });
            thread.start();
        }
        latch.await();
        System.exit(exitStatus.get());
    }
}
Also used : ClusterMap(com.github.ambry.clustermap.ClusterMap) VerifiableProperties(com.github.ambry.config.VerifiableProperties) CountDownLatch(java.util.concurrent.CountDownLatch) ClusterMapConfig(com.github.ambry.config.ClusterMapConfig) ServerConfig(com.github.ambry.config.ServerConfig) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) StoreConfig(com.github.ambry.config.StoreConfig) ClusterAgentsFactory(com.github.ambry.clustermap.ClusterAgentsFactory) DataNodeId(com.github.ambry.clustermap.DataNodeId) File(java.io.File)
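
This tool and the ones that follow bootstrap the cluster map with Utils.getObj(clusterMapConfig.clusterMapClusterAgentsFactory, ...), which reflectively instantiates whatever ClusterAgentsFactory implementation the configuration names. A simplified sketch of that idea; Ambry's real Utils.getObj does more flexible constructor matching, whereas this version requires the declared parameter types to equal the runtime argument classes exactly:

// Illustrative reflection-based instantiation, assuming a public constructor
// whose declared parameter types equal the runtime classes of the arguments.
static <T> T newInstance(String className, Object... args) throws Exception {
    Class<?>[] argTypes = new Class<?>[args.length];
    for (int i = 0; i < args.length; i++) {
        argTypes[i] = args[i].getClass();
    }
    @SuppressWarnings("unchecked")
    T instance = (T) Class.forName(className).getConstructor(argTypes).newInstance(args);
    return instance;
}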

Example 94 with ClusterMapConfig

Use of com.github.ambry.config.ClusterMapConfig in project ambry by LinkedIn.

The class DumpIndexTool, method main.

public static void main(String[] args) throws Exception {
    final AtomicInteger exitCode = new AtomicInteger(0);
    VerifiableProperties verifiableProperties = ToolUtils.getVerifiableProperties(args);
    DumpIndexToolConfig config = new DumpIndexToolConfig(verifiableProperties);
    ClusterMapConfig clusterMapConfig = new ClusterMapConfig(verifiableProperties);
    try (ClusterMap clusterMap = ((ClusterAgentsFactory) Utils.getObj(clusterMapConfig.clusterMapClusterAgentsFactory, clusterMapConfig, config.hardwareLayoutFilePath, config.partitionLayoutFilePath)).getClusterMap()) {
        StoreConfig storeConfig = new StoreConfig(verifiableProperties);
        // this tool supports only blob IDs. It can become generic if StoreKeyFactory provides a deserFromString method.
        BlobIdFactory blobIdFactory = new BlobIdFactory(clusterMap);
        StoreToolsMetrics metrics = new StoreToolsMetrics(clusterMap.getMetricRegistry());
        StoreMetrics storeMetrics = new StoreMetrics("DumpIndexTool", clusterMap.getMetricRegistry());
        ServerConfig serverConfig = new ServerConfig(verifiableProperties);
        Time time = SystemTime.getInstance();
        Throttler throttler = new Throttler(config.indexEntriesToProcessPerSec, 1000, true, time);
        StoreKeyConverterFactory storeKeyConverterFactory = Utils.getObj(serverConfig.serverStoreKeyConverterFactory, verifiableProperties, clusterMap.getMetricRegistry());
        DumpIndexTool dumpIndexTool = new DumpIndexTool(blobIdFactory, storeConfig, time, metrics, storeMetrics, throttler, storeKeyConverterFactory.getStoreKeyConverter());
        Set<StoreKey> filterKeySet = new HashSet<>();
        for (String key : config.filterSet) {
            filterKeySet.add(new BlobId(key, clusterMap));
        }
        switch(config.typeOfOperation) {
            case DumpIndex:
                dumpIndex(dumpIndexTool, config.pathOfInput, filterKeySet);
                break;
            case DumpIndexSegment:
                dumpIndexSegment(dumpIndexTool, config.pathOfInput, filterKeySet);
                break;
            case VerifyIndex:
                IndexProcessingResults results = dumpIndexTool.processIndex(config.pathOfInput, filterKeySet, time.milliseconds(), config.detectDuplicatesAcrossKeys);
                exitCode.set(reportVerificationResults(config.pathOfInput, results, config.failIfCraftedIdsPresent));
                break;
            case VerifyDataNode:
                DataNodeId dataNodeId = clusterMap.getDataNodeId(config.hostname, config.port);
                if (dataNodeId == null) {
                    logger.error("No data node corresponding to {}:{}", config.hostname, config.port);
                } else {
                    Set<File> replicaDirs = clusterMap.getReplicaIds(dataNodeId).stream().map(replicaId -> new File(replicaId.getMountPath())).collect(Collectors.toSet());
                    Map<File, IndexProcessingResults> resultsByReplica = dumpIndexTool.processIndex(replicaDirs, filterKeySet, config.parallelism, config.detectDuplicatesAcrossKeys);
                    replicaDirs.removeAll(resultsByReplica.keySet());
                    if (replicaDirs.size() != 0) {
                        logger.error("Results obtained missing {}", replicaDirs);
                        exitCode.set(5);
                    } else {
                        resultsByReplica.forEach((replicaDir, result) -> exitCode.set(Math.max(exitCode.get(), reportVerificationResults(replicaDir, result, config.failIfCraftedIdsPresent))));
                    }
                }
                break;
            default:
                throw new IllegalArgumentException("Unrecognized operation: " + config.typeOfOperation);
        }
    }
    System.exit(exitCode.get());
}
Also used : Arrays(java.util.Arrays) Default(com.github.ambry.config.Default) DataNodeId(com.github.ambry.clustermap.DataNodeId) LoggerFactory(org.slf4j.LoggerFactory) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) ConcurrentMap(java.util.concurrent.ConcurrentMap) HashSet(java.util.HashSet) Future(java.util.concurrent.Future) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) Map(java.util.Map) SystemTime(com.github.ambry.utils.SystemTime) Time(com.github.ambry.utils.Time) EnumSet(java.util.EnumSet) ExecutorService(java.util.concurrent.ExecutorService) StoreConfig(com.github.ambry.config.StoreConfig) Logger(org.slf4j.Logger) ClusterAgentsFactory(com.github.ambry.clustermap.ClusterAgentsFactory) VerifiableProperties(com.github.ambry.config.VerifiableProperties) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) Config(com.github.ambry.config.Config) Set(java.util.Set) ClusterMap(com.github.ambry.clustermap.ClusterMap) ServerConfig(com.github.ambry.config.ServerConfig) Utils(com.github.ambry.utils.Utils) IOException(java.io.IOException) BlobIdFactory(com.github.ambry.commons.BlobIdFactory) Collectors(java.util.stream.Collectors) File(java.io.File) Executors(java.util.concurrent.Executors) AtomicLong(java.util.concurrent.atomic.AtomicLong) ToolUtils(com.github.ambry.tools.util.ToolUtils) List(java.util.List) Throttler(com.github.ambry.utils.Throttler) TreeMap(java.util.TreeMap) ClusterMapConfig(com.github.ambry.config.ClusterMapConfig) Timer(com.codahale.metrics.Timer) Collections(java.util.Collections) BlobId(com.github.ambry.commons.BlobId) SortedMap(java.util.SortedMap)
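
A small note on the VerifyDataNode branch: exitCode.set(Math.max(exitCode.get(), ...)) is a separate read and write, which is safe here because Map.forEach runs sequentially, but AtomicInteger offers the same fold as a single call. A sketch using the names from the example:

// Atomic max-fold over the per-replica verification results (same variables
// as in the example above).
resultsByReplica.forEach((replicaDir, result) ->
    exitCode.accumulateAndGet(reportVerificationResults(replicaDir, result, config.failIfCraftedIdsPresent), Math::max));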

Example 95 with ClusterMapConfig

Use of com.github.ambry.config.ClusterMapConfig in project ambry by LinkedIn.

The class SafeServerShutdownTool, method main.

public static void main(String[] args) throws Exception {
    VerifiableProperties verifiableProperties = ToolUtils.getVerifiableProperties(args);
    SafeServerShutdownToolConfig config = new SafeServerShutdownToolConfig(verifiableProperties);
    ClusterMapConfig clusterMapConfig = new ClusterMapConfig(verifiableProperties);
    try (ClusterMap clusterMap = ((ClusterAgentsFactory) Utils.getObj(clusterMapConfig.clusterMapClusterAgentsFactory, clusterMapConfig, config.hardwareLayoutFilePath, config.partitionLayoutFilePath)).getClusterMap()) {
        SSLFactory sslFactory = !clusterMapConfig.clusterMapSslEnabledDatacenters.isEmpty() ? SSLFactory.getNewInstance(new SSLConfig(verifiableProperties)) : null;
        try (ServerAdminTool serverAdminTool = new ServerAdminTool(clusterMap, sslFactory, verifiableProperties)) {
            DataNodeId dataNodeId = clusterMap.getDataNodeId(config.hostname, config.port);
            if (dataNodeId == null) {
                throw new IllegalArgumentException("Could not find a data node corresponding to " + config.hostname + ":" + config.port);
            }
            SafeServerShutdownTool safeServerShutdownTool = new SafeServerShutdownTool(serverAdminTool, SystemTime.getInstance());
            int exitStatus = safeServerShutdownTool.prepareServerForShutdown(dataNodeId, config.logGrowthPauseLagThresholdBytes, config.numReplicasCaughtUpPerPartition, config.timeoutSecs, config.checkRepeatDelaySecs) ? 0 : 1;
            System.exit(exitStatus);
        }
    }
}
Also used : ClusterMap(com.github.ambry.clustermap.ClusterMap) SSLConfig(com.github.ambry.config.SSLConfig) SSLFactory(com.github.ambry.commons.SSLFactory) VerifiableProperties(com.github.ambry.config.VerifiableProperties) ClusterAgentsFactory(com.github.ambry.clustermap.ClusterAgentsFactory) DataNodeId(com.github.ambry.clustermap.DataNodeId) ClusterMapConfig(com.github.ambry.config.ClusterMapConfig)
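
The sslFactory branch above is only taken when clustermap.ssl.enabled.datacenters is non-empty. A hypothetical properties sketch that would trigger it; the key names follow Ambry's SSLConfig and ClusterMapConfig conventions as best understood here, and the paths and passwords are placeholders:

// Hypothetical values illustrating the SSL-enabled path: a non-empty
// clustermap.ssl.enabled.datacenters makes the tool build an SSLFactory
// from SSLConfig ("ssl.*" keys). Paths and passwords are placeholders.
Properties props = new Properties();
props.setProperty("clustermap.ssl.enabled.datacenters", "DC1,DC2");
props.setProperty("ssl.keystore.path", "/path/to/keystore.jks");
props.setProperty("ssl.keystore.password", "keystorePassword");
props.setProperty("ssl.key.password", "keyPassword");
props.setProperty("ssl.truststore.path", "/path/to/truststore.jks");
props.setProperty("ssl.truststore.password", "truststorePassword");
SSLFactory sslFactory = SSLFactory.getNewInstance(new SSLConfig(new VerifiableProperties(props)));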

Aggregations

ClusterMapConfig (com.github.ambry.config.ClusterMapConfig) 100
VerifiableProperties (com.github.ambry.config.VerifiableProperties) 81
Test (org.junit.Test) 56
Properties (java.util.Properties) 52
MetricRegistry (com.codahale.metrics.MetricRegistry) 47
ArrayList (java.util.ArrayList) 31
IOException (java.io.IOException) 26
HashSet (java.util.HashSet) 25
JSONObject (org.json.JSONObject) 25
File (java.io.File) 24
ClusterMap (com.github.ambry.clustermap.ClusterMap) 23
HashMap (java.util.HashMap) 21
MockClusterMap (com.github.ambry.clustermap.MockClusterMap) 19
ClusterAgentsFactory (com.github.ambry.clustermap.ClusterAgentsFactory) 18
DataNodeId (com.github.ambry.clustermap.DataNodeId) 18
StoreConfig (com.github.ambry.config.StoreConfig) 18
ReplicaId (com.github.ambry.clustermap.ReplicaId) 16
List (java.util.List) 16
Map (java.util.Map) 16
CountDownLatch (java.util.concurrent.CountDownLatch) 16