Search in sources:

Example 6 with ArgumentAcceptingOptionSpec

use of joptsimple.ArgumentAcceptingOptionSpec in project ambry by linkedin.

The following example shows the main method of the HardDeleteVerifier class.

/**
 * Command-line entry point for the hard-delete verifier tool.
 * <p>
 * Parses the hardware/partition layout paths, the data directory to verify, an optional
 * pre-hard-delete data directory for comparison, and an output file; builds a {@code ClusterMap}
 * and runs {@link HardDeleteVerifier#verifyHardDeletes()}.
 * <p>
 * Exits with a non-zero status on any failure so that wrapping scripts can detect errors.
 */
public static void main(String[] args) {
    try {
        OptionParser parser = new OptionParser();
        ArgumentAcceptingOptionSpec<String> hardwareLayoutOpt = parser.accepts("hardwareLayout", "The path of the hardware layout file").withRequiredArg().describedAs("hardware_layout").ofType(String.class);
        ArgumentAcceptingOptionSpec<String> partitionLayoutOpt = parser.accepts("partitionLayout", "The path of the partition layout file").withRequiredArg().describedAs("partition_layout").ofType(String.class);
        ArgumentAcceptingOptionSpec<String> dataDirOpt = parser.accepts("dataDir", "The data directory of the partition/replica that needs to be verified for hard deletes.").withRequiredArg().describedAs("data_dir").ofType(String.class);
        ArgumentAcceptingOptionSpec<String> oldDataDirOpt = parser.accepts("oldDataDir", "[Optional] The data directory of the partition/replica before hard deletes are run for comparison").withOptionalArg().describedAs("old_data_dir").ofType(String.class);
        ArgumentAcceptingOptionSpec<String> outFileOpt = parser.accepts("outFile", "Output file to redirect to ").withRequiredArg().describedAs("outFile").ofType(String.class);
        OptionSet options = parser.parse(args);
        // All options except oldDataDir are mandatory; ensureOrExit prints usage and exits otherwise.
        ArrayList<OptionSpec> requiredOpts = new ArrayList<>();
        requiredOpts.add(hardwareLayoutOpt);
        requiredOpts.add(partitionLayoutOpt);
        requiredOpts.add(dataDirOpt);
        requiredOpts.add(outFileOpt);
        ToolUtils.ensureOrExit(requiredOpts, options, parser);
        String hardwareLayoutPath = options.valueOf(hardwareLayoutOpt);
        String partitionLayoutPath = options.valueOf(partitionLayoutOpt);
        // Build the cluster map from the static layout files using default cluster map config.
        ClusterMapConfig clusterMapConfig = new ClusterMapConfig(new VerifiableProperties(new Properties()));
        ClusterMap map = ((ClusterAgentsFactory) Utils.getObj(clusterMapConfig.clusterMapClusterAgentsFactory, clusterMapConfig, hardwareLayoutPath, partitionLayoutPath)).getClusterMap();
        String dataDir = options.valueOf(dataDirOpt);
        String oldDataDir = options.has(oldDataDirOpt) ? options.valueOf(oldDataDirOpt) : null;
        String outFile = options.valueOf(outFileOpt);
        HardDeleteVerifier hardDeleteVerifier = new HardDeleteVerifier(map, dataDir, oldDataDir, outFile);
        hardDeleteVerifier.verifyHardDeletes();
    } catch (Exception e) {
        // Surface the failure and exit non-zero; previously the tool printed the trace but
        // still terminated with status 0, hiding errors from calling scripts.
        e.printStackTrace();
        System.exit(1);
    }
}
Also used : OptionSpec(joptsimple.OptionSpec) ArgumentAcceptingOptionSpec(joptsimple.ArgumentAcceptingOptionSpec) ClusterMap(com.github.ambry.clustermap.ClusterMap) VerifiableProperties(com.github.ambry.config.VerifiableProperties) ArrayList(java.util.ArrayList) BlobProperties(com.github.ambry.messageformat.BlobProperties) Properties(java.util.Properties) VerifiableProperties(com.github.ambry.config.VerifiableProperties) OptionParser(joptsimple.OptionParser) ClusterMapConfig(com.github.ambry.config.ClusterMapConfig) IOException(java.io.IOException) MessageFormatException(com.github.ambry.messageformat.MessageFormatException) ClusterAgentsFactory(com.github.ambry.clustermap.ClusterAgentsFactory) OptionSet(joptsimple.OptionSet)

Example 7 with ArgumentAcceptingOptionSpec

use of joptsimple.ArgumentAcceptingOptionSpec in project ambry by linkedin.

The following example shows the main method of the HelixBootstrapUpgradeTool class.

/**
 * @param args takes in three mandatory arguments: the hardware layout path, the partition layout path and the zk
 *             layout path.
 *             The Zk layout has to be of the following form:
 *             {
 *               "zkInfo" : [
 *                 {
 *                   "datacenter":"dc1",
 *                   "id": "1",
 *                   "zkConnectStr":"abc.example.com:2199",
 *                 },
 *                 {
 *                   "datacenter":"dc2",
 *                    "id": "2",
 *                   "zkConnectStr":"def.example.com:2300",
 *                 }
 *               ]
 *             }
 *
 *             Also takes in an optional argument that specifies the local datacenter name, so that can be used as
 *             the "reference" datacenter. If none provided, the tool simply chooses one of the datacenters in the
 *             layout as the reference datacenter.
 */
public static void main(String[] args) throws Exception {
    OptionParser parser = new OptionParser();
    // Flag-only (valueless) options selecting the high-level operation or modifying behavior.
    OptionSpec<Void> dropClusterOpt = parser.accepts("dropCluster", "Drops the given Ambry cluster from Helix. Use this option with care. If present, must be accompanied with and " + "only with the clusterName argument");
    OptionSpec<Void> forceRemove = parser.accepts("forceRemove", "Specifies that any instances or partitions absent in the json files be removed from Helix. Use this with care");
    OptionSpec<Void> addStateModel = parser.accepts("addStateModel", "Attempt to add new state model to Helix StateModelDefs if it doesn't exist. This option will not touch instanceConfig");
    // Layout/connect-info paths: required for every operation except --dropCluster.
    ArgumentAcceptingOptionSpec<String> hardwareLayoutPathOpt = parser.accepts("hardwareLayoutPath", "The path to the hardware layout json file").requiredUnless(dropClusterOpt).withRequiredArg().describedAs("hardware_layout_path").ofType(String.class);
    ArgumentAcceptingOptionSpec<String> partitionLayoutPathOpt = parser.accepts("partitionLayoutPath", "The path to the partition layout json file").requiredUnless(dropClusterOpt).withRequiredArg().describedAs("partition_layout_path").ofType(String.class);
    ArgumentAcceptingOptionSpec<String> zkLayoutPathOpt = parser.accepts("zkLayoutPath", "The path to the json file containing zookeeper connect info. This should be of the following form: \n{\n" + "  \"zkInfo\" : [\n" + "     {\n" + "       \"datacenter\":\"dc1\",\n" + "       \"zkConnectStr\":\"abc.example.com:2199\",\n" + "     },\n" + "     {\n" + "       \"datacenter\":\"dc2\",\n" + "       \"zkConnectStr\":\"def.example.com:2300\",\n" + "     },\n" + "     {\n" + "       \"datacenter\":\"dc3\",\n" + "       \"zkConnectStr\":\"ghi.example.com:2400\",\n" + "     }\n" + "  ]\n" + "}").requiredUnless(dropClusterOpt).withRequiredArg().describedAs("zk_connect_info_path").ofType(String.class);
    ArgumentAcceptingOptionSpec<String> clusterNamePrefixOpt = parser.accepts("clusterNamePrefix", "The prefix for the cluster in Helix to bootstrap or upgrade").withRequiredArg().describedAs("cluster_name_prefix").ofType(String.class);
    // clusterName is only meaningful (and then mandatory) together with --dropCluster.
    ArgumentAcceptingOptionSpec<String> clusterNameOpt = parser.accepts("clusterName", "The cluster in Helix to drop. This should accompany the dropCluster option").requiredIf(dropClusterOpt).withRequiredArg().describedAs("cluster_name").ofType(String.class);
    ArgumentAcceptingOptionSpec<String> dcsNameOpt = parser.accepts("dcs", "The comma-separated datacenters (colos) to update. Use '--dcs all' if updates to every datacenter is intended").withRequiredArg().describedAs("datacenters").required().ofType(String.class);
    ArgumentAcceptingOptionSpec<String> maxPartitionsInOneResourceOpt = parser.accepts("maxPartitionsInOneResource", "(Optional argument) The maximum number of partitions that should be grouped under a Helix resource").withRequiredArg().describedAs("max_partitions_in_one_resource").ofType(String.class);
    ArgumentAcceptingOptionSpec<String> stateModelDefinitionOpt = parser.accepts("stateModelDef", "(Optional argument) The state model definition that should be created in cluster if doesn't exist").withRequiredArg().describedAs("state_model_definition").ofType(String.class);
    ArgumentAcceptingOptionSpec<String> adminConfigsOpt = parser.accepts("adminConfigs", "(Optional argument) Upload cluster admin configs to HelixPropertyStore based on json files. Currently, " + "the tool supports (1) partition override config, (2) replica addition config. The config names are " + "comma-separated and case-sensitive, for example: '--adminConfigs PartitionOverride,ReplicaAddition'. " + "This option will not modify instanceConfig and IdealState").withRequiredArg().describedAs("admin_configs").ofType(String.class);
    ArgumentAcceptingOptionSpec<String> adminOperationOpt = parser.accepts("adminOperation", "(Optional argument) Perform admin operations to manage resources in cluster. For example: " + " '--adminOperation UpdateIdealState' # Update IdealState based on static clustermap. This won't change InstanceConfig" + " '--adminOperation DisablePartition' # Disable partition on certain node. Usually used as first step to decommission replica(s)" + " '--adminOperation EnablePartition' # Enable partition on certain node (if partition is previously disabled)" + " '--adminOperation ResetPartition' # Reset partition on certain node (if partition is previously in error state)" + " '--adminOperation ListSealedPartition' # List all sealed partitions in Helix cluster (aggregated across all datacenters)" + " '--adminOperation ValidateCluster' # Validates the information in static clustermap is consistent with the information in Helix" + " '--adminOperation MigrateToPropertyStore' # Migrate custom instance config properties to DataNodeConfigs in the property store" + " '--adminOperation BootstrapCluster' # (Default operation if not specified) Bootstrap cluster based on static clustermap").withRequiredArg().describedAs("admin_operation").ofType(String.class);
    // Target selectors used by the partition-control operations (Enable/Disable/ResetPartition).
    ArgumentAcceptingOptionSpec<String> hostnameOpt = parser.accepts("hostname", "(Optional argument and is always accompanied with partition control operations, i.e EnablePartition, " + "DisablePartition) The host on which admin operation should be performed").withRequiredArg().describedAs("hostname").ofType(String.class);
    ArgumentAcceptingOptionSpec<String> portOpt = parser.accepts("port", "(Optional argument and is always accompanied with partition control operations, i.e EnablePartition, " + "DisablePartition) The port number associated with the host on which admin operation should be performed." + "If not specified, the tool attempts to find host from static clustermap by searching hostname.").withRequiredArg().describedAs("port").ofType(String.class);
    ArgumentAcceptingOptionSpec<String> partitionIdOpt = parser.accepts("partition", "(Optional argument and is always accompanied with partition control operations, i.e EnablePartition, " + "DisablePartition) The partition on which admin operation should be performed").withRequiredArg().describedAs("partition").ofType(String.class);
    OptionSpecBuilder dryRun = parser.accepts("dryRun", "(Optional argument) Dry run, do not modify the cluster map in Helix.");
    OptionSpecBuilder enableValidatingClusterManager = parser.accepts("enableVCM", "(Optional argument) whether to enable validating cluster manager(VCM) in Helix bootstrap tool.");
    ArgumentAcceptingOptionSpec<String> adminConfigFilePathOpt = parser.accepts("adminConfigFilePath", "The path to a static admin config file. For example, it can be a file that holds a list of partitions" + "(comma separated) that should be overridden to ReadOnly").withRequiredArg().describedAs("admin_config_file_path").ofType(String.class);
    ArgumentAcceptingOptionSpec<String> dataNodeConfigSourceOpt = parser.accepts("dataNodeConfigSource", "(Optional argument) The type of data node config source (default is PROPERTY_STORE). " + "See DataNodeConfigSourceType enum for more details.").withRequiredArg().describedAs("data_node_config_source").ofType(String.class);
    OptionSpecBuilder overrideReplicaStatus = parser.accepts("overrideReplicaStatus", "(Optional argument) whether to override replica status lists (i.e. sealed/stopped/disabled lists) in instance(datanode) config");
    // Parse the command line and extract raw option values (null when absent).
    OptionSet options = parser.parse(args);
    String hardwareLayoutPath = options.valueOf(hardwareLayoutPathOpt);
    String partitionLayoutPath = options.valueOf(partitionLayoutPathOpt);
    String adminConfigFilePath = options.valueOf(adminConfigFilePathOpt);
    String zkLayoutPath = options.valueOf(zkLayoutPathOpt);
    String clusterNamePrefix = options.valueOf(clusterNamePrefixOpt);
    String clusterName = options.valueOf(clusterNameOpt);
    String dcs = options.valueOf(dcsNameOpt);
    String adminConfigStr = options.valueOf(adminConfigsOpt);
    String adminOpStr = options.valueOf(adminOperationOpt);
    String hostname = options.valueOf(hostnameOpt);
    String partitionName = options.valueOf(partitionIdOpt);
    String portStr = options.valueOf(portOpt);
    // Apply defaults for the optional numeric / enum-valued options.
    int maxPartitionsInOneResource = options.valueOf(maxPartitionsInOneResourceOpt) == null ? DEFAULT_MAX_PARTITIONS_PER_RESOURCE : Integer.parseInt(options.valueOf(maxPartitionsInOneResourceOpt));
    String stateModelDef = options.valueOf(stateModelDefinitionOpt) == null ? ClusterMapConfig.DEFAULT_STATE_MODEL_DEF : options.valueOf(stateModelDefinitionOpt);
    DataNodeConfigSourceType dataNodeConfigSourceType = options.valueOf(dataNodeConfigSourceOpt) == null ? DataNodeConfigSourceType.PROPERTY_STORE : DataNodeConfigSourceType.valueOf(options.valueOf(dataNodeConfigSourceOpt));
    // Base set of options required by most operations; individual branches may add to it
    // before validating with ensureOrExit (which prints usage and exits when one is missing).
    ArrayList<OptionSpec> listOpt = new ArrayList<>();
    listOpt.add(hardwareLayoutPathOpt);
    listOpt.add(partitionLayoutPathOpt);
    listOpt.add(zkLayoutPathOpt);
    listOpt.add(clusterNamePrefixOpt);
    listOpt.add(dcsNameOpt);
    if (options.has(dropClusterOpt)) {
        // --dropCluster must be given exactly these options, nothing more.
        List<OptionSpec<?>> expectedOpts = Arrays.asList(dropClusterOpt, clusterNameOpt, zkLayoutPathOpt, dcsNameOpt);
        ToolUtils.ensureExactOrExit(expectedOpts, options.specs(), parser);
        HelixBootstrapUpgradeUtil.dropCluster(zkLayoutPath, clusterName, dcs, new HelixAdminFactory());
    } else if (adminConfigStr != null) {
        // --adminConfigs: upload (or delete, with --forceRemove) admin configs to the property store.
        listOpt.add(adminConfigsOpt);
        ToolUtils.ensureOrExit(listOpt, options, parser);
        // Strip all whitespace, then split the comma-separated, case-sensitive config names.
        String[] adminTypes = adminConfigStr.replaceAll("\\p{Space}", "").split(",");
        HelixBootstrapUpgradeUtil.uploadOrDeleteAdminConfigs(hardwareLayoutPath, partitionLayoutPath, zkLayoutPath, clusterNamePrefix, dcs, options.has(forceRemove), adminTypes, adminConfigFilePath);
    } else if (options.has(addStateModel)) {
        listOpt.add(stateModelDefinitionOpt);
        ToolUtils.ensureOrExit(listOpt, options, parser);
        HelixBootstrapUpgradeUtil.addStateModelDef(hardwareLayoutPath, partitionLayoutPath, zkLayoutPath, clusterNamePrefix, dcs, stateModelDef);
    } else {
        // The default operation is BootstrapCluster (if not specified)
        HelixAdminOperation operation = adminOpStr == null ? BootstrapCluster : HelixAdminOperation.valueOf(adminOpStr);
        ToolUtils.ensureOrExit(listOpt, options, parser);
        Integer portNum = portStr == null ? null : Integer.parseInt(portStr);
        switch(operation) {
            case ValidateCluster:
                HelixBootstrapUpgradeUtil.validate(hardwareLayoutPath, partitionLayoutPath, zkLayoutPath, clusterNamePrefix, dcs, stateModelDef, dataNodeConfigSourceType);
                break;
            case ListSealedPartition:
                HelixBootstrapUpgradeUtil.listSealedPartition(hardwareLayoutPath, partitionLayoutPath, zkLayoutPath, clusterNamePrefix, dcs, dataNodeConfigSourceType);
                break;
            case MigrateToPropertyStore:
                HelixBootstrapUpgradeUtil.migrateToPropertyStore(hardwareLayoutPath, partitionLayoutPath, zkLayoutPath, clusterNamePrefix, dcs);
                break;
            case ResetPartition:
            case EnablePartition:
                HelixBootstrapUpgradeUtil.controlPartitionState(hardwareLayoutPath, partitionLayoutPath, zkLayoutPath, clusterNamePrefix, dcs, hostname, portNum, operation, partitionName);
                break;
            case DisablePartition:
                // When both --hostname and --partition are supplied, disable that single partition
                // directly; otherwise fall through to the default branch, which handles the
                // operation by parsing partitions from the clustermap automatically.
                if (hostname != null && partitionName != null) {
                    HelixBootstrapUpgradeUtil.controlPartitionState(hardwareLayoutPath, partitionLayoutPath, zkLayoutPath, clusterNamePrefix, dcs, hostname, portNum, operation, partitionName);
                    break;
                }
            // Intentional fallthrough (see comment above).
            default:
                HelixBootstrapUpgradeUtil.bootstrapOrUpgrade(hardwareLayoutPath, partitionLayoutPath, zkLayoutPath, clusterNamePrefix, dcs, maxPartitionsInOneResource, options.has(dryRun), options.has(forceRemove), null, options.has(enableValidatingClusterManager), stateModelDef, operation, dataNodeConfigSourceType, options.has(overrideReplicaStatus));
        }
    }
    System.out.println("======== HelixBootstrapUpgradeTool completed successfully! ========");
    System.out.println("( If program doesn't exit, please use Ctrl-c to terminate. )");
    System.exit(0);
}
Also used : ArgumentAcceptingOptionSpec(joptsimple.ArgumentAcceptingOptionSpec) OptionSpec(joptsimple.OptionSpec) ArrayList(java.util.ArrayList) HelixAdminOperation(com.github.ambry.clustermap.HelixBootstrapUpgradeUtil.HelixAdminOperation) OptionParser(joptsimple.OptionParser) OptionSpecBuilder(joptsimple.OptionSpecBuilder) OptionSet(joptsimple.OptionSet)

Example 8 with ArgumentAcceptingOptionSpec

use of joptsimple.ArgumentAcceptingOptionSpec in project ambry by linkedin.

The following example shows the main method of the HelixVcrPopulateTool class.

/**
 * Command-line entry point for managing a VCR Helix cluster.
 * <p>
 * Supported operations (selected by flag): {@code --createCluster}, {@code --updateCluster},
 * {@code --controlResource} and {@code --maintainCluster}. The {@code --dest} argument
 * ("zkString/clusterName") is always required and must name a VCR cluster.
 *
 * @param args command line arguments.
 * @throws IOException if reading input files fails.
 */
public static void main(String[] args) throws IOException {
    OptionParser parser = new OptionParser();
    // Operation-selector flags (no argument value).
    OptionSpec<Void> createClusterOpt = parser.accepts("createCluster", "Create cluster in dest zk(no resource creation). --createCluster --dest destZkEndpoint/destClusterName --config configFilePath");
    OptionSpec<Void> updateClusterOpt = parser.accepts("updateCluster", "Update resources in dest by copying from src to dest. --updateCluster [--src srcZkEndpoint/srcClusterName] --dest destZkEndpoint/destClusterName --config configFilePath");
    OptionSpec<Void> dryRunOpt = parser.accepts("dryRun", "Do dry run.");
    OptionSpec<Void> controlResourceOpt = parser.accepts("controlResource", "Enable/Disable a resource. --controlResource --dest destZkEndpoint/destClusterName --resource resource --enable true");
    ArgumentAcceptingOptionSpec<String> resourceOpt = parser.accepts("resource").withRequiredArg().describedAs("resource name").ofType(String.class);
    OptionSpec<Void> maintenanceOpt = parser.accepts("maintainCluster", "Enter/Exit helix maintenance mode. --maintainCluster --dest destZkEndpoint/destClusterName --enable true");
    // Some shared options.
    // VCR cluster argument is always required
    ArgumentAcceptingOptionSpec<String> destOpt = parser.accepts("dest").withRequiredArg().required().describedAs("vcr zk and cluster name").ofType(String.class);
    ArgumentAcceptingOptionSpec<String> srcOpt = parser.accepts("src").withRequiredArg().describedAs("src zk and cluster name").ofType(String.class);
    ArgumentAcceptingOptionSpec<Boolean> enableOpt = parser.accepts("enable").withRequiredArg().describedAs("enable/disable").ofType(Boolean.class);
    ArgumentAcceptingOptionSpec<String> configFileOpt = parser.accepts("config").withRequiredArg().describedAs("config file path").ofType(String.class);
    OptionSet options = parser.parse(args);
    String[] destZkAndCluster = options.valueOf(destOpt).split(HelixVcrUtil.SEPARATOR);
    if (destZkAndCluster.length != 2) {
        errorAndExit("dest argument must have form 'zkString/clusterName'");
    }
    String destZkString = destZkAndCluster[0];
    String destClusterName = destZkAndCluster[1];
    // Guard against accidentally pointing the tool at a non-VCR (storage) cluster.
    if (!destClusterName.contains("VCR")) {
        errorAndExit("dest should be a VCR cluster.(VCR string should be included)");
    }
    VcrHelixConfig config = null;
    if (options.has(createClusterOpt) || options.has(updateClusterOpt)) {
        try {
            config = new ObjectMapper().readValue(Utils.readStringFromFile(options.valueOf(configFileOpt)), VcrHelixConfig.class);
        } catch (IOException ioEx) {
            errorAndExit("Couldn't read the config file: " + options.valueOf(configFileOpt));
        }
    }
    if (options.has(createClusterOpt)) {
        System.out.println("Creating cluster: " + destClusterName);
        createCluster(destZkString, destClusterName, config);
    }
    if (options.has(updateClusterOpt)) {
        boolean dryRun = options.has(dryRunOpt);
        if (options.has(srcOpt)) {
            // Copy resources/partitions from the source cluster into the destination.
            String[] srcZkAndCluster = options.valueOf(srcOpt).split(HelixVcrUtil.SEPARATOR);
            if (srcZkAndCluster.length != 2) {
                errorAndExit("src argument must have form 'zkString/clusterName'");
            }
            String srcZkString = srcZkAndCluster[0];
            String srcClusterName = srcZkAndCluster[1];
            System.out.println("Updating cluster: " + destClusterName + " by checking " + srcClusterName);
            updateResourceAndPartition(srcZkString, srcClusterName, destZkString, destClusterName, config, dryRun);
        } else {
            System.out.println("Updating cluster config for: " + destClusterName);
            // Update the cluster config and resources to the latest settings.
            setClusterConfig(getHelixZkClient(destZkString), destClusterName, config, dryRun);
            updateResourceIdealState(destZkString, destClusterName, config, dryRun);
            if (!dryRun) {
                System.out.println("Cluster " + destClusterName + " is updated successfully!");
            }
        }
    }
    if (options.has(controlResourceOpt)) {
        String resourceName = options.valueOf(resourceOpt);
        Boolean enable = options.valueOf(enableOpt);
        // --enable is mandatory for this operation; fail fast with a clear message
        // instead of passing null downstream.
        if (enable == null) {
            errorAndExit("--enable is required with --controlResource");
        }
        controlResource(destZkString, destClusterName, resourceName, enable);
        System.out.println("Resource " + resourceName + " status: " + enable);
    }
    if (options.has(maintenanceOpt)) {
        Boolean enable = options.valueOf(enableOpt);
        // Previously this auto-unboxed options.valueOf(enableOpt) directly, throwing an
        // opaque NullPointerException when --enable was omitted.
        if (enable == null) {
            errorAndExit("--enable is required with --maintainCluster");
        }
        boolean maintenanceMode = enable;
        maintainCluster(destZkString, destClusterName, maintenanceMode);
        System.out.println("Cluster " + destClusterName + " maintenance mode: " + maintenanceMode);
    }
    System.out.println("Done.");
}
Also used : ArgumentAcceptingOptionSpec(joptsimple.ArgumentAcceptingOptionSpec) OptionSpec(joptsimple.OptionSpec) IOException(java.io.IOException) OptionSet(joptsimple.OptionSet) OptionParser(joptsimple.OptionParser) ObjectMapper(com.fasterxml.jackson.databind.ObjectMapper)

Example 9 with ArgumentAcceptingOptionSpec

use of joptsimple.ArgumentAcceptingOptionSpec in project ambry by linkedin.

The following example shows the main method of the DirectoryUploader class.

/**
 * Command-line entry point for the directory uploader tool.
 * <p>
 * Walks every file under {@code --rootDirectory} and uploads each as a blob to the given
 * partition/datacenter (optionally to a single node specified by {@code --nodeHostname} /
 * {@code --nodePort}), logging the created blob ids to {@code --outFile}.
 */
public static void main(String[] args) {
    try {
        OptionParser parser = new OptionParser();
        ArgumentAcceptingOptionSpec<String> rootDirectoryOpt = parser.accepts("rootDirectory", "The root folder from which all the files will be migrated").withRequiredArg().describedAs("root_directory").ofType(String.class);
        ArgumentAcceptingOptionSpec<String> hardwareLayoutOpt = parser.accepts("hardwareLayout", "The path of the hardware layout file").withRequiredArg().describedAs("hardware_layout").ofType(String.class);
        ArgumentAcceptingOptionSpec<String> partitionLayoutOpt = parser.accepts("partitionLayout", "The path of the partition layout file").withRequiredArg().describedAs("partition_layout").ofType(String.class);
        ArgumentAcceptingOptionSpec<Boolean> verboseLoggingOpt = parser.accepts("enableVerboseLogging", "Enables verbose logging").withOptionalArg().describedAs("Enable verbose logging").ofType(Boolean.class).defaultsTo(false);
        ArgumentAcceptingOptionSpec<String> partitionOpt = parser.accepts("partition", "The partition to which the put calls to be made against").withRequiredArg().describedAs("partition").ofType(String.class);
        ArgumentAcceptingOptionSpec<String> datacenterOpt = parser.accepts("datacenter", "The datacenter to which the put calls to be made against").withRequiredArg().describedAs("datacenter").ofType(String.class);
        ArgumentAcceptingOptionSpec<String> outFileOpt = parser.accepts("outFile", "The file to which output should be redirected").withRequiredArg().describedAs("outFile").ofType(String.class);
        // Optional arguments for defining a specific node to write to.
        ArgumentAcceptingOptionSpec<String> nodeHostnameOpt = parser.accepts("nodeHostname", "The hostname of the node to put to (if specifying single node)").withOptionalArg().describedAs("nodeHostname").ofType(String.class);
        ArgumentAcceptingOptionSpec<Integer> nodePortOpt = parser.accepts("nodePort", "The port of the node to put to (if specifying single node)").withOptionalArg().describedAs("nodePort").ofType(Integer.class);
        OptionSet options = parser.parse(args);
        // Validate that all mandatory options are present (usage + exit otherwise).
        ArrayList<OptionSpec> listOpt = new ArrayList<>();
        listOpt.add(rootDirectoryOpt);
        listOpt.add(hardwareLayoutOpt);
        listOpt.add(partitionLayoutOpt);
        listOpt.add(partitionOpt);
        listOpt.add(datacenterOpt);
        listOpt.add(outFileOpt);
        ToolUtils.ensureOrExit(listOpt, options, parser);
        System.out.println("Starting to parse arguments");
        boolean enableVerboseLogging = options.has(verboseLoggingOpt);
        if (enableVerboseLogging) {
            System.out.println("Enabled verbose logging");
        }
        String rootDirectory = options.valueOf(rootDirectoryOpt);
        if (enableVerboseLogging) {
            System.out.println("Parsed rootdir " + rootDirectory);
        }
        String hardwareLayoutPath = options.valueOf(hardwareLayoutOpt);
        if (enableVerboseLogging) {
            System.out.println("Parsed Hardware layout " + hardwareLayoutPath);
        }
        String partitionLayoutPath = options.valueOf(partitionLayoutOpt);
        if (enableVerboseLogging) {
            System.out.println("Parsed partition layout " + partitionLayoutPath);
        }
        String partition = options.valueOf(partitionOpt);
        if (enableVerboseLogging) {
            System.out.println("Parsed partition " + partition);
        }
        // Wrap the raw partition id in the "Partition[...]" form used by the cluster map.
        partition = "Partition[" + partition + "]";
        String datacenter = options.valueOf(datacenterOpt);
        if (enableVerboseLogging) {
            System.out.println("Parsed datacenter " + datacenter);
        }
        String nodeHostname = options.valueOf(nodeHostnameOpt);
        if (enableVerboseLogging && nodeHostname != null) {
            System.out.println("Parsed node hostname " + nodeHostname);
        }
        Integer nodePort = options.valueOf(nodePortOpt);
        if (enableVerboseLogging && nodePort != null) {
            System.out.println("Parsed node port " + nodePort);
        }
        String outFile = options.valueOf(outFileOpt);
        if (enableVerboseLogging) {
            System.out.println("Parsed outFile " + outFile);
            System.out.println("Done parsing all args");
        }
        VerifiableProperties vprops = new VerifiableProperties((new Properties()));
        ClusterMapConfig clusterMapConfig = new ClusterMapConfig(vprops);
        ClusterMap map = ((ClusterAgentsFactory) Utils.getObj(clusterMapConfig.clusterMapClusterAgentsFactory, clusterMapConfig, hardwareLayoutPath, partitionLayoutPath)).getClusterMap();
        File logFile = new File(outFile);
        // try-with-resources guarantees the writer is closed on every path (success or
        // failure), replacing the previous manual finally/close handling.
        try (FileWriter writer = new FileWriter(logFile)) {
            DirectoryUploader directoryUploader = new DirectoryUploader();
            directoryUploader.setPartitionId(map, partition, enableVerboseLogging);
            // Targeting a single node requires both hostname and port to be supplied.
            if (nodeHostname != null && nodePort != null) {
                directoryUploader.setDataNodeId(map, nodeHostname, nodePort, enableVerboseLogging);
            }
            directoryUploader.walkDirectoryToCreateBlobs(rootDirectory, writer, datacenter, map.getLocalDatacenterId(), enableVerboseLogging);
        }
    } catch (Exception e) {
        System.err.println("Error on exit " + e);
    }
}
Also used : OptionSpec(joptsimple.OptionSpec) ArgumentAcceptingOptionSpec(joptsimple.ArgumentAcceptingOptionSpec) ClusterMap(com.github.ambry.clustermap.ClusterMap) VerifiableProperties(com.github.ambry.config.VerifiableProperties) FileWriter(java.io.FileWriter) ArrayList(java.util.ArrayList) BlobProperties(com.github.ambry.messageformat.BlobProperties) Properties(java.util.Properties) VerifiableProperties(com.github.ambry.config.VerifiableProperties) OptionParser(joptsimple.OptionParser) ClusterMapConfig(com.github.ambry.config.ClusterMapConfig) IOException(java.io.IOException) FileNotFoundException(java.io.FileNotFoundException) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) ClusterAgentsFactory(com.github.ambry.clustermap.ClusterAgentsFactory) OptionSet(joptsimple.OptionSet) File(java.io.File)

Example 10 with ArgumentAcceptingOptionSpec

use of joptsimple.ArgumentAcceptingOptionSpec in project ambry by linkedin.

The following example shows the main method of the ServerWritePerformance class.

/**
 * Entry point for the server write-performance tool.
 *
 * <p>Parses command-line options (cluster layout files, writer count, blob size
 * range, target write rate and SSL settings), starts {@code numberOfWriters}
 * threads that issue put requests through a shared {@link ConnectionPool}, and
 * prints aggregate throughput from a shutdown hook when the process receives
 * control-c. Blob ids and per-interval performance numbers are written to
 * {@code writeperflog} and {@code writeperfresult} in the working directory.
 *
 * @param args command-line arguments; {@code hardwareLayout} and
 *             {@code partitionLayout} are required, every other option has a
 *             default value.
 */
public static void main(String[] args) {
    FileWriter blobIdsWriter = null;
    FileWriter performanceWriter = null;
    ConnectionPool connectionPool = null;
    try {
        OptionParser parser = new OptionParser();
        ArgumentAcceptingOptionSpec<String> hardwareLayoutOpt = parser.accepts("hardwareLayout", "The path of the hardware layout file").withRequiredArg().describedAs("hardware_layout").ofType(String.class);
        ArgumentAcceptingOptionSpec<String> partitionLayoutOpt = parser.accepts("partitionLayout", "The path of the partition layout file").withRequiredArg().describedAs("partition_layout").ofType(String.class);
        ArgumentAcceptingOptionSpec<Integer> numberOfWritersOpt = parser.accepts("numberOfWriters", "The number of writers that issue put request").withRequiredArg().describedAs("The number of writers").ofType(Integer.class).defaultsTo(4);
        ArgumentAcceptingOptionSpec<Integer> minBlobSizeOpt = parser.accepts("minBlobSizeInBytes", "The minimum size of the blob that can be put").withRequiredArg().describedAs("The minimum blob size in bytes").ofType(Integer.class).defaultsTo(51200);
        ArgumentAcceptingOptionSpec<Integer> maxBlobSizeOpt = parser.accepts("maxBlobSizeInBytes", "The maximum size of the blob that can be put").withRequiredArg().describedAs("The maximum blob size in bytes").ofType(Integer.class).defaultsTo(4194304);
        ArgumentAcceptingOptionSpec<Integer> writesPerSecondOpt = parser.accepts("writesPerSecond", "The rate at which writes need to be performed").withRequiredArg().describedAs("The number of writes per second").ofType(Integer.class).defaultsTo(1000);
        ArgumentAcceptingOptionSpec<Long> measurementIntervalOpt = parser.accepts("measurementInterval", "The interval in second to report performance result").withOptionalArg().describedAs("The CPU time spent for putting blobs, not wall time").ofType(Long.class).defaultsTo(300L);
        ArgumentAcceptingOptionSpec<Boolean> verboseLoggingOpt = parser.accepts("enableVerboseLogging", "Enables verbose logging").withOptionalArg().describedAs("Enable verbose logging").ofType(Boolean.class).defaultsTo(false);
        ArgumentAcceptingOptionSpec<String> sslEnabledDatacentersOpt = parser.accepts("sslEnabledDatacenters", "Datacenters to which ssl should be enabled").withOptionalArg().describedAs("Comma separated list").ofType(String.class).defaultsTo("");
        ArgumentAcceptingOptionSpec<String> sslKeystorePathOpt = parser.accepts("sslKeystorePath", "SSL key store path").withOptionalArg().describedAs("The file path of SSL key store").defaultsTo("").ofType(String.class);
        ArgumentAcceptingOptionSpec<String> sslKeystoreTypeOpt = parser.accepts("sslKeystoreType", "SSL key store type").withOptionalArg().describedAs("The type of SSL key store").defaultsTo("").ofType(String.class);
        ArgumentAcceptingOptionSpec<String> sslTruststorePathOpt = parser.accepts("sslTruststorePath", "SSL trust store path").withOptionalArg().describedAs("The file path of SSL trust store").defaultsTo("").ofType(String.class);
        ArgumentAcceptingOptionSpec<String> sslKeystorePasswordOpt = parser.accepts("sslKeystorePassword", "SSL key store password").withOptionalArg().describedAs("The password of SSL key store").defaultsTo("").ofType(String.class);
        ArgumentAcceptingOptionSpec<String> sslKeyPasswordOpt = parser.accepts("sslKeyPassword", "SSL key password").withOptionalArg().describedAs("The password of SSL private key").defaultsTo("").ofType(String.class);
        ArgumentAcceptingOptionSpec<String> sslTruststorePasswordOpt = parser.accepts("sslTruststorePassword", "SSL trust store password").withOptionalArg().describedAs("The password of SSL trust store").defaultsTo("").ofType(String.class);
        ArgumentAcceptingOptionSpec<String> sslCipherSuitesOpt = parser.accepts("sslCipherSuites", "SSL enabled cipher suites").withOptionalArg().describedAs("Comma separated list").defaultsTo("TLS_RSA_WITH_AES_128_CBC_SHA").ofType(String.class);
        OptionSet options = parser.parse(args);
        ArrayList<OptionSpec> listOpt = new ArrayList<>();
        listOpt.add(hardwareLayoutOpt);
        listOpt.add(partitionLayoutOpt);
        // Exits the process with usage help if any required option is missing.
        ToolUtils.ensureOrExit(listOpt, options, parser);
        long measurementIntervalNs = options.valueOf(measurementIntervalOpt) * SystemTime.NsPerSec;
        // Validates that the SSL options are mutually consistent before any are used.
        ToolUtils.validateSSLOptions(options, parser, sslEnabledDatacentersOpt, sslKeystorePathOpt, sslKeystoreTypeOpt, sslTruststorePathOpt, sslKeystorePasswordOpt, sslKeyPasswordOpt, sslTruststorePasswordOpt);
        String sslEnabledDatacenters = options.valueOf(sslEnabledDatacentersOpt);
        Properties sslProperties;
        if (sslEnabledDatacenters.length() != 0) {
            sslProperties = ToolUtils.createSSLProperties(sslEnabledDatacenters, options.valueOf(sslKeystorePathOpt), options.valueOf(sslKeystoreTypeOpt), options.valueOf(sslKeystorePasswordOpt), options.valueOf(sslKeyPasswordOpt), options.valueOf(sslTruststorePathOpt), options.valueOf(sslTruststorePasswordOpt), options.valueOf(sslCipherSuitesOpt));
        } else {
            sslProperties = new Properties();
        }
        ToolUtils.addClusterMapProperties(sslProperties);
        int numberOfWriters = options.valueOf(numberOfWritersOpt);
        int writesPerSecond = options.valueOf(writesPerSecondOpt);
        // options.has(...) already returns the boolean; no ternary needed.
        boolean enableVerboseLogging = options.has(verboseLoggingOpt);
        int minBlobSize = options.valueOf(minBlobSizeOpt);
        int maxBlobSize = options.valueOf(maxBlobSizeOpt);
        if (enableVerboseLogging) {
            System.out.println("Enabled verbose logging");
        }
        // Shared accumulators updated by all writer threads and read by the shutdown hook.
        final AtomicLong totalTimeTaken = new AtomicLong(0);
        final AtomicLong totalWrites = new AtomicLong(0);
        String hardwareLayoutPath = options.valueOf(hardwareLayoutOpt);
        String partitionLayoutPath = options.valueOf(partitionLayoutOpt);
        ClusterMapConfig clusterMapConfig = new ClusterMapConfig(new VerifiableProperties(sslProperties));
        ClusterMap map = ((ClusterAgentsFactory) Utils.getObj(clusterMapConfig.clusterMapClusterAgentsFactory, clusterMapConfig, hardwareLayoutPath, partitionLayoutPath)).getClusterMap();
        File logFile = new File(System.getProperty("user.dir"), "writeperflog");
        blobIdsWriter = new FileWriter(logFile);
        File performanceFile = new File(System.getProperty("user.dir"), "writeperfresult");
        performanceWriter = new FileWriter(performanceFile);
        final CountDownLatch latch = new CountDownLatch(numberOfWriters);
        final AtomicBoolean shutdown = new AtomicBoolean(false);
        // attach shutdown handler to catch control-c
        Runtime.getRuntime().addShutdownHook(new Thread() {

            public void run() {
                try {
                    System.out.println("Shutdown invoked");
                    shutdown.set(true);
                    // Wait for every writer to observe the shutdown flag and finish.
                    latch.await();
                    // NOTE(review): if no write completed, totalWrites is 0 and the
                    // average below prints NaN (double division) — confirm acceptable.
                    System.out.println("Total writes : " + totalWrites.get() + "  Total time taken : " + totalTimeTaken.get() + " Nano Seconds  Average time taken per write " + ((double) totalTimeTaken.get()) / SystemTime.NsPerSec / totalWrites.get() + " Seconds");
                } catch (Exception e) {
                    System.out.println("Error while shutting down " + e);
                }
            }
        });
        // Throttler caps the aggregate put rate across all writer threads.
        Throttler throttler = new Throttler(writesPerSecond, 100, true, SystemTime.getInstance());
        Thread[] threadIndexPerf = new Thread[numberOfWriters];
        ConnectionPoolConfig connectionPoolConfig = new ConnectionPoolConfig(new VerifiableProperties(new Properties()));
        VerifiableProperties vProps = new VerifiableProperties(sslProperties);
        SSLConfig sslConfig = new SSLConfig(vProps);
        clusterMapConfig = new ClusterMapConfig(vProps);
        connectionPool = new BlockingChannelConnectionPool(connectionPoolConfig, sslConfig, clusterMapConfig, new MetricRegistry());
        connectionPool.start();
        for (int i = 0; i < numberOfWriters; i++) {
            threadIndexPerf[i] = new Thread(new ServerWritePerfRun(i, throttler, shutdown, latch, minBlobSize, maxBlobSize, blobIdsWriter, performanceWriter, totalTimeTaken, totalWrites, measurementIntervalNs, enableVerboseLogging, map, connectionPool));
            threadIndexPerf[i].start();
        }
        for (int i = 0; i < numberOfWriters; i++) {
            threadIndexPerf[i].join();
        }
    } catch (Exception e) {
        System.err.println("Error on exit " + e);
    } finally {
        // Best-effort cleanup: close both writers and shut the pool down even on failure.
        if (blobIdsWriter != null) {
            try {
                blobIdsWriter.close();
            } catch (Exception e) {
                System.out.println("Error when closing the blob id writer");
            }
        }
        if (performanceWriter != null) {
            try {
                performanceWriter.close();
            } catch (Exception e) {
                System.out.println("Error when closing the performance writer");
            }
        }
        if (connectionPool != null) {
            connectionPool.shutdown();
        }
    }
}
Also used : BlockingChannelConnectionPool(com.github.ambry.network.BlockingChannelConnectionPool) ConnectionPool(com.github.ambry.network.ConnectionPool) OptionSpec(joptsimple.OptionSpec) ArgumentAcceptingOptionSpec(joptsimple.ArgumentAcceptingOptionSpec) ClusterMap(com.github.ambry.clustermap.ClusterMap) ConnectionPoolConfig(com.github.ambry.config.ConnectionPoolConfig) FileWriter(java.io.FileWriter) ArrayList(java.util.ArrayList) BlobProperties(com.github.ambry.messageformat.BlobProperties) Properties(java.util.Properties) VerifiableProperties(com.github.ambry.config.VerifiableProperties) OptionParser(joptsimple.OptionParser) ClusterAgentsFactory(com.github.ambry.clustermap.ClusterAgentsFactory) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) Throttler(com.github.ambry.utils.Throttler) SSLConfig(com.github.ambry.config.SSLConfig) VerifiableProperties(com.github.ambry.config.VerifiableProperties) MetricRegistry(com.codahale.metrics.MetricRegistry) CountDownLatch(java.util.concurrent.CountDownLatch) ClusterMapConfig(com.github.ambry.config.ClusterMapConfig) UnexpectedException(java.rmi.UnexpectedException) BlockingChannelConnectionPool(com.github.ambry.network.BlockingChannelConnectionPool) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) AtomicLong(java.util.concurrent.atomic.AtomicLong) AtomicLong(java.util.concurrent.atomic.AtomicLong) OptionSet(joptsimple.OptionSet) File(java.io.File)

Aggregations

ArgumentAcceptingOptionSpec (joptsimple.ArgumentAcceptingOptionSpec)11 OptionParser (joptsimple.OptionParser)10 OptionSet (joptsimple.OptionSet)10 OptionSpec (joptsimple.OptionSpec)10 ArrayList (java.util.ArrayList)9 VerifiableProperties (com.github.ambry.config.VerifiableProperties)8 Properties (java.util.Properties)8 ClusterMapConfig (com.github.ambry.config.ClusterMapConfig)7 ClusterAgentsFactory (com.github.ambry.clustermap.ClusterAgentsFactory)6 ClusterMap (com.github.ambry.clustermap.ClusterMap)6 MetricRegistry (com.codahale.metrics.MetricRegistry)5 IOException (java.io.IOException)5 BlobProperties (com.github.ambry.messageformat.BlobProperties)4 Throttler (com.github.ambry.utils.Throttler)4 File (java.io.File)4 FileWriter (java.io.FileWriter)4 AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean)4 AtomicLong (java.util.concurrent.atomic.AtomicLong)4 ReplicaId (com.github.ambry.clustermap.ReplicaId)2 BlobIdFactory (com.github.ambry.commons.BlobIdFactory)2