Use of com.github.ambry.config.ClusterMapConfig in project ambry by linkedin.
The class PartitionManager, method main.
public static void main(String[] args) {
  try {
    OptionParser parser = new OptionParser();
    ArgumentAcceptingOptionSpec<String> operationTypeOpt = parser.accepts("operationType",
        "REQUIRED: The type of operation to perform on the partition. Currently supported"
            + " operations are 'AddPartition', 'AddReplicas'")
        .withRequiredArg()
        .describedAs("operation_type")
        .ofType(String.class);
    ArgumentAcceptingOptionSpec<String> hardwareLayoutPathOpt = parser.accepts("hardwareLayoutPath",
        "REQUIRED: The path to the hardware layout map")
        .withRequiredArg()
        .describedAs("hardware_layout_path")
        .ofType(String.class);
    ArgumentAcceptingOptionSpec<String> partitionLayoutPathOpt = parser.accepts("partitionLayoutPath",
        "REQUIRED: The path to the partition layout map. If outputPartitionLayoutPath is not defined, "
            + "this file is updated with the new partitions")
        .withRequiredArg()
        .describedAs("partition_layout_path")
        .ofType(String.class);
    ArgumentAcceptingOptionSpec<String> serverPropsFilePathOpt = parser.accepts("serverPropsFilePath",
        "REQUIRED: The path to the server properties file")
        .withRequiredArg()
        .describedAs("server_props_file_path")
        .ofType(String.class);
    ArgumentAcceptingOptionSpec<String> outputPartitionLayoutPathOpt = parser.accepts("outputPartitionLayoutPath",
        "The path to the output partition layout map. The file is updated with the new partitions")
        .withOptionalArg()
        .describedAs("output_partition_layout_path")
        .ofType(String.class);
    ArgumentAcceptingOptionSpec<Integer> numberOfPartitionsOpt = parser.accepts("numberOfPartitionsToAdd",
        "The number of partitions to add")
        .withOptionalArg()
        .ofType(Integer.class);
    ArgumentAcceptingOptionSpec<Integer> numberOfReplicasPerDatacenterOpt = parser.accepts("numberOfReplicasPerDatacenter",
        "The number of replicas for the partition per datacenter when adding partitions")
        .withOptionalArg()
        .ofType(Integer.class);
    ArgumentAcceptingOptionSpec<Long> replicaCapacityInBytesOpt = parser.accepts("replicaCapacityInBytes",
        "The capacity of each replica in bytes for the partitions to add")
        .withOptionalArg()
        .ofType(Long.class);
    ArgumentAcceptingOptionSpec<String> partitionIdsToAddReplicasToOpt = parser.accepts("partitionIdToAddReplicasTo",
        "The partition ids to add replicas to. This can either take a comma-separated list of partitions "
            + "to add replicas to, or '.' to add replicas to all partitions in the partitionLayout")
        .withOptionalArg()
        .ofType(String.class);
    ArgumentAcceptingOptionSpec<String> datacenterToAddReplicasToOpt = parser.accepts("datacenterToAddReplicasTo",
        "The data center to which replicas need to be added")
        .withOptionalArg()
        .ofType(String.class);
    String attemptNonRackAwareOnFailureFlag = "attemptNonRackAwareOnFailure";
    parser.accepts(attemptNonRackAwareOnFailureFlag,
        "If a rack-aware partition allocation cannot be found, attempt a non rack-aware one");
    OptionSet options = parser.parse(args);
    ArrayList<OptionSpec> listOpt = new ArrayList<>();
    listOpt.add(hardwareLayoutPathOpt);
    listOpt.add(operationTypeOpt);
    listOpt.add(serverPropsFilePathOpt);
    ToolUtils.ensureOrExit(listOpt, options, parser);
    String hardwareLayoutPath = options.valueOf(hardwareLayoutPathOpt);
    String partitionLayoutPath = options.valueOf(partitionLayoutPathOpt);
    String serverPropsFilePath = options.valueOf(serverPropsFilePathOpt);
    String outputPartitionLayoutPath =
        options.has(outputPartitionLayoutPathOpt) ? options.valueOf(outputPartitionLayoutPathOpt)
            : partitionLayoutPath;
    String operationType = options.valueOf(operationTypeOpt);
    boolean attemptNonRackAwareOnFailure = options.has(attemptNonRackAwareOnFailureFlag);
    String fileString = null;
    try {
      fileString = Utils.readStringFromFile(partitionLayoutPath);
    } catch (FileNotFoundException e) {
      System.out.println("Partition layout path not found. Creating new file");
    }
    StaticClusterManager manager = null;
    Properties properties = Utils.loadProps(serverPropsFilePath);
    ClusterMapConfig clusterMapConfig = new ClusterMapConfig(new VerifiableProperties(properties));
    if (fileString == null) {
      manager = (new StaticClusterAgentsFactory(clusterMapConfig, new PartitionLayout(
          new HardwareLayout(new JSONObject(Utils.readStringFromFile(hardwareLayoutPath)), clusterMapConfig),
          clusterMapConfig))).getClusterMap();
    } else {
      manager = (new StaticClusterAgentsFactory(clusterMapConfig, hardwareLayoutPath, partitionLayoutPath)).getClusterMap();
    }
    if (operationType.compareToIgnoreCase("AddPartition") == 0) {
      listOpt.add(numberOfPartitionsOpt);
      listOpt.add(numberOfReplicasPerDatacenterOpt);
      listOpt.add(replicaCapacityInBytesOpt);
      ToolUtils.ensureOrExit(listOpt, options, parser);
      int numberOfPartitions = options.valueOf(numberOfPartitionsOpt);
      int numberOfReplicas = options.valueOf(numberOfReplicasPerDatacenterOpt);
      long replicaCapacityInBytes = options.valueOf(replicaCapacityInBytesOpt);
      manager.allocatePartitions(numberOfPartitions, clusterMapConfig.clusterMapDefaultPartitionClass,
          numberOfReplicas, replicaCapacityInBytes, attemptNonRackAwareOnFailure);
    } else if (operationType.compareToIgnoreCase("AddReplicas") == 0) {
      listOpt.add(partitionIdsToAddReplicasToOpt);
      listOpt.add(datacenterToAddReplicasToOpt);
      listOpt.add(partitionLayoutPathOpt);
      ToolUtils.ensureOrExit(listOpt, options, parser);
      String partitionIdsToAddReplicas = options.valueOf(partitionIdsToAddReplicasToOpt);
      String datacenterToAddReplicasTo = options.valueOf(datacenterToAddReplicasToOpt);
      if (partitionIdsToAddReplicas.compareToIgnoreCase(".") == 0) {
        for (PartitionId partitionId : manager.getAllPartitionIds(null)) {
          manager.addReplicas(partitionId, datacenterToAddReplicasTo, attemptNonRackAwareOnFailure);
        }
      } else {
        String[] partitionIds = partitionIdsToAddReplicas.split(",");
        for (String partitionId : partitionIds) {
          for (PartitionId partitionInCluster : manager.getAllPartitionIds(null)) {
            if (partitionInCluster.isEqual(partitionId)) {
              manager.addReplicas(partitionInCluster, datacenterToAddReplicasTo, attemptNonRackAwareOnFailure);
            }
          }
        }
      }
    }
    manager.persist(hardwareLayoutPath, outputPartitionLayoutPath);
  } catch (Exception e) {
    System.out.println("Error while executing partition command " + e);
  }
}
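The option set above maps directly to a command line. As an illustration only, here is a hypothetical programmatic invocation that adds partitions; every path and value is a placeholder, and the snippet is assumed to live in the same package as PartitionManager so no import is needed. The flag names themselves come from the parser definitions above.

public class AddPartitionExample {
  public static void main(String[] args) {
    // Hypothetical values throughout; only the flag names are taken from the tool above.
    PartitionManager.main(new String[]{
        "--operationType", "AddPartition",
        "--hardwareLayoutPath", "/tmp/HardwareLayout.json",
        "--partitionLayoutPath", "/tmp/PartitionLayout.json",
        "--serverPropsFilePath", "/tmp/server.properties",
        "--numberOfPartitionsToAdd", "10",
        "--numberOfReplicasPerDatacenter", "3",
        "--replicaCapacityInBytes", "107374182400"
    });
  }
}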
Use of com.github.ambry.config.ClusterMapConfig in project ambry by linkedin.
The class CompactionVerifier, method main.
/**
 * Main function to trigger the verifier.
 * @param args CLI arguments
 * @throws Exception if the verifier encountered problems.
 */
public static void main(String[] args) throws Exception {
  VerifiableProperties verifiableProperties = ToolUtils.getVerifiableProperties(args);
  CompactionVerifierConfig verifierConfig = new CompactionVerifierConfig(verifiableProperties);
  StoreConfig storeConfig = new StoreConfig(verifiableProperties);
  assert !storeConfig.storeEnableHardDelete : "Hard delete cannot be enabled in the properties";
  ClusterMapConfig clusterMapConfig = new ClusterMapConfig(verifiableProperties);
  ClusterMap clusterMap = new StaticClusterAgentsFactory(clusterMapConfig, verifierConfig.hardwareLayoutFilePath,
      verifierConfig.partitionLayoutFilePath).getClusterMap();
  StoreKeyFactory storeKeyFactory = Utils.getObj(storeConfig.storeKeyFactory, clusterMap);
  try (CompactionVerifier compactionVerifier = new CompactionVerifier(verifierConfig, storeConfig, storeKeyFactory)) {
    compactionVerifier.verifyCompaction();
  }
  LOGGER.info("Verification completed successfully");
}
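Both mains so far build a ClusterMap from static layout files before doing any real work. Below is a minimal, self-contained sketch of that shared bootstrapping step, using only calls that appear in the examples on this page and assuming the standard Ambry package layout for the imports; the file paths are placeholders.

import java.util.Properties;
import com.github.ambry.clustermap.ClusterMap;
import com.github.ambry.clustermap.StaticClusterAgentsFactory;
import com.github.ambry.config.ClusterMapConfig;
import com.github.ambry.config.VerifiableProperties;
import com.github.ambry.utils.Utils;

public class ClusterMapBootstrapSketch {
  public static void main(String[] args) throws Exception {
    // Load server properties and wrap them for config parsing (placeholder path).
    Properties props = Utils.loadProps("/tmp/server.properties");
    ClusterMapConfig clusterMapConfig = new ClusterMapConfig(new VerifiableProperties(props));
    // Build the cluster map from static hardware/partition layout files (placeholder paths).
    ClusterMap clusterMap = new StaticClusterAgentsFactory(clusterMapConfig,
        "/tmp/HardwareLayout.json", "/tmp/PartitionLayout.json").getClusterMap();
    System.out.println("Partitions in layout: " + clusterMap.getAllPartitionIds(null).size());
  }
}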
Use of com.github.ambry.config.ClusterMapConfig in project ambry by linkedin.
The class DiskReformatter, method main.
public static void main(String[] args) throws Exception {
  VerifiableProperties properties = ToolUtils.getVerifiableProperties(args);
  DiskReformatterConfig config = new DiskReformatterConfig(properties);
  StoreConfig storeConfig = new StoreConfig(properties);
  ClusterMapConfig clusterMapConfig = new ClusterMapConfig(properties);
  ServerConfig serverConfig = new ServerConfig(properties);
  ClusterAgentsFactory clusterAgentsFactory = Utils.getObj(clusterMapConfig.clusterMapClusterAgentsFactory,
      clusterMapConfig, config.hardwareLayoutFilePath, config.partitionLayoutFilePath);
  try (ClusterMap clusterMap = clusterAgentsFactory.getClusterMap()) {
    StoreKeyConverterFactory storeKeyConverterFactory = Utils.getObj(serverConfig.serverStoreKeyConverterFactory,
        properties, clusterMap.getMetricRegistry());
    StoreKeyFactory storeKeyFactory = Utils.getObj(storeConfig.storeKeyFactory, clusterMap);
    DataNodeId dataNodeId = clusterMap.getDataNodeId(config.datanodeHostname, config.datanodePort);
    if (dataNodeId == null) {
      throw new IllegalArgumentException("Did not find node in clustermap with hostname:port - "
          + config.datanodeHostname + ":" + config.datanodePort);
    }
    DiskReformatter reformatter =
        new DiskReformatter(dataNodeId, Collections.EMPTY_LIST, config.fetchSizeInBytes, storeConfig,
            storeKeyFactory, clusterMap, SystemTime.getInstance(), storeKeyConverterFactory.getStoreKeyConverter());
    AtomicInteger exitStatus = new AtomicInteger(0);
    CountDownLatch latch = new CountDownLatch(config.diskMountPaths.length);
    for (int i = 0; i < config.diskMountPaths.length; i++) {
      int finalI = i;
      Runnable runnable = () -> {
        try {
          reformatter.reformat(config.diskMountPaths[finalI], new File(config.scratchPaths[finalI]));
          latch.countDown();
        } catch (Exception e) {
          throw new IllegalStateException(e);
        }
      };
      Thread thread = Utils.newThread(config.diskMountPaths[finalI] + "-reformatter", runnable, true);
      thread.setUncaughtExceptionHandler((t, e) -> {
        exitStatus.set(1);
        logger.error("Reformatting {} failed", config.diskMountPaths[finalI], e);
        latch.countDown();
      });
      thread.start();
    }
    latch.await();
    System.exit(exitStatus.get());
  }
}
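The reformatter fans each mount path out to its own thread and routes failures through an uncaught-exception handler that both records the error and releases the latch, so one bad disk cannot hang the whole run. Here is a self-contained sketch of just that concurrency pattern, with hypothetical task names and no Ambry dependencies.

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicInteger;

public class FanOutSketch {
  public static void main(String[] args) throws InterruptedException {
    String[] tasks = {"disk-0", "disk-1"}; // hypothetical task names
    AtomicInteger exitStatus = new AtomicInteger(0);
    CountDownLatch latch = new CountDownLatch(tasks.length);
    for (String task : tasks) {
      Thread thread = new Thread(() -> {
        // ... per-task work goes here; throw to signal failure ...
        latch.countDown();
      }, task + "-worker");
      // The handler records the failure AND counts down, so a failed
      // task cannot block latch.await() below.
      thread.setUncaughtExceptionHandler((t, e) -> {
        exitStatus.set(1);
        latch.countDown();
      });
      thread.start();
    }
    latch.await();
    System.exit(exitStatus.get());
  }
}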
Use of com.github.ambry.config.ClusterMapConfig in project ambry by linkedin.
The class DumpIndexTool, method main.
public static void main(String[] args) throws Exception {
  final AtomicInteger exitCode = new AtomicInteger(0);
  VerifiableProperties verifiableProperties = ToolUtils.getVerifiableProperties(args);
  DumpIndexToolConfig config = new DumpIndexToolConfig(verifiableProperties);
  ClusterMapConfig clusterMapConfig = new ClusterMapConfig(verifiableProperties);
  try (ClusterMap clusterMap = ((ClusterAgentsFactory) Utils.getObj(clusterMapConfig.clusterMapClusterAgentsFactory,
      clusterMapConfig, config.hardwareLayoutFilePath, config.partitionLayoutFilePath)).getClusterMap()) {
    StoreConfig storeConfig = new StoreConfig(verifiableProperties);
    // this tool supports only blob IDs. It can become generic if StoreKeyFactory provides a deserFromString method.
    BlobIdFactory blobIdFactory = new BlobIdFactory(clusterMap);
    StoreToolsMetrics metrics = new StoreToolsMetrics(clusterMap.getMetricRegistry());
    StoreMetrics storeMetrics = new StoreMetrics("DumpIndexTool", clusterMap.getMetricRegistry());
    ServerConfig serverConfig = new ServerConfig(verifiableProperties);
    Time time = SystemTime.getInstance();
    Throttler throttler = new Throttler(config.indexEntriesToProcessPerSec, 1000, true, time);
    StoreKeyConverterFactory storeKeyConverterFactory =
        Utils.getObj(serverConfig.serverStoreKeyConverterFactory, verifiableProperties, clusterMap.getMetricRegistry());
    DumpIndexTool dumpIndexTool = new DumpIndexTool(blobIdFactory, storeConfig, time, metrics, storeMetrics,
        throttler, storeKeyConverterFactory.getStoreKeyConverter());
    Set<StoreKey> filterKeySet = new HashSet<>();
    for (String key : config.filterSet) {
      filterKeySet.add(new BlobId(key, clusterMap));
    }
    switch (config.typeOfOperation) {
      case DumpIndex:
        dumpIndex(dumpIndexTool, config.pathOfInput, filterKeySet);
        break;
      case DumpIndexSegment:
        dumpIndexSegment(dumpIndexTool, config.pathOfInput, filterKeySet);
        break;
      case VerifyIndex:
        IndexProcessingResults results = dumpIndexTool.processIndex(config.pathOfInput, filterKeySet,
            time.milliseconds(), config.detectDuplicatesAcrossKeys);
        exitCode.set(reportVerificationResults(config.pathOfInput, results, config.failIfCraftedIdsPresent));
        break;
      case VerifyDataNode:
        DataNodeId dataNodeId = clusterMap.getDataNodeId(config.hostname, config.port);
        if (dataNodeId == null) {
          logger.error("No data node corresponding to {}:{}", config.hostname, config.port);
        } else {
          Set<File> replicaDirs = clusterMap.getReplicaIds(dataNodeId)
              .stream()
              .map(replicaId -> new File(replicaId.getMountPath()))
              .collect(Collectors.toSet());
          Map<File, IndexProcessingResults> resultsByReplica = dumpIndexTool.processIndex(replicaDirs, filterKeySet,
              config.parallelism, config.detectDuplicatesAcrossKeys);
          replicaDirs.removeAll(resultsByReplica.keySet());
          if (replicaDirs.size() != 0) {
            logger.error("Results obtained missing {}", replicaDirs);
            exitCode.set(5);
          } else {
            resultsByReplica.forEach((replicaDir, result) -> exitCode.set(
                Math.max(exitCode.get(), reportVerificationResults(replicaDir, result, config.failIfCraftedIdsPresent))));
          }
        }
        break;
      default:
        throw new IllegalArgumentException("Unrecognized operation: " + config.typeOfOperation);
    }
  }
  System.exit(exitCode.get());
}
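Several of these mains instantiate the configured factory class by name via Utils.getObj(clusterMapConfig.clusterMapClusterAgentsFactory, ...), which is what lets the same tool run against a static or a dynamic cluster map. The following is a rough, illustrative reimplementation of such a reflection helper, not Ambry's actual Utils.getObj; it matches constructors by argument count and assignability only, and the StringBuilder example merely stands in for a factory class name read from config.

import java.lang.reflect.Constructor;

public class ReflectionFactorySketch {
  // Illustrative only: load a class by name and invoke the first public
  // constructor whose parameters accept the supplied arguments.
  @SuppressWarnings("unchecked")
  static <T> T getObj(String className, Object... ctorArgs) throws Exception {
    for (Constructor<?> ctor : Class.forName(className).getDeclaredConstructors()) {
      if (ctor.getParameterCount() == ctorArgs.length) {
        Class<?>[] paramTypes = ctor.getParameterTypes();
        boolean matches = true;
        for (int i = 0; i < ctorArgs.length; i++) {
          if (!paramTypes[i].isInstance(ctorArgs[i])) {
            matches = false;
            break;
          }
        }
        if (matches) {
          return (T) ctor.newInstance(ctorArgs);
        }
      }
    }
    throw new NoSuchMethodException("No matching " + ctorArgs.length + "-arg constructor on " + className);
  }

  public static void main(String[] args) throws Exception {
    // "java.lang.StringBuilder" stands in for a factory class name from config.
    StringBuilder sb = getObj("java.lang.StringBuilder", "seed");
    System.out.println(sb.append("-value"));
  }
}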
Use of com.github.ambry.config.ClusterMapConfig in project ambry by linkedin.
The class SafeServerShutdownTool, method main.
public static void main(String[] args) throws Exception {
  VerifiableProperties verifiableProperties = ToolUtils.getVerifiableProperties(args);
  SafeServerShutdownToolConfig config = new SafeServerShutdownToolConfig(verifiableProperties);
  ClusterMapConfig clusterMapConfig = new ClusterMapConfig(verifiableProperties);
  try (ClusterMap clusterMap = ((ClusterAgentsFactory) Utils.getObj(clusterMapConfig.clusterMapClusterAgentsFactory,
      clusterMapConfig, config.hardwareLayoutFilePath, config.partitionLayoutFilePath)).getClusterMap()) {
    SSLFactory sslFactory = !clusterMapConfig.clusterMapSslEnabledDatacenters.isEmpty()
        ? SSLFactory.getNewInstance(new SSLConfig(verifiableProperties)) : null;
    try (ServerAdminTool serverAdminTool = new ServerAdminTool(clusterMap, sslFactory, verifiableProperties)) {
      DataNodeId dataNodeId = clusterMap.getDataNodeId(config.hostname, config.port);
      if (dataNodeId == null) {
        throw new IllegalArgumentException(
            "Could not find a data node corresponding to " + config.hostname + ":" + config.port);
      }
      SafeServerShutdownTool safeServerShutdownTool =
          new SafeServerShutdownTool(serverAdminTool, SystemTime.getInstance());
      int exitStatus = safeServerShutdownTool.prepareServerForShutdown(dataNodeId,
          config.logGrowthPauseLagThresholdBytes, config.numReplicasCaughtUpPerPartition, config.timeoutSecs,
          config.checkRepeatDelaySecs) ? 0 : 1;
      System.exit(exitStatus);
    }
  }
}
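One detail worth calling out in this last example: an SSLFactory is constructed only when the cluster map lists SSL-enabled datacenters, and a null factory signals plaintext connections. A small helper sketch of that decision follows, assuming the SSLFactory.getNewInstance and SSLConfig calls used above and the usual Ambry package locations for the imports; it is declared to throw Exception for simplicity.

import com.github.ambry.commons.SSLFactory;
import com.github.ambry.config.ClusterMapConfig;
import com.github.ambry.config.SSLConfig;
import com.github.ambry.config.VerifiableProperties;

public class SslFactorySketch {
  // Sketch only: mirrors the ternary in the main above. A null return means
  // no datacenter requires SSL, so callers stay on plaintext connections.
  static SSLFactory sslFactoryIfNeeded(ClusterMapConfig clusterMapConfig,
      VerifiableProperties verifiableProperties) throws Exception {
    return clusterMapConfig.clusterMapSslEnabledDatacenters.isEmpty()
        ? null
        : SSLFactory.getNewInstance(new SSLConfig(verifiableProperties));
  }
}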