use of voldemort.server.rebalance.RebalancerState in project voldemort by voldemort.
the class VoldemortAdminTool method main.
@SuppressWarnings("unchecked")
public static void main(String[] args) throws Exception {
OptionParser parser = new OptionParser();
// This is a generic argument that should be eventually supported by all
// RW operations.
// If you provide this argument the operation will be executed in a "batch"
// mode, which is useful for scripting.
// Otherwise you will be presented with a summary of changes and with a
// Y/N prompt.
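// For example (hypothetical URL and file paths, shown for illustration only):
//   --url tcp://localhost:6666 --set-metadata stores.xml --set-metadata-value new-stores.xml --auto
// applies the change without prompting, while the same command without
// --auto first prints the pending change and asks for confirmation.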
parser.accepts("auto", "[OPTIONAL] enable auto/batch mode");
parser.accepts("help", "print help information");
parser.accepts("url", "[REQUIRED] bootstrap URL").withRequiredArg().describedAs("bootstrap-url").ofType(String.class);
parser.accepts("node", "node id").withRequiredArg().describedAs("node-id").ofType(Integer.class);
parser.accepts("delete-partitions", "Delete partitions").withRequiredArg().describedAs("partition-ids").withValuesSeparatedBy(',').ofType(Integer.class);
parser.accepts("restore", "Restore from replication [ Optional parallelism param - Default - 5 ]").withOptionalArg().describedAs("parallelism").ofType(Integer.class);
parser.accepts("ascii", "Fetch keys as ASCII");
parser.accepts("fetch-keys", "Fetch keys").withOptionalArg().describedAs("partition-ids").withValuesSeparatedBy(',').ofType(Integer.class);
parser.accepts("fetch-entries", "Fetch full entries").withOptionalArg().describedAs("partition-ids").withValuesSeparatedBy(',').ofType(Integer.class);
parser.accepts("outdir", "Output directory").withRequiredArg().describedAs("output-directory").ofType(String.class);
parser.accepts("nodes", "list of nodes").withRequiredArg().describedAs("nodes").withValuesSeparatedBy(',').ofType(Integer.class);
parser.accepts("stores", "Store names").withRequiredArg().describedAs("store-names").withValuesSeparatedBy(',').ofType(String.class);
parser.accepts("store", "Store name for querying keys").withRequiredArg().describedAs("store-name").ofType(String.class);
parser.accepts("add-stores", "Add stores in this stores.xml").withRequiredArg().describedAs("stores.xml containing just the new stores").ofType(String.class);
parser.accepts("delete-store", "Delete store").withRequiredArg().describedAs("store-name").ofType(String.class);
parser.accepts("update-entries", "Insert or update entries").withRequiredArg().describedAs("input-directory").ofType(String.class);
parser.accepts("get-metadata", "retreive metadata information " + MetadataStore.METADATA_KEYS).withOptionalArg().describedAs("metadata-key").ofType(String.class);
parser.accepts("check-metadata", "retreive metadata information from all nodes and checks if they are consistent across [ " + MetadataStore.CLUSTER_KEY + " | " + MetadataStore.STORES_KEY + " | " + MetadataStore.REBALANCING_SOURCE_CLUSTER_XML + " | " + MetadataStore.SERVER_STATE_KEY + " ]").withRequiredArg().describedAs("metadata-key").ofType(String.class);
parser.accepts("ro-metadata", "retrieve version information [current | max | storage-format]").withRequiredArg().describedAs("type").ofType(String.class);
parser.accepts("truncate", "truncate a store").withRequiredArg().describedAs("store-name").ofType(String.class);
parser.accepts("set-metadata", "Forceful setting of metadata [ " + MetadataStore.CLUSTER_KEY + " | " + MetadataStore.STORES_KEY + " | " + MetadataStore.SERVER_STATE_KEY + " | " + MetadataStore.REBALANCING_SOURCE_CLUSTER_XML + " | " + MetadataStore.REBALANCING_STEAL_INFO + " ]").withRequiredArg().describedAs("metadata-key").ofType(String.class);
parser.accepts("set-metadata-value", "The value for the set-metadata [ " + MetadataStore.CLUSTER_KEY + " | " + MetadataStore.STORES_KEY + ", " + MetadataStore.REBALANCING_SOURCE_CLUSTER_XML + ", " + MetadataStore.REBALANCING_STEAL_INFO + " ] - xml file location, [ " + MetadataStore.SERVER_STATE_KEY + " ] - " + MetadataStore.VoldemortState.NORMAL_SERVER + "," + MetadataStore.VoldemortState.REBALANCING_MASTER_SERVER + "," + MetadataStore.VoldemortState.OFFLINE_SERVER).withRequiredArg().describedAs("metadata-value").ofType(String.class);
parser.accepts("update-store-defs", "Update the [" + MetadataStore.STORES_KEY + "] with the new value for only the specified stores in update-value.");
parser.accepts("update-store-value", "The value for update-store-defs ] - xml file location").withRequiredArg().describedAs("stores-xml-value").ofType(String.class);
parser.accepts("set-metadata-pair", "Atomic setting of metadata pair [ " + MetadataStore.CLUSTER_KEY + " & " + MetadataStore.STORES_KEY + " ]").withRequiredArg().describedAs("metadata-keys-pair").withValuesSeparatedBy(',').ofType(String.class);
parser.accepts("set-metadata-value-pair", "The value for the set-metadata pair [ " + MetadataStore.CLUSTER_KEY + " & " + MetadataStore.STORES_KEY + " ]").withRequiredArg().describedAs("metadata-value-pair").withValuesSeparatedBy(',').ofType(String.class);
parser.accepts("clear-rebalancing-metadata", "Remove the metadata related to rebalancing");
parser.accepts("async", "a) Get a list of async job ids [get] b) Stop async job ids [stop] ").withRequiredArg().describedAs("op-type").ofType(String.class);
parser.accepts("async-id", "Comma separated list of async ids to stop").withOptionalArg().describedAs("job-ids").withValuesSeparatedBy(',').ofType(Integer.class);
parser.accepts("repair-job", "Clean after rebalancing is done");
parser.accepts("prune-job", "Prune versioned put data, after rebalancing");
parser.accepts("purge-slops", "Purge the slop stores selectively, based on nodeId or zoneId");
parser.accepts("native-backup", "Perform a native backup").withRequiredArg().describedAs("store-name").ofType(String.class);
parser.accepts("backup-dir").withRequiredArg().describedAs("backup-directory").ofType(String.class);
parser.accepts("backup-timeout").withRequiredArg().describedAs("minutes to wait for backup completion, default 30 mins").ofType(Integer.class);
parser.accepts("backup-verify", "If provided, backup will also verify checksum (with extra overhead)");
parser.accepts("backup-incremental", "Perform an incremental backup for point-in-time recovery." + " By default backup has latest consistent snapshot.");
parser.accepts("zone", "zone id").withRequiredArg().describedAs("zone-id").ofType(Integer.class);
parser.accepts("rollback", "rollback a store").withRequiredArg().describedAs("store-name").ofType(String.class);
parser.accepts("version", "Push version of store to rollback to").withRequiredArg().describedAs("version").ofType(Long.class);
parser.accepts("verify-metadata-version", "Verify the version of Metadata on all the cluster nodes");
parser.accepts("synchronize-metadata-version", "Synchronize the metadata versions across all the nodes.");
parser.accepts("reserve-memory", "Memory in MB to reserve for the store").withRequiredArg().describedAs("size-in-mb").ofType(Long.class);
parser.accepts("query-key", "Get values of a key on specific node").withRequiredArg().describedAs("query-key").ofType(String.class);
parser.accepts("query-key-format", "Format of the query key. Can be one of [hex|readable]").withRequiredArg().describedAs("key-format").ofType(String.class);
parser.accepts("show-routing-plan", "Routing plan of the specified keys").withRequiredArg().describedAs("keys-to-be-routed").withValuesSeparatedBy(',').ofType(String.class);
parser.accepts("mirror-from-url", "Cluster url to mirror data from").withRequiredArg().describedAs("mirror-cluster-bootstrap-url").ofType(String.class);
parser.accepts("mirror-node", "Node id in the mirror cluster to mirror from").withRequiredArg().describedAs("id-of-mirror-node").ofType(Integer.class);
parser.accepts("fetch-orphaned", "Fetch any orphaned keys/entries in the node");
parser.accepts("set-quota", "Enforce some quota on the servers").withRequiredArg().describedAs("quota-type").ofType(String.class);
parser.accepts("quota-value", "Value of the quota enforced on the servers").withRequiredArg().describedAs("quota-value").ofType(String.class);
parser.accepts("unset-quota", "Remove some quota already enforced on the servers").withRequiredArg().describedAs("quota-type").ofType(String.class);
// TODO add a way to retrieve all quotas for a given store.
parser.accepts("get-quota", "Retrieve some quota already enforced on the servers").withRequiredArg().describedAs("quota-type").ofType(String.class);
OptionSet options = parser.parse(args);
if (options.has("help")) {
printHelp(System.out, parser);
System.exit(0);
}
Set<String> missing = CmdUtils.missing(options, "url", "node");
if (missing.size() > 0) {
// Check whether only "node" is missing for this set of
// options; all these can live without explicit node ids
if (!(missing.equals(ImmutableSet.of("node")) && (options.has("add-stores") || options.has("delete-store") || options.has("ro-metadata") || options.has("set-metadata") || options.has("update-store-defs") || options.has("set-metadata-pair") || options.has("get-metadata") || options.has("check-metadata") || options.has("truncate") || options.has("clear-rebalancing-metadata") || options.has("async") || options.has("native-backup") || options.has("rollback") || options.has("verify-metadata-version") || options.has("reserve-memory") || options.has("purge-slops") || options.has("show-routing-plan") || options.has("query-key") || options.has("set-quota") || options.has("unset-quota") || options.has("get-quota") || options.has("synchronize-metadata-version")))) {
System.err.println("Missing required arguments: " + Joiner.on(", ").join(missing));
printHelp(System.err, parser);
System.exit(1);
}
}
try {
String url = (String) options.valueOf("url");
Integer nodeId = CmdUtils.valueOf(options, "node", -1);
int parallelism = CmdUtils.valueOf(options, "restore", 5);
Integer zoneId = CmdUtils.valueOf(options, "zone", -1);
AdminClient adminClient = new AdminClient(url);
List<String> storeNames = null;
if (options.has("store") && options.has("stores")) {
throw new VoldemortException("Must not specify both --stores and --store options");
} else if (options.has("stores")) {
storeNames = (List<String>) options.valuesOf("stores");
} else if (options.has("store")) {
storeNames = Arrays.asList((String) options.valueOf("store"));
}
String outputDir = null;
if (options.has("outdir")) {
outputDir = (String) options.valueOf("outdir");
}
if (options.has("add-stores")) {
String storesXml = (String) options.valueOf("add-stores");
executeAddStores(adminClient, storesXml, nodeId);
} else if (options.has("async")) {
String asyncKey = (String) options.valueOf("async");
List<Integer> asyncIds = null;
if (options.hasArgument("async-id"))
asyncIds = (List<Integer>) options.valuesOf("async-id");
executeAsync(nodeId, adminClient, asyncKey, asyncIds);
} else if (options.has("check-metadata")) {
String metadataKey = (String) options.valueOf("check-metadata");
executeCheckMetadata(adminClient, metadataKey);
} else if (options.has("delete-partitions")) {
System.out.println("Starting delete-partitions");
List<Integer> partitionIdList = (List<Integer>) options.valuesOf("delete-partitions");
executeDeletePartitions(nodeId, adminClient, partitionIdList, storeNames);
System.out.println("Finished delete-partitions");
} else if (options.has("ro-metadata")) {
String type = (String) options.valueOf("ro-metadata");
executeROMetadata(nodeId, adminClient, storeNames, type);
} else if (options.has("reserve-memory")) {
if (!options.has("stores")) {
Utils.croak("Specify the list of stores to reserve memory");
}
long reserveMB = (Long) options.valueOf("reserve-memory");
adminClient.quotaMgmtOps.reserveMemory(nodeId, storeNames, reserveMB);
} else if (options.has("get-metadata")) {
String metadataKey = ALL_METADATA;
if (options.hasArgument("get-metadata")) {
metadataKey = (String) options.valueOf("get-metadata");
}
executeGetMetadata(nodeId, adminClient, metadataKey, outputDir);
} else if (options.has("mirror-from-url")) {
if (!options.has("mirror-node")) {
Utils.croak("Specify the mirror node to fetch from");
}
if (nodeId == -1) {
System.err.println("Cannot run mirroring without node id");
System.exit(1);
}
Integer mirrorNodeId = CmdUtils.valueOf(options, "mirror-node", -1);
if (mirrorNodeId == -1) {
System.err.println("Cannot run mirroring without mirror node id");
System.exit(1);
}
adminClient.restoreOps.mirrorData(nodeId, mirrorNodeId, (String) options.valueOf("mirror-from-url"), storeNames);
} else if (options.has("clear-rebalancing-metadata")) {
executeClearRebalancing(nodeId, adminClient);
} else if (options.has("prune-job")) {
if (storeNames == null) {
Utils.croak("Must specify --stores to run the prune job");
}
executePruneJob(nodeId, adminClient, storeNames);
} else if (options.has("fetch-keys")) {
boolean useAscii = options.has("ascii");
System.out.println("Starting fetch keys");
List<Integer> partitionIdList = null;
if (options.hasArgument("fetch-keys"))
partitionIdList = (List<Integer>) options.valuesOf("fetch-keys");
executeFetchKeys(nodeId, adminClient, partitionIdList, outputDir, storeNames, useAscii, options.has("fetch-orphaned"));
} else if (options.has("repair-job")) {
executeRepairJob(nodeId, adminClient);
} else if (options.has("set-metadata-pair")) {
List<String> metadataKeyPair = (List<String>) options.valuesOf("set-metadata-pair");
if (metadataKeyPair.size() != 2) {
throw new VoldemortException("Missing set-metadata-pair keys (only two keys are needed and allowed)");
}
if (!options.has("set-metadata-value-pair")) {
throw new VoldemortException("Missing set-metadata-value-pair");
} else {
List<String> metadataValuePair = (List<String>) options.valuesOf("set-metadata-value-pair");
if (metadataValuePair.size() != 2) {
throw new VoldemortException("Missing set-metadata--value-pair values (only two values are needed and allowed)");
}
if (metadataKeyPair.contains(MetadataStore.CLUSTER_KEY) && metadataKeyPair.contains(MetadataStore.STORES_KEY)) {
ClusterMapper clusterMapper = new ClusterMapper();
StoreDefinitionsMapper storeDefsMapper = new StoreDefinitionsMapper();
// original metadata
Integer nodeIdToGetStoreXMLFrom = nodeId;
if (nodeId < 0) {
Collection<Node> nodes = adminClient.getAdminClientCluster().getNodes();
if (nodes.isEmpty()) {
throw new VoldemortException("No nodes in this cluster");
} else {
nodeIdToGetStoreXMLFrom = nodes.iterator().next().getId();
}
}
Versioned<String> storesXML = adminClient.metadataMgmtOps.getRemoteMetadata(nodeIdToGetStoreXMLFrom, MetadataStore.STORES_KEY);
List<StoreDefinition> oldStoreDefs = storeDefsMapper.readStoreList(new StringReader(storesXML.getValue()));
String clusterXMLPath = metadataValuePair.get(metadataKeyPair.indexOf(MetadataStore.CLUSTER_KEY));
clusterXMLPath = clusterXMLPath.replace("~", System.getProperty("user.home"));
if (!Utils.isReadableFile(clusterXMLPath))
throw new VoldemortException("Cluster xml file path incorrect");
Cluster cluster = clusterMapper.readCluster(new File(clusterXMLPath));
String storesXMLPath = metadataValuePair.get(metadataKeyPair.indexOf(MetadataStore.STORES_KEY));
storesXMLPath = storesXMLPath.replace("~", System.getProperty("user.home"));
if (!Utils.isReadableFile(storesXMLPath))
throw new VoldemortException("Stores definition xml file path incorrect");
List<StoreDefinition> newStoreDefs = storeDefsMapper.readStoreList(new File(storesXMLPath));
StoreDefinitionUtils.validateSchemasAsNeeded(newStoreDefs);
executeSetMetadataPair(nodeId, adminClient, MetadataStore.CLUSTER_KEY, clusterMapper.writeCluster(cluster), MetadataStore.STORES_KEY, storeDefsMapper.writeStoreList(newStoreDefs));
executeUpdateMetadataVersionsOnStores(adminClient, oldStoreDefs, newStoreDefs);
} else {
throw new VoldemortException("set-metadata-pair keys should be <cluster.xml, stores.xml>");
}
}
} else if (options.has("set-metadata")) {
String metadataKey = (String) options.valueOf("set-metadata");
if (!options.has("set-metadata-value")) {
throw new VoldemortException("Missing set-metadata-value");
} else {
String metadataValue = (String) options.valueOf("set-metadata-value");
if (metadataKey.compareTo(MetadataStore.CLUSTER_KEY) == 0 || metadataKey.compareTo(MetadataStore.REBALANCING_SOURCE_CLUSTER_XML) == 0) {
if (!Utils.isReadableFile(metadataValue))
throw new VoldemortException("Cluster xml file path incorrect");
ClusterMapper mapper = new ClusterMapper();
Cluster newCluster = mapper.readCluster(new File(metadataValue));
if (options.has("auto")) {
executeSetMetadata(nodeId, adminClient, metadataKey, mapper.writeCluster(newCluster));
} else {
if (confirmMetadataUpdate(nodeId, adminClient, mapper.writeCluster(newCluster))) {
executeSetMetadata(nodeId, adminClient, metadataKey, mapper.writeCluster(newCluster));
} else {
System.out.println("New metadata has not been set");
}
}
} else if (metadataKey.compareTo(MetadataStore.SERVER_STATE_KEY) == 0) {
VoldemortState newState = VoldemortState.valueOf(metadataValue);
executeSetMetadata(nodeId, adminClient, MetadataStore.SERVER_STATE_KEY, newState.toString());
} else if (metadataKey.compareTo(MetadataStore.STORES_KEY) == 0) {
if (!Utils.isReadableFile(metadataValue))
throw new VoldemortException("Stores definition xml file path incorrect");
StoreDefinitionsMapper mapper = new StoreDefinitionsMapper();
List<StoreDefinition> newStoreDefs = mapper.readStoreList(new File(metadataValue));
StoreDefinitionUtils.validateSchemasAsNeeded(newStoreDefs);
// original metadata
Integer nodeIdToGetStoreXMLFrom = nodeId;
if (nodeId < 0) {
Collection<Node> nodes = adminClient.getAdminClientCluster().getNodes();
if (nodes.isEmpty()) {
throw new VoldemortException("No nodes in this cluster");
} else {
nodeIdToGetStoreXMLFrom = nodes.iterator().next().getId();
}
}
Versioned<String> storesXML = adminClient.metadataMgmtOps.getRemoteMetadata(nodeIdToGetStoreXMLFrom, MetadataStore.STORES_KEY);
List<StoreDefinition> oldStoreDefs = mapper.readStoreList(new StringReader(storesXML.getValue()));
if (options.has("auto")) {
executeSetMetadata(nodeId, adminClient, MetadataStore.STORES_KEY, mapper.writeStoreList(newStoreDefs));
executeUpdateMetadataVersionsOnStores(adminClient, oldStoreDefs, newStoreDefs);
} else {
if (confirmMetadataUpdate(nodeId, adminClient, storesXML.getValue())) {
executeSetMetadata(nodeId, adminClient, MetadataStore.STORES_KEY, mapper.writeStoreList(newStoreDefs));
if (nodeId >= 0) {
System.err.println("WARNING: Metadata version update of stores goes to all servers, " + "although this set-metadata oprations only goes to node " + nodeId);
}
executeUpdateMetadataVersionsOnStores(adminClient, oldStoreDefs, newStoreDefs);
} else {
System.out.println("New metadata has not been set");
}
}
} else if (metadataKey.compareTo(MetadataStore.REBALANCING_STEAL_INFO) == 0) {
if (!Utils.isReadableFile(metadataValue))
throw new VoldemortException("Rebalancing steal info file path incorrect");
String rebalancingStealInfoJsonString = FileUtils.readFileToString(new File(metadataValue));
RebalancerState state = RebalancerState.create(rebalancingStealInfoJsonString);
executeSetMetadata(nodeId, adminClient, MetadataStore.REBALANCING_STEAL_INFO, state.toJsonString());
} else {
throw new VoldemortException("Incorrect metadata key");
}
}
} else if (options.has("update-store-defs")) {
if (!options.has("update-store-value")) {
throw new VoldemortException("Missing update-store-value for update-store-defs");
} else {
String storesXmlValue = (String) options.valueOf("update-store-value");
if (!Utils.isReadableFile(storesXmlValue))
throw new VoldemortException("Stores definition xml file path incorrect");
StoreDefinitionsMapper mapper = new StoreDefinitionsMapper();
List<StoreDefinition> newStoreDefs = mapper.readStoreList(new File(storesXmlValue));
StoreDefinitionUtils.validateSchemasAsNeeded(newStoreDefs);
if (options.has("auto")) {
executeUpdateStoreDefinitions(nodeId, adminClient, newStoreDefs);
} else {
if (confirmMetadataUpdate(nodeId, adminClient, newStoreDefs)) {
executeUpdateStoreDefinitions(nodeId, adminClient, newStoreDefs);
if (nodeId >= 0) {
System.err.println("WARNING: Metadata version update of stores goes to all servers, " + "although this set-metadata oprations only goes to node " + nodeId);
}
} else {
System.out.println("New metadata has not been set");
}
}
System.out.println("The store definitions have been successfully updated.");
}
} else if (options.has("native-backup")) {
if (!options.has("backup-dir")) {
Utils.croak("A backup directory must be specified with backup-dir option");
}
String backupDir = (String) options.valueOf("backup-dir");
String storeName = (String) options.valueOf("native-backup");
int timeout = CmdUtils.valueOf(options, "backup-timeout", 30);
adminClient.storeMntOps.nativeBackup(nodeId, storeName, backupDir, timeout, options.has("backup-verify"), options.has("backup-incremental"));
} else if (options.has("rollback")) {
if (!options.has("version")) {
Utils.croak("A read-only push version must be specified with rollback option");
}
String storeName = (String) options.valueOf("rollback");
long pushVersion = (Long) options.valueOf("version");
executeRollback(nodeId, storeName, pushVersion, adminClient);
} else if (options.has("query-key")) {
String key = (String) options.valueOf("query-key");
String keyFormat = (String) options.valueOf("query-key-format");
if (keyFormat == null) {
keyFormat = "hex";
}
if (!keyFormat.equals("hex") && !keyFormat.equals("readable")) {
throw new VoldemortException("--query-key-format must be hex or readable");
}
executeQueryKey(nodeId, adminClient, storeNames, key, keyFormat);
} else if (options.has("restore")) {
if (nodeId == -1) {
System.err.println("Cannot run restore without node id");
System.exit(1);
}
System.out.println("Starting restore");
adminClient.restoreOps.restoreDataFromReplications(nodeId, parallelism, zoneId);
System.out.println("Finished restore");
} else if (options.has("delete-store")) {
String storeName = (String) options.valueOf("delete-store");
executeDeleteStore(adminClient, storeName, nodeId);
} else if (options.has("truncate")) {
String storeName = (String) options.valueOf("truncate");
executeTruncateStore(nodeId, adminClient, storeName);
} else if (options.has("update-entries")) {
String inputDir = (String) options.valueOf("update-entries");
executeUpdateEntries(nodeId, adminClient, storeNames, inputDir);
} else if (options.has("fetch-entries")) {
boolean useAscii = options.has("ascii");
System.out.println("Starting fetch entries");
List<Integer> partitionIdList = null;
if (options.hasArgument("fetch-entries"))
partitionIdList = (List<Integer>) options.valuesOf("fetch-entries");
executeFetchEntries(nodeId, adminClient, partitionIdList, outputDir, storeNames, useAscii, options.has("fetch-orphaned"));
} else if (options.has("purge-slops")) {
List<Integer> nodesToPurge = null;
if (options.has("nodes")) {
nodesToPurge = (List<Integer>) options.valuesOf("nodes");
}
if (nodesToPurge == null && zoneId == -1 && storeNames == null) {
Utils.croak("Must specify atleast one of --nodes, --zone-id or --stores with --purge-slops");
}
executePurgeSlops(adminClient, nodesToPurge, zoneId, storeNames);
} else if (options.has("synchronize-metadata-version")) {
synchronizeMetadataVersion(adminClient);
} else if (options.has("verify-metadata-version")) {
checkMetadataVersion(adminClient);
} else if (options.has("show-routing-plan")) {
if (!options.has("store")) {
Utils.croak("Must specify the store the keys belong to using --store ");
}
String storeName = (String) options.valueOf("store");
List<String> keysToRoute = (List<String>) options.valuesOf("show-routing-plan");
if (keysToRoute == null || keysToRoute.size() == 0) {
Utils.croak("Must specify comma separated keys list in hex format");
}
executeShowRoutingPlan(adminClient, storeName, keysToRoute);
} else if (options.has("set-quota")) {
String quotaType = (String) options.valueOf("set-quota");
Set<String> validQuotaTypes = QuotaUtils.validQuotaTypes();
if (!validQuotaTypes.contains(quotaType)) {
Utils.croak("Specify a valid quota type from :" + validQuotaTypes);
}
if (!options.has("store")) {
Utils.croak("Must specify the store to enforce the quota on. ");
}
if (!options.has("quota-value")) {
Utils.croak("Must specify the value of the quota being set");
}
String storeName = (String) options.valueOf("store");
String quotaValue = (String) options.valueOf("quota-value");
executeSetQuota(adminClient, storeName, quotaType, quotaValue);
} else if (options.has("unset-quota")) {
String quotaType = (String) options.valueOf("unset-quota");
Set<String> validQuotaTypes = QuotaUtils.validQuotaTypes();
if (!validQuotaTypes.contains(quotaType)) {
Utils.croak("Specify a valid quota type from :" + validQuotaTypes);
}
if (!options.has("store")) {
Utils.croak("Must specify the store to enforce the quota on. ");
}
String storeName = (String) options.valueOf("store");
executeUnsetQuota(adminClient, storeName, quotaType);
} else if (options.has("get-quota")) {
String quotaType = (String) options.valueOf("get-quota");
Set<String> validQuotaTypes = QuotaUtils.validQuotaTypes();
if (!validQuotaTypes.contains(quotaType)) {
Utils.croak("Specify a valid quota type from :" + validQuotaTypes);
}
if (!options.has("store")) {
Utils.croak("Must specify the store to enforce the quota on. ");
}
String storeName = (String) options.valueOf("store");
executeGetQuota(adminClient, storeName, quotaType);
} else {
Utils.croak("At least one of (delete-partitions, restore, add-node, fetch-entries, " + "fetch-keys, add-stores, delete-store, update-entries, get-metadata, ro-metadata, " + "set-metadata, check-metadata, clear-rebalancing-metadata, async, " + "repair-job, native-backup, rollback, reserve-memory, mirror-url," + " verify-metadata-version, prune-job, purge-slops) must be specified");
}
} catch (Exception e) {
e.printStackTrace();
Utils.croak(e.getMessage());
}
}
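The command-line handling above follows the standard jopt-simple pattern: declare each option with accepts(), chain withRequiredArg() or withOptionalArg() plus describedAs() and ofType() for typed arguments, then query the parsed OptionSet. A minimal, self-contained sketch of the same pattern (the class name and options here are illustrative, not part of the tool):

import java.util.List;
import joptsimple.OptionParser;
import joptsimple.OptionSet;

public class OptionParsingSketch {
    @SuppressWarnings("unchecked")
    public static void main(String[] args) {
        OptionParser parser = new OptionParser();
        parser.accepts("url", "bootstrap URL").withRequiredArg().describedAs("bootstrap-url").ofType(String.class);
        parser.accepts("nodes", "node ids").withRequiredArg().withValuesSeparatedBy(',').ofType(Integer.class);
        parser.accepts("auto", "enable batch mode");
        OptionSet options = parser.parse(args);
        // Single typed value, comma-separated list, and boolean flag,
        // mirroring the three kinds of options the tool declares above
        String url = (String) options.valueOf("url");
        List<Integer> nodes = (List<Integer>) options.valuesOf("nodes");
        boolean batch = options.has("auto");
        System.out.println("url=" + url + " nodes=" + nodes + " batch=" + batch);
    }
}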
use of voldemort.server.rebalance.RebalancerState in project voldemort by voldemort.
the class VoldemortAdminTool method executeClearRebalancing.
private static void executeClearRebalancing(int nodeId, AdminClient adminClient) {
// FIXME: add check for server state, must not be in offline state
System.out.println("Setting " + MetadataStore.SERVER_STATE_KEY + " to " + MetadataStore.VoldemortState.NORMAL_SERVER);
executeSetMetadata(nodeId, adminClient, MetadataStore.SERVER_STATE_KEY, MetadataStore.VoldemortState.NORMAL_SERVER.toString());
RebalancerState state = RebalancerState.create("[]");
System.out.println("Cleaning up " + MetadataStore.REBALANCING_STEAL_INFO + " to " + state.toJsonString());
executeSetMetadata(nodeId, adminClient, MetadataStore.REBALANCING_STEAL_INFO, state.toJsonString());
System.out.println("Cleaning up " + MetadataStore.REBALANCING_SOURCE_CLUSTER_XML + " to empty string");
executeSetMetadata(nodeId, adminClient, MetadataStore.REBALANCING_SOURCE_CLUSTER_XML, "");
}
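A hypothetical invocation of this operation (the bootstrap URL and node id are illustrative): --url tcp://localhost:6666 --node 0 --clear-rebalancing-metadata. Note the FIXME above: the server should not be in the offline state when this runs.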
use of voldemort.server.rebalance.RebalancerState in project voldemort by voldemort.
the class RedirectingStoreTest method setUp.
@Before
public void setUp() throws IOException, InterruptedException {
currentCluster = ServerTestUtils.getLocalCluster(3, new int[][] { { 0, 1 }, { 2, 3 }, {} });
targetCluster = UpdateClusterUtils.createUpdatedCluster(currentCluster, 2, Arrays.asList(0));
this.primaryPartitionsMoved = Lists.newArrayList(0);
this.secondaryPartitionsMoved = Lists.newArrayList(2, 3);
this.storeDef = new StoreDefinitionBuilder().setName("test").setType(BdbStorageConfiguration.TYPE_NAME).setKeySerializer(new SerializerDefinition("string")).setValueSerializer(new SerializerDefinition("string")).setRoutingPolicy(RoutingTier.CLIENT).setRoutingStrategyType(RoutingStrategyType.CONSISTENT_STRATEGY).setReplicationFactor(2).setPreferredReads(1).setRequiredReads(1).setPreferredWrites(1).setRequiredWrites(1).build();
File tempStoreXml = new File(TestUtils.createTempDir(), "stores.xml");
FileUtils.writeStringToFile(tempStoreXml, new StoreDefinitionsMapper().writeStoreList(Lists.newArrayList(storeDef)));
this.servers = new VoldemortServer[3];
for (int nodeId = 0; nodeId < 3; nodeId++) {
this.servers[nodeId] = startServer(nodeId, tempStoreXml.getAbsolutePath(), currentCluster);
}
// Generate random key-value pairs and load them through a store client for this unit test
HashMap<ByteArray, byte[]> entrySet = ServerTestUtils.createRandomKeyValuePairs(100);
SocketStoreClientFactory factory = new SocketStoreClientFactory(new ClientConfig().setBootstrapUrls(Lists.newArrayList("tcp://" + currentCluster.getNodeById(0).getHost() + ":" + currentCluster.getNodeById(0).getSocketPort())));
StoreClient<Object, Object> storeClient = factory.getStoreClient("test");
this.primaryEntriesMoved = Maps.newHashMap();
this.secondaryEntriesMoved = Maps.newHashMap();
this.proxyPutTestPrimaryEntries = Maps.newHashMap();
this.proxyPutTestSecondaryEntries = Maps.newHashMap();
RoutingStrategy strategy = new RoutingStrategyFactory().updateRoutingStrategy(storeDef, currentCluster);
for (Entry<ByteArray, byte[]> entry : entrySet.entrySet()) {
storeClient.put(new String(entry.getKey().get()), new String(entry.getValue()));
List<Integer> pList = strategy.getPartitionList(entry.getKey().get());
if (primaryPartitionsMoved.contains(pList.get(0))) {
primaryEntriesMoved.put(entry.getKey(), entry.getValue());
} else if (secondaryPartitionsMoved.contains(pList.get(0))) {
secondaryEntriesMoved.put(entry.getKey(), entry.getValue());
}
}
// Sleep a while for the queries to go through...
// Hope the 'God of perfect timing' is on our side
Thread.sleep(500);
// steal a few primary key-value pairs for testing proxy put logic
int cnt = 0;
for (Entry<ByteArray, byte[]> entry : primaryEntriesMoved.entrySet()) {
if (cnt > 3)
break;
this.proxyPutTestPrimaryEntries.put(entry.getKey(), entry.getValue());
cnt++;
}
for (ByteArray key : this.proxyPutTestPrimaryEntries.keySet()) {
this.primaryEntriesMoved.remove(key);
}
assertTrue("Not enough primary entries", primaryEntriesMoved.size() > 1);
// steal a few secondary key-value pairs for testing proxy put logic
cnt = 0;
for (Entry<ByteArray, byte[]> entry : secondaryEntriesMoved.entrySet()) {
if (cnt > 3)
break;
this.proxyPutTestSecondaryEntries.put(entry.getKey(), entry.getValue());
cnt++;
}
for (ByteArray key : this.proxyPutTestSecondaryEntries.keySet()) {
this.secondaryEntriesMoved.remove(key);
}
assertTrue("Not enough secondary entries", primaryEntriesMoved.size() > 1);
RebalanceBatchPlan rebalanceBatchPlan = new RebalanceBatchPlan(currentCluster, targetCluster, Lists.newArrayList(storeDef));
List<RebalanceTaskInfo> plans = Lists.newArrayList(rebalanceBatchPlan.getBatchPlan());
// Set into rebalancing state
for (RebalanceTaskInfo partitionPlan : plans) {
servers[partitionPlan.getStealerId()].getMetadataStore().put(MetadataStore.SERVER_STATE_KEY, MetadataStore.VoldemortState.REBALANCING_MASTER_SERVER);
servers[partitionPlan.getStealerId()].getMetadataStore().put(MetadataStore.REBALANCING_STEAL_INFO, new RebalancerState(Lists.newArrayList(partitionPlan)));
servers[partitionPlan.getStealerId()].getMetadataStore().put(MetadataStore.REBALANCING_SOURCE_CLUSTER_XML, currentCluster);
// update original storedefs
servers[partitionPlan.getStealerId()].getMetadataStore().put(MetadataStore.REBALANCING_SOURCE_STORES_XML, Lists.newArrayList(storeDef));
}
// Update the cluster metadata on all three nodes
for (VoldemortServer server : servers) {
server.getMetadataStore().put(MetadataStore.CLUSTER_KEY, targetCluster);
}
}
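Both the admin tool and this test rely on RebalancerState's string round-trip: RebalancerState.create(json) parses a JSON array of task infos and toJsonString() serializes it back, which is what allows the state to be stored as a metadata value. A minimal sketch of the round-trip, assuming the import paths match the project layout:

import java.util.ArrayList;
import voldemort.client.rebalance.RebalanceTaskInfo;
import voldemort.server.rebalance.RebalancerState;

public class RebalancerStateRoundTrip {
    public static void main(String[] args) {
        // An empty state serializes to "[]", the same value executeClearRebalancing writes back
        RebalancerState empty = RebalancerState.create("[]");
        String json = empty.toJsonString();
        RebalancerState roundTripped = RebalancerState.create(json);
        // Prints true: the round-tripped state equals a freshly built empty state,
        // the same equality the tests above assert after rebalancing completes
        System.out.println(roundTripped.equals(new RebalancerState(new ArrayList<RebalanceTaskInfo>())));
    }
}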
use of voldemort.server.rebalance.RebalancerState in project voldemort by voldemort.
the class AdminRebalanceTest method testRebalanceNodeRW2.
@Test(timeout = 60000)
public void testRebalanceNodeRW2() throws IOException {
try {
startFourNodeRW();
// Generate random key-value pairs and load them through store clients for this unit test
HashMap<ByteArray, byte[]> entrySet = ServerTestUtils.createRandomKeyValuePairs(TEST_SIZE);
SocketStoreClientFactory factory = new SocketStoreClientFactory(new ClientConfig().setBootstrapUrls(Lists.newArrayList("tcp://" + currentCluster.getNodeById(0).getHost() + ":" + currentCluster.getNodeById(0).getSocketPort())));
StoreClient<Object, Object> storeClient1 = factory.getStoreClient("test"), storeClient2 = factory.getStoreClient("test2");
List<Integer> primaryPartitionsMoved = Lists.newArrayList(0);
List<Integer> secondaryPartitionsMoved = Lists.newArrayList(8, 9, 10, 11);
List<Integer> tertiaryPartitionsMoved = Lists.newArrayList(4, 5, 6, 7);
HashMap<ByteArray, byte[]> primaryEntriesMoved = Maps.newHashMap();
HashMap<ByteArray, byte[]> secondaryEntriesMoved = Maps.newHashMap();
HashMap<ByteArray, byte[]> tertiaryEntriesMoved = Maps.newHashMap();
RoutingStrategy strategy = new RoutingStrategyFactory().updateRoutingStrategy(storeDef2, currentCluster);
for (Entry<ByteArray, byte[]> entry : entrySet.entrySet()) {
storeClient1.put(new String(entry.getKey().get()), new String(entry.getValue()));
storeClient2.put(new String(entry.getKey().get()), new String(entry.getValue()));
List<Integer> pList = strategy.getPartitionList(entry.getKey().get());
if (primaryPartitionsMoved.contains(pList.get(0))) {
primaryEntriesMoved.put(entry.getKey(), entry.getValue());
} else if (secondaryPartitionsMoved.contains(pList.get(0))) {
secondaryEntriesMoved.put(entry.getKey(), entry.getValue());
} else if (tertiaryPartitionsMoved.contains(pList.get(0))) {
tertiaryEntriesMoved.put(entry.getKey(), entry.getValue());
}
}
// Set into rebalancing state
for (RebalanceTaskInfo partitionPlan : plans) {
getServer(partitionPlan.getStealerId()).getMetadataStore().put(MetadataStore.SERVER_STATE_KEY, MetadataStore.VoldemortState.REBALANCING_MASTER_SERVER);
getServer(partitionPlan.getStealerId()).getMetadataStore().put(MetadataStore.REBALANCING_STEAL_INFO, new RebalancerState(Lists.newArrayList(RebalanceTaskInfo.create(partitionPlan.toJsonString()))));
getServer(partitionPlan.getStealerId()).getMetadataStore().put(MetadataStore.REBALANCING_SOURCE_CLUSTER_XML, partitionPlan.getInitialCluster());
}
// Update the cluster metadata on all the nodes
for (VoldemortServer server : servers) {
server.getMetadataStore().put(MetadataStore.CLUSTER_KEY, finalCluster);
}
// Actually run it
try {
for (RebalanceTaskInfo currentPlan : plans) {
int asyncId = adminClient.rebalanceOps.rebalanceNode(currentPlan);
assertNotSame("Got a valid rebalanceAsyncId", -1, asyncId);
getAdminClient().rpcOps.waitForCompletion(currentPlan.getStealerId(), asyncId, 300, TimeUnit.SECONDS);
// Test that plan has been removed from the list
assertFalse(getServer(currentPlan.getStealerId()).getMetadataStore().getRebalancerState().getAll().contains(currentPlan));
}
} catch (Exception e) {
e.printStackTrace();
fail("Should not throw any exceptions");
}
Store<ByteArray, byte[], byte[]> storeTest0 = getStore(0, "test2");
Store<ByteArray, byte[], byte[]> storeTest1 = getStore(1, "test2");
Store<ByteArray, byte[], byte[]> storeTest3 = getStore(3, "test2");
Store<ByteArray, byte[], byte[]> storeTest00 = getStore(0, "test");
Store<ByteArray, byte[], byte[]> storeTest30 = getStore(3, "test");
// Primary
for (Entry<ByteArray, byte[]> entry : primaryEntriesMoved.entrySet()) {
// Test 2
// Present on Node 0
assertSame("entry should be present at store", 1, storeTest0.get(entry.getKey(), null).size());
assertEquals("entry value should match", new String(entry.getValue()), new String(storeTest0.get(entry.getKey(), null).get(0).getValue()));
// Present on Node 1
assertSame("entry should be present at store", 1, storeTest1.get(entry.getKey(), null).size());
assertEquals("entry value should match", new String(entry.getValue()), new String(storeTest1.get(entry.getKey(), null).get(0).getValue()));
// Present on Node 3
assertSame("entry should be present at store", 1, storeTest3.get(entry.getKey(), null).size());
assertEquals("entry value should match", new String(entry.getValue()), new String(storeTest3.get(entry.getKey(), null).get(0).getValue()));
// Test
// Present on Node 0
assertSame("entry should be present at store", 1, storeTest00.get(entry.getKey(), null).size());
assertEquals("entry value should match", new String(entry.getValue()), new String(storeTest00.get(entry.getKey(), null).get(0).getValue()));
// Present on Node 3
assertSame("entry should be present at store", 1, storeTest30.get(entry.getKey(), null).size());
assertEquals("entry value should match", new String(entry.getValue()), new String(storeTest30.get(entry.getKey(), null).get(0).getValue()));
}
// Secondary
for (Entry<ByteArray, byte[]> entry : secondaryEntriesMoved.entrySet()) {
// Test 2
// Present on Node 0
assertSame("entry should be present at store", 1, storeTest0.get(entry.getKey(), null).size());
assertEquals("entry value should match", new String(entry.getValue()), new String(storeTest0.get(entry.getKey(), null).get(0).getValue()));
// Present on Node 3
assertSame("entry should be present at store", 1, storeTest3.get(entry.getKey(), null).size());
assertEquals("entry value should match", new String(entry.getValue()), new String(storeTest3.get(entry.getKey(), null).get(0).getValue()));
// Test
// Present on Node 3
assertSame("entry should be present at store", 1, storeTest30.get(entry.getKey(), null).size());
assertEquals("entry value should match", new String(entry.getValue()), new String(storeTest30.get(entry.getKey(), null).get(0).getValue()));
}
// Tertiary
for (Entry<ByteArray, byte[]> entry : tertiaryEntriesMoved.entrySet()) {
// Test 2
// Present on Node 3
assertSame("entry should be present at store", 1, storeTest3.get(entry.getKey(), null).size());
assertEquals("entry value should match", new String(entry.getValue()), new String(storeTest3.get(entry.getKey(), null).get(0).getValue()));
}
// All servers should be back to normal state
for (VoldemortServer server : servers) {
assertEquals(server.getMetadataStore().getRebalancerState(), new RebalancerState(new ArrayList<RebalanceTaskInfo>()));
assertEquals(server.getMetadataStore().getServerStateUnlocked(), MetadataStore.VoldemortState.NORMAL_SERVER);
}
} finally {
shutDown();
}
}
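The presence checks above repeat the same pair of assertions for every store and node; they could be factored into a small helper along these lines (a sketch, not part of the original test):

private static void assertEntryPresent(Store<ByteArray, byte[], byte[]> store, ByteArray key, byte[] expectedValue) {
    // Exactly one version should exist, and its value should match the one written
    List<Versioned<byte[]>> versions = store.get(key, null);
    assertSame("entry should be present at store", 1, versions.size());
    assertEquals("entry value should match", new String(expectedValue), new String(versions.get(0).getValue()));
}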
use of voldemort.server.rebalance.RebalancerState in project voldemort by voldemort.
the class AdminRebalanceTest method testRebalanceNodeRW.
@Test(timeout = 60000)
public void testRebalanceNodeRW() throws IOException {
try {
startThreeNodeRW();
// Generate random key-value pairs and load them through store clients for this unit test
HashMap<ByteArray, byte[]> entrySet = ServerTestUtils.createRandomKeyValuePairs(TEST_SIZE);
SocketStoreClientFactory factory = new SocketStoreClientFactory(new ClientConfig().setBootstrapUrls(Lists.newArrayList("tcp://" + currentCluster.getNodeById(0).getHost() + ":" + currentCluster.getNodeById(0).getSocketPort())));
StoreClient<Object, Object> storeClient1 = factory.getStoreClient("test"), storeClient2 = factory.getStoreClient("test2");
List<Integer> primaryPartitionsMoved = Lists.newArrayList(0);
List<Integer> secondaryPartitionsMoved = Lists.newArrayList(4, 5, 6, 7);
HashMap<ByteArray, byte[]> primaryEntriesMoved = Maps.newHashMap();
HashMap<ByteArray, byte[]> secondaryEntriesMoved = Maps.newHashMap();
RoutingStrategy strategy = new RoutingStrategyFactory().updateRoutingStrategy(storeDef2, currentCluster);
for (Entry<ByteArray, byte[]> entry : entrySet.entrySet()) {
storeClient1.put(new String(entry.getKey().get()), new String(entry.getValue()));
storeClient2.put(new String(entry.getKey().get()), new String(entry.getValue()));
List<Integer> pList = strategy.getPartitionList(entry.getKey().get());
if (primaryPartitionsMoved.contains(pList.get(0))) {
primaryEntriesMoved.put(entry.getKey(), entry.getValue());
} else if (secondaryPartitionsMoved.contains(pList.get(0))) {
secondaryEntriesMoved.put(entry.getKey(), entry.getValue());
}
}
try {
adminClient.rebalanceOps.rebalanceNode(plans.get(0));
fail("Should have thrown an exception since not in rebalancing state");
} catch (VoldemortException e) {
}
// Set into rebalancing state
for (RebalanceTaskInfo partitionPlan : plans) {
getServer(partitionPlan.getStealerId()).getMetadataStore().put(MetadataStore.SERVER_STATE_KEY, MetadataStore.VoldemortState.REBALANCING_MASTER_SERVER);
getServer(partitionPlan.getStealerId()).getMetadataStore().put(MetadataStore.REBALANCING_SOURCE_CLUSTER_XML, partitionPlan.getInitialCluster());
}
try {
adminClient.rebalanceOps.rebalanceNode(plans.get(0));
fail("Should have thrown an exception since no steal info");
} catch (VoldemortException e) {
}
// Put a plan different from the plan that we actually want to
// execute
int incorrectStealerId = (plans.get(0).getStealerId() + 1) % 3;
getServer(plans.get(0).getStealerId()).getMetadataStore().put(MetadataStore.REBALANCING_STEAL_INFO, new RebalancerState(Lists.newArrayList(new RebalanceTaskInfo(incorrectStealerId, plans.get(0).getDonorId(), plans.get(0).getStoreToPartitionIds(), plans.get(0).getInitialCluster()))));
try {
adminClient.rebalanceOps.rebalanceNode(plans.get(0));
fail("Should have thrown an exception since the two plans eventhough have the same donor are different");
} catch (VoldemortException e) {
}
// Set the rebalance info on the stealer node
for (RebalanceTaskInfo partitionPlan : plans) {
getServer(partitionPlan.getStealerId()).getMetadataStore().put(MetadataStore.REBALANCING_STEAL_INFO, new RebalancerState(Lists.newArrayList(RebalanceTaskInfo.create(partitionPlan.toJsonString()))));
}
// Update the cluster metadata on all three nodes
for (VoldemortServer server : servers) {
server.getMetadataStore().put(MetadataStore.CLUSTER_KEY, finalCluster);
}
// Actually run it
try {
for (RebalanceTaskInfo currentPlan : plans) {
int asyncId = adminClient.rebalanceOps.rebalanceNode(currentPlan);
// A second rebalanceNode call on the same in-flight plan should fail with AlreadyRebalancingException
try {
adminClient.rebalanceOps.rebalanceNode(currentPlan);
fail("Should have thrown an exception since it is already rebalancing");
} catch (AlreadyRebalancingException e) {
}
assertNotSame("Got a valid rebalanceAsyncId", -1, asyncId);
getAdminClient().rpcOps.waitForCompletion(currentPlan.getStealerId(), asyncId, 300, TimeUnit.SECONDS);
// Test that plan has been removed from the list
assertFalse(getServer(currentPlan.getStealerId()).getMetadataStore().getRebalancerState().getAll().contains(currentPlan));
}
} catch (Exception e) {
e.printStackTrace();
fail("Should not throw any exceptions");
}
Store<ByteArray, byte[], byte[]> storeTest0 = getStore(0, "test2");
Store<ByteArray, byte[], byte[]> storeTest2 = getStore(2, "test2");
Store<ByteArray, byte[], byte[]> storeTest20 = getStore(2, "test");
// Primary is on Node 0 and not on Node 1
for (Entry<ByteArray, byte[]> entry : primaryEntriesMoved.entrySet()) {
assertSame("entry should be present at store", 1, storeTest0.get(entry.getKey(), null).size());
assertEquals("entry value should match", new String(entry.getValue()), new String(storeTest0.get(entry.getKey(), null).get(0).getValue()));
// Check in other store
assertSame("entry should be present in store test2 ", 1, storeTest20.get(entry.getKey(), null).size());
assertEquals("entry value should match", new String(entry.getValue()), new String(storeTest20.get(entry.getKey(), null).get(0).getValue()));
}
// Secondary is on Node 2 and not on Node 0
for (Entry<ByteArray, byte[]> entry : secondaryEntriesMoved.entrySet()) {
assertSame("entry should be present at store", 1, storeTest2.get(entry.getKey(), null).size());
assertEquals("entry value should match", new String(entry.getValue()), new String(storeTest2.get(entry.getKey(), null).get(0).getValue()));
}
// All servers should be back to normal state
for (VoldemortServer server : servers) {
assertEquals(server.getMetadataStore().getRebalancerState(), new RebalancerState(new ArrayList<RebalanceTaskInfo>()));
assertEquals(server.getMetadataStore().getServerStateUnlocked(), MetadataStore.VoldemortState.NORMAL_SERVER);
}
} finally {
shutDown();
}
}
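Both tests prime the stealer nodes with the same three metadata puts before running the rebalance; that setup could be shared as a helper (the method name is illustrative, not part of the original test class):

private void setRebalancingState(VoldemortServer server, RebalanceTaskInfo plan, Cluster sourceCluster) {
    // The three keys the rebalancer inspects: server state, steal info, and source cluster
    MetadataStore metadata = server.getMetadataStore();
    metadata.put(MetadataStore.SERVER_STATE_KEY, MetadataStore.VoldemortState.REBALANCING_MASTER_SERVER);
    metadata.put(MetadataStore.REBALANCING_STEAL_INFO, new RebalancerState(Lists.newArrayList(plan)));
    metadata.put(MetadataStore.REBALANCING_SOURCE_CLUSTER_XML, sourceCluster);
}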