Usage of voldemort.xml.ClusterMapper in project voldemort (by voldemort):
class AbstractStoreBuilderConfigurable, method configure.
/**
 * Initializes this configurable from the MapReduce job configuration: reads the
 * cluster topology and the (single) store definition from the embedded XML,
 * plus the chunk count and the build/push boolean flags.
 *
 * @param conf Hadoop job configuration carrying "cluster.xml", "stores.xml",
 *             the NUM_CHUNKS value and the build-and-push flags
 * @throws IllegalStateException if "stores.xml" does not contain exactly one
 *             store, or if the flag combination is inconsistent
 * @throws VoldemortException if NUM_CHUNKS was not set by the driver job
 */
public void configure(JobConf conf) {
    this.cluster = new ClusterMapper().readCluster(new StringReader(conf.get("cluster.xml")));
    List<StoreDefinition> storeDefs = new StoreDefinitionsMapper().readStoreList(new StringReader(conf.get("stores.xml")));
    // The condition rejects both zero and multiple stores, so report the
    // actual count instead of claiming "multiple" (which is wrong for zero).
    if (storeDefs.size() != 1)
        throw new IllegalStateException("Expected to find exactly one store, but found " + storeDefs.size() + "!");
    this.storeDef = storeDefs.get(0);
    this.numChunks = conf.getInt(NUM_CHUNKS, -1);
    if (this.numChunks < 1) {
        // NUM_CHUNKS is computed and injected by the driver job, so a missing
        // or non-positive value indicates a programming error, not user input.
        throw new VoldemortException(NUM_CHUNKS + " not specified in the MapReduce JobConf (should NEVER happen)");
    }
    this.saveKeys = conf.getBoolean(VoldemortBuildAndPushJob.SAVE_KEYS, true);
    this.reducerPerBucket = conf.getBoolean(VoldemortBuildAndPushJob.REDUCER_PER_BUCKET, true);
    this.buildPrimaryReplicasOnly = conf.getBoolean(VoldemortBuildAndPushJob.BUILD_PRIMARY_REPLICAS_ONLY, false);
    // buildPrimaryReplicasOnly only makes sense when keys are saved in the
    // output files, so reject the inconsistent combination up front.
    if (buildPrimaryReplicasOnly && !saveKeys) {
        throw new IllegalStateException(VoldemortBuildAndPushJob.BUILD_PRIMARY_REPLICAS_ONLY + " can only be true if " + VoldemortBuildAndPushJob.SAVE_KEYS + " is also true.");
    }
}
Usage of voldemort.xml.ClusterMapper in project voldemort (by voldemort):
class ImportTextDumpToBDB, method main.
/**
 * Imports a text dump (one entry per line) into a local BDB store folder.
 * Each line is parsed into a key/versioned-value pair, routed via the store's
 * routing strategy, and inserted only if the given node id owns a replica.
 * Progress (import/insert counts and speeds) is reported periodically.
 *
 * @param argv command-line arguments: --input, --bdb, --cluster-xml,
 *             --stores-xml, --node-id (validated by validateOptions)
 * @throws Exception on fatal setup errors (option parsing, XML parsing,
 *             BDB environment creation)
 */
public static void main(String[] argv) throws Exception {
    OptionParser parser = getParser();
    OptionSet options = parser.parse(argv);
    validateOptions(options);
    String inputPath = (String) options.valueOf("input");
    String storeBdbFolderPath = (String) options.valueOf("bdb");
    String clusterXmlPath = (String) options.valueOf("cluster-xml");
    String storesXmlPath = (String) options.valueOf("stores-xml");
    Integer nodeId = (Integer) options.valueOf("node-id");
    File input = new File(inputPath);
    List<File> dataFiles = new ArrayList<File>();
    if (input.isDirectory()) {
        File[] files = input.listFiles();
        if (files != null)
            Collections.addAll(dataFiles, files);
    } else if (input.isFile()) {
        dataFiles.add(input);
    } else {
        // Note: execution deliberately continues with an empty file list,
        // so the tool reports "Finished importing 0 entries" below.
        System.err.println(inputPath + " is not a file or directory");
    }
    File storeBdbFolder = new File(storeBdbFolderPath);
    // The BDB folder name doubles as the store name to look up in stores.xml.
    final String storeName = storeBdbFolder.getName();
    Cluster cluster = new ClusterMapper().readCluster(new File(clusterXmlPath));
    List<StoreDefinition> storeDefs = new StoreDefinitionsMapper().readStoreList(new File(storesXmlPath));
    StoreDefinition storeDef = null;
    for (StoreDefinition sd : storeDefs) {
        if (sd.getName() != null && sd.getName().equals(storeName)) {
            storeDef = sd;
        }
    }
    if (storeDef == null) {
        throw new VoldemortException("StoreNotfound: " + storeName);
    }
    RoutingStrategy routingStrategy = new RoutingStrategyFactory().updateRoutingStrategy(storeDef, cluster);
    Properties properties = new Properties();
    // NOTE(review): node.id is hard-coded to "0" while entries are filtered by
    // the --node-id option above — presumably only the BDB environment cares
    // about this value; confirm before changing.
    properties.put("node.id", "0");
    properties.put("voldemort.home", storeBdbFolder.getParent());
    VoldemortConfig voldemortConfig = new VoldemortConfig(properties);
    voldemortConfig.setBdbDataDirectory(storeBdbFolder.getParent());
    voldemortConfig.setEnableJmx(false);
    voldemortConfig.setBdbOneEnvPerStore(true);
    BdbStorageConfiguration bdbConfiguration = new BdbStorageConfiguration(voldemortConfig);
    // Local subclass that disables the memory-footprint check so the import
    // is not rejected for exceeding the store's configured footprint.
    class MockStoreDefinition extends StoreDefinition {
        public MockStoreDefinition() {
            super(storeName, null, null, null, null, null, null, null, 0, null, 0, null, 0, null, null, null, null, null, null, null, null, null, null, null, null, 0);
        }
        @Override
        public boolean hasMemoryFootprint() {
            return false;
        }
    }
    StoreDefinition mockStoreDef = new MockStoreDefinition();
    StorageEngine<ByteArray, byte[], byte[]> engine = bdbConfiguration.getStore(mockStoreDef, routingStrategy);
    long reportIntervalMs = 10000L;
    long lastCount = 0;
    long lastInserted = 0;
    Reporter<Boolean> rp = new Reporter<Boolean>(reportIntervalMs);
    long count = 0;
    long inserted = 0;
    for (File f : dataFiles) {
        // try-with-resources: the original only closed the reader on the happy
        // path, leaking it whenever an exception escaped the read loop.
        try (BufferedReader bufferedReader = new BufferedReader(new FileReader(f), READER_BUFFER_SIZE)) {
            engine.beginBatchModifications();
            while (true) {
                String line = bufferedReader.readLine();
                if (line == null) {
                    break;
                }
                Pair<ByteArray, Versioned<byte[]>> entry;
                try {
                    entry = lineToEntry(line);
                } catch (Exception e) {
                    // Malformed lines are skipped, not fatal.
                    System.err.println("Skipping line: " + line);
                    e.printStackTrace();
                    continue;
                }
                ByteArray key = entry.getFirst();
                List<Node> nodeList = routingStrategy.routeRequest(key.get());
                for (Node node : nodeList) {
                    if (nodeId == node.getId()) {
                        try {
                            engine.put(key, entry.getSecond(), null);
                            inserted++;
                        } catch (ObsoleteVersionException e) {
                            // An existing newer version wins; log and move on.
                            e.printStackTrace();
                        }
                        break;
                    }
                }
                count++;
                // Snapshot the mutable counters into effectively-final locals
                // for capture by the reporting callback.
                final Long countObject = count;
                final Long insertedObject = inserted;
                Boolean reported = rp.tryReport(new Callable<Boolean>() {
                    @Override
                    public Boolean call() throws Exception {
                        System.out.print(String.format("Imported %15d entries; Inserted %15d entries", countObject, insertedObject));
                        return true;
                    }
                });
                if (reported != null) {
                    long importSpeed = (count - lastCount) / (reportIntervalMs / 1000);
                    long insertSpeed = (inserted - lastInserted) / (reportIntervalMs / 1000);
                    System.out.println(String.format("; ImportSpeed: %8d/s; InsertSpeed: %8d/s ", importSpeed, insertSpeed));
                    lastCount = count;
                    lastInserted = inserted;
                }
            }
        } catch (IOException e) {
            // A failed file does not abort the whole import.
            e.printStackTrace();
        } finally {
            engine.endBatchModifications();
        }
    }
    engine.close();
    System.out.println(String.format("Finished importing %d entries (%d inserted, rest discarded)", count, inserted));
}
Usage of voldemort.xml.ClusterMapper in project voldemort (by voldemort):
class RebalanceControllerCLI, method main.
/**
 * Entry point: plans and executes a rebalance from the cluster's current
 * topology to the supplied final cluster (and optionally final stores),
 * disabling quota enforcement for the duration unless told otherwise.
 *
 * @param args command-line arguments: --url, --final-cluster and the optional
 *             --parallelism, --proxy-pause, --final-stores, --batch-size,
 *             --output-dir, --no-reset-quota flags
 * @throws Exception if validation of the current or final topology fails,
 *             or if the rebalance itself aborts
 */
public static void main(String[] args) throws Exception {
    setupParser();
    OptionSet options = getValidOptions(args);

    // Bootstrap parameters, with defaults for the optional knobs.
    String bootstrapURL = (String) options.valueOf("url");
    int parallelism = options.has("parallelism")
            ? (Integer) options.valueOf("parallelism")
            : RebalanceController.MAX_PARALLEL_REBALANCING;
    long proxyPauseSec = options.has("proxy-pause")
            ? (Long) options.valueOf("proxy-pause")
            : RebalanceController.PROXY_PAUSE_IN_SECONDS;

    // Fetch the live topology and sanity-check it; a failure here means
    // production is already in a bad state.
    RebalanceController rebalanceController = new RebalanceController(bootstrapURL, parallelism, proxyPauseSec);
    Cluster currentCluster = rebalanceController.getCurrentCluster();
    List<StoreDefinition> currentStoreDefs = rebalanceController.getCurrentStoreDefs();
    RebalanceUtils.validateClusterStores(currentCluster, currentStoreDefs);

    // Load the target topology; store definitions default to the current ones
    // unless an explicit final stores.xml was given.
    Cluster finalCluster = new ClusterMapper().readCluster(new File((String) options.valueOf("final-cluster")));
    List<StoreDefinition> finalStoreDefs = options.has("final-stores")
            ? new StoreDefinitionsMapper().readStoreList(new File((String) options.valueOf("final-stores")))
            : currentStoreDefs;
    RebalanceUtils.validateClusterStores(finalCluster, finalStoreDefs);
    RebalanceUtils.validateCurrentFinalCluster(currentCluster, finalCluster);

    // Optional planning knobs.
    int batchSize = CmdUtils.valueOf(options, "batch-size", RebalancePlan.BATCH_SIZE);
    String outputDir = options.has("output-dir") ? (String) options.valueOf("output-dir") : null;
    RebalancePlan rebalancePlan = new RebalancePlan(currentCluster, currentStoreDefs, finalCluster, finalStoreDefs, batchSize, outputDir);

    // Collect every store name touched by the final layout for quota handling.
    boolean resetQuota = !options.has("no-reset-quota");
    Set<String> storeNames = Sets.newHashSet();
    for (StoreDefinition storeDef : finalStoreDefs) {
        storeNames.add(storeDef.getName());
    }
    QuotaResetter quotaResetter = new QuotaResetter(bootstrapURL, storeNames, rebalancePlan.getFinalCluster().getNodeIds());

    // Remember and disable quota enforcement before the move...
    if (resetQuota) {
        quotaResetter.rememberAndDisableQuota();
    }
    rebalanceController.rebalance(rebalancePlan);
    // ...then restore quota values and enforcement afterwards.
    if (resetQuota) {
        quotaResetter.resetQuotaAndRecoverEnforcement();
    }
}
Usage of voldemort.xml.ClusterMapper in project voldemort (by voldemort):
class ReplaceNodeCLI, method init.
/**
 * Initializes admin clients for the old and new clusters, resolves which node
 * id in the new cluster replaces {@code nodeId}, and caches the agreed-upon
 * cluster/stores XML as parsed objects.
 */
private void init() {
    // One admin client per bootstrap URL: the existing cluster and the
    // replacement node's cluster.
    this.adminClient = new AdminClient(this.url);
    this.newAdminClient = new AdminClient(this.newUrl);
    this.cluster = adminClient.getAdminClientCluster();
    // Validate node exists in the old cluster (getNodeById throws otherwise;
    // the returned node is intentionally discarded).
    this.cluster.getNodeById(nodeId);
    this.newCluster = newAdminClient.getAdminClientCluster();
    // If the new cluster has multiple nodes, assume the replacement keeps the
    // same id; for a single-node cluster, use that node's id whatever it is.
    if (newCluster.getNumberOfNodes() > 1) {
        newNodeId = nodeId;
    } else {
        newNodeId = newCluster.getNodeIds().iterator().next().intValue();
    }
    // NOTE(review): getClusterXML()/getStoresXML() presumably fetch a
    // consensus view across nodes — confirm against their implementations.
    this.clusterXml = getClusterXML();
    // Update your cluster XML based on the consensus
    this.cluster = new ClusterMapper().readCluster(new StringReader(clusterXml));
    this.storesXml = getStoresXML();
    // Second argument 'false' skips store-definition verification on parse.
    this.storeDefinitions = new StoreDefinitionsMapper().readStoreList(new StringReader(storesXml), false);
}
Usage of voldemort.xml.ClusterMapper in project voldemort (by voldemort):
class ZoneShrinkageCLI, method shrinkClusterXml.
/**
 * Rewrites a cluster XML with the given zone removed: the zone's partitions
 * are first migrated out (vacated), then the emptied zone itself is dropped
 * from the topology.
 *
 * @param clusterXml     the current cluster topology as an XML string
 * @param droppingZoneId id of the zone to vacate and drop
 * @return the resulting cluster topology serialized back to XML
 */
protected static String shrinkClusterXml(String clusterXml, int droppingZoneId) {
    Cluster shrunken = new ClusterMapper().readCluster(new StringReader(clusterXml));
    // Two-step shrink: move data off the zone, then remove the empty zone.
    shrunken = RebalanceUtils.vacateZone(shrunken, droppingZoneId);
    shrunken = RebalanceUtils.dropZone(shrunken, droppingZoneId);
    return new ClusterMapper().writeCluster(shrunken);
}
Aggregations