use of voldemort.store.StoreDefinition in project voldemort by voldemort.
In class AdminToolUtils, method getUserStoreDefMapOnNode:
/**
 * Utility function that fetches the user-defined store definitions.
 *
 * @param adminClient An instance of AdminClient pointing to the given cluster
 * @param nodeId Node id to fetch store definitions from
 * @return A map from store names to store definitions
 */
public static Map<String, StoreDefinition> getUserStoreDefMapOnNode(AdminClient adminClient, Integer nodeId) {
    List<StoreDefinition> storeDefinitionList = adminClient.metadataMgmtOps.getRemoteStoreDefList(nodeId)
                                                                           .getValue();
    Map<String, StoreDefinition> storeDefinitionMap = Maps.newHashMap();
    for (StoreDefinition storeDefinition : storeDefinitionList) {
        storeDefinitionMap.put(storeDefinition.getName(), storeDefinition);
    }
    return storeDefinitionMap;
}
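A minimal usage sketch; the bootstrap URL and node id are placeholders, and the exact AdminClient constructor signature varies across Voldemort versions:

    // Hypothetical usage; "tcp://localhost:6666" and node id 0 are placeholders.
    AdminClient adminClient = new AdminClient("tcp://localhost:6666", new AdminClientConfig());
    Map<String, StoreDefinition> storeDefs = AdminToolUtils.getUserStoreDefMapOnNode(adminClient, 0);
    for (Map.Entry<String, StoreDefinition> entry : storeDefs.entrySet()) {
        System.out.println(entry.getKey() + " -> replication factor "
                           + entry.getValue().getReplicationFactor());
    }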
use of voldemort.store.StoreDefinition in project voldemort by voldemort.
In class PartitionAnalysisCLI, method main:
public static void main(String[] args) throws Exception {
    setupParser();
    OptionSet options = getValidOptions(args);
    String clusterXML = (String) options.valueOf("cluster");
    String storesXML = (String) options.valueOf("stores");
    Cluster currentCluster = new ClusterMapper().readCluster(new File(clusterXML));
    List<StoreDefinition> storeDefs = new StoreDefinitionsMapper().readStoreList(new File(storesXML));
    PartitionBalance partitionBalance = new PartitionBalance(currentCluster, storeDefs);
    System.out.println(partitionBalance);
}
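Since the tool reads its inputs from the "cluster" and "stores" options, a minimal driver sketch (the XML file paths are placeholders) could be:

    // Hypothetical invocation; cluster.xml and stores.xml are placeholder paths.
    PartitionAnalysisCLI.main(new String[] { "--cluster", "cluster.xml", "--stores", "stores.xml" });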
use of voldemort.store.StoreDefinition in project voldemort by voldemort.
In class RebalancePlanCLI, method main:
public static void main(String[] args) throws Exception {
    setupParser();
    OptionSet options = getValidOptions(args);
    // Required args
    String currentClusterXML = (String) options.valueOf("current-cluster");
    String currentStoresXML = (String) options.valueOf("current-stores");
    String finalClusterXML = (String) options.valueOf("final-cluster");
    // Required args for some use cases
    String finalStoresXML = currentStoresXML;
    if (options.has("final-stores")) {
        finalStoresXML = (String) options.valueOf("final-stores");
    }
    Cluster currentCluster = new ClusterMapper().readCluster(new File(currentClusterXML));
    List<StoreDefinition> currentStoreDefs = new StoreDefinitionsMapper().readStoreList(new File(currentStoresXML));
    Cluster finalCluster = new ClusterMapper().readCluster(new File(finalClusterXML));
    List<StoreDefinition> finalStoreDefs = new StoreDefinitionsMapper().readStoreList(new File(finalStoresXML));
    // Optional args
    int batchSize = CmdUtils.valueOf(options, "batch-size", RebalancePlan.BATCH_SIZE);
    String outputDir = null;
    if (options.has("output-dir")) {
        outputDir = (String) options.valueOf("output-dir");
    }
    new RebalancePlan(currentCluster, currentStoreDefs, finalCluster, finalStoreDefs, batchSize, outputDir);
}
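A driver sketch using only the required options; as the code above shows, the final stores default to the current stores when --final-stores is omitted (the XML paths are placeholders):

    // Hypothetical invocation; the XML paths are placeholders.
    RebalancePlanCLI.main(new String[] { "--current-cluster", "current-cluster.xml",
                                         "--current-stores", "current-stores.xml",
                                         "--final-cluster", "final-cluster.xml" });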
use of voldemort.store.StoreDefinition in project voldemort by voldemort.
In class ExportBDBToTextDump, method main:
public static void main(String[] argv) throws Exception {
    OptionParser parser = getParser();
    OptionSet options = parser.parse(argv);
    validateOptions(options);
    // bdb_folder output_folder
    String storeBdbFolderPath = (String) options.valueOf("bdb");
    String outputFolderPath = (String) options.valueOf("output");
    File storeBdbFolder = new File(storeBdbFolderPath);
    File outputFolder = new File(outputFolderPath);
    final String storeName = storeBdbFolder.getName();
    Properties properties = new Properties();
    properties.put("node.id", "0");
    properties.put("voldemort.home", storeBdbFolder.getParent());
    VoldemortConfig voldemortConfig = new VoldemortConfig(properties);
    voldemortConfig.setBdbDataDirectory(storeBdbFolder.getParent());
    voldemortConfig.setEnableJmx(false);
    voldemortConfig.setBdbOneEnvPerStore(true);
    BdbStorageConfiguration bdbConfiguration = new BdbStorageConfiguration(voldemortConfig);
    // Minimal stand-in store definition: only the store name is needed to open the BDB environment.
    class MockStoreDefinition extends StoreDefinition {

        public MockStoreDefinition() {
            super(storeName, null, null, null, null, null, null, null, 0, null, 0, null, 0, null, null, null, null, null, null, null, null, null, null, null, null, 0);
        }

        @Override
        public boolean hasMemoryFootprint() {
            return false;
        }
    }
    StoreDefinition storeDef = new MockStoreDefinition();
    StorageEngine<ByteArray, byte[], byte[]> engine = bdbConfiguration.getStore(storeDef, null);
    long reportIntervalMs = 10000L;
    long lastCount = 0;
    Reporter<Boolean> rp = new Reporter<Boolean>(reportIntervalMs);
    long count = 0;
    BufferedWriter splitFileWriter = null;
    ClosableIterator<Pair<ByteArray, Versioned<byte[]>>> entries = engine.entries();
    while (entries.hasNext()) {
        // Open a new split file every SPLIT_SIZE entries.
        if (splitFileWriter == null) {
            long splitId = count / SPLIT_SIZE;
            File splitFile = new File(outputFolder, makeSplitFileName(splitId));
            splitFileWriter = new BufferedWriter(new FileWriter(splitFile), WRITER_BUFFER_SIZE);
        }
        Pair<ByteArray, Versioned<byte[]>> pair = entries.next();
        String line = makeLine(pair);
        splitFileWriter.write(line);
        if ((count + 1) % SPLIT_SIZE == 0) {
            splitFileWriter.close();
            splitFileWriter = null;
        }
        count++;
        final Long countObject = count;
        // Print progress at most once per reporting interval.
        Boolean reported = rp.tryReport(new Callable<Boolean>() {

            @Override
            public Boolean call() throws Exception {
                System.out.print(String.format("Exported %15d entries", countObject));
                return true;
            }
        });
        if (reported != null) {
            System.out.println(String.format("; Speed: %8d/s", (count - lastCount) / (reportIntervalMs / 1000)));
            lastCount = count;
        }
    }
    entries.close();
    if (splitFileWriter != null) {
        splitFileWriter.close();
    }
    System.out.println(String.format("Finished exporting %d entries", count));
}
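Given the "bdb" and "output" options read above, a hypothetical invocation (assuming the parser registers them as long options; all paths are placeholders) might look like:

    // Hypothetical invocation; the store name is taken from the BDB folder name.
    ExportBDBToTextDump.main(new String[] { "--bdb", "/data/bdb/test-store", "--output", "/data/dump" });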
use of voldemort.store.StoreDefinition in project voldemort by voldemort.
In class HadoopStoreBuilderTest, method testRowsLessThanNodes:
/**
 * Issue 258: a 'node--1' directory was produced during store building if some
 * reducer did not get any data.
 *
 * @throws Exception
 */
@Test
public void testRowsLessThanNodes() throws Exception {
    Map<String, String> values = new HashMap<String, String>();
    File testDir = TestUtils.createTempDir();
    File tempDir = new File(testDir, "temp");
    File outputDir = new File(testDir, "output");
    // write test data to text file
    File inputFile = File.createTempFile("input", ".txt", testDir);
    inputFile.deleteOnExit();
    StringBuilder contents = new StringBuilder();
    for (Map.Entry<String, String> entry : values.entrySet()) {
        contents.append(entry.getKey() + "\t" + entry.getValue() + "\n");
    }
    FileUtils.writeStringToFile(inputFile, contents.toString());
    String storeName = "test";
    SerializerDefinition serDef = new SerializerDefinition("string");
    Cluster cluster = ServerTestUtils.getLocalCluster(10);
    // Test backwards compatibility
    StoreDefinition def = new StoreDefinitionBuilder().setName(storeName)
                                                      .setType(ReadOnlyStorageConfiguration.TYPE_NAME)
                                                      .setKeySerializer(serDef)
                                                      .setValueSerializer(serDef)
                                                      .setRoutingPolicy(RoutingTier.CLIENT)
                                                      .setRoutingStrategyType(RoutingStrategyType.CONSISTENT_STRATEGY)
                                                      .setReplicationFactor(1)
                                                      .setPreferredReads(1)
                                                      .setRequiredReads(1)
                                                      .setPreferredWrites(1)
                                                      .setRequiredWrites(1)
                                                      .build();
    // saveKeys comes from the enclosing test class.
    HadoopStoreBuilder builder = new HadoopStoreBuilder("testRowsLessThanNodes",
                                                        new Props(),
                                                        new JobConf(),
                                                        TextStoreMapper.class,
                                                        TextInputFormat.class,
                                                        cluster,
                                                        def,
                                                        new Path(tempDir.getAbsolutePath()),
                                                        new Path(outputDir.getAbsolutePath()),
                                                        new Path(inputFile.getAbsolutePath()),
                                                        CheckSumType.MD5,
                                                        saveKeys,
                                                        false,
                                                        64 * 1024,
                                                        false,
                                                        0L,
                                                        false);
    builder.build();
    File[] nodeDirectories = outputDir.listFiles(new FileFilter() {

        @Override
        public boolean accept(File pathname) {
            // We are only interested in counting directories, not files.
            return pathname.isDirectory();
        }
    });
    // Should not produce node--1 directory + have one folder for every node
    Assert.assertEquals(cluster.getNumberOfNodes(), nodeDirectories.length);
    for (File f : outputDir.listFiles()) {
        Assert.assertFalse(f.toString().contains("node--1"));
    }
    // Check if individual nodes exist, along with their metadata file
    for (int nodeId = 0; nodeId < 10; nodeId++) {
        File nodeFile = new File(outputDir, "node-" + Integer.toString(nodeId));
        Assert.assertTrue(nodeFile.exists());
        Assert.assertTrue(new File(nodeFile, ".metadata").exists());
    }
}
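The assertions above imply an output layout like the following sketch for the 10-node cluster (only the asserted files are shown):

    output/
        node-0/
            .metadata
        node-1/
            .metadata
        ...
        node-9/
            .metadata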