Search in sources :

Example 26 with StoreDefinition

use of voldemort.store.StoreDefinition in project voldemort by voldemort.

From class AdminToolUtils, method getUserStoreDefMapOnNode:

/**
 * Builds a lookup table of the user-defined store definitions present on a
 * single node of the cluster.
 *
 * @param adminClient An instance of AdminClient pointing to the given cluster
 * @param nodeId Node id to fetch store definitions from
 * @return A map from store name to its corresponding store definition
 */
public static Map<String, StoreDefinition> getUserStoreDefMapOnNode(AdminClient adminClient, Integer nodeId) {
    Map<String, StoreDefinition> defsByName = Maps.newHashMap();
    List<StoreDefinition> fetchedDefs = adminClient.metadataMgmtOps.getRemoteStoreDefList(nodeId).getValue();
    for (StoreDefinition def : fetchedDefs) {
        defsByName.put(def.getName(), def);
    }
    return defsByName;
}
Also used : StoreDefinition(voldemort.store.StoreDefinition)

Example 27 with StoreDefinition

use of voldemort.store.StoreDefinition in project voldemort by voldemort.

From class PartitionAnalysisCLI, method main:

/**
 * Entry point: reads a cluster XML and a stores XML named on the command
 * line and prints the partition balance analysis for that cluster.
 *
 * @param args command-line arguments; must provide "cluster" and "stores"
 * @throws Exception if option parsing or XML reading fails
 */
public static void main(String[] args) throws Exception {
    setupParser();
    OptionSet options = getValidOptions(args);
    File clusterFile = new File((String) options.valueOf("cluster"));
    File storesFile = new File((String) options.valueOf("stores"));
    Cluster cluster = new ClusterMapper().readCluster(clusterFile);
    List<StoreDefinition> storeDefs = new StoreDefinitionsMapper().readStoreList(storesFile);
    // PartitionBalance.toString() renders the full analysis report.
    System.out.println(new PartitionBalance(cluster, storeDefs));
}
Also used : StoreDefinition(voldemort.store.StoreDefinition) StoreDefinitionsMapper(voldemort.xml.StoreDefinitionsMapper) Cluster(voldemort.cluster.Cluster) ClusterMapper(voldemort.xml.ClusterMapper) OptionSet(joptsimple.OptionSet) File(java.io.File)

Example 28 with StoreDefinition

use of voldemort.store.StoreDefinition in project voldemort by voldemort.

From class RebalancePlanCLI, method main:

/**
 * Entry point: builds a rebalance plan from the current and final
 * cluster/store definitions named on the command line.
 *
 * @param args command-line arguments; requires "current-cluster",
 *        "current-stores" and "final-cluster"; optionally takes
 *        "final-stores", "batch-size" and "output-dir"
 * @throws Exception if option parsing or XML reading fails
 */
public static void main(String[] args) throws Exception {
    setupParser();
    OptionSet options = getValidOptions(args);
    // Required args
    String currentClusterXML = (String) options.valueOf("current-cluster");
    String currentStoresXML = (String) options.valueOf("current-stores");
    String finalClusterXML = (String) options.valueOf("final-cluster");
    // Required args for some use cases; defaults to the current stores file.
    // (Fixed: the previous `new String(currentStoresXML)` copy constructor is
    // redundant — Strings are immutable, so a plain reference suffices.)
    String finalStoresXML = currentStoresXML;
    if (options.has("final-stores")) {
        finalStoresXML = (String) options.valueOf("final-stores");
    }
    Cluster currentCluster = new ClusterMapper().readCluster(new File(currentClusterXML));
    List<StoreDefinition> currentStoreDefs = new StoreDefinitionsMapper().readStoreList(new File(currentStoresXML));
    Cluster finalCluster = new ClusterMapper().readCluster(new File(finalClusterXML));
    List<StoreDefinition> finalStoreDefs = new StoreDefinitionsMapper().readStoreList(new File(finalStoresXML));
    // Optional args
    int batchSize = CmdUtils.valueOf(options, "batch-size", RebalancePlan.BATCH_SIZE);
    String outputDir = null;
    if (options.has("output-dir")) {
        outputDir = (String) options.valueOf("output-dir");
    }
    // NOTE(review): the plan appears to be computed and emitted as a side
    // effect of the RebalancePlan constructor; the instance itself is
    // intentionally discarded.
    new RebalancePlan(currentCluster, currentStoreDefs, finalCluster, finalStoreDefs, batchSize, outputDir);
}
Also used : RebalancePlan(voldemort.client.rebalance.RebalancePlan) StoreDefinition(voldemort.store.StoreDefinition) StoreDefinitionsMapper(voldemort.xml.StoreDefinitionsMapper) Cluster(voldemort.cluster.Cluster) ClusterMapper(voldemort.xml.ClusterMapper) OptionSet(joptsimple.OptionSet) File(java.io.File)

Example 29 with StoreDefinition

use of voldemort.store.StoreDefinition in project voldemort by voldemort.

From class ExportBDBToTextDump, method main:

/**
 * Entry point: dumps every entry of a single BDB store to tab-separated
 * text files in the output folder, rolling to a new split file every
 * SPLIT_SIZE entries and reporting export speed periodically.
 *
 * @param argv command-line arguments; requires "bdb" (store folder) and
 *        "output" (destination folder)
 * @throws Exception if option validation, BDB access or file I/O fails
 */
public static void main(String[] argv) throws Exception {
    OptionParser parser = getParser();
    OptionSet options = parser.parse(argv);
    validateOptions(options);
    // bdb_folder output_folder
    String storeBdbFolderPath = (String) options.valueOf("bdb");
    String outputFolderPath = (String) options.valueOf("output");
    File storeBdbFolder = new File(storeBdbFolderPath);
    File outputFolder = new File(outputFolderPath);
    final String storeName = storeBdbFolder.getName();
    Properties properties = new Properties();
    properties.put("node.id", "0");
    properties.put("voldemort.home", storeBdbFolder.getParent());
    VoldemortConfig voldemortConfig = new VoldemortConfig(properties);
    voldemortConfig.setBdbDataDirectory(storeBdbFolder.getParent());
    voldemortConfig.setEnableJmx(false);
    voldemortConfig.setBdbOneEnvPerStore(true);
    BdbStorageConfiguration bdbConfiguration = new BdbStorageConfiguration(voldemortConfig);
    // Minimal StoreDefinition stand-in: only the store name is needed to open
    // the BDB environment; memory-footprint tracking is disabled.
    class MockStoreDefinition extends StoreDefinition {

        public MockStoreDefinition() {
            super(storeName, null, null, null, null, null, null, null, 0, null, 0, null, 0, null, null, null, null, null, null, null, null, null, null, null, null, 0);
        }

        @Override
        public boolean hasMemoryFootprint() {
            return false;
        }
    }
    StoreDefinition storeDef = new MockStoreDefinition();
    StorageEngine<ByteArray, byte[], byte[]> engine = bdbConfiguration.getStore(storeDef, null);
    long reportIntervalMs = 10000L;
    long lastCount = 0;
    Reporter<Boolean> rp = new Reporter<Boolean>(reportIntervalMs);
    long count = 0;
    BufferedWriter splitFileWriter = null;
    ClosableIterator<Pair<ByteArray, Versioned<byte[]>>> entries = engine.entries();
    // FIX: close the entry iterator and any open split writer even when an
    // exception is thrown mid-export; previously both leaked on failure.
    // Success-path close order (iterator first, then writer) is unchanged.
    try {
        while (entries.hasNext()) {
            if (splitFileWriter == null) {
                // Open the next split file; splitId advances every SPLIT_SIZE entries.
                long splitId = count / SPLIT_SIZE;
                File splitFile = new File(outputFolder, makeSplitFileName(splitId));
                splitFileWriter = new BufferedWriter(new FileWriter(splitFile), WRITER_BUFFER_SIZE);
            }
            Pair<ByteArray, Versioned<byte[]>> pair = entries.next();
            String line = makeLine(pair);
            splitFileWriter.write(line);
            if ((count + 1) % SPLIT_SIZE == 0) {
                // Split is full: close it and let the next iteration open a new one.
                splitFileWriter.close();
                splitFileWriter = null;
            }
            count++;
            final Long countObject = count;
            Boolean reported = rp.tryReport(new Callable<Boolean>() {

                @Override
                public Boolean call() throws Exception {
                    System.out.print(String.format("Exported %15d entries", countObject));
                    return true;
                }
            });
            if (reported != null) {
                System.out.println(String.format("; Speed: %8d/s", (count - lastCount) / (reportIntervalMs / 1000)));
                lastCount = count;
            }
        }
    } finally {
        entries.close();
        if (splitFileWriter != null) {
            splitFileWriter.close();
        }
    }
    System.out.println(String.format("Finished exporting %d entries", count));
}
Also used : Versioned(voldemort.versioning.Versioned) Properties(java.util.Properties) OptionParser(joptsimple.OptionParser) VoldemortConfig(voldemort.server.VoldemortConfig) StoreDefinition(voldemort.store.StoreDefinition) ByteArray(voldemort.utils.ByteArray) BdbStorageConfiguration(voldemort.store.bdb.BdbStorageConfiguration) Pair(voldemort.utils.Pair) OptionSet(joptsimple.OptionSet)

Example 30 with StoreDefinition

use of voldemort.store.StoreDefinition in project voldemort by voldemort.

From class HadoopStoreBuilderTest, method testRowsLessThanNodes:

/**
 * Regression test for issue 258: store building must not produce a
 * 'node--1' directory when some reducer receives no data — i.e. when
 * there are fewer input rows than cluster nodes.
 *
 * @throws Exception on any build or I/O failure
 */
@Test
public void testRowsLessThanNodes() throws Exception {
    // Zero rows against a 10-node cluster guarantees empty reducers.
    Map<String, String> values = new HashMap<String, String>();
    File baseDir = TestUtils.createTempDir();
    File scratchDir = new File(baseDir, "temp");
    File buildOutputDir = new File(baseDir, "output");
    // Write the (empty) test data to a text file.
    File dataFile = File.createTempFile("input", ".txt", baseDir);
    dataFile.deleteOnExit();
    StringBuilder body = new StringBuilder();
    for (Map.Entry<String, String> entry : values.entrySet()) {
        body.append(entry.getKey() + "\t" + entry.getValue() + "\n");
    }
    FileUtils.writeStringToFile(dataFile, body.toString());
    String storeName = "test";
    SerializerDefinition serDef = new SerializerDefinition("string");
    Cluster cluster = ServerTestUtils.getLocalCluster(10);
    // Test backwards compatibility
    StoreDefinition def = new StoreDefinitionBuilder().setName(storeName)
                                                      .setType(ReadOnlyStorageConfiguration.TYPE_NAME)
                                                      .setKeySerializer(serDef)
                                                      .setValueSerializer(serDef)
                                                      .setRoutingPolicy(RoutingTier.CLIENT)
                                                      .setRoutingStrategyType(RoutingStrategyType.CONSISTENT_STRATEGY)
                                                      .setReplicationFactor(1)
                                                      .setPreferredReads(1)
                                                      .setRequiredReads(1)
                                                      .setPreferredWrites(1)
                                                      .setRequiredWrites(1)
                                                      .build();
    HadoopStoreBuilder builder = new HadoopStoreBuilder("testRowsLessThanNodes",
                                                        new Props(),
                                                        new JobConf(),
                                                        TextStoreMapper.class,
                                                        TextInputFormat.class,
                                                        cluster,
                                                        def,
                                                        new Path(scratchDir.getAbsolutePath()),
                                                        new Path(buildOutputDir.getAbsolutePath()),
                                                        new Path(dataFile.getAbsolutePath()),
                                                        CheckSumType.MD5,
                                                        saveKeys,
                                                        false,
                                                        64 * 1024,
                                                        false,
                                                        0L,
                                                        false);
    builder.build();
    FileFilter directoriesOnly = new FileFilter() {

        @Override
        public boolean accept(File pathname) {
            // We are only interested in counting directories, not files.
            return pathname.isDirectory();
        }
    };
    File[] nodeDirectories = buildOutputDir.listFiles(directoriesOnly);
    // Should not produce node--1 directory + have one folder for every node
    Assert.assertEquals(cluster.getNumberOfNodes(), nodeDirectories.length);
    for (File f : buildOutputDir.listFiles()) {
        Assert.assertFalse(f.toString().contains("node--1"));
    }
    // Check if individual nodes exist, along with their metadata file
    for (int nodeId = 0; nodeId < 10; nodeId++) {
        File nodeDir = new File(buildOutputDir, "node-" + Integer.toString(nodeId));
        Assert.assertTrue(nodeDir.exists());
        Assert.assertTrue(new File(nodeDir, ".metadata").exists());
    }
}
Also used : StoreDefinitionBuilder(voldemort.store.StoreDefinitionBuilder) Path(org.apache.hadoop.fs.Path) HashMap(java.util.HashMap) Cluster(voldemort.cluster.Cluster) Props(voldemort.utils.Props) StoreDefinition(voldemort.store.StoreDefinition) FileFilter(java.io.FileFilter) File(java.io.File) Map(java.util.Map) HashMap(java.util.HashMap) JobConf(org.apache.hadoop.mapred.JobConf) SerializerDefinition(voldemort.serialization.SerializerDefinition) Test(org.junit.Test)

Aggregations

StoreDefinition (voldemort.store.StoreDefinition)215 Cluster (voldemort.cluster.Cluster)74 Test (org.junit.Test)67 ArrayList (java.util.ArrayList)56 HashMap (java.util.HashMap)50 StoreDefinitionsMapper (voldemort.xml.StoreDefinitionsMapper)50 VoldemortException (voldemort.VoldemortException)49 ByteArray (voldemort.utils.ByteArray)49 Node (voldemort.cluster.Node)43 StoreDefinitionBuilder (voldemort.store.StoreDefinitionBuilder)42 SerializerDefinition (voldemort.serialization.SerializerDefinition)38 File (java.io.File)34 StringReader (java.io.StringReader)34 Versioned (voldemort.versioning.Versioned)29 IOException (java.io.IOException)24 List (java.util.List)23 Store (voldemort.store.Store)21 AdminClient (voldemort.client.protocol.admin.AdminClient)19 RoutingStrategyFactory (voldemort.routing.RoutingStrategyFactory)19 ClusterMapper (voldemort.xml.ClusterMapper)18