Search in sources :

Example 11 with StoreDefinitionBuilder

use of voldemort.store.StoreDefinitionBuilder in project voldemort by voldemort.

From the class GetallNodeReachTest, method testGetall_322:

@Test
public void testGetall_322() throws Exception {
    cluster = getEightNodeClusterWithZones();
    HashMap<Integer, Integer> zoneReplicationFactor = new HashMap<Integer, Integer>();
    zoneReplicationFactor.put(0, 3);
    zoneReplicationFactor.put(1, 3);
    // Zone-routed store: replication factor 6 (3 per zone), 2 required reads,
    // and no zone-count requirements.
    storeDef = new StoreDefinitionBuilder().setName("test")
                                           .setType(InMemoryStorageConfiguration.TYPE_NAME)
                                           .setRoutingPolicy(RoutingTier.CLIENT)
                                           .setRoutingStrategyType(RoutingStrategyType.ZONE_STRATEGY)
                                           .setReplicationFactor(6)
                                           .setZoneReplicationFactor(zoneReplicationFactor)
                                           .setKeySerializer(new SerializerDefinition("string"))
                                           .setValueSerializer(new SerializerDefinition("string"))
                                           .setPreferredReads(2)
                                           .setRequiredReads(2)
                                           .setPreferredWrites(2)
                                           .setRequiredWrites(2)
                                           .setZoneCountReads(0)
                                           .setZoneCountWrites(0)
                                           .build();
    makeStore();
    Versioned<byte[]> v = Versioned.value("v".getBytes());
    // Nodes 0-2 each hold "k1111_1111" and "k0000_1111".
    for (int nodeId = 0; nodeId <= 2; nodeId++) {
        subStores.get(nodeId).put(TestUtils.toByteArray("k1111_1111"), v, null);
        subStores.get(nodeId).put(TestUtils.toByteArray("k0000_1111"), v, null);
    }
    // Node 3 holds the same two keys (inserted in the opposite order).
    subStores.get(3).put(TestUtils.toByteArray("k0000_1111"), v, null);
    subStores.get(3).put(TestUtils.toByteArray("k1111_1111"), v, null);
    // Nodes 4-7 each hold "k1111_1111" and "k1111_0000".
    for (int nodeId = 4; nodeId <= 7; nodeId++) {
        subStores.get(nodeId).put(TestUtils.toByteArray("k1111_1111"), v, null);
        subStores.get(nodeId).put(TestUtils.toByteArray("k1111_0000"), v, null);
    }
    /* test multiple keys getall */
    List<ByteArray> queriedKeys = new ArrayList<ByteArray>();
    queriedKeys.add(TestUtils.toByteArray("k0000_0000"));
    queriedKeys.add(TestUtils.toByteArray("k0000_1111"));
    queriedKeys.add(TestUtils.toByteArray("k1111_0000"));
    queriedKeys.add(TestUtils.toByteArray("k1111_1111"));
    Map<ByteArray, List<Versioned<byte[]>>> fetched = store.getAll(queriedKeys, null);
    // Keys with no result must be absent from the map, not mapped to an empty list.
    assertFalse(fetched.containsKey(TestUtils.toByteArray("not_included")));
    assertFalse(fetched.containsKey(TestUtils.toByteArray("k0000_0000")));
    assertEquals(2, fetched.get(TestUtils.toByteArray("k0000_1111")).size());
    assertFalse(fetched.containsKey(TestUtils.toByteArray("k1111_0000")));
    assertEquals(2, fetched.get(TestUtils.toByteArray("k1111_1111")).size());
}
Also used : StoreDefinitionBuilder(voldemort.store.StoreDefinitionBuilder) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) ByteArray(voldemort.utils.ByteArray) ArrayList(java.util.ArrayList) List(java.util.List) SerializerDefinition(voldemort.serialization.SerializerDefinition) Test(org.junit.Test)

Example 12 with StoreDefinitionBuilder

use of voldemort.store.StoreDefinitionBuilder in project voldemort by voldemort.

From the class ReadOnlyStorageEngineTestInstance, method create:

/**
 * Builds a complete read-only store test fixture: generates test data, builds
 * the per-node store files via {@code JsonStoreBuilder}, and wires up a
 * compressing + serializing store stack on top of each node's
 * {@code ReadOnlyStorageEngine}.
 *
 * @param strategy search strategy used by each node's storage engine
 * @param baseDir base directory under which all temp data is created
 * @param testSize number of key/value pairs to generate
 * @param numNodes number of nodes in the synthetic single-host cluster
 * @param repFactor replication factor for the store definition
 * @param keySerDef serializer definition for keys
 * @param valueSerDef serializer definition for values
 * @param type on-disk read-only storage format to build
 * @param partitionMap partitionMap[i] lists the partition ids owned by node i
 * @return a fully wired test instance holding the generated data and stores
 * @throws Exception if data generation or store building fails
 */
public static ReadOnlyStorageEngineTestInstance create(SearchStrategy strategy, File baseDir, int testSize, int numNodes, int repFactor, SerializerDefinition keySerDef, SerializerDefinition valueSerDef, ReadOnlyStorageFormat type, int[][] partitionMap) throws Exception {
    // create some test data
    Map<String, String> data = createTestData(testSize);
    JsonReader reader = makeTestDataReader(data, baseDir);
    // set up definitions for cluster and store
    List<Node> nodes = new ArrayList<Node>();
    for (int i = 0; i < numNodes; i++) {
        List<Integer> partitions = new ArrayList<Integer>(partitionMap[i].length);
        for (int p : partitionMap[i]) {
            partitions.add(p);
        }
        nodes.add(new Node(i, "localhost", 8080 + i, 6666 + i, 7000 + i, partitions));
    }
    Cluster cluster = new Cluster("test", nodes);
    StoreDefinition storeDef = new StoreDefinitionBuilder().setName("test").setType(ReadOnlyStorageConfiguration.TYPE_NAME).setKeySerializer(keySerDef).setValueSerializer(valueSerDef).setRoutingPolicy(RoutingTier.CLIENT).setRoutingStrategyType(RoutingStrategyType.CONSISTENT_STRATEGY).setReplicationFactor(repFactor).setPreferredReads(1).setRequiredReads(1).setPreferredWrites(1).setRequiredWrites(1).build();
    RoutingStrategy router = new RoutingStrategyFactory().updateRoutingStrategy(storeDef, cluster);
    // build store files in outputDir
    File outputDir = TestUtils.createTempDir(baseDir);
    JsonStoreBuilder storeBuilder = new JsonStoreBuilder(reader, cluster, storeDef, router, outputDir, null, testSize / 5, 1, 2, 10000, false);
    storeBuilder.build(type);
    File nodeDir = TestUtils.createTempDir(baseDir);
    @SuppressWarnings("unchecked") Serializer<String> keySerializer = (Serializer<String>) new DefaultSerializerFactory().getSerializer(keySerDef);
    @SuppressWarnings("unchecked") Serializer<String> valueSerializer = (Serializer<String>) new DefaultSerializerFactory().getSerializer(valueSerDef);
    Serializer<String> transSerializer = new StringSerializer();
    Map<Integer, Store<String, String, String>> nodeStores = Maps.newHashMap();
    Map<Integer, ReadOnlyStorageEngine> readOnlyStores = Maps.newHashMap();
    // The compression strategies depend only on the serializer definitions, not
    // on the node, so build them once instead of once per loop iteration.
    CompressionStrategyFactory compressionStrategyFactory = new CompressionStrategyFactory();
    CompressionStrategy keyCompressionStrat = compressionStrategyFactory.get(keySerDef.getCompression());
    CompressionStrategy valueCompressionStrat = compressionStrategyFactory.get(valueSerDef.getCompression());
    for (int i = 0; i < numNodes; i++) {
        File currNode = new File(nodeDir, Integer.toString(i));
        currNode.mkdirs();
        currNode.deleteOnExit();
        // Move the freshly built "node-i" output into place as version-0.
        Utils.move(new File(outputDir, "node-" + Integer.toString(i)), new File(currNode, "version-0"));
        ReadOnlyStorageEngine readOnlyStorageEngine = new ReadOnlyStorageEngine("test", strategy, router, i, currNode, 1);
        readOnlyStores.put(i, readOnlyStorageEngine);
        Store<ByteArray, byte[], byte[]> innerStore = new CompressingStore(readOnlyStorageEngine, keyCompressionStrat, valueCompressionStrat);
        nodeStores.put(i, SerializingStore.wrap(innerStore, keySerializer, valueSerializer, transSerializer));
    }
    return new ReadOnlyStorageEngineTestInstance(data, baseDir, readOnlyStores, nodeStores, router, keySerializer);
}
Also used : RoutingStrategyFactory(voldemort.routing.RoutingStrategyFactory) Node(voldemort.cluster.Node) ArrayList(java.util.ArrayList) Store(voldemort.store.Store) CompressingStore(voldemort.store.compress.CompressingStore) SerializingStore(voldemort.store.serialized.SerializingStore) CompressionStrategy(voldemort.store.compress.CompressionStrategy) CompressionStrategyFactory(voldemort.store.compress.CompressionStrategyFactory) CompressingStore(voldemort.store.compress.CompressingStore) StoreDefinition(voldemort.store.StoreDefinition) RoutingStrategy(voldemort.routing.RoutingStrategy) JsonReader(voldemort.serialization.json.JsonReader) ByteArray(voldemort.utils.ByteArray) StringSerializer(voldemort.serialization.StringSerializer) StringSerializer(voldemort.serialization.StringSerializer) Serializer(voldemort.serialization.Serializer) StoreDefinitionBuilder(voldemort.store.StoreDefinitionBuilder) Cluster(voldemort.cluster.Cluster) DefaultSerializerFactory(voldemort.serialization.DefaultSerializerFactory) File(java.io.File)

Example 13 with StoreDefinitionBuilder

use of voldemort.store.StoreDefinitionBuilder in project voldemort by voldemort.

From the class HadoopStoreBuilderTest, method testRowsLessThanNodes:

/**
 * Issue 258 : 'node--1' produced during store building if some reducer does
 * not get any data.
 *
 * The input data map is intentionally left empty so that every reducer
 * receives zero rows; the build must still create one directory per node.
 *
 * @throws Exception
 */
@Test
public void testRowsLessThanNodes() throws Exception {
    // Intentionally empty: the point of this test is reducers with no data.
    Map<String, String> values = new HashMap<String, String>();
    File testDir = TestUtils.createTempDir();
    File tempDir = new File(testDir, "temp");
    File outputDir = new File(testDir, "output");
    // write test data (none, see above) to text file
    File inputFile = File.createTempFile("input", ".txt", testDir);
    inputFile.deleteOnExit();
    StringBuilder contents = new StringBuilder();
    for (Map.Entry<String, String> entry : values.entrySet()) contents.append(entry.getKey() + "\t" + entry.getValue() + "\n");
    FileUtils.writeStringToFile(inputFile, contents.toString());
    String storeName = "test";
    SerializerDefinition serDef = new SerializerDefinition("string");
    // Single named constant instead of repeating the magic number 10 for both
    // the cluster size and the per-node verification loop below.
    final int numNodes = 10;
    Cluster cluster = ServerTestUtils.getLocalCluster(numNodes);
    // Test backwards compatibility
    StoreDefinition def = new StoreDefinitionBuilder().setName(storeName).setType(ReadOnlyStorageConfiguration.TYPE_NAME).setKeySerializer(serDef).setValueSerializer(serDef).setRoutingPolicy(RoutingTier.CLIENT).setRoutingStrategyType(RoutingStrategyType.CONSISTENT_STRATEGY).setReplicationFactor(1).setPreferredReads(1).setRequiredReads(1).setPreferredWrites(1).setRequiredWrites(1).build();
    HadoopStoreBuilder builder = new HadoopStoreBuilder("testRowsLessThanNodes", new Props(), new JobConf(), TextStoreMapper.class, TextInputFormat.class, cluster, def, new Path(tempDir.getAbsolutePath()), new Path(outputDir.getAbsolutePath()), new Path(inputFile.getAbsolutePath()), CheckSumType.MD5, saveKeys, false, 64 * 1024, false, 0L, false);
    builder.build();
    File[] nodeDirectories = outputDir.listFiles(new FileFilter() {

        @Override
        public boolean accept(File pathname) {
            // We are only interested in counting directories, not files.
            return pathname.isDirectory();
        }
    });
    // listFiles returns null on I/O error; fail clearly instead of NPE-ing.
    Assert.assertNotNull("output directory should be listable", nodeDirectories);
    // Should not produce node--1 directory + have one folder for every node
    Assert.assertEquals(cluster.getNumberOfNodes(), nodeDirectories.length);
    for (File f : outputDir.listFiles()) {
        Assert.assertFalse(f.toString().contains("node--1"));
    }
    // Check if individual nodes exist, along with their metadata file
    for (int nodeId = 0; nodeId < numNodes; nodeId++) {
        File nodeFile = new File(outputDir, "node-" + Integer.toString(nodeId));
        Assert.assertTrue(nodeFile.exists());
        Assert.assertTrue(new File(nodeFile, ".metadata").exists());
    }
}
Also used : StoreDefinitionBuilder(voldemort.store.StoreDefinitionBuilder) Path(org.apache.hadoop.fs.Path) HashMap(java.util.HashMap) Cluster(voldemort.cluster.Cluster) Props(voldemort.utils.Props) StoreDefinition(voldemort.store.StoreDefinition) FileFilter(java.io.FileFilter) File(java.io.File) Map(java.util.Map) HashMap(java.util.HashMap) JobConf(org.apache.hadoop.mapred.JobConf) SerializerDefinition(voldemort.serialization.SerializerDefinition) Test(org.junit.Test)

Example 14 with StoreDefinitionBuilder

use of voldemort.store.StoreDefinitionBuilder in project voldemort by voldemort.

From the class HadoopStoreBuilderCollisionTest, method testCollisionWithParams:

/**
 * Builds a read-only store whose keys are forced to collide (groups of
 * {@code maxCollisions} consecutive keys are mapped to one shared md5 prefix),
 * fetches it, and verifies that point gets, the key iterator and the entry
 * iterator each see every entry exactly once.
 *
 * @param totalElements total number of key/value pairs to generate; must be a
 *        multiple of {@code maxCollisions}
 * @param maxCollisions number of consecutive keys sharing one md5 prefix
 * @throws Exception on any build/fetch/IO failure
 */
@SuppressWarnings({ "unchecked" })
public void testCollisionWithParams(int totalElements, int maxCollisions) throws Exception {
    // JUnit's assertEquals takes (expected, actual); the original had the
    // arguments swapped here and in several assertions below.
    assertEquals(0, totalElements % maxCollisions);
    // create test data
    Map<String, String> values = new HashMap<String, String>();
    List<String> valuesLeft = Lists.newArrayList();
    File testDir = TestUtils.createTempDir();
    File tempDir = new File(testDir, "temp");
    File outputDir = new File(testDir, "output");
    File storeDir = TestUtils.createTempDir(testDir);
    for (int i = 0; i < totalElements; i++) {
        values.put(Integer.toString(i), Integer.toString(i));
        valuesLeft.add(Integer.toString(i));
    }
    String storeName = "test";
    SerializerDefinition serDef = new SerializerDefinition("string");
    Cluster cluster = ServerTestUtils.getLocalCluster(1);
    Serializer<Object> serializer = (Serializer<Object>) new DefaultSerializerFactory().getSerializer(serDef);
    // write test data to text file
    File inputFile = File.createTempFile("input", ".txt", testDir);
    inputFile.deleteOnExit();
    StringBuilder contents = new StringBuilder();
    byte[] currentMd5 = TestUtils.randomBytes(2 * ByteUtils.SIZE_OF_INT);
    int entryId = 0;
    // Map each key's real md5 prefix to a shared fake one so that every run of
    // maxCollisions consecutive keys collides.
    for (Map.Entry<String, String> entry : values.entrySet()) {
        if (entryId % maxCollisions == 0) {
            currentMd5 = TestUtils.randomBytes(2 * ByteUtils.SIZE_OF_INT);
        }
        contents.append(entry.getKey() + "\t" + entry.getValue() + "\n");
        byte[] oldMd5 = ByteUtils.copy(ByteUtils.md5(serializer.toBytes(entry.getKey())), 0, 2 * ByteUtils.SIZE_OF_INT);
        oldMd5ToNewMd5.put(new ByteArray(oldMd5), currentMd5);
        entryId++;
    }
    FileUtils.writeStringToFile(inputFile, contents.toString());
    StoreDefinition def = new StoreDefinitionBuilder().setName(storeName).setType(ReadOnlyStorageConfiguration.TYPE_NAME).setKeySerializer(serDef).setValueSerializer(serDef).setRoutingPolicy(RoutingTier.CLIENT).setRoutingStrategyType(RoutingStrategyType.CONSISTENT_STRATEGY).setReplicationFactor(1).setPreferredReads(1).setRequiredReads(1).setPreferredWrites(1).setRequiredWrites(1).build();
    HadoopStoreBuilder builder = new HadoopStoreBuilder("testCollisionWithParams", new Props(), new JobConf(), CollidingTextStoreMapper.class, TextInputFormat.class, cluster, def, new Path(tempDir.getAbsolutePath()), new Path(outputDir.getAbsolutePath()), new Path(inputFile.getAbsolutePath()), CheckSumType.MD5, true, false, 1024 * 1024 * 1024, false, null, false);
    builder.build();
    File nodeFile = new File(outputDir, "node-0");
    File versionDir = new File(storeDir, "version-0");
    HdfsFetcher fetcher = new HdfsFetcher();
    fetcher.fetch(nodeFile.getAbsolutePath(), versionDir.getAbsolutePath());
    // Test if we work in the normal collision scenario open store
    ReadOnlyStorageEngine engine = new ReadOnlyStorageEngine(storeName, new CustomBinarySearchStrategy(), new RoutingStrategyFactory().updateRoutingStrategy(def, cluster), 0, storeDir, 1);
    Store<Object, Object, Object> store = SerializingStore.wrap(engine, serializer, serializer, serializer);
    // check values
    for (Map.Entry<String, String> entry : values.entrySet()) {
        List<Versioned<Object>> found = store.get(entry.getKey(), null);
        Assert.assertEquals("Incorrect number of results", 1, found.size());
        Assert.assertEquals(entry.getValue(), found.get(0).getValue());
    }
    // also check the iterator - first key iterator...
    List<String> valuesLeft2 = Lists.newArrayList(valuesLeft);
    ClosableIterator<ByteArray> keyIterator = engine.keys();
    int numElements = 0;
    while (keyIterator.hasNext()) {
        Object object = serializer.toObject(keyIterator.next().get());
        // assertTrue instead of assertEquals(..., true): same check, clearer intent.
        Assert.assertTrue(valuesLeft.remove(object));
        Assert.assertTrue(values.containsKey(object));
        numElements++;
    }
    Assert.assertEquals(values.size(), numElements);
    Assert.assertEquals(0, valuesLeft.size());
    // ... and entry iterator
    ClosableIterator<Pair<ByteArray, Versioned<byte[]>>> entryIterator = engine.entries();
    numElements = 0;
    while (entryIterator.hasNext()) {
        Pair<ByteArray, Versioned<byte[]>> entry = entryIterator.next();
        Assert.assertTrue(valuesLeft2.remove(serializer.toObject(entry.getFirst().get())));
        Assert.assertEquals(values.get(serializer.toObject(entry.getFirst().get())), serializer.toObject(entry.getSecond().getValue()));
        numElements++;
    }
    Assert.assertEquals(values.size(), numElements);
    Assert.assertEquals(0, valuesLeft2.size());
}
Also used : Versioned(voldemort.versioning.Versioned) HashMap(java.util.HashMap) RoutingStrategyFactory(voldemort.routing.RoutingStrategyFactory) Props(voldemort.utils.Props) StoreDefinition(voldemort.store.StoreDefinition) ByteArray(voldemort.utils.ByteArray) JobConf(org.apache.hadoop.mapred.JobConf) Serializer(voldemort.serialization.Serializer) Pair(voldemort.utils.Pair) StoreDefinitionBuilder(voldemort.store.StoreDefinitionBuilder) Path(org.apache.hadoop.fs.Path) Cluster(voldemort.cluster.Cluster) ReadOnlyStorageEngine(voldemort.store.readonly.ReadOnlyStorageEngine) DefaultSerializerFactory(voldemort.serialization.DefaultSerializerFactory) HdfsFetcher(voldemort.store.readonly.fetcher.HdfsFetcher) File(java.io.File) HashMap(java.util.HashMap) Map(java.util.Map) SerializerDefinition(voldemort.serialization.SerializerDefinition)

Example 15 with StoreDefinitionBuilder

use of voldemort.store.StoreDefinitionBuilder in project voldemort by voldemort.

From the class AbstractNonZonedRebalanceTest, method setUp:

/**
 * Builds the store definitions used by the rebalance tests (read-only and
 * BDB read-write, with and without replication), writes each combination out
 * as a temporary stores.xml file, and records the file paths in the
 * corresponding fields.
 *
 * @throws IOException if a temp file cannot be written
 */
@Before
public void setUp() throws IOException {
    // First without replication
    roStoreDefWithoutReplication = storeDefBuilder(testStoreNameRO, ReadOnlyStorageConfiguration.TYPE_NAME, 1).build();
    rwStoreDefWithoutReplication = storeDefBuilder(testStoreNameRW, BdbStorageConfiguration.TYPE_NAME, 1).build();
    storeDefWithoutReplication = Lists.newArrayList(roStoreDefWithoutReplication, rwStoreDefWithoutReplication);
    storeDefFileWithoutReplication = writeTempStoresXml("two-stores-", roStoreDefWithoutReplication, rwStoreDefWithoutReplication).getAbsolutePath();
    // Now with replication
    roStoreDefWithReplication = storeDefBuilder(testStoreNameRO, ReadOnlyStorageConfiguration.TYPE_NAME, 2).build();
    roStoreDefFileWithReplication = writeTempStoresXml("ro-stores-", roStoreDefWithReplication).getAbsolutePath();
    rwStoreDefWithReplication = storeDefBuilder(testStoreNameRW, BdbStorageConfiguration.TYPE_NAME, 2).build();
    rwStoreDefWithReplication2 = storeDefBuilder(testStoreNameRW2, BdbStorageConfiguration.TYPE_NAME, 2).build();
    File rwStoresFile = writeTempStoresXml("rw-stores-", rwStoreDefWithReplication);
    rwStoreDefFileWithReplication = rwStoresFile.getAbsolutePath();
    // NOTE(review): only this temp file was registered for deletion in the
    // original code; preserved as-is, but the asymmetry looks unintentional.
    rwStoresFile.deleteOnExit();
    rwTwoStoreDefFileWithReplication = writeTempStoresXml("rw-two-stores-", rwStoreDefWithReplication, rwStoreDefWithReplication2).getAbsolutePath();
    storeDefWithReplication = Lists.newArrayList(roStoreDefWithReplication, rwStoreDefWithReplication);
    storeDefFileWithReplication = writeTempStoresXml("two-stores-", roStoreDefWithReplication, rwStoreDefWithReplication).getAbsolutePath();
}

/**
 * Creates a client-routed, consistent-strategy store definition builder with
 * string key/value serializers and all read/write counts set to 1; callers
 * supply the name, storage type and replication factor.
 */
private StoreDefinitionBuilder storeDefBuilder(String name, String type, int replicationFactor) {
    return new StoreDefinitionBuilder().setName(name)
                                       .setType(type)
                                       .setKeySerializer(new SerializerDefinition("string"))
                                       .setValueSerializer(new SerializerDefinition("string"))
                                       .setRoutingPolicy(RoutingTier.CLIENT)
                                       .setRoutingStrategyType(RoutingStrategyType.CONSISTENT_STRATEGY)
                                       .setReplicationFactor(replicationFactor)
                                       .setPreferredReads(1)
                                       .setRequiredReads(1)
                                       .setPreferredWrites(1)
                                       .setRequiredWrites(1);
}

/**
 * Serializes the given store definitions to a temporary stores.xml file and
 * returns that file.
 */
private File writeTempStoresXml(String prefix, StoreDefinition... storeDefs) throws IOException {
    File file = ServerTestUtils.createTempFile(prefix, ".xml");
    FileUtils.writeStringToFile(file, new StoreDefinitionsMapper().writeStoreList(Lists.newArrayList(storeDefs)));
    return file;
}
Also used : StoreDefinitionBuilder(voldemort.store.StoreDefinitionBuilder) StoreDefinitionsMapper(voldemort.xml.StoreDefinitionsMapper) File(java.io.File) SerializerDefinition(voldemort.serialization.SerializerDefinition) Before(org.junit.Before)

Aggregations

StoreDefinitionBuilder (voldemort.store.StoreDefinitionBuilder)57 SerializerDefinition (voldemort.serialization.SerializerDefinition)46 StoreDefinition (voldemort.store.StoreDefinition)42 HashMap (java.util.HashMap)28 ArrayList (java.util.ArrayList)27 Test (org.junit.Test)23 File (java.io.File)17 ByteArray (voldemort.utils.ByteArray)16 Cluster (voldemort.cluster.Cluster)13 StoreDefinitionsMapper (voldemort.xml.StoreDefinitionsMapper)12 LinkedList (java.util.LinkedList)10 List (java.util.List)9 VoldemortException (voldemort.VoldemortException)9 Node (voldemort.cluster.Node)8 IOException (java.io.IOException)7 Before (org.junit.Before)6 VoldemortTestConstants.getNineNodeCluster (voldemort.VoldemortTestConstants.getNineNodeCluster)6 AdminClient (voldemort.client.protocol.admin.AdminClient)6 Store (voldemort.store.Store)6 FileWriter (java.io.FileWriter)5