
Example 1 with SerializerDefinition

Use of voldemort.serialization.SerializerDefinition in project voldemort by voldemort.

From the class HadoopStoreBuilderCollisionTest, method testCollisionWithParams.

@SuppressWarnings({ "unchecked" })
public void testCollisionWithParams(int totalElements, int maxCollisions) throws Exception {
    // totalElements must be evenly divisible by maxCollisions
    assertEquals(0, totalElements % maxCollisions);
    // create test data
    Map<String, String> values = new HashMap<String, String>();
    List<String> valuesLeft = Lists.newArrayList();
    File testDir = TestUtils.createTempDir();
    File tempDir = new File(testDir, "temp");
    File outputDir = new File(testDir, "output");
    File storeDir = TestUtils.createTempDir(testDir);
    for (int i = 0; i < totalElements; i++) {
        values.put(Integer.toString(i), Integer.toString(i));
        valuesLeft.add(Integer.toString(i));
    }
    String storeName = "test";
    SerializerDefinition serDef = new SerializerDefinition("string");
    Cluster cluster = ServerTestUtils.getLocalCluster(1);
    Serializer<Object> serializer = (Serializer<Object>) new DefaultSerializerFactory().getSerializer(serDef);
    // write test data to text file
    File inputFile = File.createTempFile("input", ".txt", testDir);
    inputFile.deleteOnExit();
    StringBuilder contents = new StringBuilder();
    byte[] currentMd5 = TestUtils.randomBytes(2 * ByteUtils.SIZE_OF_INT);
    int entryId = 0;
    for (Map.Entry<String, String> entry : values.entrySet()) {
        if (entryId % maxCollisions == 0) {
            currentMd5 = TestUtils.randomBytes(2 * ByteUtils.SIZE_OF_INT);
        }
        contents.append(entry.getKey() + "\t" + entry.getValue() + "\n");
        byte[] oldMd5 = ByteUtils.copy(ByteUtils.md5(serializer.toBytes(entry.getKey())), 0, 2 * ByteUtils.SIZE_OF_INT);
        // oldMd5ToNewMd5 is a field of the test class: it maps each key's real
        // MD5 prefix to a shared hash so that CollidingTextStoreMapper forces collisions
        oldMd5ToNewMd5.put(new ByteArray(oldMd5), currentMd5);
        entryId++;
    }
    FileUtils.writeStringToFile(inputFile, contents.toString());
    StoreDefinition def = new StoreDefinitionBuilder()
            .setName(storeName)
            .setType(ReadOnlyStorageConfiguration.TYPE_NAME)
            .setKeySerializer(serDef)
            .setValueSerializer(serDef)
            .setRoutingPolicy(RoutingTier.CLIENT)
            .setRoutingStrategyType(RoutingStrategyType.CONSISTENT_STRATEGY)
            .setReplicationFactor(1)
            .setPreferredReads(1)
            .setRequiredReads(1)
            .setPreferredWrites(1)
            .setRequiredWrites(1)
            .build();
    HadoopStoreBuilder builder = new HadoopStoreBuilder("testCollisionWithParams", new Props(), new JobConf(), CollidingTextStoreMapper.class, TextInputFormat.class, cluster, def, new Path(tempDir.getAbsolutePath()), new Path(outputDir.getAbsolutePath()), new Path(inputFile.getAbsolutePath()), CheckSumType.MD5, true, false, 1024 * 1024 * 1024, false, null, false);
    builder.build();
    File nodeFile = new File(outputDir, "node-0");
    File versionDir = new File(storeDir, "version-0");
    HdfsFetcher fetcher = new HdfsFetcher();
    fetcher.fetch(nodeFile.getAbsolutePath(), versionDir.getAbsolutePath());
    // Test the normal collision scenario: open the store
    ReadOnlyStorageEngine engine = new ReadOnlyStorageEngine(storeName, new CustomBinarySearchStrategy(), new RoutingStrategyFactory().updateRoutingStrategy(def, cluster), 0, storeDir, 1);
    Store<Object, Object, Object> store = SerializingStore.wrap(engine, serializer, serializer, serializer);
    // check values
    for (Map.Entry<String, String> entry : values.entrySet()) {
        List<Versioned<Object>> found = store.get(entry.getKey(), null);
        Assert.assertEquals("Incorrect number of results", 1, found.size());
        Assert.assertEquals(entry.getValue(), found.get(0).getValue());
    }
    // also check the iterators: first the key iterator...
    List<String> valuesLeft2 = Lists.newArrayList(valuesLeft);
    ClosableIterator<ByteArray> keyIterator = engine.keys();
    int numElements = 0;
    while (keyIterator.hasNext()) {
        Object object = serializer.toObject(keyIterator.next().get());
        assertEquals(valuesLeft.remove(object), true);
        Assert.assertTrue(values.containsKey(object));
        numElements++;
    }
    Assert.assertEquals(numElements, values.size());
    Assert.assertEquals(valuesLeft.size(), 0);
    // ... and entry iterator
    ClosableIterator<Pair<ByteArray, Versioned<byte[]>>> entryIterator = engine.entries();
    numElements = 0;
    while (entryIterator.hasNext()) {
        Pair<ByteArray, Versioned<byte[]>> entry = entryIterator.next();
        assertEquals(valuesLeft2.remove(serializer.toObject(entry.getFirst().get())), true);
        Assert.assertEquals(values.get(serializer.toObject(entry.getFirst().get())), serializer.toObject(entry.getSecond().getValue()));
        numElements++;
    }
    Assert.assertEquals(numElements, values.size());
    Assert.assertEquals(valuesLeft2.size(), 0);
}
Also used: Versioned(voldemort.versioning.Versioned) HashMap(java.util.HashMap) RoutingStrategyFactory(voldemort.routing.RoutingStrategyFactory) Props(voldemort.utils.Props) StoreDefinition(voldemort.store.StoreDefinition) ByteArray(voldemort.utils.ByteArray) JobConf(org.apache.hadoop.mapred.JobConf) Serializer(voldemort.serialization.Serializer) Pair(voldemort.utils.Pair) StoreDefinitionBuilder(voldemort.store.StoreDefinitionBuilder) Path(org.apache.hadoop.fs.Path) Cluster(voldemort.cluster.Cluster) ReadOnlyStorageEngine(voldemort.store.readonly.ReadOnlyStorageEngine) DefaultSerializerFactory(voldemort.serialization.DefaultSerializerFactory) HdfsFetcher(voldemort.store.readonly.fetcher.HdfsFetcher) File(java.io.File) Map(java.util.Map) SerializerDefinition(voldemort.serialization.SerializerDefinition)
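
Distilled from the test above, the core pattern is small: name a serializer type in a SerializerDefinition, resolve it through DefaultSerializerFactory, and round-trip values through bytes. A minimal, hedged sketch (the class name and printed value are illustrative, not from the test):

import voldemort.serialization.DefaultSerializerFactory;
import voldemort.serialization.Serializer;
import voldemort.serialization.SerializerDefinition;

public class SerializerDefinitionSketch {

    @SuppressWarnings("unchecked")
    public static void main(String[] args) {
        // A SerializerDefinition is a named reference to a serializer type.
        SerializerDefinition serDef = new SerializerDefinition("string");
        // The factory resolves the definition to a concrete Serializer.
        Serializer<Object> serializer =
                (Serializer<Object>) new DefaultSerializerFactory().getSerializer(serDef);
        // Round-trip a value through bytes, as the test does before hashing keys.
        byte[] bytes = serializer.toBytes("hello");
        System.out.println(serializer.toObject(bytes)); // prints: hello
    }
}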

Example 2 with SerializerDefinition

Use of voldemort.serialization.SerializerDefinition in project voldemort by voldemort.

From the class RESTClientFactory, method getRawStore.

@Override
public <K, V, T> Store<K, V, T> getRawStore(String storeName, InconsistencyResolver<Versioned<V>> resolver) {
    Store<K, V, T> clientStore = null;
    // The lowest layer : Transporting request to coordinator
    R2Store r2store = null;
    this.d2Client = restClientFactoryConfig.getD2Client();
    if (this.d2Client == null) {
        logger.info("Using transportclient since d2client is not available");
        Map<String, String> properties = new HashMap<String, String>();
        properties.put(HttpClientFactory.HTTP_POOL_SIZE, Integer.toString(this.config.getMaxR2ConnectionPoolSize()));
        transportClient = _clientFactory.getClient(properties);
        r2store = new R2Store(storeName, this.config.getHttpBootstrapURL(), this.transportClient, this.config);
    } else {
        logger.info("Using d2client");
        r2store = new R2Store(storeName, this.config.getHttpBootstrapURL(), this.d2Client, this.config);
    }
    this.rawStoreList.add(r2store);
    // bootstrap from the coordinator and obtain all the serialization
    // information.
    String serializerInfoXml = r2store.getSerializerInfoXml();
    SerializerDefinition keySerializerDefinition = RestUtils.parseKeySerializerDefinition(serializerInfoXml);
    SerializerDefinition valueSerializerDefinition = RestUtils.parseValueSerializerDefinition(serializerInfoXml);
    synchronized (this) {
        keySerializerMap.put(storeName, keySerializerDefinition);
        valueSerializerMap.put(storeName, valueSerializerDefinition);
    }
    if (logger.isDebugEnabled()) {
        logger.debug("Bootstrapping for " + storeName + ": Key serializer " + keySerializerDefinition);
        logger.debug("Bootstrapping for " + storeName + ": Value serializer " + valueSerializerDefinition);
    }
    // Start building the stack.
    // First, the transport layer
    Store<ByteArray, byte[], byte[]> store = r2store;
    // TODO: Add identifierString to the Mbean name
    if (this.config.isEnableJmx()) {
        StatTrackingStore statStore = new StatTrackingStore(store, this.stats);
        store = statStore;
        JmxUtils.registerMbean(new StoreStatsJmx(statStore.getStats()), JmxUtils.createObjectName(JmxUtils.getPackageName(store.getClass()), store.getName()));
    }
    // Add compression layer
    if (keySerializerDefinition.hasCompression() || valueSerializerDefinition.hasCompression()) {
        store = new CompressingStore(store, new CompressionStrategyFactory().get(keySerializerDefinition.getCompression()), new CompressionStrategyFactory().get(valueSerializerDefinition.getCompression()));
    }
    // Add Serialization layer
    Serializer<K> keySerializer = (Serializer<K>) serializerFactory.getSerializer(keySerializerDefinition);
    Serializer<V> valueSerializer = (Serializer<V>) serializerFactory.getSerializer(valueSerializerDefinition);
    clientStore = SerializingStore.wrap(store, keySerializer, valueSerializer, null);
    // Add inconsistency resolving layer
    InconsistencyResolver<Versioned<V>> secondaryResolver = resolver == null ? new TimeBasedInconsistencyResolver<V>() : resolver;
    clientStore = new InconsistencyResolvingStore<K, V, T>(clientStore, new ChainedResolver<Versioned<V>>(new VectorClockInconsistencyResolver<V>(), secondaryResolver));
    return clientStore;
}
Also used: ChainedResolver(voldemort.versioning.ChainedResolver) Versioned(voldemort.versioning.Versioned) HashMap(java.util.HashMap) CompressionStrategyFactory(voldemort.store.compress.CompressionStrategyFactory) StatTrackingStore(voldemort.store.stats.StatTrackingStore) CompressingStore(voldemort.store.compress.CompressingStore) ByteArray(voldemort.utils.ByteArray) StoreStatsJmx(voldemort.store.stats.StoreStatsJmx) SerializerDefinition(voldemort.serialization.SerializerDefinition) Serializer(voldemort.serialization.Serializer)
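
The same SerializingStore.wrap call appears in Example 1 (over a ReadOnlyStorageEngine) and here (over the REST transport). As a hedged sketch of the pattern in isolation, the byte-level store below is swapped for Voldemort's in-memory engine; InMemoryStorageEngine's single-argument constructor is an assumption based on its use elsewhere in the project:

import voldemort.serialization.DefaultSerializerFactory;
import voldemort.serialization.Serializer;
import voldemort.serialization.SerializerDefinition;
import voldemort.store.Store;
import voldemort.store.memory.InMemoryStorageEngine;
import voldemort.store.serialized.SerializingStore;
import voldemort.utils.ByteArray;
import voldemort.versioning.Versioned;

public class SerializingStoreSketch {

    @SuppressWarnings("unchecked")
    public static void main(String[] args) {
        Serializer<Object> serializer = (Serializer<Object>)
                new DefaultSerializerFactory().getSerializer(new SerializerDefinition("string"));
        // Byte-level store: stands in for the R2/HTTP transport layer.
        Store<ByteArray, byte[], byte[]> raw =
                new InMemoryStorageEngine<ByteArray, byte[], byte[]>("test");
        // Serialization layer: callers speak objects, the engine speaks bytes.
        Store<Object, Object, Object> store =
                SerializingStore.wrap(raw, serializer, serializer, serializer);
        store.put("hello", new Versioned<Object>("world"), null);
        System.out.println(store.get("hello", null).get(0).getValue()); // world
    }
}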

Example 3 with SerializerDefinition

Use of voldemort.serialization.SerializerDefinition in project voldemort by voldemort.

From the class HadoopStoreBuilderTest, method testRowsLessThanNodes.

/**
 * Issue 258: a 'node--1' directory was produced during store building if
 * some reducer did not receive any data.
 *
 * @throws Exception
 */
@Test
public void testRowsLessThanNodes() throws Exception {
    Map<String, String> values = new HashMap<String, String>();
    File testDir = TestUtils.createTempDir();
    File tempDir = new File(testDir, "temp");
    File outputDir = new File(testDir, "output");
    // write test data to text file
    File inputFile = File.createTempFile("input", ".txt", testDir);
    inputFile.deleteOnExit();
    StringBuilder contents = new StringBuilder();
    for (Map.Entry<String, String> entry : values.entrySet()) contents.append(entry.getKey() + "\t" + entry.getValue() + "\n");
    FileUtils.writeStringToFile(inputFile, contents.toString());
    String storeName = "test";
    SerializerDefinition serDef = new SerializerDefinition("string");
    Cluster cluster = ServerTestUtils.getLocalCluster(10);
    // Test backwards compatibility
    StoreDefinition def = new StoreDefinitionBuilder()
            .setName(storeName)
            .setType(ReadOnlyStorageConfiguration.TYPE_NAME)
            .setKeySerializer(serDef)
            .setValueSerializer(serDef)
            .setRoutingPolicy(RoutingTier.CLIENT)
            .setRoutingStrategyType(RoutingStrategyType.CONSISTENT_STRATEGY)
            .setReplicationFactor(1)
            .setPreferredReads(1)
            .setRequiredReads(1)
            .setPreferredWrites(1)
            .setRequiredWrites(1)
            .build();
    // saveKeys is a field of this parameterized test class
    HadoopStoreBuilder builder = new HadoopStoreBuilder("testRowsLessThanNodes", new Props(), new JobConf(), TextStoreMapper.class, TextInputFormat.class, cluster, def, new Path(tempDir.getAbsolutePath()), new Path(outputDir.getAbsolutePath()), new Path(inputFile.getAbsolutePath()), CheckSumType.MD5, saveKeys, false, 64 * 1024, false, 0L, false);
    builder.build();
    File[] nodeDirectories = outputDir.listFiles(new FileFilter() {

        @Override
        public boolean accept(File pathname) {
            // We are only interested in counting directories, not files.
            return pathname.isDirectory();
        }
    });
    // Should not produce a node--1 directory, and should have one folder per node
    Assert.assertEquals(cluster.getNumberOfNodes(), nodeDirectories.length);
    for (File f : outputDir.listFiles()) {
        Assert.assertFalse(f.toString().contains("node--1"));
    }
    // Check if individual nodes exist, along with their metadata file
    for (int nodeId = 0; nodeId < 10; nodeId++) {
        File nodeFile = new File(outputDir, "node-" + Integer.toString(nodeId));
        Assert.assertTrue(nodeFile.exists());
        Assert.assertTrue(new File(nodeFile, ".metadata").exists());
    }
}
Also used: StoreDefinitionBuilder(voldemort.store.StoreDefinitionBuilder) Path(org.apache.hadoop.fs.Path) HashMap(java.util.HashMap) Cluster(voldemort.cluster.Cluster) Props(voldemort.utils.Props) StoreDefinition(voldemort.store.StoreDefinition) FileFilter(java.io.FileFilter) File(java.io.File) Map(java.util.Map) JobConf(org.apache.hadoop.mapred.JobConf) SerializerDefinition(voldemort.serialization.SerializerDefinition) Test(org.junit.Test)

Example 4 with SerializerDefinition

Use of voldemort.serialization.SerializerDefinition in project voldemort by voldemort.

From the class ReadOnlyStorePerformanceTest, method main.

public static void main(String[] args) throws FileNotFoundException, IOException {
    OptionParser parser = new OptionParser();
    parser.accepts("help", "print usage information");
    parser.accepts("threads", "number of threads").withRequiredArg().ofType(Integer.class);
    parser.accepts("requests", "[REQUIRED] number of requests").withRequiredArg().ofType(Integer.class);
    parser.accepts("store-dir", "[REQUIRED] store directory").withRequiredArg().describedAs("directory");
    parser.accepts("cluster-xml", "Path to cluster.xml").withRequiredArg().describedAs("path");
    parser.accepts("node-id", "Id of node").withRequiredArg().ofType(Integer.class).describedAs("node-id");
    parser.accepts("search-strategy", "class of the search strategy to use").withRequiredArg().describedAs("class_name");
    parser.accepts("build", "If present, first build the data");
    parser.accepts("num-values", "The number of values in the store").withRequiredArg().describedAs("count").ofType(Integer.class);
    parser.accepts("num-chunks", "The number of chunks per partition").withRequiredArg().describedAs("chunks").ofType(Integer.class);
    parser.accepts("internal-sort-size", "The number of items to sort in memory at a time").withRequiredArg().describedAs("size").ofType(Integer.class);
    parser.accepts("value-size", "The size of the values in the store").withRequiredArg().describedAs("size").ofType(Integer.class);
    parser.accepts("working-dir", "The directory in which to store temporary data").withRequiredArg().describedAs("dir");
    parser.accepts("gzip", "Compress the intermediate temp files used in building the store");
    parser.accepts("request-file", "file get request ids from").withRequiredArg();
    parser.accepts("version", "Version of read-only store [" + ReadOnlyStorageFormat.READONLY_V0 + "," + ReadOnlyStorageFormat.READONLY_V1 + "," + ReadOnlyStorageFormat.READONLY_V2 + " (default)]").withRequiredArg().describedAs("version");
    parser.accepts("test-gz", "Path to gzip containing data. Works with --build only").withRequiredArg().describedAs("path");
    OptionSet options = parser.parse(args);
    if (options.has("help")) {
        parser.printHelpOn(System.out);
        System.exit(0);
    }
    CmdUtils.croakIfMissing(parser, options, "requests", "store-dir");
    final int numThreads = CmdUtils.valueOf(options, "threads", 10);
    final int numRequests = (Integer) options.valueOf("requests");
    final int internalSortSize = CmdUtils.valueOf(options, "internal-sort-size", 500000);
    int numValues = numRequests;
    final String inputFile = (String) options.valueOf("request-file");
    final String searcherClass = CmdUtils.valueOf(options, "search-strategy", BinarySearchStrategy.class.getName()).trim();
    final boolean gzipIntermediate = options.has("gzip");
    final SearchStrategy searcher = (SearchStrategy) ReflectUtils.callConstructor(ReflectUtils.loadClass(searcherClass));
    final File workingDir = new File(CmdUtils.valueOf(options, "working-dir", System.getProperty("java.io.tmpdir")));
    String storeDir = (String) options.valueOf("store-dir");
    ReadOnlyStorageFormat format = ReadOnlyStorageFormat.fromCode(CmdUtils.valueOf(options, "version", ReadOnlyStorageFormat.READONLY_V2.toString()));
    Cluster cluster = null;
    int nodeId = 0;
    SerializerDefinition sdef = new SerializerDefinition("json", "'string'");
    StoreDefinition storeDef = new StoreDefinitionBuilder()
            .setName("test")
            .setKeySerializer(sdef)
            .setValueSerializer(sdef)
            .setRequiredReads(1)
            .setReplicationFactor(1)
            .setRequiredWrites(1)
            .setType("read-only")
            .setRoutingStrategyType(RoutingStrategyType.CONSISTENT_STRATEGY)
            .setRoutingPolicy(RoutingTier.CLIENT)
            .build();
    if (options.has("build")) {
        CmdUtils.croakIfMissing(parser, options, "num-values", "value-size");
        numValues = (Integer) options.valueOf("num-values");
        int numChunks = 1;
        if (options.has("num-chunks"))
            numChunks = (Integer) options.valueOf("num-chunks");
        int valueSize = (Integer) options.valueOf("value-size");
        // generate test data
        File temp = null;
        if (options.has("test-gz")) {
            temp = new File((String) options.valueOf("test-gz"));
        } else {
            temp = File.createTempFile("json-data", ".txt.gz", workingDir);
            temp.deleteOnExit();
            System.out.println("Generating test data in " + temp);
            OutputStream outputStream = new GZIPOutputStream(new FileOutputStream(temp));
            Writer writer = new BufferedWriter(new OutputStreamWriter(outputStream), 10 * 1024 * 1024);
            String value = TestUtils.randomLetters(valueSize);
            for (int i = 0; i < numValues; i++) {
                writer.write("\"");
                writer.write(Integer.toString(i));
                writer.write("\" \"");
                writer.write(value);
                writer.write("\"");
                writer.write("\n");
            }
            writer.close();
            writer = null;
        }
        System.out.println("Building store.");
        InputStream inputStream = new GZIPInputStream(new FileInputStream(temp));
        Reader r = new BufferedReader(new InputStreamReader(inputStream), 1 * 1024 * 1024);
        File output = TestUtils.createTempDir(workingDir);
        File tempDir = TestUtils.createTempDir(workingDir);
        cluster = ServerTestUtils.getLocalCluster(1);
        nodeId = 0;
        JsonStoreBuilder builder = new JsonStoreBuilder(new JsonReader(r), cluster, storeDef, new ConsistentRoutingStrategy(cluster, 1), output, tempDir, internalSortSize, 2, numChunks, 64 * 1024, gzipIntermediate);
        builder.build(format);
        // move the built data into the store directory
        File dir = new File(storeDir);
        Utils.rm(dir);
        dir.mkdirs();
        System.out.println("Moving store data from " + output + " to " + dir);
        boolean moveWorked = new File(output, "node-0").renameTo(new File(dir, "version-0"));
        if (!moveWorked)
            Utils.croak("Move of data from " + output + " to " + dir + " failed.");
    } else {
        CmdUtils.croakIfMissing(parser, options, "cluster-xml", "node-id");
        String clusterXmlPath = (String) options.valueOf("cluster-xml");
        nodeId = (Integer) options.valueOf("node-id");
        File clusterXml = new File(clusterXmlPath);
        if (!clusterXml.exists()) {
            Utils.croak("Cluster.xml does not exist");
        }
        cluster = new ClusterMapper().readCluster(clusterXml);
    }
    final Store<ByteArray, byte[], byte[]> store = new ReadOnlyStorageEngine("test", searcher, new RoutingStrategyFactory().updateRoutingStrategy(storeDef, cluster), nodeId, new File(storeDir), 0);
    final AtomicInteger obsoletes = new AtomicInteger(0);
    final AtomicInteger nullResults = new AtomicInteger(0);
    final AtomicInteger totalResults = new AtomicInteger(0);
    final BlockingQueue<String> requestIds = new ArrayBlockingQueue<String>(20000);
    final Executor executor = Executors.newFixedThreadPool(1);
    // if they have given us a file make a request generator that reads from
    // it, otherwise just generate random values
    final int numVals = numValues;
    Runnable requestGenerator;
    if (inputFile == null) {
        requestGenerator = new Runnable() {

            public void run() {
                System.out.println("Generating random requests.");
                Random random = new Random();
                try {
                    while (true) requestIds.put(Integer.toString(random.nextInt(numRequests) % numVals));
                } catch (InterruptedException e) {
                    e.printStackTrace();
                }
            }
        };
    } else {
        requestGenerator = new Runnable() {

            public void run() {
                try {
                    System.out.println("Using request file to generate requests.");
                    BufferedReader reader = new BufferedReader(new FileReader(inputFile), 1000000);
                    while (true) {
                        String line = reader.readLine();
                        if (line == null)
                            return;
                        requestIds.put(line.trim());
                    }
                } catch (Exception e) {
                    e.printStackTrace();
                }
            }
        };
    }
    executor.execute(requestGenerator);
    final Serializer<Object> keySerializer = new JsonTypeSerializer(JsonTypeDefinition.fromJson("'string'"), true);
    final AtomicInteger current = new AtomicInteger();
    final int progressIncrement = numRequests / 5;
    PerformanceTest readWriteTest = new PerformanceTest() {

        @Override
        public void doOperation(int index) throws Exception {
            try {
                totalResults.incrementAndGet();
                int curr = current.getAndIncrement();
                List<Versioned<byte[]>> results = store.get(new ByteArray(keySerializer.toBytes(requestIds.take())), null);
                if (curr % progressIncrement == 0)
                    System.out.println(curr);
                if (results.size() == 0)
                    nullResults.incrementAndGet();
            } catch (ObsoleteVersionException e) {
                obsoletes.incrementAndGet();
            }
        }
    };
    System.out.println("Running test...");
    readWriteTest.run(numRequests, numThreads);
    System.out.println("Random Access Read Only store Results:");
    System.out.println("Null reads ratio:" + (nullResults.doubleValue()) / totalResults.doubleValue());
    readWriteTest.printStats();
    System.exit(0);
}
Also used: BinarySearchStrategy(voldemort.store.readonly.BinarySearchStrategy) SearchStrategy(voldemort.store.readonly.SearchStrategy) GZIPOutputStream(java.util.zip.GZIPOutputStream) OutputStream(java.io.OutputStream) FileOutputStream(java.io.FileOutputStream) ArrayBlockingQueue(java.util.concurrent.ArrayBlockingQueue) Random(java.util.Random) ByteArray(voldemort.utils.ByteArray) StoreDefinitionBuilder(voldemort.store.StoreDefinitionBuilder) ReadOnlyStorageEngine(voldemort.store.readonly.ReadOnlyStorageEngine) ReadOnlyStorageFormat(voldemort.store.readonly.ReadOnlyStorageFormat) ClusterMapper(voldemort.xml.ClusterMapper) FileInputStream(java.io.FileInputStream) JsonStoreBuilder(voldemort.store.readonly.JsonStoreBuilder) ObsoleteVersionException(voldemort.versioning.ObsoleteVersionException) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) File(java.io.File) JsonTypeSerializer(voldemort.serialization.json.JsonTypeSerializer) Versioned(voldemort.versioning.Versioned) RoutingStrategyFactory(voldemort.routing.RoutingStrategyFactory) Reader(java.io.Reader) JsonReader(voldemort.serialization.json.JsonReader) InputStreamReader(java.io.InputStreamReader) BufferedReader(java.io.BufferedReader) FileReader(java.io.FileReader) OptionParser(joptsimple.OptionParser) BufferedWriter(java.io.BufferedWriter) GZIPInputStream(java.util.zip.GZIPInputStream) Executor(java.util.concurrent.Executor) StoreDefinition(voldemort.store.StoreDefinition) ConsistentRoutingStrategy(voldemort.routing.ConsistentRoutingStrategy) InputStream(java.io.InputStream) Cluster(voldemort.cluster.Cluster) FileNotFoundException(java.io.FileNotFoundException) IOException(java.io.IOException) OutputStreamWriter(java.io.OutputStreamWriter) OptionSet(joptsimple.OptionSet) SerializerDefinition(voldemort.serialization.SerializerDefinition) Writer(java.io.Writer)
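
Unlike the plain "string" definitions in the other examples, the definition here carries a JSON schema string as its second constructor argument. A minimal, hedged sketch of that variant (class name and printed value are illustrative):

import voldemort.serialization.DefaultSerializerFactory;
import voldemort.serialization.Serializer;
import voldemort.serialization.SerializerDefinition;

public class JsonSerializerDefinitionSketch {

    @SuppressWarnings("unchecked")
    public static void main(String[] args) {
        // "json" definitions pair the serializer name with a schema ('string' here).
        SerializerDefinition sdef = new SerializerDefinition("json", "'string'");
        Serializer<Object> serializer = (Serializer<Object>)
                new DefaultSerializerFactory().getSerializer(sdef);
        byte[] bytes = serializer.toBytes("key-0");
        System.out.println(serializer.toObject(bytes)); // prints: key-0
    }
}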

Example 5 with SerializerDefinition

Use of voldemort.serialization.SerializerDefinition in project voldemort by voldemort.

From the class ServerTestUtils, method getStoreDefs.

public static List<StoreDefinition> getStoreDefs(int numStores) {
    List<StoreDefinition> defs = new ArrayList<StoreDefinition>();
    SerializerDefinition serDef = new SerializerDefinition("string");
    for (int i = 0; i < numStores; i++) {
        defs.add(new StoreDefinitionBuilder()
                .setName("test" + i)
                .setType(InMemoryStorageConfiguration.TYPE_NAME)
                .setKeySerializer(serDef)
                .setValueSerializer(serDef)
                .setRoutingPolicy(RoutingTier.SERVER)
                .setRoutingStrategyType(RoutingStrategyType.CONSISTENT_STRATEGY)
                .setReplicationFactor(2)
                .setPreferredReads(1)
                .setRequiredReads(1)
                .setPreferredWrites(1)
                .setRequiredWrites(1)
                .build());
    }
    return defs;
}
Also used: StoreDefinitionBuilder(voldemort.store.StoreDefinitionBuilder) StoreDefinition(voldemort.store.StoreDefinition) ArrayList(java.util.ArrayList) SerializerDefinition(voldemort.serialization.SerializerDefinition)
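
A hedged usage sketch for this helper: render the generated definitions as stores.xml through StoreDefinitionsMapper, which appears in the aggregations below. That writeStoreList returns the XML as a String, and that ServerTestUtils lives in the voldemort package, are our assumptions about the API, not something shown in these examples:

import java.util.List;

import voldemort.ServerTestUtils;
import voldemort.store.StoreDefinition;
import voldemort.xml.StoreDefinitionsMapper;

public class StoreDefsSketch {

    public static void main(String[] args) {
        // Three in-memory store definitions named test0..test2.
        List<StoreDefinition> defs = ServerTestUtils.getStoreDefs(3);
        // Assumed API: writeStoreList serializes the definitions to stores.xml form.
        String storesXml = new StoreDefinitionsMapper().writeStoreList(defs);
        System.out.println(storesXml);
    }
}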

Aggregations

SerializerDefinition (voldemort.serialization.SerializerDefinition): 65 uses
StoreDefinitionBuilder (voldemort.store.StoreDefinitionBuilder): 46 uses
StoreDefinition (voldemort.store.StoreDefinition): 38 uses
HashMap (java.util.HashMap): 31 uses
ArrayList (java.util.ArrayList): 22 uses
ByteArray (voldemort.utils.ByteArray): 19 uses
Test (org.junit.Test): 18 uses
DefaultSerializerFactory (voldemort.serialization.DefaultSerializerFactory): 15 uses
File (java.io.File): 14 uses
Cluster (voldemort.cluster.Cluster): 12 uses
Node (voldemort.cluster.Node): 11 uses
LinkedList (java.util.LinkedList): 10 uses
IOException (java.io.IOException): 9 uses
Serializer (voldemort.serialization.Serializer): 9 uses
List (java.util.List): 8 uses
VoldemortException (voldemort.VoldemortException): 8 uses
StoreDefinitionsMapper (voldemort.xml.StoreDefinitionsMapper): 8 uses
StatTrackingStore (voldemort.store.stats.StatTrackingStore): 7 uses
Before (org.junit.Before): 6 uses
Store (voldemort.store.Store): 6 uses