Example usage of voldemort.store.readonly.mr.BuildAndPushMapper in the voldemort project.
The following shows the init method of the HadoopStoreWriterTest class.
/**
 * Per-test setup: creates a temp output directory, builds a single-node
 * JobConf plus the stores/cluster XML it needs, and wires up the real
 * BnP mapper together with the test collector stubs.
 */
private void init() {
    tmpOutPutDirectory = TestUtils.createTempDir();

    // Fresh Hadoop job configuration with all output routed to the temp dir.
    conf = new JobConf();
    conf.setInt(AbstractStoreBuilderConfigurable.NUM_CHUNKS, numChunks);
    conf.set("final.output.dir", tmpOutPutDirectory.getAbsolutePath());
    conf.set("mapred.output.dir", tmpOutPutDirectory.getAbsolutePath());
    conf.set("mapred.task.id", "1234");

    // Checksum variations are already exercised by
    // voldemort.store.readonly.checksum.CheckSumTests, so NONE suffices here.
    conf.set(VoldemortBuildAndPushJob.CHECKSUM_TYPE, CheckSum.CheckSumType.NONE.name());

    // A single store definition: replication factor 1 and all
    // preferred/required reads/writes at 1, matching the one-node "cluster".
    StoreDefinition storeDef = ServerTestUtils.getStoreDef("test",
                                                           1,
                                                           1,
                                                           1,
                                                           1,
                                                           1,
                                                           RoutingStrategyType.CONSISTENT_STRATEGY.toString());
    String storesXml = new StoreDefinitionsMapper().writeStoreList(Lists.newArrayList(storeDef));
    conf.set("stores.xml", storesXml);

    String clusterXml = new ClusterMapper().writeCluster(ServerTestUtils.getLocalCluster(1));
    conf.set("cluster.xml", clusterXml);

    // Use the production BnP mapper so generated records carry the real format.
    mapper = new BuildAndPushMapper();
    mapper.configure(conf);

    // Collector stubs that capture the mapper's output for later assertions.
    testCollector = new TestCollector();
    testCollectorWrapper = new TestCollectorWrapper();
    testCollectorWrapper.setCollector(testCollector);
}
Aggregations