Use of voldemort.store.StoreDefinition in project voldemort by voldemort.
The class HadoopStoreBuilderCollisionTest, method testCollisionWithParams.
@SuppressWarnings({ "unchecked" })
public void testCollisionWithParams(int totalElements, int maxCollisions) throws Exception {
    assertEquals(totalElements % maxCollisions, 0);
    // create test data
    Map<String, String> values = new HashMap<String, String>();
    List<String> valuesLeft = Lists.newArrayList();
    File testDir = TestUtils.createTempDir();
    File tempDir = new File(testDir, "temp");
    File outputDir = new File(testDir, "output");
    File storeDir = TestUtils.createTempDir(testDir);
    for (int i = 0; i < totalElements; i++) {
        values.put(Integer.toString(i), Integer.toString(i));
        valuesLeft.add(Integer.toString(i));
    }
    String storeName = "test";
    SerializerDefinition serDef = new SerializerDefinition("string");
    Cluster cluster = ServerTestUtils.getLocalCluster(1);
    Serializer<Object> serializer = (Serializer<Object>) new DefaultSerializerFactory().getSerializer(serDef);
    // write test data to text file
    File inputFile = File.createTempFile("input", ".txt", testDir);
    inputFile.deleteOnExit();
    StringBuilder contents = new StringBuilder();
    byte[] currentMd5 = TestUtils.randomBytes(2 * ByteUtils.SIZE_OF_INT);
    int entryId = 0;
    for (Map.Entry<String, String> entry : values.entrySet()) {
        if (entryId % maxCollisions == 0) {
            currentMd5 = TestUtils.randomBytes(2 * ByteUtils.SIZE_OF_INT);
        }
        contents.append(entry.getKey() + "\t" + entry.getValue() + "\n");
        byte[] oldMd5 = ByteUtils.copy(ByteUtils.md5(serializer.toBytes(entry.getKey())),
                                       0,
                                       2 * ByteUtils.SIZE_OF_INT);
        oldMd5ToNewMd5.put(new ByteArray(oldMd5), currentMd5);
        entryId++;
    }
    FileUtils.writeStringToFile(inputFile, contents.toString());
    StoreDefinition def = new StoreDefinitionBuilder().setName(storeName)
                                                      .setType(ReadOnlyStorageConfiguration.TYPE_NAME)
                                                      .setKeySerializer(serDef)
                                                      .setValueSerializer(serDef)
                                                      .setRoutingPolicy(RoutingTier.CLIENT)
                                                      .setRoutingStrategyType(RoutingStrategyType.CONSISTENT_STRATEGY)
                                                      .setReplicationFactor(1)
                                                      .setPreferredReads(1)
                                                      .setRequiredReads(1)
                                                      .setPreferredWrites(1)
                                                      .setRequiredWrites(1)
                                                      .build();
    HadoopStoreBuilder builder = new HadoopStoreBuilder("testCollisionWithParams",
                                                        new Props(),
                                                        new JobConf(),
                                                        CollidingTextStoreMapper.class,
                                                        TextInputFormat.class,
                                                        cluster,
                                                        def,
                                                        new Path(tempDir.getAbsolutePath()),
                                                        new Path(outputDir.getAbsolutePath()),
                                                        new Path(inputFile.getAbsolutePath()),
                                                        CheckSumType.MD5,
                                                        true,
                                                        false,
                                                        1024 * 1024 * 1024,
                                                        false,
                                                        null,
                                                        false);
    builder.build();
    File nodeFile = new File(outputDir, "node-0");
    File versionDir = new File(storeDir, "version-0");
    HdfsFetcher fetcher = new HdfsFetcher();
    fetcher.fetch(nodeFile.getAbsolutePath(), versionDir.getAbsolutePath());
    // Test if we work in the normal collision scenario
    // open store
    ReadOnlyStorageEngine engine = new ReadOnlyStorageEngine(storeName,
                                                             new CustomBinarySearchStrategy(),
                                                             new RoutingStrategyFactory().updateRoutingStrategy(def, cluster),
                                                             0,
                                                             storeDir,
                                                             1);
    Store<Object, Object, Object> store = SerializingStore.wrap(engine, serializer, serializer, serializer);
    // check values
    for (Map.Entry<String, String> entry : values.entrySet()) {
        List<Versioned<Object>> found = store.get(entry.getKey(), null);
        Assert.assertEquals("Incorrect number of results", 1, found.size());
        Assert.assertEquals(entry.getValue(), found.get(0).getValue());
    }
    // also check the iterator - first key iterator...
    List<String> valuesLeft2 = Lists.newArrayList(valuesLeft);
    ClosableIterator<ByteArray> keyIterator = engine.keys();
    int numElements = 0;
    while (keyIterator.hasNext()) {
        Object object = serializer.toObject(keyIterator.next().get());
        assertEquals(valuesLeft.remove(object), true);
        Assert.assertTrue(values.containsKey(object));
        numElements++;
    }
    Assert.assertEquals(numElements, values.size());
    Assert.assertEquals(valuesLeft.size(), 0);
    // ... and entry iterator
    ClosableIterator<Pair<ByteArray, Versioned<byte[]>>> entryIterator = engine.entries();
    numElements = 0;
    while (entryIterator.hasNext()) {
        Pair<ByteArray, Versioned<byte[]>> entry = entryIterator.next();
        assertEquals(valuesLeft2.remove(serializer.toObject(entry.getFirst().get())), true);
        Assert.assertEquals(values.get(serializer.toObject(entry.getFirst().get())),
                            serializer.toObject(entry.getSecond().getValue()));
        numElements++;
    }
    Assert.assertEquals(numElements, values.size());
    Assert.assertEquals(valuesLeft2.size(), 0);
}
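The collisions above are engineered against the first 2 * ByteUtils.SIZE_OF_INT = 8 bytes of the MD5 of the serialized key, the same slice the test records in oldMd5ToNewMd5. Below is a minimal sketch of computing that slice for a single key; only the ByteUtils calls come from the test, while the wrapper class, the UTF-8 shortcut for the "string" serializer, and the import path are assumptions.

import java.util.Arrays;

import voldemort.utils.ByteUtils;

public class Md5PrefixSketch {

    public static void main(String[] args) throws Exception {
        // The "string" serializer reduces to UTF-8 bytes for this sketch (assumption).
        byte[] keyBytes = "42".getBytes("UTF-8");
        // Keep only the leading 2 * SIZE_OF_INT = 8 bytes of the MD5; keys whose
        // prefixes match are the "collisions" the test manufactures.
        byte[] md5Prefix = ByteUtils.copy(ByteUtils.md5(keyBytes), 0, 2 * ByteUtils.SIZE_OF_INT);
        System.out.println(Arrays.toString(md5Prefix));
    }
}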
Use of voldemort.store.StoreDefinition in project voldemort by voldemort.
The class HadoopStoreWriterTest, method init.
private void init() {
    tmpOutPutDirectory = TestUtils.createTempDir();
    // Setup before each test method
    conf = new JobConf();
    conf.setInt(AbstractStoreBuilderConfigurable.NUM_CHUNKS, numChunks);
    conf.set("final.output.dir", tmpOutPutDirectory.getAbsolutePath());
    conf.set("mapred.output.dir", tmpOutPutDirectory.getAbsolutePath());
    conf.set("mapred.task.id", "1234");
    /**
     * We don't have to test different types of checksums. That's covered in
     * {@link voldemort.store.readonly.checksum.CheckSumTests}.
     */
    conf.set(VoldemortBuildAndPushJob.CHECKSUM_TYPE, CheckSum.CheckSumType.NONE.name());
    // generate a list of storeDefinitions.
    List<StoreDefinition> storeDefList = Lists.newArrayList(
            ServerTestUtils.getStoreDef("test",
                                        1, // replication factor 1, since we are testing a one node "cluster"
                                        1, 1, 1, 1, // preferred/required reads/writes all at 1
                                        RoutingStrategyType.CONSISTENT_STRATEGY.toString()));
    String storesXML = new StoreDefinitionsMapper().writeStoreList(storeDefList);
    conf.set("stores.xml", storesXML);
    String clusterXML = new ClusterMapper().writeCluster(ServerTestUtils.getLocalCluster(1));
    conf.set("cluster.xml", clusterXML);
    // We leverage the real mapper used in the BnP job to generate data with the proper format
    mapper = new BuildAndPushMapper();
    mapper.configure(conf);
    testCollector = new TestCollector();
    testCollectorWrapper = new TestCollectorWrapper();
    testCollectorWrapper.setCollector(testCollector);
}
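On the Hadoop side, the BnP mapper recovers objects from the same configuration strings that init() writes. A hedged sketch of that inverse step: readStoreList and readCluster are assumed to be the read counterparts of the writeStoreList/writeCluster calls above, and the import paths are taken from the Voldemort tree.

import java.io.StringReader;
import java.util.List;

import org.apache.hadoop.mapred.JobConf;

import voldemort.cluster.Cluster;
import voldemort.store.StoreDefinition;
import voldemort.xml.ClusterMapper;
import voldemort.xml.StoreDefinitionsMapper;

public class ConfRoundTripSketch {

    // Parse the stores.xml / cluster.xml strings back out of a JobConf.
    public static void dump(JobConf conf) {
        List<StoreDefinition> defs =
                new StoreDefinitionsMapper().readStoreList(new StringReader(conf.get("stores.xml")));
        Cluster cluster = new ClusterMapper().readCluster(new StringReader(conf.get("cluster.xml")));
        System.out.println(defs.size() + " store definition(s) for cluster " + cluster.getName());
    }
}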
Use of voldemort.store.StoreDefinition in project voldemort by voldemort.
The class AbstractZonedRebalanceTest, method testShuffleZZAndShuffleAgain.
@Test(timeout = 600000)
public void testShuffleZZAndShuffleAgain() throws Exception {
    logger.info("Starting testShuffleZZAndShuffleAgain");
    // Hacky workaround for TOCTOU BindException issues. Each test that
    // invokes this method brings servers up & down on the same ports. The
    // OS seems to need a rest between subsequent tests...
    Thread.sleep(TimeUnit.SECONDS.toMillis(5));
    Cluster interimCluster = RebalanceUtils.getInterimCluster(zzCurrent, zzShuffle);
    // start all the servers
    List<Integer> serverList = new ArrayList<Integer>(interimCluster.getNodeIds());
    Map<String, String> configProps = new HashMap<String, String>();
    configProps.put("admin.max.threads", "5");
    interimCluster = startServers(interimCluster, zzStoresXml, serverList, configProps);
    // Populate cluster with data
    for (StoreDefinition storeDef : zzStores) {
        populateData(zzCurrent, storeDef);
    }
    String bootstrapUrl = getBootstrapUrl(interimCluster, 0);
    // Shuffle cluster
    ClusterTestUtils.RebalanceKit rebalanceKit = ClusterTestUtils.getRebalanceKit(bootstrapUrl, zzShuffle, zzStores);
    rebalanceAndCheck(rebalanceKit.plan, rebalanceKit.controller, serverList);
    checkConsistentMetadata(zzShuffle, serverList);
    // Now, go from the shuffled state back to the original to confirm
    // that subsequent rebalances can be invoked.
    rebalanceKit = ClusterTestUtils.getRebalanceKit(bootstrapUrl, zzCurrent, zzStores);
    rebalanceAndCheck(rebalanceKit.plan, rebalanceKit.controller, serverList);
    checkConsistentMetadata(zzCurrent, serverList);
    // Done.
    stopServer(serverList);
}
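The forward-and-back structure is what proves a second rebalance can follow a first on the same metadata. Condensed to its skeleton below; all helpers and zz* fixtures belong to the surrounding test class, so this is an illustrative extraction rather than standalone code.

private void shuffleThereAndBack(String bootstrapUrl, List<Integer> serverList) throws Exception {
    // forward: current -> shuffled
    ClusterTestUtils.RebalanceKit forward = ClusterTestUtils.getRebalanceKit(bootstrapUrl, zzShuffle, zzStores);
    rebalanceAndCheck(forward.plan, forward.controller, serverList);
    checkConsistentMetadata(zzShuffle, serverList);
    // back: shuffled -> current, exercising a subsequent rebalance
    ClusterTestUtils.RebalanceKit back = ClusterTestUtils.getRebalanceKit(bootstrapUrl, zzCurrent, zzStores);
    rebalanceAndCheck(back.plan, back.controller, serverList);
    checkConsistentMetadata(zzCurrent, serverList);
}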
Use of voldemort.store.StoreDefinition in project voldemort by voldemort.
The class AbstractZonedRebalanceTest, method testZonedRebalance.
// TODO: The tests based on this method are susceptible to the TOCTOU
// BindException issue, since findFreePorts is used to determine the ports
// for localhost:PORT of each node.
/**
 * Scripts the execution of a specific type of zoned rebalance test: sets up
 * a cluster based on cCluster plus any new nodes/zones in fCluster,
 * rebalances to fCluster, and verifies that the rebalance was correct.
 *
 * @param testTag For pretty printing
 * @param cCluster current cluster
 * @param fCluster final cluster
 * @param cStoresXml XML file with current stores xml
 * @param fStoresXml Unused parameter. Included for symmetry in the method
 *        declaration.
 * @param cStoreDefs store defs for the current cluster (from cStoresXml)
 * @param fStoreDefs store defs for the final cluster
 * @throws Exception
 */
public void testZonedRebalance(String testTag,
                               Cluster cCluster,
                               Cluster fCluster,
                               String cStoresXml,
                               String fStoresXml,
                               List<StoreDefinition> cStoreDefs,
                               List<StoreDefinition> fStoreDefs) throws Exception {
    logger.info("Starting " + testTag);
    // Hacky workaround for TOCTOU BindException issues. Each test that
    // invokes this method brings servers up & down on the same ports. The
    // OS seems to need a rest between subsequent tests...
    Thread.sleep(TimeUnit.SECONDS.toMillis(5));
    try {
        Cluster interimCluster = RebalanceUtils.getInterimCluster(cCluster, fCluster);
        // start all the servers
        List<Integer> serverList = new ArrayList<Integer>(interimCluster.getNodeIds());
        Map<String, String> configProps = new HashMap<String, String>();
        configProps.put("admin.max.threads", "5");
        interimCluster = startServers(interimCluster, cStoresXml, serverList, configProps);
        String bootstrapUrl = getBootstrapUrl(interimCluster, 0);
        ClusterTestUtils.RebalanceKit rebalanceKit = ClusterTestUtils.getRebalanceKit(bootstrapUrl, fCluster, fStoreDefs);
        try {
            for (StoreDefinition storeDef : cStoreDefs) {
                populateData(cCluster, storeDef);
            }
            rebalanceAndCheck(rebalanceKit.plan, rebalanceKit.controller, serverList);
            checkConsistentMetadata(fCluster, serverList);
        } finally {
            // stop servers
            stopServer(serverList);
        }
    } catch (AssertionError ae) {
        logger.error("Assertion broken in " + testTag + " : ", ae);
        throw ae;
    }
}
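Given that parameter contract, an invocation shaped like the shuffle test earlier on this page would look as follows. The zz* fixtures are borrowed from that test, the test tag is made up for illustration, and fStoresXml is passed only for symmetry, per the Javadoc.

testZonedRebalance("ShuffleZZ", zzCurrent, zzShuffle, zzStoresXml, zzStoresXml, zzStores, zzStores);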
Use of voldemort.store.StoreDefinition in project voldemort by voldemort.
The class RebalanceRebootstrapConsistencyTest, method rebalance.
/*
 * Simulate rebalance behavior
 */
public void rebalance() {
    assert servers != null && servers.size() > 1;
    VoldemortConfig config = servers.get(0).getVoldemortConfig();
    adminClient = AdminClient.createTempAdminClient(config, cluster, 4);
    List<Integer> partitionIds = ImmutableList.of(0, 1);
    int req = adminClient.storeMntOps.migratePartitions(0, 1, testStoreNameRW, partitionIds, null, null);
    adminClient.rpcOps.waitForCompletion(1, req, 5, TimeUnit.SECONDS);
    Versioned<Cluster> versionedCluster = adminClient.metadataMgmtOps.getRemoteCluster(0);
    Node node0 = versionedCluster.getValue().getNodeById(0);
    Node node1 = versionedCluster.getValue().getNodeById(1);
    // Strip both partitions from node 0 and hand them to node 1.
    Node newNode0 = new Node(node0.getId(), node0.getHost(), node0.getHttpPort(), node0.getSocketPort(), node0.getAdminPort(), ImmutableList.<Integer>of());
    Node newNode1 = new Node(node1.getId(), node1.getHost(), node1.getHttpPort(), node1.getSocketPort(), node1.getAdminPort(), ImmutableList.of(0, 1));
    adminClient.storeMntOps.deletePartitions(0, testStoreNameRW, ImmutableList.of(0, 1), null);
    newCluster = new Cluster(cluster.getName(), ImmutableList.of(newNode0, newNode1), Lists.newArrayList(cluster.getZones()));
    newstoredefs = new ArrayList<StoreDefinition>();
    newstoredefs.add(rwStoreDefWithReplication2);
    for (Node node : cluster.getNodes()) {
        VectorClock clock = (VectorClock) versionedCluster.getVersion();
        clock.incrementVersion(node.getId(), System.currentTimeMillis());
        adminClient.metadataMgmtOps.updateRemoteMetadata(node.getId(),
                                                         MetadataStore.STORES_KEY,
                                                         new Versioned<String>(storeMapper.writeStoreList(newstoredefs), clock));
        adminClient.metadataMgmtOps.updateRemoteMetadata(node.getId(),
                                                         MetadataStore.CLUSTER_KEY,
                                                         new Versioned<String>(clusterMapper.writeCluster(newCluster), clock));
    }
    adminClient.metadataMgmtOps.updateMetadataversion(adminClient.getAdminClientCluster().getNodeIds(), CLUSTER_VERSION_KEY);
}
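A natural follow-up to rebalance() is reading the metadata back to confirm the push landed. A hedged sketch using the same test fields: getRemoteCluster appears in the method above, while getRemoteStoreDefList is assumed to be its stores-side counterpart on metadataMgmtOps.

private void verifyPushedMetadata() {
    // Re-fetch what updateRemoteMetadata wrote to node 0 (read APIs assumed).
    Versioned<Cluster> remoteCluster = adminClient.metadataMgmtOps.getRemoteCluster(0);
    Versioned<List<StoreDefinition>> remoteStores = adminClient.metadataMgmtOps.getRemoteStoreDefList(0);
    // Node 1 should now own partitions 0 and 1; the store list should match newstoredefs.
    assert remoteCluster.getValue().getNodeById(1).getPartitionIds().containsAll(ImmutableList.of(0, 1));
    assert remoteStores.getValue().size() == newstoredefs.size();
}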