Use of voldemort.cluster.Cluster in project voldemort (by voldemort) — class ReadOnlyStorageEngineTestInstance, method create.
/**
 * Builds a fully wired read-only store test fixture: generates {@code testSize}
 * random key/value pairs, runs them through {@code JsonStoreBuilder} to produce
 * per-node store files, then opens one {@code ReadOnlyStorageEngine} per node,
 * wrapped with compression and serialization layers.
 *
 * @param strategy search strategy used by each ReadOnlyStorageEngine
 * @param baseDir parent directory for every temp dir created here
 * @param testSize number of key/value pairs to generate
 * @param numNodes number of nodes; must equal {@code partitionMap.length}
 * @param repFactor replication factor for the store definition
 * @param keySerDef key serializer definition (its compression setting is applied below)
 * @param valueSerDef value serializer definition (its compression setting is applied below)
 * @param type on-disk read-only storage format to build
 * @param partitionMap partition ids owned by each node, indexed by node id
 * @return a test instance exposing the raw data, the per-node stores and the router
 * @throws Exception if data generation or store building fails
 */
public static ReadOnlyStorageEngineTestInstance create(SearchStrategy strategy, File baseDir, int testSize, int numNodes, int repFactor, SerializerDefinition keySerDef, SerializerDefinition valueSerDef, ReadOnlyStorageFormat type, int[][] partitionMap) throws Exception {
// create some test data
Map<String, String> data = createTestData(testSize);
JsonReader reader = makeTestDataReader(data, baseDir);
// set up definitions for cluster and store
List<Node> nodes = new ArrayList<Node>();
for (int i = 0; i < numNodes; i++) {
List<Integer> partitions = new ArrayList<Integer>(partitionMap[i].length);
for (int p : partitionMap[i]) {
partitions.add(p);
}
// ports are offset by node id so several nodes can coexist on localhost
nodes.add(new Node(i, "localhost", 8080 + i, 6666 + i, 7000 + i, partitions));
}
Cluster cluster = new Cluster("test", nodes);
// single store definition; reads require only one successful node response
StoreDefinition storeDef = new StoreDefinitionBuilder().setName("test").setType(ReadOnlyStorageConfiguration.TYPE_NAME).setKeySerializer(keySerDef).setValueSerializer(valueSerDef).setRoutingPolicy(RoutingTier.CLIENT).setRoutingStrategyType(RoutingStrategyType.CONSISTENT_STRATEGY).setReplicationFactor(repFactor).setPreferredReads(1).setRequiredReads(1).setPreferredWrites(1).setRequiredWrites(1).build();
RoutingStrategy router = new RoutingStrategyFactory().updateRoutingStrategy(storeDef, cluster);
// build store files in outputDir
File outputDir = TestUtils.createTempDir(baseDir);
// NOTE(review): the numeric arguments (testSize / 5, 1, 2, 10000) are builder
// tuning knobs — confirm their meaning against the JsonStoreBuilder constructor
JsonStoreBuilder storeBuilder = new JsonStoreBuilder(reader, cluster, storeDef, router, outputDir, null, testSize / 5, 1, 2, 10000, false);
storeBuilder.build(type);
File nodeDir = TestUtils.createTempDir(baseDir);
@SuppressWarnings("unchecked") Serializer<String> keySerializer = (Serializer<String>) new DefaultSerializerFactory().getSerializer(keySerDef);
@SuppressWarnings("unchecked") Serializer<String> valueSerializer = (Serializer<String>) new DefaultSerializerFactory().getSerializer(valueSerDef);
Serializer<String> transSerializer = new StringSerializer();
Map<Integer, Store<String, String, String>> nodeStores = Maps.newHashMap();
Map<Integer, ReadOnlyStorageEngine> readOnlyStores = Maps.newHashMap();
for (int i = 0; i < numNodes; i++) {
// give each node its own directory and install the built files as "version-0"
File currNode = new File(nodeDir, Integer.toString(i));
currNode.mkdirs();
currNode.deleteOnExit();
Utils.move(new File(outputDir, "node-" + Integer.toString(i)), new File(currNode, "version-0"));
// compression strategies derive from the serializer definitions above
CompressionStrategyFactory compressionStrategyFactory = new CompressionStrategyFactory();
CompressionStrategy keyCompressionStrat = compressionStrategyFactory.get(keySerDef.getCompression());
CompressionStrategy valueCompressionStrat = compressionStrategyFactory.get(valueSerDef.getCompression());
ReadOnlyStorageEngine readOnlyStorageEngine = new ReadOnlyStorageEngine("test", strategy, router, i, currNode, 1);
readOnlyStores.put(i, readOnlyStorageEngine);
// layer ordering: raw engine -> compression -> string serialization
Store<ByteArray, byte[], byte[]> innerStore = new CompressingStore(readOnlyStorageEngine, keyCompressionStrat, valueCompressionStrat);
nodeStores.put(i, SerializingStore.wrap(innerStore, keySerializer, valueSerializer, transSerializer));
}
return new ReadOnlyStorageEngineTestInstance(data, baseDir, readOnlyStores, nodeStores, router, keySerializer);
}
Use of voldemort.cluster.Cluster in project voldemort (by voldemort) — class RebootstrappingStoreTest, method rebalance.
/**
 * Moves partitions 0 and 1 onto node 1 and publishes the rewritten cluster
 * metadata to every node, so that connected clients must re-bootstrap.
 */
public void rebalance() {
// precondition: the test fixture started a multi-node cluster
assert servers != null && servers.size() > 1;
VoldemortConfig config = servers.get(0).getVoldemortConfig();
AdminClient adminClient = AdminClient.createTempAdminClient(config, cluster, 4);
List<Integer> partitionIds = ImmutableList.of(0, 1);
// presumably (donor = node 0, stealer = node 1) — confirm against the
// AdminClient.storeMntOps.migratePartitions signature
int req = adminClient.storeMntOps.migratePartitions(0, 1, STORE_NAME, partitionIds, null, null);
// wait on node 1, where the async migration request appears to run
adminClient.rpcOps.waitForCompletion(1, req, 5, TimeUnit.SECONDS);
Versioned<Cluster> versionedCluster = adminClient.metadataMgmtOps.getRemoteCluster(0);
Node node0 = versionedCluster.getValue().getNodeById(0);
Node node1 = versionedCluster.getValue().getNodeById(1);
// rebuild the node list: node 0 loses all partitions, node 1 owns 0 and 1
Node newNode0 = new Node(node0.getId(), node0.getHost(), node0.getHttpPort(), node0.getSocketPort(), node0.getAdminPort(), ImmutableList.<Integer>of());
Node newNode1 = new Node(node1.getId(), node1.getHost(), node1.getHttpPort(), node1.getSocketPort(), node1.getAdminPort(), ImmutableList.of(0, 1));
// physically drop the migrated partitions from node 0's store
long deleted = adminClient.storeMntOps.deletePartitions(0, STORE_NAME, ImmutableList.of(0, 1), null);
assert deleted > 0;
Cluster newCluster = new Cluster(cluster.getName(), ImmutableList.of(newNode0, newNode1), Lists.newArrayList(cluster.getZones()));
for (Node node : cluster.getNodes()) {
// NOTE(review): getVersion() returns the same VectorClock instance each
// iteration, so the clock accumulates one increment per node across the
// loop — looks intentional (each push supersedes the previous), but confirm
VectorClock clock = (VectorClock) versionedCluster.getVersion();
clock.incrementVersion(node.getId(), System.currentTimeMillis());
adminClient.metadataMgmtOps.updateRemoteCluster(node.getId(), newCluster, clock);
}
}
Use of voldemort.cluster.Cluster in project voldemort (by voldemort) — class InvalidMetadataCheckingStoreTest, method testAddingPartition.
/**
 * A client whose stale metadata claims extra partitions were ADDED to node 0
 * must be rejected: the server-side check has to raise
 * InvalidMetadataException for requests routed with that view.
 *
 * NOTE: the total number of partitions should remain same for hash
 * consistency
 */
public void testAddingPartition() {
    StoreDefinition def = ServerTestUtils.getStoreDefs(1).get(0);
    // Server-side view of the cluster: 11 partitions spread over 3 nodes.
    int[][] serverLayout = { { 0, 1, 2, 3 }, { 4, 5, 6, 7 }, { 8, 9, 10 } };
    Cluster serverCluster = ServerTestUtils.getLocalCluster(3, serverLayout);
    MetadataStore serverMetadata = ServerTestUtils.createMetadataStore(serverCluster, Arrays.asList(def));
    InvalidMetadataCheckingStore checkingStore = new InvalidMetadataCheckingStore(0, new DoNothingStore<ByteArray, byte[], byte[]>(def.getName()), serverMetadata);
    try {
        // Client-side view: partitions 4, 5 and 10 were added to node 0.
        int[][] clientLayout = { { 0, 1, 2, 3, 4, 5, 10 }, { 6, 7 }, { 8, 9 } };
        Cluster clientCluster = ServerTestUtils.getLocalCluster(3, clientLayout);
        MetadataStore clientMetadata = ServerTestUtils.createMetadataStore(clientCluster, Arrays.asList(def));
        doOperations(0, checkingStore, clientMetadata, def);
        fail("Should see InvalidMetadataExceptions");
    } catch (InvalidMetadataException expected) {
        // expected: the server must flag requests routed with stale metadata
    }
}
Use of voldemort.cluster.Cluster in project voldemort (by voldemort) — class InvalidMetadataCheckingStoreTest, method testRemovingPartition.
/**
 * A client whose stale metadata claims partitions were REMOVED from node 0
 * must still be served: owning fewer partitions never makes a routed request
 * invalid, so no InvalidMetadataException is expected here.
 */
public void testRemovingPartition() {
    StoreDefinition def = ServerTestUtils.getStoreDefs(1).get(0);
    // Server-side view of the cluster: 11 partitions spread over 3 nodes.
    int[][] serverLayout = { { 0, 1, 2, 3 }, { 4, 5, 6, 7 }, { 8, 9, 10 } };
    Cluster serverCluster = ServerTestUtils.getLocalCluster(3, serverLayout);
    MetadataStore serverMetadata = ServerTestUtils.createMetadataStore(serverCluster, Arrays.asList(def));
    InvalidMetadataCheckingStore checkingStore = new InvalidMetadataCheckingStore(0, new DoNothingStore<ByteArray, byte[], byte[]>(def.getName()), serverMetadata);
    try {
        // Client-side view: partitions 2 and 3 were moved off node 0.
        int[][] clientLayout = { { 0, 1 }, { 2, 4, 5, 6, 7 }, { 3, 8, 9, 10 } };
        Cluster clientCluster = ServerTestUtils.getLocalCluster(3, clientLayout);
        MetadataStore clientMetadata = ServerTestUtils.createMetadataStore(clientCluster, Arrays.asList(def));
        doOperations(0, checkingStore, clientMetadata, def);
    } catch (InvalidMetadataException e) {
        throw new RuntimeException("Should not see any InvalidMetaDataException", e);
    }
}
Use of voldemort.cluster.Cluster in project voldemort (by voldemort) — class ChunkedFileSetTest, method getTempStrategy.
/**
 * Builds a consistent routing strategy over a one-node local cluster where the
 * single node owns partition 0 only (replication factor 1).
 */
private RoutingStrategy getTempStrategy() {
    // Single node; port offsets match the zero-index convention used elsewhere.
    Node soleNode = new Node(0, "localhost", 8080, 6666, 7000, Arrays.asList(0));
    List<Node> nodes = new ArrayList<Node>();
    nodes.add(soleNode);
    Cluster singleNodeCluster = new Cluster("test", nodes);
    return new ConsistentRoutingStrategy(singleNodeCluster, 1);
}
Aggregations