Use of voldemort.store.metadata.MetadataStore in project voldemort by voldemort.
From the class InvalidMetadataCheckingStoreTest, method testValidMetaData:
public void testValidMetaData() {
    Cluster cluster = ServerTestUtils.getLocalCluster(3, new int[][] { { 0, 1, 2, 3 }, { 4, 5, 6, 7 }, { 8, 9, 10, 11 } });
    StoreDefinition storeDef = ServerTestUtils.getStoreDefs(1).get(0);
    MetadataStore metadata = ServerTestUtils.createMetadataStore(cluster, Arrays.asList(storeDef));
    InvalidMetadataCheckingStore store = new InvalidMetadataCheckingStore(0, new DoNothingStore<ByteArray, byte[], byte[]>(storeDef.getName()), metadata);
    try {
        doOperations(0, store, metadata, storeDef);
    } catch (InvalidMetadataException e) {
        throw new RuntimeException("Should not see any InvalidMetaDataException", e);
    }
}
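For contrast, here is a minimal sketch of the failing case (an assumption for illustration, not a test from the voldemort source): construct the checking store for a different node than the one the operations target. Assuming doOperations(nodeId, ...) drives requests for keys owned by nodeId, the checking store should reject them with an InvalidMetadataException.

public void testInvalidMetaDataSketch() {
    Cluster cluster = ServerTestUtils.getLocalCluster(3, new int[][] { { 0, 1, 2, 3 }, { 4, 5, 6, 7 }, { 8, 9, 10, 11 } });
    StoreDefinition storeDef = ServerTestUtils.getStoreDefs(1).get(0);
    MetadataStore metadata = ServerTestUtils.createMetadataStore(cluster, Arrays.asList(storeDef));
    // Hypothetical setup: the store claims to be node 1 while operations target node 0.
    InvalidMetadataCheckingStore store = new InvalidMetadataCheckingStore(1, new DoNothingStore<ByteArray, byte[], byte[]>(storeDef.getName()), metadata);
    try {
        doOperations(0, store, metadata, storeDef);
        fail("Expected an InvalidMetadataException for partitions node 1 does not own");
    } catch (InvalidMetadataException e) {
        // expected: the keys route to node 0's partitions, not node 1's
    }
}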
Use of voldemort.store.metadata.MetadataStore in project voldemort by voldemort.
From the class AdminRebalanceTest, method testRebalanceNodeRO:
@Test(timeout = 60000)
public void testRebalanceNodeRO() throws IOException {
    try {
        startFourNodeRO();
        int numChunks = 5;
        for (StoreDefinition storeDef : Lists.newArrayList(storeDef1, storeDef2)) {
            buildROStore(storeDef, numChunks);
        }
        // Set into rebalancing state
        for (RebalanceTaskInfo partitionPlan : plans) {
            getServer(partitionPlan.getStealerId()).getMetadataStore().put(MetadataStore.SERVER_STATE_KEY, MetadataStore.VoldemortState.REBALANCING_MASTER_SERVER);
            getServer(partitionPlan.getStealerId()).getMetadataStore().put(MetadataStore.REBALANCING_STEAL_INFO, new RebalancerState(Lists.newArrayList(RebalanceTaskInfo.create(partitionPlan.toJsonString()))));
            getServer(partitionPlan.getStealerId()).getMetadataStore().put(MetadataStore.REBALANCING_SOURCE_CLUSTER_XML, partitionPlan.getInitialCluster());
        }
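        // Each stealer is moved into the REBALANCING_MASTER_SERVER state, handed its
        // pending task list via REBALANCING_STEAL_INFO, and given the pre-rebalance
        // topology via REBALANCING_SOURCE_CLUSTER_XML before any data is moved.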
        // Actually run it
        try {
            for (RebalanceTaskInfo currentPlan : plans) {
                int asyncId = adminClient.rebalanceOps.rebalanceNode(currentPlan);
                assertNotSame("Got a valid rebalanceAsyncId", -1, asyncId);
                getAdminClient().rpcOps.waitForCompletion(currentPlan.getStealerId(), asyncId, 300, TimeUnit.SECONDS);
                // Test that plan has been removed from the list
                assertFalse(getServer(currentPlan.getStealerId()).getMetadataStore().getRebalancerState().getAll().contains(currentPlan));
            }
        } catch (Exception e) {
            e.printStackTrace();
            fail("Should not throw any exceptions");
        }
        // Check if files have been copied
        for (StoreDefinition storeDef : Lists.newArrayList(storeDef1, storeDef2)) {
            String storeName = storeDef.getName();
            for (RebalanceTaskInfo currentPlan : plans) {
                MetadataStore metadataStore = getServer(currentPlan.getStealerId()).getMetadataStore();
                int nodeId = metadataStore.getNodeId();
                int zoneId = metadataStore.getCluster().getNodeById(nodeId).getZoneId();
                StoreRoutingPlan storeRoutingPlan = new StoreRoutingPlan(metadataStore.getCluster(), storeDef);
                File currentDir = new File(((ReadOnlyStorageEngine) getStore(currentPlan.getStealerId(), storeName)).getCurrentDirPath());
                if (currentPlan.getPartitionStores().contains(storeDef.getName())) {
                    for (Integer partitionId : currentPlan.getStoreToPartitionIds().get(storeName)) {
                        int zoneNary = -1;
                        try {
                            zoneNary = storeRoutingPlan.getZoneNaryForNodesPartition(zoneId, nodeId, partitionId);
                        } catch (VoldemortException ve) {
                            // The node does not host this partition; it means we
                            // don't want to consider that partition.
                            continue;
                        }
                        if (zoneNary < storeDef.getReplicationFactor()) {
                            // Read-only chunk files are named <partitionId>_<zoneNary>_<chunkId>.data/.index
                            for (int chunkId = 0; chunkId < numChunks; chunkId++) {
                                assertTrue(new File(currentDir, partitionId + "_" + zoneNary + "_" + chunkId + ".data").exists());
                                assertTrue(new File(currentDir, partitionId + "_" + zoneNary + "_" + chunkId + ".index").exists());
                            }
                        }
                    }
                }
            }
        }
        // All servers should be back to normal state
        for (VoldemortServer server : servers) {
            assertEquals(server.getMetadataStore().getRebalancerState(), new RebalancerState(new ArrayList<RebalanceTaskInfo>()));
            assertEquals(server.getMetadataStore().getServerStateUnlocked(), MetadataStore.VoldemortState.NORMAL_SERVER);
        }
        // Test the "cluster + swap" changes
        // Test 1) Fail some swap by adding a dummy store
        servers[2].getMetadataStore().put(MetadataStore.STORES_KEY,
                Lists.newArrayList(storeDef1,
                        storeDef2,
                        new StoreDefinitionBuilder().setName("test3")
                                .setType(ReadOnlyStorageConfiguration.TYPE_NAME)
                                .setKeySerializer(new SerializerDefinition("string"))
                                .setValueSerializer(new SerializerDefinition("string"))
                                .setRoutingPolicy(RoutingTier.CLIENT)
                                .setRoutingStrategyType(RoutingStrategyType.CONSISTENT_STRATEGY)
                                .setReplicationFactor(2)
                                .setPreferredReads(1)
                                .setRequiredReads(1)
                                .setPreferredWrites(1)
                                .setRequiredWrites(1)
                                .build()));
        try {
            adminClient.rebalanceOps.rebalanceStateChange(currentCluster, finalCluster, servers[2].getMetadataStore().getStoreDefList(), servers[2].getMetadataStore().getStoreDefList(), plans, true, true, false, true, true);
            fail("Should have thrown an exception since one node doesn't have the store");
        } catch (VoldemortException e) {
            // expected: the dummy store "test3" exists only on server 2
        }
        servers[2].getMetadataStore().put(MetadataStore.STORES_KEY, Lists.newArrayList(storeDef1, storeDef2));
        // Test that all servers are still using the old cluster and have
        // swapped successfully
        checkRO(currentCluster);
        // Test 2) All passes scenario
        adminClient.rebalanceOps.rebalanceStateChange(currentCluster, finalCluster, servers[2].getMetadataStore().getStoreDefList(), servers[2].getMetadataStore().getStoreDefList(), plans, true, true, false, true, true);
        checkRO(finalCluster);
        // Test 3) Re-run a plan after the swap; the fetched files are already
        // open and mmap-ed. Should fail...
        for (RebalanceTaskInfo partitionPlan : plans) {
            getServer(partitionPlan.getStealerId()).getMetadataStore().put(MetadataStore.SERVER_STATE_KEY, MetadataStore.VoldemortState.REBALANCING_MASTER_SERVER);
            getServer(partitionPlan.getStealerId()).getMetadataStore().put(MetadataStore.REBALANCING_STEAL_INFO, new RebalancerState(Lists.newArrayList(RebalanceTaskInfo.create(partitionPlan.toJsonString()))));
        }
        // Actually run it
        try {
            int asyncId = adminClient.rebalanceOps.rebalanceNode(plans.get(0));
            getAdminClient().rpcOps.waitForCompletion(plans.get(0).getStealerId(), asyncId, 300, TimeUnit.SECONDS);
            fail("Should throw an exception");
        } catch (Exception e) {
            // expected
        }
    } finally {
        shutDown();
    }
}
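The checkRO helper used above is defined elsewhere in AdminRebalanceTest and is not shown on this page. As a rough sketch of what it needs to verify, assuming it only compares each server's cluster metadata against the expected topology (the real helper may check more, such as the read-only engines' routing strategies):

private void checkRO(Cluster expectedCluster) {
    // Assumed behavior: every server's metadata should reflect the expected cluster.
    for (VoldemortServer server : servers) {
        assertEquals(expectedCluster, server.getMetadataStore().getCluster());
    }
}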