Example use of voldemort.store.StoreDefinition in project voldemort, from class AdminRebalanceTest, method testRebalanceNodeRORW.
@Test(timeout = 60000)
public void testRebalanceNodeRORW() throws IOException, InterruptedException {
    try {
        startFourNodeRORW();
        int numChunks = 5;
        // Build RO data for the two read-only stores under test.
        for (StoreDefinition storeDef : Lists.newArrayList(storeDef1, storeDef2)) {
            buildROStore(storeDef, numChunks);
        }
        // Set every stealer node into rebalancing state so the individual
        // rebalanceNode calls below are accepted.
        for (RebalanceTaskInfo partitionPlan : plans) {
            getServer(partitionPlan.getStealerId()).getMetadataStore()
                    .put(MetadataStore.SERVER_STATE_KEY,
                         MetadataStore.VoldemortState.REBALANCING_MASTER_SERVER);
            getServer(partitionPlan.getStealerId()).getMetadataStore()
                    .put(MetadataStore.REBALANCING_STEAL_INFO,
                         new RebalancerState(Lists.newArrayList(RebalanceTaskInfo.create(partitionPlan.toJsonString()))));
            getServer(partitionPlan.getStealerId()).getMetadataStore()
                    .put(MetadataStore.REBALANCING_SOURCE_CLUSTER_XML,
                         partitionPlan.getInitialCluster());
        }
        // Actually run each plan and wait for completion.
        try {
            for (RebalanceTaskInfo currentPlan : plans) {
                int asyncId = adminClient.rebalanceOps.rebalanceNode(currentPlan);
                assertNotSame("Got a valid rebalanceAsyncId", -1, asyncId);
                getAdminClient().rpcOps.waitForCompletion(currentPlan.getStealerId(),
                                                          asyncId,
                                                          300,
                                                          TimeUnit.SECONDS);
                // Test that the completed plan has been removed from the list
                assertFalse(getServer(currentPlan.getStealerId()).getMetadataStore()
                                    .getRebalancerState()
                                    .getAll()
                                    .contains(currentPlan));
            }
        } catch (Exception e) {
            // Carry the actual exception text into the failure message rather
            // than discarding it after a printStackTrace.
            e.printStackTrace();
            fail("Should not throw any exceptions: " + e.getMessage());
        }
        // Test 1) Change one of the rebalance partitions info to force a
        // failure
        servers[3].getMetadataStore()
                  .getRebalancerState()
                  .update(new RebalanceTaskInfo(3,
                                                0,
                                                new HashMap<String, List<Integer>>(),
                                                currentCluster));
        try {
            adminClient.rebalanceOps.rebalanceStateChange(currentCluster,
                                                          finalCluster,
                                                          servers[2].getMetadataStore().getStoreDefList(),
                                                          servers[2].getMetadataStore().getStoreDefList(),
                                                          plans,
                                                          true,
                                                          true,
                                                          true,
                                                          true,
                                                          true);
            fail("Should have thrown an exception since we added state before hand");
        } catch (VoldemortRebalancingException e) {
            // expected: pre-existing rebalancer state on node 3 must abort the change
        }
        // All nodes should have been rolled back to normal state, except node
        // 3 whose state we seeded manually.
        for (VoldemortServer server : servers) {
            if (server.getMetadataStore().getNodeId() != 3) {
                assertEquals(server.getMetadataStore().getRebalancerState(),
                             new RebalancerState(new ArrayList<RebalanceTaskInfo>()));
                assertEquals(server.getMetadataStore().getServerStateUnlocked(),
                             MetadataStore.VoldemortState.NORMAL_SERVER);
            }
            assertEquals(server.getMetadataStore().getCluster(), currentCluster);
        }
        checkRO(currentCluster);
        // Clean-up everything
        cleanUpAllState();
        // Test 2) Add another store to trigger a failure
        servers[2].getMetadataStore()
                  .put(MetadataStore.STORES_KEY,
                       Lists.newArrayList(storeDef1,
                                          storeDef2,
                                          storeDef3,
                                          storeDef4,
                                          new StoreDefinitionBuilder().setName("test5")
                                                                      .setType(ReadOnlyStorageConfiguration.TYPE_NAME)
                                                                      .setKeySerializer(new SerializerDefinition("string"))
                                                                      .setValueSerializer(new SerializerDefinition("string"))
                                                                      .setRoutingPolicy(RoutingTier.CLIENT)
                                                                      .setRoutingStrategyType(RoutingStrategyType.CONSISTENT_STRATEGY)
                                                                      .setReplicationFactor(2)
                                                                      .setPreferredReads(1)
                                                                      .setRequiredReads(1)
                                                                      .setPreferredWrites(1)
                                                                      .setRequiredWrites(1)
                                                                      .build()));
        try {
            adminClient.rebalanceOps.rebalanceStateChange(currentCluster,
                                                          finalCluster,
                                                          servers[2].getMetadataStore().getStoreDefList(),
                                                          servers[2].getMetadataStore().getStoreDefList(),
                                                          plans,
                                                          true,
                                                          true,
                                                          true,
                                                          true,
                                                          true);
            // Fixed message: this scenario fails because an extra store was
            // added on node 2, not because of pre-seeded rebalancer state.
            fail("Should have thrown an exception since an extra store was added on node 2");
        } catch (VoldemortRebalancingException e) {
            // expected: store-list mismatch must abort the state change
        }
        Thread.sleep(1000);
        for (VoldemortServer server : servers) {
            assertEquals(server.getMetadataStore().getRebalancerState(),
                         new RebalancerState(new ArrayList<RebalanceTaskInfo>()));
            assertEquals(server.getMetadataStore().getServerStateUnlocked(),
                         MetadataStore.VoldemortState.NORMAL_SERVER);
            assertEquals(server.getMetadataStore().getCluster(), currentCluster);
        }
        checkRO(currentCluster);
        // Clean-up everything
        cleanUpAllState();
        // Put server 2 back to its normal store list
        servers[2].getMetadataStore()
                  .put(MetadataStore.STORES_KEY,
                       Lists.newArrayList(storeDef1, storeDef2, storeDef3, storeDef4));
        // Test 3) Everything should work
        adminClient.rebalanceOps.rebalanceStateChange(currentCluster,
                                                      finalCluster,
                                                      servers[2].getMetadataStore().getStoreDefList(),
                                                      servers[2].getMetadataStore().getStoreDefList(),
                                                      plans,
                                                      true,
                                                      true,
                                                      true,
                                                      true,
                                                      true);
        List<Integer> nodesChecked = Lists.newArrayList();
        for (RebalanceTaskInfo plan : plans) {
            nodesChecked.add(plan.getStealerId());
            assertEquals(servers[plan.getStealerId()].getMetadataStore().getRebalancerState(),
                         new RebalancerState(Lists.newArrayList(plan)));
            assertEquals(servers[plan.getStealerId()].getMetadataStore().getServerStateUnlocked(),
                         MetadataStore.VoldemortState.REBALANCING_MASTER_SERVER);
            assertEquals(servers[plan.getStealerId()].getMetadataStore().getCluster(), finalCluster);
        }
        List<Integer> allNodes = Lists.newArrayList(Utils.nodeListToNodeIdList(Lists.newArrayList(currentCluster.getNodes())));
        allNodes.removeAll(nodesChecked);
        // Check all other (non-stealer) nodes stayed in normal state
        for (int nodeId : allNodes) {
            assertEquals(servers[nodeId].getMetadataStore().getRebalancerState(),
                         new RebalancerState(new ArrayList<RebalanceTaskInfo>()));
            assertEquals(servers[nodeId].getMetadataStore().getServerStateUnlocked(),
                         MetadataStore.VoldemortState.NORMAL_SERVER);
            assertEquals(servers[nodeId].getMetadataStore().getCluster(), finalCluster);
        }
        checkRO(finalCluster);
    } finally {
        shutDown();
    }
}
Example use of voldemort.store.StoreDefinition in project voldemort, from class StoreRoutingPlanTest, method setupNonContiguous.
@Before
public void setupNonContiguous() {
    // Two-zone cluster (zones 1 and 3) whose node ids are non-contiguous.
    Cluster twoZoneCluster = ClusterTestUtils.getZ1Z3ClusterWithNonContiguousNodeIds();
    // Replication factor of 2 in each of the two zones (total 4).
    HashMap<Integer, Integer> twoZoneRepFactor = new HashMap<Integer, Integer>();
    twoZoneRepFactor.put(1, 2);
    twoZoneRepFactor.put(3, 2);
    StoreDefinition twoZoneStoreDef = new StoreDefinitionBuilder().setName("z1z3211")
                                                                  .setType(BdbStorageConfiguration.TYPE_NAME)
                                                                  .setKeySerializer(new SerializerDefinition("string"))
                                                                  .setValueSerializer(new SerializerDefinition("string"))
                                                                  .setRoutingPolicy(RoutingTier.CLIENT)
                                                                  .setRoutingStrategyType(RoutingStrategyType.ZONE_STRATEGY)
                                                                  .setReplicationFactor(4)
                                                                  .setPreferredReads(1)
                                                                  .setRequiredReads(1)
                                                                  .setPreferredWrites(1)
                                                                  .setRequiredWrites(1)
                                                                  .setZoneCountReads(0)
                                                                  .setZoneCountWrites(0)
                                                                  .setZoneReplicationFactor(twoZoneRepFactor)
                                                                  .setHintedHandoffStrategy(HintedHandoffStrategyType.PROXIMITY_STRATEGY)
                                                                  .build();
    z1z3BaseRoutingPlan = new BaseStoreRoutingPlan(twoZoneCluster, twoZoneStoreDef);
    z1z3StoreRoutingPlan = new StoreRoutingPlan(twoZoneCluster, twoZoneStoreDef);
    // Three-zone cluster (zones 1, 3 and 5), again with non-contiguous node ids.
    Cluster threeZoneCluster = ClusterTestUtils.getZ1Z3Z5ClusterWithNonContiguousNodeIds();
    // Replication factor of 2 per zone across three zones (total 6).
    HashMap<Integer, Integer> threeZoneRepFactor = new HashMap<Integer, Integer>();
    threeZoneRepFactor.put(1, 2);
    threeZoneRepFactor.put(3, 2);
    threeZoneRepFactor.put(5, 2);
    StoreDefinition threeZoneStoreDef = new StoreDefinitionBuilder().setName("z1z3z5211")
                                                                    .setType(BdbStorageConfiguration.TYPE_NAME)
                                                                    .setKeySerializer(new SerializerDefinition("string"))
                                                                    .setValueSerializer(new SerializerDefinition("string"))
                                                                    .setRoutingPolicy(RoutingTier.CLIENT)
                                                                    .setRoutingStrategyType(RoutingStrategyType.ZONE_STRATEGY)
                                                                    .setReplicationFactor(6)
                                                                    .setPreferredReads(1)
                                                                    .setRequiredReads(1)
                                                                    .setPreferredWrites(1)
                                                                    .setRequiredWrites(1)
                                                                    .setZoneCountReads(0)
                                                                    .setZoneCountWrites(0)
                                                                    .setZoneReplicationFactor(threeZoneRepFactor)
                                                                    .setHintedHandoffStrategy(HintedHandoffStrategyType.PROXIMITY_STRATEGY)
                                                                    .build();
    z1z3z5BaseRoutingPlan = new BaseStoreRoutingPlan(threeZoneCluster, threeZoneStoreDef);
    z1z3z5StoreRoutingPlan = new StoreRoutingPlan(threeZoneCluster, threeZoneStoreDef);
}
Example use of voldemort.store.StoreDefinition in project voldemort, from class MetadataStore, method validate.
@SuppressWarnings("unchecked")
public void validate(ByteArray keyBytes, Versioned<byte[]> valueBytes, byte[] transforms) throws VoldemortException {
String key = ByteUtils.getString(keyBytes.get(), "UTF-8");
Versioned<String> value = new Versioned<String>(ByteUtils.getString(valueBytes.getValue(), "UTF-8"), valueBytes.getVersion());
Versioned<Object> valueObject = convertStringToObject(key, value);
if (key.equals(MetadataStore.STORES_KEY)) {
List<StoreDefinition> storeDefinitions = (List<StoreDefinition>) valueObject.getValue();
Set<String> existingStores = new HashSet<String>(this.storeNames);
Set<String> specifiedStoreNames = new HashSet<String>();
for (StoreDefinition storeDef : storeDefinitions) {
String storeName = storeDef.getName();
if (specifiedStoreNames.contains(storeName)) {
throw new VoldemortException(" Duplicate store names in Stores.xml for storeName " + storeName);
}
specifiedStoreNames.add(storeName);
}
existingStores.removeAll(specifiedStoreNames);
// adding it back to allow again.
if (existingStores.size() > 0) {
logger.warn(" Set metadata does not support store deletion. This will leave the store in an " + "inconsistent state. Stores (Inconsistent) missing in set metadata " + Arrays.toString(existingStores.toArray()));
}
specifiedStoreNames.removeAll(this.storeNames);
if (specifiedStoreNames.size() > 0) {
logger.warn(" Set metadata does not support store addition . This will leave the store in an " + "inconsistent state. Stores (Inconsistent) added in set metadata " + Arrays.toString(specifiedStoreNames.toArray()));
}
}
}
Example use of voldemort.store.StoreDefinition in project voldemort, from class MetadataStore, method createInMemoryMetadataStore.
public static MetadataStore createInMemoryMetadataStore(Store<String, String, String> innerStore, int nodeId) {
    // Backing engine holding one serialized store definition per store name.
    StorageEngine<String, String, String> storesRepo =
            new InMemoryStorageEngine<String, String, String>("stores-repo");
    List<Versioned<String>> storesXmlVersions = innerStore.get(STORES_KEY, "");
    if (storesXmlVersions != null && !storesXmlVersions.isEmpty()) {
        // Split the first stores.xml into per-store entries in the repo.
        String storesXml = storesXmlVersions.get(0).getValue();
        StoreDefinitionsMapper storeMapper = new StoreDefinitionsMapper();
        for (StoreDefinition storeDef : storeMapper.readStoreList(new StringReader(storesXml))) {
            storesRepo.put(storeDef.getName(),
                           new Versioned<String>(storeMapper.writeStore(storeDef)),
                           null);
        }
    }
    MetadataStore metadataStore = new MetadataStore(innerStore, storesRepo);
    metadataStore.initNodeId(nodeId);
    return metadataStore;
}
Example use of voldemort.store.StoreDefinition in project voldemort, from class MetadataStore, method updateStoreDefinitions.
/**
 * Updates store definitions in place. Unlike the put method, this does not
 * delete any existing state: only the stores named in the supplied
 * stores.xml are touched.
 *
 * @param valueBytes the bytes of a stores.xml document containing updates
 *        for the stores to be modified
 */
@SuppressWarnings("unchecked")
public void updateStoreDefinitions(Versioned<byte[]> valueBytes) {
    // Serialize all metadata mutation under the write lock.
    writeLock.lock();
    try {
        Versioned<String> versionedXml =
                new Versioned<String>(ByteUtils.getString(valueBytes.getValue(), "UTF-8"),
                                      valueBytes.getVersion());
        Versioned<Object> parsed = convertStringToObject(STORES_KEY, versionedXml);
        StoreDefinitionsMapper xmlMapper = new StoreDefinitionsMapper();
        List<StoreDefinition> updatedDefs = (List<StoreDefinition>) parsed.getValue();
        // Reject schema changes that would break existing readers/writers.
        StoreDefinitionUtils.validateSchemasAsNeeded(updatedDefs);
        StoreDefinitionUtils.validateNewStoreDefsAreNonBreaking(getStoreDefList(), updatedDefs);
        // Persist each updated definition and refresh the metadata cache.
        for (StoreDefinition updatedDef : updatedDefs) {
            if (!this.storeNames.contains(updatedDef.getName())) {
                throw new VoldemortException("Cannot update a store which does not exist !");
            }
            String serializedDef = xmlMapper.writeStore(updatedDef);
            this.storeDefinitionsStorageEngine.put(updatedDef.getName(),
                                                   new Versioned<String>(serializedDef,
                                                                         versionedXml.getVersion()),
                                                   "");
            this.metadataCache.put(updatedDef.getName(),
                                   new Versioned<Object>(serializedDef, versionedXml.getVersion()));
        }
        // Re-initialize the store definitions
        initStoreDefinitions(versionedXml.getVersion());
        // Update routing strategies
        // TODO: Make this more fine grained.. i.e only update listeners for
        // a specific store.
        updateRoutingStrategies(getCluster(), getStoreDefList());
    } finally {
        writeLock.unlock();
    }
}
End of aggregated usage examples for voldemort.store.StoreDefinition.