Use of voldemort.client.protocol.admin.AdminClient in project voldemort.
From class ReplaceNodeTest, method verifyNewNodePartOfCluster:
private void verifyNewNodePartOfCluster(Node replacementNode) {
    // Fetch the topology as seen through a fresh admin client bootstrapped
    // from the original URL, and check the replacement node is a member.
    Cluster cluster = new AdminClient(originalBootstrapUrl).getAdminClientCluster();
    boolean replacementFound = false;
    for (Node member : cluster.getNodes()) {
        if (member.isEqualState(replacementNode)) {
            replacementFound = true;
            break;
        }
    }
    assertTrue(replacementFound);
}
Use of voldemort.client.protocol.admin.AdminClient in project voldemort.
From class AdminServiceBasicTest, method testIsClusterModified:
@Test
public void testIsClusterModified() {
    AdminClient adminClient = getAdminClient();
    // A freshly created admin client must report the cluster as unmodified.
    assertFalse("Newly Created admin client has valid cluster", adminClient.isClusterModified());
    // Write the current cluster.xml back to every node; after any remote
    // metadata update the client should report the cluster as modified.
    String clusterXml = new ClusterMapper().writeCluster(cluster);
    adminClient.metadataMgmtOps.updateRemoteMetadata(cluster.getNodeIds(), MetadataStore.CLUSTER_KEY, clusterXml);
    assertTrue("After cluster update", adminClient.isClusterModified());
}
Use of voldemort.client.protocol.admin.AdminClient in project voldemort.
From class AdminServiceBasicTest, method testFetchAndUpdateStoresMetadata:
@Test
public void testFetchAndUpdateStoresMetadata() {
    AdminClient client = getAdminClient();
    int nodeId = 0;
    String storeNameToBeUpdated = "users";
    doClientOperation();
    // Snapshot the store definitions currently registered on the node.
    Versioned<List<StoreDefinition>> originalStoreDefinitions = client.metadataMgmtOps.getRemoteStoreDefList(nodeId);
    StoreDefinition oldDefinition = getStoreDefinitionFromList(originalStoreDefinitions.getValue(), storeNameToBeUpdated);
    // Build a modified definition for the 'users' store: same serializers
    // and routing as before, but different replication and quorum settings.
    StoreDefinition newDefinition = new StoreDefinitionBuilder()
            .setName(storeNameToBeUpdated)
            .setType(oldDefinition.getType())
            .setKeySerializer(oldDefinition.getKeySerializer())
            .setValueSerializer(oldDefinition.getValueSerializer())
            .setRoutingPolicy(oldDefinition.getRoutingPolicy())
            .setRoutingStrategyType(oldDefinition.getRoutingStrategyType())
            .setReplicationFactor(2)
            .setPreferredReads(2)
            .setRequiredReads(1)
            .setPreferredWrites(2)
            .setRequiredWrites(2)
            .build();
    List<StoreDefinition> updatedStoreDefList = new ArrayList<StoreDefinition>();
    updatedStoreDefList.add(newDefinition);
    // Push the modified 'users' definition to the node.
    client.metadataMgmtOps.fetchAndUpdateRemoteStore(nodeId, updatedStoreDefList);
    // Re-fetch and verify the 'users' store now matches the new definition
    // (and that the overall list actually changed).
    Versioned<List<StoreDefinition>> newStoreDefinitions = client.metadataMgmtOps.getRemoteStoreDefList(nodeId);
    assertFalse(originalStoreDefinitions.getValue().equals(newStoreDefinitions.getValue()));
    for (StoreDefinition def : newStoreDefinitions.getValue()) {
        if (def.getName().equalsIgnoreCase(storeNameToBeUpdated)) {
            assertTrue(def.equals(newDefinition));
        }
    }
    // Restore the original definitions so later tests see a clean state.
    client.metadataMgmtOps.updateRemoteStoreDefList(nodeId, originalStoreDefinitions.getValue());
    doClientOperation();
}
Use of voldemort.client.protocol.admin.AdminClient in project voldemort.
From class AtomicSetMetadataPairTest, method testClusterAndStoresAreSetAtomically:
/**
 * Bug fix: The old approach tried to test store metadata update by
 * replacing an existing stores.xml with a completely different stores.xml.
 * This has been fixed such that, the new stores.xml is the same as the
 * original, except for required reads/writes = 2.
 */
@Test
public void testClusterAndStoresAreSetAtomically() throws Exception {
    // Let any exception propagate so JUnit reports the full stack trace;
    // the previous catch-all turned it into fail("..." + e), which lost
    // the stack trace (and carried a copy-pasted, unrelated message).
    AdminClient adminClient = new AdminClient(bootStrapUrls[0]);
    StoreDefinitionsMapper storeDefsMapper = new StoreDefinitionsMapper();
    List<StoreDefinition> storeDefs = storeDefsMapper.readStoreList(new File(newStoresXmlfile));
    ClusterMapper clusterMapper = new ClusterMapper();
    // Atomically push the new cluster.xml and stores.xml pair to every node.
    for (Node node : oldCluster.getNodes()) {
        VoldemortAdminTool.executeSetMetadataPair(node.getId(), adminClient, CLUSTER_KEY, clusterMapper.writeCluster(newCluster), STORES_KEY, storeDefsMapper.writeStoreList(storeDefs));
    }
    String dirPath = TestUtils.createTempDir().getAbsolutePath();
    for (Node node : newCluster.getNodes()) {
        VoldemortAdminTool.executeGetMetadata(node.getId(), adminClient, CLUSTER_KEY, dirPath);
        // Make sure cluster metadata was updated
        Cluster newClusterFromMetadataRepo = clusterMapper.readCluster(new File(dirPath, CLUSTER_KEY + "_" + node.getId()));
        // The in-memory old cluster still carries the original partition list
        assertTrue(oldCluster.getNodeById(5).getPartitionIds().equals(oldPartitionIds));
        // As per the new metadata, node 5 should have the new partition list
        assertTrue(newClusterFromMetadataRepo.getNodeById(5).getPartitionIds().equals(newPartitionIds));
        // Make sure store metadata was updated
        VoldemortAdminTool.executeGetMetadata(node.getId(), adminClient, STORES_KEY, dirPath);
        List<StoreDefinition> newStoreDefsFromMetadataRepo = storeDefsMapper.readStoreList(new File(dirPath, STORES_KEY + "_" + node.getId()));
        // Check that required reads/writes were updated to 2 (the asserts
        // inspect the quorum settings, not the replication factor).
        assertTrue(newStoreDefsFromMetadataRepo.get(1).getRequiredReads() == 2);
        assertTrue(newStoreDefsFromMetadataRepo.get(1).getRequiredWrites() == 2);
    }
}
Use of voldemort.client.protocol.admin.AdminClient in project voldemort.
From class AbstractNonZonedRebalanceTest, method testProxyPutDuringRebalancing:
@Test(timeout = 600000)
public void testProxyPutDuringRebalancing() throws Exception {
logger.info("Starting testProxyPutDuringRebalancing");
try {
// 3-node cluster; the final topology moves partition 3 onto node 2,
// which drives the partition moves listed further below.
Cluster currentCluster = ServerTestUtils.getLocalCluster(3, new int[][] { { 0 }, { 1, 3 }, { 2 } });
Cluster finalCluster = UpdateClusterUtils.createUpdatedCluster(currentCluster, 2, Lists.newArrayList(3));
// start servers 0,1,2 only
final List<Integer> serverList = Arrays.asList(0, 1, 2);
Map<String, String> configProps = new HashMap<String, String>();
configProps.put("admin.max.threads", "5");
final Cluster updatedCurrentCluster = startServers(currentCluster, rwStoreDefFileWithReplication, serverList, configProps);
// Two worker threads: one issues proxied client puts while the other
// drives the rebalance; exceptions from either are collected for the
// final assertion.
ExecutorService executors = Executors.newFixedThreadPool(2);
final AtomicBoolean rebalancingComplete = new AtomicBoolean(false);
final List<Exception> exceptions = Collections.synchronizedList(new ArrayList<Exception>());
// Its is imperative that we test in a single shot since multiple
// batches would mean the proxy bridges being torn down and
// established multiple times and we cannot test against the source
// cluster topology then.
String bootstrapUrl = getBootstrapUrl(currentCluster, 0);
int maxParallel = 2;
final ClusterTestUtils.RebalanceKit rebalanceKit = ClusterTestUtils.getRebalanceKit(bootstrapUrl, maxParallel, finalCluster);
populateData(updatedCurrentCluster, rwStoreDefWithReplication, rebalanceKit.controller.getAdminClient(), false);
final AdminClient adminClient = rebalanceKit.controller.getAdminClient();
// the plan would cause these partitions to move
// Partition : Donor -> Stealer
// p2 (SEC) : s1 -> s0
// p3 (PRI) : s1 -> s2
// Sample keys from the partitions that will migrate, so the puts below
// hit keys that are in flight during the rebalance.
final List<ByteArray> movingKeysList = sampleKeysFromPartition(adminClient, 1, rwStoreDefWithReplication.getName(), Arrays.asList(2, 3), 20);
assertTrue("Empty list of moving keys...", movingKeysList.size() > 0);
final AtomicBoolean rebalancingStarted = new AtomicBoolean(false);
final AtomicBoolean proxyWritesDone = new AtomicBoolean(false);
// Expected key/value and version state after the test; updated as the
// proxy-put thread writes.
final HashMap<String, String> baselineTuples = new HashMap<String, String>(testEntries);
final HashMap<String, VectorClock> baselineVersions = new HashMap<String, VectorClock>();
for (String key : baselineTuples.keySet()) {
baselineVersions.put(key, new VectorClock());
}
final CountDownLatch latch = new CountDownLatch(2);
// Start the proxy-put thread: it waits until the stealer servers enter
// REBALANCING_MASTER_SERVER state, then writes to the moving keys.
// (NOTE(review): the old comment said "start get operation", but this
// runnable performs puts.)
executors.execute(new Runnable() {
@Override
public void run() {
SocketStoreClientFactory factory = null;
try {
// wait for the rebalancing to begin.
List<VoldemortServer> serverList = Lists.newArrayList(serverMap.get(0), serverMap.get(2));
while (!rebalancingComplete.get()) {
Iterator<VoldemortServer> serverIterator = serverList.iterator();
while (serverIterator.hasNext()) {
VoldemortServer server = serverIterator.next();
if (ByteUtils.getString(server.getMetadataStore().get(MetadataStore.SERVER_STATE_KEY, null).get(0).getValue(), "UTF-8").compareTo(VoldemortState.REBALANCING_MASTER_SERVER.toString()) == 0) {
logger.info("Server " + server.getIdentityNode().getId() + " transitioned into REBALANCING MODE");
serverIterator.remove();
}
}
if (serverList.size() == 0) {
rebalancingStarted.set(true);
break;
}
}
if (!rebalancingComplete.get()) {
factory = new SocketStoreClientFactory(new ClientConfig().setBootstrapUrls(getBootstrapUrl(updatedCurrentCluster, 0)).setEnableLazy(false).setSocketTimeout(120, TimeUnit.SECONDS));
final StoreClient<String, String> storeClientRW = new DefaultStoreClient<String, String>(testStoreNameRW, null, factory, 3);
// zero vector clock
for (ByteArray movingKey : movingKeysList) {
try {
if (rebalancingComplete.get()) {
break;
}
String keyStr = ByteUtils.getString(movingKey.get(), "UTF-8");
String valStr = "proxy_write";
storeClientRW.put(keyStr, valStr);
baselineTuples.put(keyStr, valStr);
// all these keys will have [2:1] vector
// clock
// is node 2 is the pseudo master in both
// moves
baselineVersions.get(keyStr).incrementVersion(2, System.currentTimeMillis());
proxyWritesDone.set(true);
} catch (InvalidMetadataException e) {
// let this go
logger.error("Encountered an invalid metadata exception.. ", e);
}
}
}
} catch (Exception e) {
logger.error("Exception in proxy put thread", e);
exceptions.add(e);
} finally {
if (factory != null)
factory.close();
latch.countDown();
}
}
});
// Rebalance-driver thread: runs the actual rebalance and signals
// completion to the proxy-put thread via rebalancingComplete.
executors.execute(new Runnable() {
@Override
public void run() {
try {
rebalanceKit.rebalance();
} catch (Exception e) {
logger.error("Error in rebalancing... ", e);
exceptions.add(e);
} finally {
rebalancingComplete.set(true);
latch.countDown();
}
}
});
// Wait for both threads, then shut the pool down.
latch.await();
executors.shutdown();
executors.awaitTermination(300, TimeUnit.SECONDS);
assertEquals("Client did not see all server transition into rebalancing state", rebalancingStarted.get(), true);
assertEquals("Not enough time to begin proxy writing", proxyWritesDone.get(), true);
checkEntriesPostRebalance(updatedCurrentCluster, finalCluster, Lists.newArrayList(rwStoreDefWithReplication), Arrays.asList(0, 1, 2), baselineTuples, baselineVersions);
checkConsistentMetadata(finalCluster, serverList);
// check No Exception
if (exceptions.size() > 0) {
for (Exception e : exceptions) {
e.printStackTrace();
}
fail("Should not see any exceptions.");
}
// check that the proxy writes were made to the original donor, node
// 1
// Roll cluster.xml back to the pre-rebalance topology on every node
// (with a clock that dominates all nodes' versions) so the donor can
// be queried directly for the proxied tuples.
List<ClockEntry> clockEntries = new ArrayList<ClockEntry>(serverList.size());
for (Integer nodeid : serverList) clockEntries.add(new ClockEntry(nodeid.shortValue(), System.currentTimeMillis()));
VectorClock clusterXmlClock = new VectorClock(clockEntries, System.currentTimeMillis());
for (Integer nodeid : serverList) adminClient.metadataMgmtOps.updateRemoteCluster(nodeid, currentCluster, clusterXmlClock);
adminClient.setAdminClientCluster(currentCluster);
checkForTupleEquivalence(adminClient, 1, testStoreNameRW, movingKeysList, baselineTuples, baselineVersions);
// stop servers
try {
stopServer(serverList);
} catch (Exception e) {
throw new RuntimeException(e);
}
} catch (AssertionError ae) {
logger.error("Assertion broken in testProxyPutDuringRebalancing ", ae);
throw ae;
}
}
Aggregations