use of voldemort.client.ClientConfig in project voldemort by voldemort.
the class NioStatsJmxTest method setUp.
@Before
public void setUp() throws Exception {
    String storesXmlfile = "test/common/voldemort/config/single-store.xml";
    ClientConfig clientConfig = new ClientConfig().setMaxConnectionsPerNode(1)
                                                  .setMaxThreads(1);
    SocketStoreFactory socketStoreFactory = new ClientRequestExecutorPool(clientConfig.getSelectors(),
                                                                          clientConfig.getMaxConnectionsPerNode(),
                                                                          clientConfig.getConnectionTimeout(TimeUnit.MILLISECONDS),
                                                                          clientConfig.getSocketTimeout(TimeUnit.MILLISECONDS),
                                                                          clientConfig.getSocketBufferSize(),
                                                                          clientConfig.getSocketKeepAlive());
    Properties props = new Properties();
    props.put("jmx.enable", "true");
    int numServers = 1;
    VoldemortServer[] servers = new VoldemortServer[numServers];
    Cluster cluster = ServerTestUtils.startVoldemortCluster(numServers,
                                                            servers,
                                                            null,
                                                            socketStoreFactory,
                                                            true,
                                                            null,
                                                            storesXmlfile,
                                                            props);
    server = servers[0];
    for (Node node : cluster.getNodes()) {
        socketStore = ServerTestUtils.getSocketStore(socketStoreFactory,
                                                     "test",
                                                     node.getSocketPort(),
                                                     clientConfig.getRequestFormatType());
    }
}
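ClientConfig is configured through chained fluent setters, as above. A minimal standalone sketch of the pattern; the bootstrap URL, connection counts and timeouts below are illustrative placeholders, not recommended settings:

// Sketch only: all values here are illustrative assumptions.
ClientConfig config = new ClientConfig().setBootstrapUrls("tcp://localhost:6666") // placeholder URL
                                        .setMaxConnectionsPerNode(5)
                                        .setMaxThreads(5)
                                        .setConnectionTimeout(500, TimeUnit.MILLISECONDS)
                                        .setSocketTimeout(5, TimeUnit.SECONDS);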
use of voldemort.client.ClientConfig in project voldemort by voldemort.
the class EndToEndTest method setUp.
@Before
public void setUp() throws IOException {
    int numServers = 2;
    VoldemortServer[] servers = new VoldemortServer[numServers];
    int[][] partitionMap = { { 0, 2, 4, 6 }, { 1, 3, 5, 7 } };
    Cluster cluster = ServerTestUtils.startVoldemortCluster(numServers,
                                                            servers,
                                                            partitionMap,
                                                            socketStoreFactory,
                                                            useNio,
                                                            null,
                                                            STORES_XML,
                                                            new Properties());
    Node node = cluster.getNodeById(0);
    String bootstrapUrl = "tcp://" + node.getHost() + ":" + node.getSocketPort();
    StoreClientFactory storeClientFactory = new SocketStoreClientFactory(new ClientConfig().setBootstrapUrls(bootstrapUrl));
    storeClient = storeClientFactory.getStoreClient(STORE_NAME);
}
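Once bootstrapped this way, the returned StoreClient exposes simple key/value operations. A minimal usage sketch, assuming a store with String keys and String values (the key and value are placeholders):

// Versioning is handled by the client: put() reads the current version and writes against it.
storeClient.put("some-key", "some-value");
String value = storeClient.getValue("some-key");           // value only
Versioned<String> versioned = storeClient.get("some-key"); // value plus its vector clock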
use of voldemort.client.ClientConfig in project voldemort by voldemort.
the class VoldemortRollbackJob method run.
@Override
public void run() throws Exception {
    // Go over every cluster and roll back one store at a time
    for (String clusterUrl : clusterUrls) {
        AdminClient adminClient = null;
        ExecutorService service = null;
        try {
            service = Executors.newCachedThreadPool();
            ClientConfig config = new ClientConfig().setBootstrapUrls(clusterUrl);
            AdminClientConfig adminConfig = new AdminClientConfig().setAdminSocketTimeoutSec(60);
            adminClient = new AdminClient(adminConfig, config);
            Cluster cluster = adminClient.getAdminClientCluster();
            AdminStoreSwapper swapper = new AdminStoreSwapper(service,
                                                              adminClient,
                                                              1000 * props.getInt("timeout.seconds", 24 * 60 * 60),
                                                              true,
                                                              true);
            // Get the current version for all stores on all nodes
            Map<Integer, Map<String, Long>> previousVersions = Maps.newHashMap();
            for (Node node : cluster.getNodes()) {
                Map<String, Long> currentVersion = adminClient.readonlyOps.getROCurrentVersion(node.getId(), storeNames);
                log.info("Retrieving current version information on node " + node.getId());
                Map<String, Long> previousVersion = Maps.newHashMap();
                for (Entry<String, Long> entry : currentVersion.entrySet()) {
                    previousVersion.put(entry.getKey(), entry.getValue() - 1);
                    if (entry.getValue() == 0) {
                        throw new VoldemortException("Store '" + entry.getKey() + "' on node " + node.getId() + " does not have version to rollback to");
                    }
                }
                previousVersions.put(node.getId(), previousVersion);
            }
            // Swap one store at a time
            for (String storeName : storeNames) {
                for (Node node : cluster.getNodes()) {
                    log.info("Rolling back data on node " + node.getId() + " and for store " + storeName + " to version " + previousVersions.get(node.getId()).get(storeName));
                    swapper.invokeRollback(storeName, previousVersions.get(node.getId()).get(storeName));
                    log.info("Successfully rolled back data on node " + node.getId() + " and for store " + storeName);
                }
            }
        } finally {
            if (service != null) {
                service.shutdownNow();
                service.awaitTermination(10, TimeUnit.SECONDS);
                service = null;
            }
            if (adminClient != null) {
                adminClient.close();
                adminClient = null;
            }
        }
    }
}
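The same construct/use/close lifecycle applies wherever an AdminClient is built from an AdminClientConfig and ClientConfig pair, as in the swap job below. A minimal sketch (the bootstrap URL is a placeholder):

ClientConfig clientConfig = new ClientConfig().setBootstrapUrls("tcp://localhost:6666"); // placeholder URL
AdminClientConfig adminConfig = new AdminClientConfig().setAdminSocketTimeoutSec(60);
AdminClient adminClient = new AdminClient(adminConfig, clientConfig);
try {
    Cluster cluster = adminClient.getAdminClientCluster(); // cluster metadata fetched at bootstrap
    // ... admin operations against the cluster ...
} finally {
    adminClient.close(); // always release admin connections
}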
use of voldemort.client.ClientConfig in project voldemort by voldemort.
the class VoldemortSwapJob method run.
public void run() throws Exception {
    ExecutorService executor = Executors.newCachedThreadPool();
    // Read the Hadoop configuration settings
    JobConf conf = new JobConf();
    Path dataPath = new Path(dataDir);
    String modifiedDataDir = dataPath.makeQualified(FileSystem.get(conf)).toString();
    /*
     * Replace the default protocol and port with the one derived as above
     */
    try {
        modifiedDataDir = VoldemortUtils.modifyURL(modifiedDataDir, hdfsFetcherProtocol, Integer.valueOf(hdfsFetcherPort), false);
    } catch (NumberFormatException nfe) {
        info("The dataDir will not be modified, since hdfsFetcherPort is not a valid port number");
    } catch (IllegalArgumentException e) {
        info("The dataDir will not be modified, since it does not contain the expected structure of protocol:hostname:port/some_path");
    }
    try {
        new Path(modifiedDataDir);
    } catch (IllegalArgumentException e) {
        throw new VoldemortException("Could not create a valid data path out of the supplied dataDir: " + dataDir, e);
    }
    // It should not be necessary to set the max conn / node so high, but it should not be a big deal either. New
    // connections will be created as needed, not upfront, so there should be no extra cost associated with the
    // higher setting. There shouldn't be many parallel requests happening in this use case, but we're going to
    // leave it as is for now, just to minimize the potential for unforeseen regressions.
    AdminClientConfig adminConfig = new AdminClientConfig().setMaxConnectionsPerNode(cluster.getNumberOfNodes())
                                                           .setMaxBackoffDelayMs(maxBackoffDelayMs)
                                                           .setAdminSocketTimeoutSec(60 * 5);
    ClientConfig clientConfig = new ClientConfig().setBootstrapUrls(cluster.getBootStrapUrls())
                                                  .setConnectionTimeout(httpTimeoutMs, TimeUnit.MILLISECONDS);
    // Create admin client
    AdminClient client = new AdminClient(adminConfig, clientConfig);
    if (pushVersion == -1L) {
        // Need to retrieve the max version
        ArrayList<String> stores = new ArrayList<String>();
        stores.add(storeName);
        Map<String, Long> pushVersions = client.readonlyOps.getROMaxVersion(stores, maxNodeFailures);
        if (pushVersions == null || !pushVersions.containsKey(storeName)) {
            throw new RuntimeException("Push version could not be determined for store " + storeName);
        }
        pushVersion = pushVersions.get(storeName);
        pushVersion++;
    }
    // Do the fetch, and if it succeeds, the swap
    info("Initiating fetch of " + storeName + " with dataDir: " + dataDir);
    AdminStoreSwapper swapper = new AdminStoreSwapper(executor,
                                                      client,
                                                      httpTimeoutMs,
                                                      rollbackFailedSwap,
                                                      failedFetchStrategyList,
                                                      clusterName,
                                                      buildPrimaryReplicasOnly);
    swapper.fetchAndSwapStoreData(storeName, modifiedDataDir, pushVersion);
    info("Swap complete.");
    executor.shutdownNow();
    executor.awaitTermination(10, TimeUnit.SECONDS);
}
use of voldemort.client.ClientConfig in project voldemort by voldemort.
the class AbstractZonedRebalanceTest method testProxyPutDuringRebalancing.
@Test(timeout = 600000)
public void testProxyPutDuringRebalancing() throws Exception {
    logger.info("Starting testProxyPutDuringRebalancing");
    try {
        Cluster currentCluster = ServerTestUtils.getLocalZonedCluster(6,
                                                                      2,
                                                                      new int[] { 0, 0, 0, 1, 1, 1 },
                                                                      new int[][] { { 0 }, { 1, 6 }, { 2 }, { 3 }, { 4, 7 }, { 5 } });
        Cluster finalCluster = UpdateClusterUtils.createUpdatedCluster(currentCluster, 2, Lists.newArrayList(7));
        finalCluster = UpdateClusterUtils.createUpdatedCluster(finalCluster, 5, Lists.newArrayList(6));
        /**
         * Original partition map
         *
         * [s0 : p0] [s1 : p1, p6] [s2 : p2]
         *
         * [s3 : p3] [s4 : p4, p7] [s5 : p5]
         *
         * Final server partition ownership
         *
         * [s0 : p0] [s1 : p1] [s2 : p2, p7]
         *
         * [s3 : p3] [s4 : p4] [s5 : p5, p6]
         *
         * Note that rwStoreDefFileWithReplication is a "2/1/1" store def.
         *
         * Original server n-ary partition ownership
         *
         * [s0 : p0, p3-7] [s1 : p0-p7] [s2 : p1-2]
         *
         * [s3 : p0-3, p6-7] [s4 : p0-p7] [s5 : p4-5]
         *
         * Final server n-ary partition ownership
         *
         * [s0 : p0, p2-7] [s1 : p0-1] [s2 : p1-p7]
         *
         * [s3 : p0-3, p5-7] [s4 : p0-4, p7] [s5 : p4-6]
         */
        List<Integer> serverList = Arrays.asList(0, 1, 2, 3, 4, 5);
        Map<String, String> configProps = new HashMap<String, String>();
        configProps.put("admin.max.threads", "5");
        final Cluster updatedCurrentCluster = startServers(currentCluster,
                                                           rwStoreDefFileWithReplication,
                                                           serverList,
                                                           configProps);
        ExecutorService executors = Executors.newFixedThreadPool(2);
        final AtomicBoolean rebalancingComplete = new AtomicBoolean(false);
        final List<Exception> exceptions = Collections.synchronizedList(new ArrayList<Exception>());
        // It is imperative that we test in a single shot, since multiple
        // batches would mean the proxy bridges being torn down and
        // established multiple times, and we could not test against the
        // source cluster topology then. getRebalanceKit uses an infinite
        // batch size, so this should be fine.
        String bootstrapUrl = getBootstrapUrl(updatedCurrentCluster, 0);
        int maxParallel = 2;
        final ClusterTestUtils.RebalanceKit rebalanceKit = ClusterTestUtils.getRebalanceKit(bootstrapUrl, maxParallel, finalCluster);
        populateData(currentCluster, rwStoreDefWithReplication);
        final AdminClient adminClient = rebalanceKit.controller.getAdminClient();
        // The plan would cause these partitions to move:
        // Partition : Donor -> Stealer
        //
        // p2 (Z-SEC) : s1 -> s0
        // p3-6 (Z-PRI) : s1 -> s2
        // p7 (Z-PRI) : s0 -> s2
        //
        // p5 (Z-SEC) : s4 -> s3
        // p6 (Z-PRI) : s4 -> s5
        //
        // Therefore, rebalancing will run on servers 0, 2, 3 and 5.
        final List<ByteArray> movingKeysList = sampleKeysFromPartition(adminClient,
                                                                       1,
                                                                       rwStoreDefWithReplication.getName(),
                                                                       Arrays.asList(6),
                                                                       20);
        assertTrue("Empty list of moving keys...", movingKeysList.size() > 0);
        final AtomicBoolean rebalancingStarted = new AtomicBoolean(false);
        final AtomicBoolean proxyWritesDone = new AtomicBoolean(false);
        final HashMap<String, String> baselineTuples = new HashMap<String, String>(testEntries);
        final HashMap<String, VectorClock> baselineVersions = new HashMap<String, VectorClock>();
        for (String key : baselineTuples.keySet()) {
            baselineVersions.put(key, new VectorClock());
        }
        final CountDownLatch latch = new CountDownLatch(2);
        // Start the proxy put thread.
        executors.execute(new Runnable() {

            @Override
            public void run() {
                SocketStoreClientFactory factory = null;
                try {
                    // Wait for the rebalancing to begin
                    List<VoldemortServer> serverList = Lists.newArrayList(serverMap.get(0),
                                                                          serverMap.get(2),
                                                                          serverMap.get(3),
                                                                          serverMap.get(5));
                    while (!rebalancingComplete.get()) {
                        Iterator<VoldemortServer> serverIterator = serverList.iterator();
                        while (serverIterator.hasNext()) {
                            VoldemortServer server = serverIterator.next();
                            if (ByteUtils.getString(server.getMetadataStore()
                                                          .get(MetadataStore.SERVER_STATE_KEY, null)
                                                          .get(0)
                                                          .getValue(), "UTF-8")
                                         .compareTo(VoldemortState.REBALANCING_MASTER_SERVER.toString()) == 0) {
                                logger.info("Server " + server.getIdentityNode().getId() + " transitioned into REBALANCING MODE");
                                serverIterator.remove();
                            }
                        }
                        if (serverList.size() == 0) {
                            rebalancingStarted.set(true);
                            break;
                        }
                    }
                    if (rebalancingStarted.get()) {
                        factory = new SocketStoreClientFactory(new ClientConfig().setBootstrapUrls(getBootstrapUrl(updatedCurrentCluster, 0))
                                                                                 .setEnableLazy(false)
                                                                                 .setSocketTimeout(120, TimeUnit.SECONDS)
                                                                                 .setClientZoneId(1));
                        final StoreClient<String, String> storeClientRW = new DefaultStoreClient<String, String>(testStoreNameRW,
                                                                                                                 null,
                                                                                                                 factory,
                                                                                                                 3);
                        // Now with a zero vector clock
                        for (ByteArray movingKey : movingKeysList) {
                            try {
                                String keyStr = ByteUtils.getString(movingKey.get(), "UTF-8");
                                String valStr = "proxy_write";
                                storeClientRW.put(keyStr, valStr);
                                baselineTuples.put(keyStr, valStr);
                                // All these keys will have a [5:1] vector
                                // clock, as node 5 is the new pseudo master
                                baselineVersions.get(keyStr).incrementVersion(5, System.currentTimeMillis());
                                proxyWritesDone.set(true);
                                if (rebalancingComplete.get()) {
                                    break;
                                }
                            } catch (InvalidMetadataException e) {
                                // Let this go
                                logger.error("Encountered an invalid metadata exception.. ", e);
                            }
                        }
                    }
                } catch (Exception e) {
                    logger.error("Exception in proxy write thread..", e);
                    exceptions.add(e);
                } finally {
                    if (factory != null)
                        factory.close();
                    latch.countDown();
                }
            }
        });
        executors.execute(new Runnable() {

            @Override
            public void run() {
                try {
                    rebalanceKit.rebalance();
                } catch (Exception e) {
                    logger.error("Error in rebalancing... ", e);
                    exceptions.add(e);
                } finally {
                    rebalancingComplete.set(true);
                    latch.countDown();
                }
            }
        });
        latch.await();
        executors.shutdown();
        executors.awaitTermination(300, TimeUnit.SECONDS);
        assertEquals("Client did not see all server transition into rebalancing state",
                     rebalancingStarted.get(),
                     true);
        assertEquals("Not enough time to begin proxy writing", proxyWritesDone.get(), true);
        checkEntriesPostRebalance(updatedCurrentCluster,
                                  finalCluster,
                                  Lists.newArrayList(rwStoreDefWithReplication),
                                  Arrays.asList(0, 1, 2, 3, 4, 5),
                                  baselineTuples,
                                  baselineVersions);
        checkConsistentMetadata(finalCluster, serverList);
        // Check that no exceptions were raised
        if (exceptions.size() > 0) {
            for (Exception e : exceptions) {
                e.printStackTrace();
            }
            fail("Should not see any exceptions.");
        }
        // Check that the proxy writes were made to the original donor, node 1
        List<ClockEntry> clockEntries = new ArrayList<ClockEntry>(serverList.size());
        for (Integer nodeid : serverList) {
            clockEntries.add(new ClockEntry(nodeid.shortValue(), System.currentTimeMillis()));
        }
        VectorClock clusterXmlClock = new VectorClock(clockEntries, System.currentTimeMillis());
        for (Integer nodeid : serverList) {
            adminClient.metadataMgmtOps.updateRemoteCluster(nodeid, currentCluster, clusterXmlClock);
        }
        adminClient.setAdminClientCluster(currentCluster);
        checkForTupleEquivalence(adminClient,
                                 1,
                                 testStoreNameRW,
                                 movingKeysList,
                                 baselineTuples,
                                 baselineVersions);
        // Stop servers
        try {
            stopServer(serverList);
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    } catch (AssertionError ae) {
        logger.error("Assertion broken in testProxyPutDuringRebalancing ", ae);
        throw ae;
    }
}
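Stripped of the test scaffolding, the ClientConfig usage in this test reduces to building a zone-aware client. A distilled sketch mirroring the settings above (bootstrapUrl is assumed to be defined):

SocketStoreClientFactory factory = new SocketStoreClientFactory(new ClientConfig().setBootstrapUrls(bootstrapUrl)
                                                                                  .setEnableLazy(false) // bootstrap eagerly, fail fast
                                                                                  .setSocketTimeout(120, TimeUnit.SECONDS)
                                                                                  .setClientZoneId(1)); // identifies the client's zone for zone-aware routing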