Use of org.apache.curator.test.TestingServer in project alluxio by Alluxio.
In the class MultiProcessCluster, the method startNewMasters:
/**
 * Starts a number of new master nodes.
 *
 * @param count the number of new masters to start
 * @param format whether to format the journal before starting the masters
 * @throws Exception if any error occurs
 */
public synchronized void startNewMasters(int count, boolean format) throws Exception {
  int startIndex = 0;
  if (mMasterAddresses != null) {
    startIndex = mMasterAddresses.size();
  } else {
    mMasterAddresses = new ArrayList<>();
  }
  // Derive an id for each new master from its embedded journal port plus a random suffix.
  List<MasterNetAddress> masterAddresses = generateMasterAddresses(count);
  for (MasterNetAddress newMasterAddress : masterAddresses) {
    String id = newMasterAddress.getEmbeddedJournalPort() + "-"
        + RandomString.make(RandomString.DEFAULT_LENGTH);
    mMasterIds.add(id);
  }
  mMasterAddresses.addAll(masterAddresses);
  mNumMasters = mMasterAddresses.size();
  LOG.info("Master addresses: {}", mMasterAddresses);
  switch (mDeployMode) {
    case UFS_NON_HA:
      // Single master journaling to the under file system.
      MasterNetAddress masterAddress = mMasterAddresses.get(0);
      mProperties.put(PropertyKey.MASTER_JOURNAL_TYPE, JournalType.UFS.toString());
      mProperties.put(PropertyKey.MASTER_HOSTNAME, masterAddress.getHostname());
      mProperties.put(PropertyKey.MASTER_RPC_PORT, Integer.toString(masterAddress.getRpcPort()));
      mProperties.put(PropertyKey.MASTER_WEB_PORT, Integer.toString(masterAddress.getWebPort()));
      break;
    case EMBEDDED:
      // Embedded journal: every master must know the full journal and RPC address lists.
      List<String> journalAddresses = new ArrayList<>();
      List<String> rpcAddresses = new ArrayList<>();
      for (MasterNetAddress address : mMasterAddresses) {
        journalAddresses
            .add(String.format("%s:%d", address.getHostname(), address.getEmbeddedJournalPort()));
        rpcAddresses.add(String.format("%s:%d", address.getHostname(), address.getRpcPort()));
      }
      mProperties.put(PropertyKey.MASTER_JOURNAL_TYPE, JournalType.EMBEDDED.toString());
      mProperties.put(PropertyKey.MASTER_EMBEDDED_JOURNAL_ADDRESSES,
          com.google.common.base.Joiner.on(",").join(journalAddresses));
      mProperties.put(PropertyKey.MASTER_RPC_ADDRESSES,
          com.google.common.base.Joiner.on(",").join(rpcAddresses));
      break;
    case ZOOKEEPER_HA:
      // An in-process ZooKeeper (TestingServer) handles leader election; the journal stays in
      // the UFS. The Closer shuts the server down when the cluster is closed.
      mCuratorServer = mCloser.register(
          new TestingServer(-1, AlluxioTestDirectory.createTemporaryDirectory("zk")));
      mProperties.put(PropertyKey.MASTER_JOURNAL_TYPE, JournalType.UFS.toString());
      mProperties.put(PropertyKey.ZOOKEEPER_ENABLED, "true");
      mProperties.put(PropertyKey.ZOOKEEPER_ADDRESS, mCuratorServer.getConnectString());
      break;
    default:
      throw new IllegalStateException("Unknown deploy mode: " + mDeployMode.toString());
  }
  for (Entry<PropertyKey, Object> entry : ConfigurationTestUtils
      .testConfigurationDefaults(ServerConfiguration.global(),
          NetworkAddressUtils.getLocalHostName(
              (int) ServerConfiguration.getMs(PropertyKey.NETWORK_HOST_RESOLUTION_TIMEOUT_MS)),
          mWorkDir.getAbsolutePath())
      .entrySet()) {
    // Don't overwrite explicitly set properties.
    if (mProperties.containsKey(entry.getKey())) {
      continue;
    }
    // Keep the default RPC timeout.
    if (entry.getKey().equals(PropertyKey.USER_RPC_RETRY_MAX_DURATION)) {
      continue;
    }
    mProperties.put(entry.getKey(), entry.getValue());
  }
  mProperties.put(PropertyKey.MASTER_MOUNT_TABLE_ROOT_UFS,
      PathUtils.concatPath(mWorkDir, "underFSStorage"));
  new File((String) mProperties.get(PropertyKey.MASTER_MOUNT_TABLE_ROOT_UFS)).mkdirs();
  if (format) {
    formatJournal();
  }
  writeConf();
  ServerConfiguration.merge(mProperties, Source.RUNTIME);
  // Stagger master startup; the delay is in milliseconds.
  final int MASTER_START_DELAY_MS = 500;
  for (int i = 0; i < count; i++) {
    createMaster(startIndex + i).start();
    wait(MASTER_START_DELAY_MS);
  }
  mFilesystemContext = null;
}
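In the ZOOKEEPER_HA branch above, the TestingServer is constructed with port -1, which asks Curator to bind a free ephemeral port, and is registered with a Closer so it shuts down with the cluster. A minimal standalone sketch of that lifecycle, assuming a plain temp directory in place of AlluxioTestDirectory (class name and directory prefix here are illustrative, not Alluxio code):

import com.google.common.io.Closer;
import java.io.File;
import java.nio.file.Files;
import org.apache.curator.test.TestingServer;

public class TestingServerLifecycle {
  public static void main(String[] args) throws Exception {
    Closer closer = Closer.create();
    try {
      // Port -1 asks TestingServer to pick a free ephemeral port; the directory
      // holds the ZooKeeper snapshot and log files for the test's lifetime.
      File zkDir = Files.createTempDirectory("zk").toFile();
      TestingServer zk = closer.register(new TestingServer(-1, zkDir));
      System.out.println("Embedded ZooKeeper at " + zk.getConnectString());
      // ... run code that needs a live single-node ZooKeeper ...
    } finally {
      closer.close(); // stops the registered TestingServer
    }
  }
}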
Use of org.apache.curator.test.TestingServer in project hive by apache.
In the class TestServiceDiscoveryWithMiniHS2, the method beforeTest:
@BeforeClass
public static void beforeTest() throws Exception {
  MiniHS2.cleanupLocalDir();
  zkServer = new TestingServer();
  // Load the JDBC driver so DriverManager can open connections to MiniHS2.
  Class.forName(MiniHS2.getJdbcDriverName());
  hiveConf = new HiveConf();
  hiveConf.setBoolVar(ConfVars.HIVE_SUPPORT_CONCURRENCY, false);
  // Set up zookeeper dynamic service discovery configs
  enableZKServiceDiscoveryConfigs(hiveConf);
  dataFileDir = hiveConf.get("test.data.files").replace('\\', '/').replace("c:", "");
  kvDataFilePath = new Path(dataFileDir, "kv1.txt");
}
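The TestingServer started here needs a matching shutdown once the class finishes. A plausible companion teardown for this test class (a sketch; the actual cleanup in TestServiceDiscoveryWithMiniHS2 may differ):

@AfterClass
public static void afterTest() throws Exception {
  // Stop the embedded ZooKeeper so service-discovery znodes do not leak into other test classes.
  if (zkServer != null) {
    zkServer.close();
  }
}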
Use of org.apache.curator.test.TestingServer in project storm by apache.
In the class DynamicBrokersReaderTest, the method setUp:
@Before
public void setUp() throws Exception {
  server = new TestingServer();
  String connectionString = server.getConnectString();
  Map<String, Object> conf = new HashMap<>();
  conf.put(Config.STORM_ZOOKEEPER_SESSION_TIMEOUT, 1000);
  conf.put(Config.STORM_ZOOKEEPER_CONNECTION_TIMEOUT, 1000);
  conf.put(Config.STORM_ZOOKEEPER_RETRY_TIMES, 4);
  conf.put(Config.STORM_ZOOKEEPER_RETRY_INTERVAL, 5);
  ExponentialBackoffRetry retryPolicy = new ExponentialBackoffRetry(1000, 3);
  zookeeper = CuratorFrameworkFactory.newClient(connectionString, retryPolicy);
  dynamicBrokersReader = new DynamicBrokersReader(conf, connectionString, masterPath, topic);
  // A second reader configured to match topics against a wildcard pattern.
  Map<String, Object> conf2 = new HashMap<>();
  conf2.putAll(conf);
  conf2.put("kafka.topic.wildcard.match", true);
  wildCardBrokerReader = new DynamicBrokersReader(conf2, connectionString, masterPath, "^test.*$");
  zookeeper.start();
}
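Note that zookeeper.start() only begins connecting in the background; the readers trigger the actual ZooKeeper calls. A matching teardown for this test class (a sketch; the real test's cleanup may differ) should close the client before the embedded server:

@After
public void tearDown() throws Exception {
  // Closing the Curator client first avoids connection-loss noise when the server stops.
  zookeeper.close();
  server.close();
}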
Use of org.apache.curator.test.TestingServer in project storm by apache.
In the class ZkCoordinatorTest, the method setUp:
@Before
public void setUp() throws Exception {
  MockitoAnnotations.initMocks(this);
  server = new TestingServer();
  String connectionString = server.getConnectString();
  ZkHosts hosts = new ZkHosts(connectionString);
  hosts.refreshFreqSecs = 1;
  spoutConfig = new SpoutConfig(hosts, "topic", "/test", "id");
  Map conf = buildZookeeperConfig(server);
  state = new ZkState(conf);
  simpleConsumer = new SimpleConsumer("localhost", broker.getPort(), 60000, 1024, "testClient");
  // Any partition registration returns the stubbed consumer.
  when(dynamicPartitionConnections.register(any(Broker.class), any(String.class), anyInt()))
      .thenReturn(simpleConsumer);
}
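The buildZookeeperConfig helper is not shown in this excerpt; it presumably points storm-kafka's transactional ZooKeeper state at the TestingServer. A hypothetical reconstruction that would slot into the test class above (the exact keys and values are assumptions, not the verbatim helper):

private Map<String, Object> buildZookeeperConfig(TestingServer server) {
  Map<String, Object> conf = new HashMap<>();
  // Direct transactional state at the in-process TestingServer.
  conf.put(Config.TRANSACTIONAL_ZOOKEEPER_PORT, server.getPort());
  conf.put(Config.TRANSACTIONAL_ZOOKEEPER_SERVERS, Collections.singletonList("localhost"));
  conf.put(Config.TRANSACTIONAL_ZOOKEEPER_ROOT, "/test");
  conf.put(Config.STORM_ZOOKEEPER_SESSION_TIMEOUT, 20000);
  return conf;
}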
Use of org.apache.curator.test.TestingServer in project druid by druid-io.
In the class CuratorTestBase, the method setupServerAndCurator:
protected void setupServerAndCurator() throws Exception {
  server = new TestingServer();
  timing = new Timing();
  curator = CuratorFrameworkFactory.builder()
      .connectString(server.getConnectString())
      .sessionTimeoutMs(timing.session())
      .connectionTimeoutMs(timing.connection())
      .retryPolicy(new RetryOneTime(1))
      .compressionProvider(new PotentiallyGzippedCompressionProvider(true))
      .build();
}
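The builder only constructs the client; tests built on this base class still have to start it before use and close both the client and the server afterward. A sketch of that surrounding lifecycle (assumed usage, not Druid's verbatim code):

protected void connectCurator() throws InterruptedException {
  curator.start();
  // Wait until the client is actually connected before tests touch ZooKeeper.
  curator.blockUntilConnected();
}

protected void tearDownServerAndCurator() throws IOException {
  curator.close();
  server.close();
}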