use of java.net.BindException in project voldemort by voldemort.
the class ServerTestUtils method startVoldemortCluster.
public static Cluster startVoldemortCluster(VoldemortServer[] voldemortServers,
        int[][] partitionMap, String clusterFile, String storeFile,
        Properties properties, Cluster customCluster) throws IOException {
    boolean started = false;
    Cluster cluster = null;
    SocketStoreFactory socketStoreFactory = getSocketStoreFactory();
    try {
        while (!started) {
            try {
                cluster = internalStartVoldemortCluster(voldemortServers.length,
                        voldemortServers, partitionMap, socketStoreFactory, true,
                        clusterFile, storeFile, properties, customCluster);
                started = true;
            } catch (BindException be) {
                logger.debug("Caught BindException when starting cluster. Will retry.");
            }
        }
    } finally {
        socketStoreFactory.close();
    }
    return cluster;
}
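A hypothetical call site for the method above might look like the following; the server count, partition layout, and file paths are illustrative placeholders rather than values taken from Voldemort's test suite, and the sketch assumes the passed-in server array is populated by the call.
// Hypothetical usage sketch: every concrete value below is a placeholder.
VoldemortServer[] servers = new VoldemortServer[2];
int[][] partitionMap = { { 0, 1 }, { 2, 3 } };
Cluster cluster = ServerTestUtils.startVoldemortCluster(servers,
        partitionMap,
        "config/two-node-cluster.xml",
        "config/test-stores.xml",
        new Properties(),
        null);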
use of java.net.BindException in project hadoop by apache.
the class TestEditLogAutoroll method setUp.
@Before
public void setUp() throws Exception {
    conf = new Configuration();
    // Stall the standby checkpointer in two ways
    conf.setLong(DFS_NAMENODE_CHECKPOINT_PERIOD_KEY, Long.MAX_VALUE);
    conf.setLong(DFS_NAMENODE_CHECKPOINT_TXNS_KEY, 20);
    // Make it autoroll after 10 edits
    conf.setFloat(DFS_NAMENODE_EDIT_LOG_AUTOROLL_MULTIPLIER_THRESHOLD, 0.5f);
    conf.setInt(DFS_NAMENODE_EDIT_LOG_AUTOROLL_CHECK_INTERVAL_MS, 100);
    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_EDITS_ASYNC_LOGGING, useAsyncEditLog);
    int retryCount = 0;
    while (true) {
        try {
            int basePort = 10060 + random.nextInt(100) * 2;
            MiniDFSNNTopology topology = new MiniDFSNNTopology()
                .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
                    .addNN(new MiniDFSNNTopology.NNConf("nn1").setHttpPort(basePort))
                    .addNN(new MiniDFSNNTopology.NNConf("nn2").setHttpPort(basePort + 1)));
            cluster = new MiniDFSCluster.Builder(conf)
                .nnTopology(topology)
                .numDataNodes(0)
                .build();
            cluster.waitActive();
            nn0 = cluster.getNameNode(0);
            fs = HATestUtil.configureFailoverFs(cluster, conf);
            cluster.transitionToActive(0);
            fs = cluster.getFileSystem(0);
            editLog = nn0.getNamesystem().getEditLog();
            ++retryCount;
            break;
        } catch (BindException e) {
            LOG.info("Set up MiniDFSCluster failed due to port conflicts, retry "
                + retryCount + " times");
        }
    }
}
use of java.net.BindException in project hadoop by apache.
the class TestEditLogTailer method testStandbyTriggersLogRolls.
private static void testStandbyTriggersLogRolls(int activeIndex) throws Exception {
    Configuration conf = getConf();
    // Roll every 1s
    conf.setInt(DFSConfigKeys.DFS_HA_LOGROLL_PERIOD_KEY, 1);
    conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
    conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_ALL_NAMESNODES_RETRY_KEY, 100);
    MiniDFSCluster cluster = null;
    for (int i = 0; i < 5; i++) {
        try {
            // Have to specify IPC ports so the NNs can talk to each other.
            int[] ports = ServerSocketUtil.getPorts(3);
            MiniDFSNNTopology topology = new MiniDFSNNTopology()
                .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
                    .addNN(new MiniDFSNNTopology.NNConf("nn1").setIpcPort(ports[0]))
                    .addNN(new MiniDFSNNTopology.NNConf("nn2").setIpcPort(ports[1]))
                    .addNN(new MiniDFSNNTopology.NNConf("nn3").setIpcPort(ports[2])));
            cluster = new MiniDFSCluster.Builder(conf)
                .nnTopology(topology)
                .numDataNodes(0)
                .build();
            break;
        } catch (BindException e) {
            // retry if race on ports given by ServerSocketUtil#getPorts
            continue;
        }
    }
    if (cluster == null) {
        fail("failed to start mini cluster.");
    }
    try {
        cluster.transitionToActive(activeIndex);
        waitForLogRollInSharedDir(cluster, 3);
    } finally {
        cluster.shutdown();
    }
}
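The retry above exists because ports returned by a probe such as ServerSocketUtil#getPorts can be taken by another process before MiniDFSCluster actually binds them. A minimal sketch of how such a probe typically works (FreePortProbe and findFreePort are illustrative names, not Hadoop's implementation):
import java.io.IOException;
import java.net.ServerSocket;

final class FreePortProbe {
    // Bind to port 0 so the OS picks a free port, then release it.
    // Another process can grab that port between this call and the real
    // bind, which is exactly the race the retry loop above absorbs.
    static int findFreePort() throws IOException {
        try (ServerSocket probe = new ServerSocket(0)) {
            probe.setReuseAddress(true);
            return probe.getLocalPort();
        }
    }
}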
use of java.net.BindException in project hadoop by apache.
the class TestFailureToReadEdits method setUpCluster.
@Before
public void setUpCluster() throws Exception {
    conf = new Configuration();
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_CHECK_PERIOD_KEY, 1);
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_TXNS_KEY, 1);
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_NUM_CHECKPOINTS_RETAINED_KEY, 10);
    conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
    HAUtil.setAllowStandbyReads(conf, true);
    if (clusterType == TestType.SHARED_DIR_HA) {
        int basePort = 10000;
        int retryCount = 0;
        while (true) {
            try {
                basePort = 10000 + RANDOM.nextInt(1000) * 4;
                LOG.info("Set SHARED_DIR_HA cluster's basePort to " + basePort);
                MiniDFSNNTopology topology = MiniQJMHACluster.createDefaultTopology(basePort);
                cluster = new MiniDFSCluster.Builder(conf)
                    .nnTopology(topology)
                    .numDataNodes(0)
                    .checkExitOnShutdown(false)
                    .build();
                break;
            } catch (BindException e) {
                if (cluster != null) {
                    cluster.shutdown(true);
                    cluster = null;
                }
                ++retryCount;
                LOG.info("SHARED_DIR_HA: MiniQJMHACluster port conflicts, retried "
                    + retryCount + " times " + e);
            }
        }
    } else {
        Builder builder = new MiniQJMHACluster.Builder(conf);
        builder.getDfsBuilder().numDataNodes(0).checkExitOnShutdown(false);
        miniQjmHaCluster = builder.build();
        cluster = miniQjmHaCluster.getDfsCluster();
    }
    cluster.waitActive();
    nn0 = cluster.getNameNode(0);
    nn1 = cluster.getNameNode(1);
    cluster.transitionToActive(0);
    fs = HATestUtil.configureFailoverFs(cluster, conf);
}
use of java.net.BindException in project hadoop by apache.
the class TestValidateConfigurationSettings method testThatDifferentRPCandHttpPortsAreOK.
/**
 * Tests that setting the RPC port to a different port than the web port
 * does NOT throw an exception.
 */
@Test(timeout = 300000)
public void testThatDifferentRPCandHttpPortsAreOK() throws IOException {
    Configuration conf = new HdfsConfiguration();
    File nameDir = new File(MiniDFSCluster.getBaseDirectory(), "name");
    conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nameDir.getAbsolutePath());
    Random rand = new Random();
    // A few retries in case the ports we choose are in use.
    for (int i = 0; i < 5; ++i) {
        final int port1 = 30000 + rand.nextInt(10000);
        final int port2 = port1 + 1 + rand.nextInt(10000);
        FileSystem.setDefaultUri(conf, "hdfs://localhost:" + port1);
        conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "127.0.0.1:" + port2);
        DFSTestUtil.formatNameNode(conf);
        NameNode nameNode = null;
        try {
            // should be OK!
            nameNode = new NameNode(conf);
            break;
        } catch (BindException be) {
            // Port in use? Try another.
            continue;
        } finally {
            if (nameNode != null) {
                nameNode.stop();
            }
        }
    }
}
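All of the examples above follow the same shape: pick candidate ports, try to start the service, and retry when java.net.BindException signals a collision. A minimal generic sketch of that pattern, with hypothetical names (RetryOnBind and bindAttempt are not from any of the projects above):
import java.net.BindException;
import java.util.concurrent.Callable;

final class RetryOnBind {
    // Run a port-binding action up to maxAttempts times (assumes maxAttempts >= 1),
    // rethrowing the last BindException if every attempt collides.
    static <T> T run(Callable<T> bindAttempt, int maxAttempts) throws Exception {
        BindException last = null;
        for (int attempt = 1; attempt <= maxAttempts; attempt++) {
            try {
                return bindAttempt.call(); // should pick fresh ports on each call
            } catch (BindException e) {
                last = e;
            }
        }
        throw last;
    }
}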