Usage of org.apache.hadoop.conf.Configuration.IntegerRanges in the Apache Hadoop project.
Example from class TestConfiguration, method testGetRangeIterator.
/**
 * Verifies that {@link Configuration#getRange} expands a range spec string
 * into exactly the expected set of integers: an empty spec yields nothing,
 * a single value yields itself, and comma-separated ranges (with optional
 * whitespace) are expanded inclusively at both ends.
 */
public void testGetRangeIterator() throws Exception {
  Configuration config = new Configuration(false);

  // An empty spec must produce an iterator with no elements at all.
  IntegerRanges ranges = config.getRange("Test", "");
  assertFalse("Empty range has values", ranges.iterator().hasNext());

  // A single-value spec yields exactly that one value.
  ranges = config.getRange("Test", "5");
  assertEquals(new HashSet<Integer>(Arrays.asList(5)), drain(ranges));

  // Disjoint ranges are expanded inclusively at both endpoints.
  ranges = config.getRange("Test", "5-10,13-14");
  assertEquals(new HashSet<Integer>(Arrays.asList(5, 6, 7, 8, 9, 10, 13, 14)),
      drain(ranges));

  // Whitespace around range bounds is tolerated, and out-of-order
  // ranges are merged into one set.
  ranges = config.getRange("Test", "8-12, 5- 7");
  assertEquals(new HashSet<Integer>(Arrays.asList(5, 6, 7, 8, 9, 10, 11, 12)),
      drain(ranges));
}

/** Drains every integer produced by the given ranges into a set. */
private static Set<Integer> drain(IntegerRanges ranges) {
  Set<Integer> found = new HashSet<Integer>();
  for (Integer value : ranges) {
    found.add(value);
  }
  return found;
}
Usage of org.apache.hadoop.conf.Configuration.IntegerRanges in the Apache Hadoop project.
Example from class Server, method bind.
/**
 * Binds the socket either directly to the requested address or, when the
 * requested port is the 0 wildcard and a port-range configuration key is
 * supplied, to the first free port found in that range.
 *
 * @param socket the unbound server socket to bind
 * @param address the address (and possibly a fixed port) to bind to
 * @param backlog the listen backlog
 * @param conf configuration holding the optional port-range spec
 * @param rangeConf name of the configuration key describing the port
 *     range, or null to bind directly to {@code address}
 * @throws IOException wrapping any socket-level bind failure, including
 *     exhaustion of the configured port range
 */
public static void bind(ServerSocket socket, InetSocketAddress address, int backlog, Configuration conf, String rangeConf) throws IOException {
  try {
    IntegerRanges portRange = (rangeConf == null) ? null : conf.getRange(rangeConf, "");
    // Scan the range only when one is configured, non-empty, and the
    // caller asked for the wildcard port 0; otherwise bind directly.
    boolean scanRange = portRange != null && !portRange.isEmpty() && address.getPort() == 0;
    if (!scanRange) {
      socket.bind(address, backlog);
    } else {
      // Probe each port in the configured range until one binds.
      for (Integer candidate : portRange) {
        if (socket.isBound()) {
          break;
        }
        try {
          socket.bind(new InetSocketAddress(address.getAddress(), candidate), backlog);
        } catch (BindException ignored) {
          // Port in use — best-effort scan; keep trying the rest.
        }
      }
      if (!socket.isBound()) {
        // BindException is a SocketException, so this is also wrapped
        // with host/port context by the catch below.
        throw new BindException("Could not find a free port in " + portRange);
      }
    }
  } catch (SocketException e) {
    // Decorate with host/port context for a more actionable message.
    throw NetUtils.wrapException(null, 0, address.getHostName(), address.getPort(), e);
  }
}
Usage of org.apache.hadoop.conf.Configuration.IntegerRanges in the Apache Hadoop project.
Example from class TestHttpServer, method testPortRanges.
@Test
public void testPortRanges() throws Exception {
  // Derive the probe port, the range spec string, and the upper-bound
  // assertion from the same two constants so they cannot drift apart
  // (previously 49000/49500 were duplicated as literals and in the
  // hard-coded "49000-49500" string).
  final int rangeStart = 49000;
  final int rangeEnd = 49500;
  Configuration conf = new Configuration();
  // Wait until the start of the range is actually free so the first
  // server can be asserted to land exactly on it.
  int port = ServerSocketUtil.waitForPort(rangeStart, 60);
  conf.set("abc", rangeStart + "-" + rangeEnd);
  HttpServer2.Builder builder = new HttpServer2.Builder().setName("test").setConf(new Configuration()).setFindPort(false);
  IntegerRanges ranges = conf.getRange("abc", "");
  int startPort = 0;
  if (ranges != null && !ranges.isEmpty()) {
    startPort = ranges.getRangeStart();
    builder.setPortRanges(ranges);
  }
  builder.addEndpoint(URI.create("http://localhost:" + startPort));
  HttpServer2 myServer = builder.build();
  HttpServer2 myServer2 = null;
  try {
    myServer.start();
    // The first server should grab the first free port in the range.
    assertEquals(port, myServer.getConnectorAddress(0).getPort());
    myServer2 = builder.build();
    myServer2.start();
    // The second server must land on a later port, still inside the range.
    assertTrue(myServer2.getConnectorAddress(0).getPort() > port
        && myServer2.getConnectorAddress(0).getPort() <= rangeEnd);
  } finally {
    stopHttpServer(myServer);
    stopHttpServer(myServer2);
  }
}
Usage of org.apache.hadoop.conf.Configuration.IntegerRanges in the Apache Hadoop project.
Example from class Job, method monitorAndPrintJob.
/**
 * Monitor a job and print status in real-time as progress is made and tasks
 * fail.
 *
 * Blocks until the job completes: polls at a configured interval, logs map
 * and reduce progress percentages when they change, and prints
 * task-completion events as they arrive.
 *
 * @return true if the job succeeded
 * @throws IOException if communication to the JobTracker fails
 * @throws InterruptedException if the polling sleep is interrupted
 */
public boolean monitorAndPrintJob() throws IOException, InterruptedException {
String lastReport = null;
Job.TaskStatusFilter filter;
Configuration clientConf = getConfiguration();
filter = Job.getTaskOutputFilter(clientConf);
JobID jobId = getJobID();
LOG.info("Running job: " + jobId);
int eventCounter = 0;
boolean profiling = getProfileEnabled();
// Task-id ranges (map and reduce) whose profiling output should be fetched.
IntegerRanges mapRanges = getProfileTaskRange(true);
IntegerRanges reduceRanges = getProfileTaskRange(false);
int progMonitorPollIntervalMillis = Job.getProgressPollInterval(clientConf);
/* make sure to report full progress after the job is done */
boolean reportedAfterCompletion = false;
boolean reportedUberMode = false;
// Runs one extra iteration after completion (without sleeping) so the
// final 100% progress and remaining events are reported exactly once.
while (!isComplete() || !reportedAfterCompletion) {
if (isComplete()) {
reportedAfterCompletion = true;
} else {
Thread.sleep(progMonitorPollIntervalMillis);
}
// Nothing to report while the job is still in the PREP state.
if (status.getState() == JobStatus.State.PREP) {
continue;
}
// Log the uber-mode flag once, the first time the job leaves PREP.
if (!reportedUberMode) {
reportedUberMode = true;
LOG.info("Job " + jobId + " running in uber mode : " + isUber());
}
String report = (" map " + StringUtils.formatPercent(mapProgress(), 0) + " reduce " + StringUtils.formatPercent(reduceProgress(), 0));
// Only log when the rounded percentages actually changed since last poll.
if (!report.equals(lastReport)) {
LOG.info(report);
lastReport = report;
}
// Fetch and print the next batch (up to 10) of task-completion events,
// advancing the cursor by however many were actually returned.
TaskCompletionEvent[] events = getTaskCompletionEvents(eventCounter, 10);
eventCounter += events.length;
printTaskEvents(events, filter, profiling, mapRanges, reduceRanges);
}
boolean success = isSuccessful();
if (success) {
LOG.info("Job " + jobId + " completed successfully");
} else {
LOG.info("Job " + jobId + " failed with state " + status.getState() + " due to: " + status.getFailureInfo());
}
Counters counters = getCounters();
// Counters may be null (e.g. for a retired job); log them only if present.
if (counters != null) {
LOG.info(counters.toString());
}
return success;
}
Aggregated usage examples of Configuration.IntegerRanges.