Use of org.apache.hadoop.hdfs.server.namenode.startupprogress.Phase in project hadoop by apache.
In class TestStartupProgress, method testInitialState:
@Test(timeout = 10000)
public void testInitialState() {
  StartupProgressView view = startupProgress.createView();
  assertNotNull(view);
  assertEquals(0L, view.getElapsedTime());
  assertEquals(0.0f, view.getPercentComplete(), 0.001f);
  List<Phase> phases = new ArrayList<Phase>();
  for (Phase phase : view.getPhases()) {
    phases.add(phase);
    assertEquals(0L, view.getElapsedTime(phase));
    assertNull(view.getFile(phase));
    assertEquals(0.0f, view.getPercentComplete(phase), 0.001f);
    assertEquals(Long.MIN_VALUE, view.getSize(phase));
    assertEquals(PENDING, view.getStatus(phase));
    assertEquals(0L, view.getTotal(phase));
    for (Step step : view.getSteps(phase)) {
      fail(String.format("unexpected step %s in phase %s at initial state",
          step, phase));
    }
  }
  assertArrayEquals(EnumSet.allOf(Phase.class).toArray(), phases.toArray());
}
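This test and the ones below mutate a startupProgress fixture that the excerpt never defines. A minimal setup consistent with the calls above would look like this; the field and the @Before initializer are assumptions, as they are not shown on this page:

private StartupProgress startupProgress;

@Before
public void setUp() {
  startupProgress = new StartupProgress();
}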
Use of org.apache.hadoop.hdfs.server.namenode.startupprogress.Phase in project hadoop by apache.
In class TestStartupProgress, method testThreadSafety:
@Test(timeout = 10000)
public void testThreadSafety() throws Exception {
  // Test for thread safety by starting multiple threads that mutate the same
  // StartupProgress instance in various ways. We expect no internal
  // corruption of data structures and no lost updates on counter increments.
  int numThreads = 100;
  // Data tables used by each thread to determine values to pass to APIs.
  Phase[] phases = { LOADING_FSIMAGE, LOADING_FSIMAGE, LOADING_EDITS,
      LOADING_EDITS };
  Step[] steps = new Step[] { new Step(INODES), new Step(DELEGATION_KEYS),
      new Step(INODES), new Step(DELEGATION_KEYS) };
  String[] files = { "file1", "file1", "file2", "file2" };
  long[] sizes = { 1000L, 1000L, 2000L, 2000L };
  long[] totals = { 10000L, 20000L, 30000L, 40000L };
  ExecutorService exec = Executors.newFixedThreadPool(numThreads);
  try {
    for (int i = 0; i < numThreads; ++i) {
      final Phase phase = phases[i % phases.length];
      final Step step = steps[i % steps.length];
      final String file = files[i % files.length];
      final long size = sizes[i % sizes.length];
      final long total = totals[i % totals.length];
      exec.submit(new Callable<Void>() {
        @Override
        public Void call() {
          startupProgress.beginPhase(phase);
          startupProgress.setFile(phase, file);
          startupProgress.setSize(phase, size);
          startupProgress.setTotal(phase, step, total);
          incrementCounter(startupProgress, phase, step, 100L);
          startupProgress.endStep(phase, step);
          startupProgress.endPhase(phase);
          return null;
        }
      });
    }
  } finally {
    exec.shutdown();
    assertTrue(exec.awaitTermination(10000L, TimeUnit.MILLISECONDS));
  }
  StartupProgressView view = startupProgress.createView();
  assertNotNull(view);
  assertEquals("file1", view.getFile(LOADING_FSIMAGE));
  assertEquals(1000L, view.getSize(LOADING_FSIMAGE));
  assertEquals(10000L, view.getTotal(LOADING_FSIMAGE, new Step(INODES)));
  assertEquals(2500L, view.getCount(LOADING_FSIMAGE, new Step(INODES)));
  assertEquals(20000L, view.getTotal(LOADING_FSIMAGE,
      new Step(DELEGATION_KEYS)));
  assertEquals(2500L, view.getCount(LOADING_FSIMAGE,
      new Step(DELEGATION_KEYS)));
  assertEquals("file2", view.getFile(LOADING_EDITS));
  assertEquals(2000L, view.getSize(LOADING_EDITS));
  assertEquals(30000L, view.getTotal(LOADING_EDITS, new Step(INODES)));
  assertEquals(2500L, view.getCount(LOADING_EDITS, new Step(INODES)));
  assertEquals(40000L, view.getTotal(LOADING_EDITS,
      new Step(DELEGATION_KEYS)));
  assertEquals(2500L, view.getCount(LOADING_EDITS,
      new Step(DELEGATION_KEYS)));
}
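The incrementCounter helper used throughout these tests is not part of this excerpt. A sketch consistent with the StartupProgress API, where getCounter(phase, step) returns a StartupProgress.Counter exposing increment(), could look like the following; the exact body is an assumption:

private void incrementCounter(StartupProgress prog, Phase phase, Step step,
    long delta) {
  // Apply delta as individual increments, exercising the counter under
  // concurrent access when called from multiple threads.
  StartupProgress.Counter counter = prog.getCounter(phase, step);
  for (long i = 0; i < delta; ++i) {
    counter.increment();
  }
}

With 100 threads cycling through 4 (phase, step) combinations, each combination is hit by 25 threads, so each counter ends at 25 * 100L = 2500L, which is exactly what the assertions above verify.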
Use of org.apache.hadoop.hdfs.server.namenode.startupprogress.Phase in project hadoop by apache.
In class TestStartupProgress, method testFrozenAfterStartupCompletes:
@Test(timeout = 10000)
public void testFrozenAfterStartupCompletes() {
  // Do some updates and counter increments.
  startupProgress.beginPhase(LOADING_FSIMAGE);
  startupProgress.setFile(LOADING_FSIMAGE, "file1");
  startupProgress.setSize(LOADING_FSIMAGE, 1000L);
  Step step = new Step(INODES);
  startupProgress.beginStep(LOADING_FSIMAGE, step);
  startupProgress.setTotal(LOADING_FSIMAGE, step, 10000L);
  incrementCounter(startupProgress, LOADING_FSIMAGE, step, 100L);
  startupProgress.endStep(LOADING_FSIMAGE, step);
  startupProgress.endPhase(LOADING_FSIMAGE);
  // Force completion of all phases, so that the entire startup process is
  // completed.
  for (Phase phase : EnumSet.allOf(Phase.class)) {
    if (startupProgress.getStatus(phase) != Status.COMPLETE) {
      startupProgress.beginPhase(phase);
      startupProgress.endPhase(phase);
    }
  }
  StartupProgressView before = startupProgress.createView();
  // Attempt more updates and counter increments.
  startupProgress.beginPhase(LOADING_FSIMAGE);
  startupProgress.setFile(LOADING_FSIMAGE, "file2");
  startupProgress.setSize(LOADING_FSIMAGE, 2000L);
  startupProgress.beginStep(LOADING_FSIMAGE, step);
  startupProgress.setTotal(LOADING_FSIMAGE, step, 20000L);
  incrementCounter(startupProgress, LOADING_FSIMAGE, step, 100L);
  startupProgress.endStep(LOADING_FSIMAGE, step);
  startupProgress.endPhase(LOADING_FSIMAGE);
  // Also attempt a whole new step that wasn't used last time.
  startupProgress.beginPhase(LOADING_EDITS);
  Step newStep = new Step("file1");
  startupProgress.beginStep(LOADING_EDITS, newStep);
  incrementCounter(startupProgress, LOADING_EDITS, newStep, 100L);
  startupProgress.endStep(LOADING_EDITS, newStep);
  startupProgress.endPhase(LOADING_EDITS);
  StartupProgressView after = startupProgress.createView();
  // Expect that data was frozen after completion of the entire startup
  // process, so the second set of updates and counter increments should have
  // had no effect.
  assertEquals(before.getCount(LOADING_FSIMAGE),
      after.getCount(LOADING_FSIMAGE));
  assertEquals(before.getCount(LOADING_FSIMAGE, step),
      after.getCount(LOADING_FSIMAGE, step));
  assertEquals(before.getElapsedTime(), after.getElapsedTime());
  assertEquals(before.getElapsedTime(LOADING_FSIMAGE),
      after.getElapsedTime(LOADING_FSIMAGE));
  assertEquals(before.getElapsedTime(LOADING_FSIMAGE, step),
      after.getElapsedTime(LOADING_FSIMAGE, step));
  assertEquals(before.getFile(LOADING_FSIMAGE),
      after.getFile(LOADING_FSIMAGE));
  assertEquals(before.getSize(LOADING_FSIMAGE),
      after.getSize(LOADING_FSIMAGE));
  assertEquals(before.getTotal(LOADING_FSIMAGE),
      after.getTotal(LOADING_FSIMAGE));
  assertEquals(before.getTotal(LOADING_FSIMAGE, step),
      after.getTotal(LOADING_FSIMAGE, step));
  assertFalse(after.getSteps(LOADING_EDITS).iterator().hasNext());
}
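The freeze this test verifies can be implemented by having every mutator check a completion flag before touching state. The following is a plausible sketch, not the verbatim Hadoop source; isComplete, lazyInitStep, and StepTracking are hypothetical names introduced only for illustration:

public Counter getCounter(Phase phase, Step step) {
  if (isComplete()) { // hypothetical: true once every phase is COMPLETE
    return new Counter() {
      @Override
      public void increment() {
        // No-op: updates after startup completes are silently discarded,
        // which is why the before/after views above compare equal.
      }
    };
  }
  // hypothetical helper returning per-step tracking state with an AtomicLong
  final StepTracking tracking = lazyInitStep(phase, step);
  return new Counter() {
    @Override
    public void increment() {
      tracking.count.incrementAndGet();
    }
  };
}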
Use of org.apache.hadoop.hdfs.server.namenode.startupprogress.Phase in project hadoop by apache.
In class StartupProgressServlet, method doGet:
@Override
protected void doGet(HttpServletRequest req, HttpServletResponse resp)
    throws IOException {
  resp.setContentType("application/json; charset=UTF-8");
  StartupProgress prog = NameNodeHttpServer.getStartupProgressFromContext(
      getServletContext());
  StartupProgressView view = prog.createView();
  JsonGenerator json = new JsonFactory().createGenerator(resp.getWriter());
  try {
    json.writeStartObject();
    json.writeNumberField(ELAPSED_TIME, view.getElapsedTime());
    json.writeNumberField(PERCENT_COMPLETE, view.getPercentComplete());
    json.writeArrayFieldStart(PHASES);
    for (Phase phase : view.getPhases()) {
      json.writeStartObject();
      json.writeStringField(NAME, phase.getName());
      json.writeStringField(DESC, phase.getDescription());
      json.writeStringField(STATUS, view.getStatus(phase).toString());
      json.writeNumberField(PERCENT_COMPLETE, view.getPercentComplete(phase));
      json.writeNumberField(ELAPSED_TIME, view.getElapsedTime(phase));
      writeStringFieldIfNotNull(json, FILE, view.getFile(phase));
      writeNumberFieldIfDefined(json, SIZE, view.getSize(phase));
      json.writeArrayFieldStart(STEPS);
      for (Step step : view.getSteps(phase)) {
        json.writeStartObject();
        StepType type = step.getType();
        if (type != null) {
          json.writeStringField(NAME, type.getName());
          json.writeStringField(DESC, type.getDescription());
        }
        json.writeNumberField(COUNT, view.getCount(phase, step));
        writeStringFieldIfNotNull(json, FILE, step.getFile());
        writeNumberFieldIfDefined(json, SIZE, step.getSize());
        json.writeNumberField(TOTAL, view.getTotal(phase, step));
        json.writeNumberField(PERCENT_COMPLETE,
            view.getPercentComplete(phase, step));
        json.writeNumberField(ELAPSED_TIME, view.getElapsedTime(phase, step));
        json.writeEndObject();
      }
      json.writeEndArray();
      json.writeEndObject();
    }
    json.writeEndArray();
    json.writeEndObject();
  } finally {
    IOUtils.cleanup(LOG, json);
  }
}
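The two helpers used above, writeStringFieldIfNotNull and writeNumberFieldIfDefined, are defined elsewhere in StartupProgressServlet and are not shown on this page. A sketch consistent with their call sites, assuming Long.MIN_VALUE serves as the "undefined" sentinel for sizes (matching the initial size asserted in testInitialState):

private static void writeStringFieldIfNotNull(JsonGenerator json, String key,
    String value) throws IOException {
  // Omit the field entirely rather than emitting a JSON null.
  if (value != null) {
    json.writeStringField(key, value);
  }
}

private static void writeNumberFieldIfDefined(JsonGenerator json, String key,
    long value) throws IOException {
  // Omit the field while the value is still the undefined sentinel.
  if (value != Long.MIN_VALUE) {
    json.writeNumberField(key, value);
  }
}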
Use of org.apache.hadoop.hdfs.server.namenode.startupprogress.Phase in project hadoop by apache.
In class StartupProgressMetrics, method getMetrics:
@Override
public void getMetrics(MetricsCollector collector, boolean all) {
  StartupProgressView prog = startupProgress.createView();
  MetricsRecordBuilder builder = collector.addRecord(
      STARTUP_PROGRESS_METRICS_INFO);
  builder.addCounter(info("ElapsedTime", "overall elapsed time"),
      prog.getElapsedTime());
  builder.addGauge(info("PercentComplete", "overall percent complete"),
      prog.getPercentComplete());
  for (Phase phase : prog.getPhases()) {
    addCounter(builder, phase, "Count", " count", prog.getCount(phase));
    addCounter(builder, phase, "ElapsedTime", " elapsed time",
        prog.getElapsedTime(phase));
    addCounter(builder, phase, "Total", " total", prog.getTotal(phase));
    addGauge(builder, phase, "PercentComplete", " percent complete",
        prog.getPercentComplete(phase));
  }
}
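The addCounter and addGauge helpers used above are private to StartupProgressMetrics and are not part of this excerpt. Judging from the call sites, they prepend the phase's name and description to the given suffixes to build per-phase metric names such as LoadingFsImageCount; a sketch under that assumption:

private static void addCounter(MetricsRecordBuilder builder, Phase phase,
    String name, String desc, long value) {
  // e.g. name "Count" plus phase "LoadingFsImage" yields "LoadingFsImageCount".
  MetricsInfo metricsInfo = info(phase.getName() + name,
      phase.getDescription() + desc);
  builder.addCounter(metricsInfo, value);
}

private static void addGauge(MetricsRecordBuilder builder, Phase phase,
    String name, String desc, float value) {
  MetricsInfo metricsInfo = info(phase.getName() + name,
      phase.getDescription() + desc);
  builder.addGauge(metricsInfo, value);
}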