Use of org.apache.hadoop.mapreduce.Counters in the project java-bigtable-hbase by googleapis: class TestSnapshotMapper, method testRowExceedingMaxCells.
@Test
public void testRowExceedingMaxCells() throws IOException {
  // Build a single row whose cell count exceeds the mapper's per-Put limit.
  int totalCells = SnapshotMapper.MAX_CELLS + 100;
  byte[] rowKey = Bytes.toBytes("row");
  ImmutableBytesWritable writableKey = new ImmutableBytesWritable(rowKey);
  long timestamp = 123L;

  List<Cell> cells = new ArrayList<>();
  while (cells.size() < totalCells) {
    cells.add(
        new KeyValue(rowKey, Bytes.toBytes("f"), Bytes.toBytes("q"), timestamp, Bytes.toBytes("v")));
  }
  mapDriver.addInput(new Pair<>(writableKey, Result.create(cells)));

  List<Pair<ImmutableBytesWritable, Put>> output = mapDriver.run();

  // The oversized row must be split across exactly two output Puts.
  Assert.assertEquals(2, output.size());

  // Every input cell must still be present across the emitted Puts.
  int emittedCells = 0;
  for (Pair<ImmutableBytesWritable, Put> pair : output) {
    CellScanner scanner = pair.getSecond().cellScanner();
    while (scanner.advance()) {
      emittedCells++;
    }
  }
  Assert.assertEquals(totalCells, emittedCells);

  // Counters reflect one logical row and the full cell count.
  Counters counters = mapDriver.getCounters();
  Assert.assertEquals(1, counters.findCounter(ScanCounter.NUM_ROWS).getValue());
  Assert.assertEquals(totalCells, counters.findCounter(ScanCounter.NUM_CELLS).getValue());
}
Use of org.apache.hadoop.mapreduce.Counters in the project java-bigtable-hbase by googleapis: class TestSnapshotMapper, method testSnapshotMapper.
@Test
public void testSnapshotMapper() throws IOException {
  // Feed the mapper several identical rows, each with a handful of cells.
  int numRows = 20;
  int cellsPerRow = 10;
  byte[] rowKey = Bytes.toBytes("row");
  byte[] family = Bytes.toBytes("f");
  byte[] qual = Bytes.toBytes("q");
  byte[] val = Bytes.toBytes("v");
  long timestamp = 123L;
  ImmutableBytesWritable writableKey = new ImmutableBytesWritable(rowKey);

  for (int r = 0; r < numRows; r++) {
    List<Cell> cells = new ArrayList<>();
    for (int c = 0; c < cellsPerRow; c++) {
      cells.add(new KeyValue(rowKey, family, qual, timestamp, val));
    }
    mapDriver.addInput(new Pair<>(writableKey, Result.create(cells)));
  }

  List<Pair<ImmutableBytesWritable, Put>> output = mapDriver.run();
  Assert.assertEquals(numRows, output.size());

  // Each emitted cell must match the input coordinates, timestamp, and value.
  int emittedCells = 0;
  for (Pair<ImmutableBytesWritable, Put> pair : output) {
    CellScanner scanner = pair.getSecond().cellScanner();
    while (scanner.advance()) {
      Cell cell = scanner.current();
      Assert.assertTrue(CellUtil.matchingRow(cell, rowKey));
      Assert.assertTrue(CellUtil.matchingFamily(cell, family));
      Assert.assertTrue(CellUtil.matchingQualifier(cell, qual));
      Assert.assertTrue(CellUtil.matchingValue(cell, val));
      Assert.assertEquals(timestamp, cell.getTimestamp());
      emittedCells++;
    }
  }
  Assert.assertEquals(numRows * cellsPerRow, emittedCells);

  // Counters reflect the number of rows and the total cells scanned.
  Counters counters = mapDriver.getCounters();
  Assert.assertEquals(numRows, counters.findCounter(ScanCounter.NUM_ROWS).getValue());
  Assert.assertEquals(
      numRows * cellsPerRow, counters.findCounter(ScanCounter.NUM_CELLS).getValue());
}
Use of org.apache.hadoop.mapreduce.Counters in the project hadoop by apache: class TestTaskImpl, method testCountersWithSpeculation.
@Test
public void testCountersWithSpeculation() {
  // Task allows only a single regular attempt so the speculative one matters.
  mockTask =
      new MockTaskImpl(
          jobId, partition, dispatcher.getEventHandler(), remoteJobConfFile, conf,
          taskAttemptListener, jobToken, credentials, clock, startCount, metrics,
          appContext, TaskType.MAP) {
        @Override
        protected int getMaxAttempts() {
          return 1;
        }
      };
  TaskId taskId = getNewTaskID();
  scheduleTaskAttempt(taskId);
  launchTaskAttempt(getLastAttempt().getAttemptId());
  updateLastAttemptState(TaskAttemptState.RUNNING);
  MockTaskAttemptImpl originalAttempt = getLastAttempt();

  // Kick off a speculative attempt alongside the running one.
  mockTask.handle(
      new TaskTAttemptEvent(getLastAttempt().getAttemptId(), TaskEventType.T_ADD_SPEC_ATTEMPT));
  launchTaskAttempt(getLastAttempt().getAttemptId());
  updateLastAttemptState(TaskAttemptState.RUNNING);
  MockTaskAttemptImpl speculativeAttempt = getLastAttempt();
  assertEquals(2, taskAttempts.size());

  // Give the speculative attempt a distinctive CPU counter value.
  Counters speculativeCounters = new Counters();
  speculativeCounters.findCounter(TaskCounter.CPU_MILLISECONDS).setValue(1000);
  speculativeAttempt.setCounters(speculativeCounters);

  // Let the speculative attempt succeed; the base attempt also reaches 1.0 progress.
  commitTaskAttempt(speculativeAttempt.getAttemptId());
  speculativeAttempt.setProgress(1.0f);
  speculativeAttempt.setState(TaskAttemptState.SUCCEEDED);
  mockTask.handle(
      new TaskTAttemptEvent(speculativeAttempt.getAttemptId(), TaskEventType.T_ATTEMPT_SUCCEEDED));
  assertEquals(TaskState.SUCCEEDED, mockTask.getState());
  originalAttempt.setProgress(1.0f);

  // The task must report the successful (speculative) attempt's counters.
  assertEquals("wrong counters for task", speculativeCounters, mockTask.getCounters());
}
Use of org.apache.hadoop.mapreduce.Counters in the project hadoop by apache: class TaskAttemptImpl, method updateProgressSplits.
/**
 * Folds the latest reported progress and counter values into this attempt's
 * progress-splits block (wallclock time, CPU time, virtual and physical
 * memory series). A no-op when counters have not been reported yet or when
 * no splits block exists for this attempt.
 */
private void updateProgressSplits() {
  // Clamp reported progress to [0, 1] in case the status carried a bad value.
  double newProgress = reportedStatus.progress;
  newProgress = Math.max(Math.min(newProgress, 1.0D), 0.0D);
  Counters counters = reportedStatus.counters;
  if (counters == null)
    return;
  WrappedProgressSplitsBlock splitsBlock = getProgressSplitBlock();
  if (splitsBlock != null) {
    long now = clock.getTime();
    // TODO Ensure not 0
    long start = getLaunchTime();
    // Only record wallclock time when launch time is known and the elapsed
    // span fits in an int (extend() takes an int).
    if (start != 0 && now - start <= Integer.MAX_VALUE) {
      splitsBlock.getProgressWallclockTime().extend(newProgress, (int) (now - start));
    }
    Counter cpuCounter = counters.findCounter(TaskCounter.CPU_MILLISECONDS);
    if (cpuCounter != null && cpuCounter.getValue() <= Integer.MAX_VALUE) {
      splitsBlock.getProgressCPUTime().extend(newProgress, (int) cpuCounter.getValue());
    }
    Counter virtualBytes = counters.findCounter(TaskCounter.VIRTUAL_MEMORY_BYTES);
    if (virtualBytes != null) {
      // Saturate instead of truncating: a plain (int) cast of a very large
      // byte count would wrap to a negative value (the "long to int" TODO
      // flagged in the original code).
      splitsBlock.getProgressVirtualMemoryKbytes().extend(
          newProgress, clampToInt(virtualBytes.getValue() / (MEMORY_SPLITS_RESOLUTION)));
    }
    Counter physicalBytes = counters.findCounter(TaskCounter.PHYSICAL_MEMORY_BYTES);
    if (physicalBytes != null) {
      splitsBlock.getProgressPhysicalMemoryKbytes().extend(
          newProgress, clampToInt(physicalBytes.getValue() / (MEMORY_SPLITS_RESOLUTION)));
    }
  }
}

/** Saturates a long to the int range instead of letting a narrowing cast wrap around. */
private static int clampToInt(long value) {
  return (int) Math.min(Math.max(value, Integer.MIN_VALUE), Integer.MAX_VALUE);
}
Use of org.apache.hadoop.mapreduce.Counters in the project hadoop by apache: class TaskAttemptImpl, method initTaskAttemptStatus.
/** Resets the given status object to the initial (pre-launch) attempt state. */
private void initTaskAttemptStatus(TaskAttemptStatus result) {
  result.progress = 0.0f;
  result.phase = Phase.STARTING;
  result.stateString = "NEW";
  result.taskState = TaskAttemptState.NEW;
  result.counters = EMPTY_COUNTERS;
}
Aggregations