Usage of org.apache.hive.spark.client.metrics.ShuffleWriteMetrics in project hive by apache:
the MetricsCollection class, method aggregate.
/**
 * Aggregates the metrics of every task accepted by {@code filter} into a single
 * {@link Metrics} instance.
 *
 * <p>Scalar task metrics are summed unconditionally. The optional metric groups
 * (input, shuffle read, shuffle write) are only materialized in the result when at
 * least one matching task actually reported them; otherwise the corresponding field
 * of the returned {@link Metrics} is {@code null}.
 *
 * @param filter predicate selecting which tasks participate in the aggregation
 * @return the aggregated metrics (never {@code null})
 */
private Metrics aggregate(Predicate<TaskInfo> filter) {
  // Read lock: aggregation only reads taskMetrics; writers take the write lock.
  lock.readLock().lock();
  try {
    // Task metrics.
    long executorDeserializeTime = 0L;
    long executorRunTime = 0L;
    long resultSize = 0L;
    long jvmGCTime = 0L;
    long resultSerializationTime = 0L;
    long memoryBytesSpilled = 0L;
    long diskBytesSpilled = 0L;
    long taskDurationTime = 0L;

    // Input metrics.
    boolean hasInputMetrics = false;
    long bytesRead = 0L;

    // Shuffle read metrics.
    boolean hasShuffleReadMetrics = false;
    int remoteBlocksFetched = 0;
    int localBlocksFetched = 0;
    long fetchWaitTime = 0L;
    long remoteBytesRead = 0L;

    // Shuffle write metrics.
    // BUG FIX: the original reused hasShuffleReadMetrics to decide whether to build
    // the ShuffleWriteMetrics result, so write metrics were dropped (or fabricated
    // as zeros) whenever read and write metrics were not reported together. Track
    // write metrics with their own flag.
    boolean hasShuffleWriteMetrics = false;
    long shuffleBytesWritten = 0L;
    long shuffleWriteTime = 0L;

    for (TaskInfo info : Collections2.filter(taskMetrics, filter)) {
      Metrics m = info.metrics;
      executorDeserializeTime += m.executorDeserializeTime;
      executorRunTime += m.executorRunTime;
      resultSize += m.resultSize;
      jvmGCTime += m.jvmGCTime;
      resultSerializationTime += m.resultSerializationTime;
      memoryBytesSpilled += m.memoryBytesSpilled;
      diskBytesSpilled += m.diskBytesSpilled;
      taskDurationTime += m.taskDurationTime;
      if (m.inputMetrics != null) {
        hasInputMetrics = true;
        bytesRead += m.inputMetrics.bytesRead;
      }
      if (m.shuffleReadMetrics != null) {
        hasShuffleReadMetrics = true;
        remoteBlocksFetched += m.shuffleReadMetrics.remoteBlocksFetched;
        localBlocksFetched += m.shuffleReadMetrics.localBlocksFetched;
        fetchWaitTime += m.shuffleReadMetrics.fetchWaitTime;
        remoteBytesRead += m.shuffleReadMetrics.remoteBytesRead;
      }
      if (m.shuffleWriteMetrics != null) {
        hasShuffleWriteMetrics = true;
        shuffleBytesWritten += m.shuffleWriteMetrics.shuffleBytesWritten;
        shuffleWriteTime += m.shuffleWriteMetrics.shuffleWriteTime;
      }
    }

    InputMetrics inputMetrics = null;
    if (hasInputMetrics) {
      inputMetrics = new InputMetrics(bytesRead);
    }

    ShuffleReadMetrics shuffleReadMetrics = null;
    if (hasShuffleReadMetrics) {
      shuffleReadMetrics = new ShuffleReadMetrics(remoteBlocksFetched, localBlocksFetched,
          fetchWaitTime, remoteBytesRead);
    }

    ShuffleWriteMetrics shuffleWriteMetrics = null;
    if (hasShuffleWriteMetrics) {
      shuffleWriteMetrics = new ShuffleWriteMetrics(shuffleBytesWritten, shuffleWriteTime);
    }

    return new Metrics(executorDeserializeTime, executorRunTime, resultSize, jvmGCTime,
        resultSerializationTime, memoryBytesSpilled, diskBytesSpilled, taskDurationTime,
        inputMetrics, shuffleReadMetrics, shuffleWriteMetrics);
  } finally {
    lock.readLock().unlock();
  }
}
Usage of org.apache.hive.spark.client.metrics.ShuffleWriteMetrics in project hive by apache:
the TestMetricsCollection class, method testOptionalMetrics.
@Test
public void testOptionalMetrics() {
  // Build a Metrics instance whose scalar fields are all set but whose optional
  // metric groups (input / shuffle read / shuffle write) are absent.
  long scalar = taskValue(1, 1, 1L);
  Metrics bare = new Metrics(scalar, scalar, scalar, scalar, scalar, scalar, scalar,
      scalar, null, null, null);

  MetricsCollection collection = new MetricsCollection();
  for (int jobId = 1; jobId <= 2; jobId++) {
    collection.addMetrics(jobId, 1, 1, bare);
  }

  // With only bare metrics recorded, the aggregate must not fabricate any of the
  // optional groups.
  Metrics aggregated = collection.getAllMetrics();
  assertNull(aggregated.inputMetrics);
  assertNull(aggregated.shuffleReadMetrics);
  assertNull(aggregated.shuffleWriteMetrics);

  // After one task reports full metrics, all optional groups must show up in the
  // aggregate, and the input bytes must match that single contribution.
  collection.addMetrics(3, 1, 1, makeMetrics(3, 1, 1));
  Metrics updated = collection.getAllMetrics();
  assertNotNull(updated.inputMetrics);
  assertEquals(taskValue(3, 1, 1), updated.inputMetrics.bytesRead);
  assertNotNull(updated.shuffleReadMetrics);
  assertNotNull(updated.shuffleWriteMetrics);
}
Aggregations