Use of org.apache.hadoop.io.DataInputBuffer in project hadoop by apache.
The class NativeRuntime, method reportStatus.
/**
 * Get the status report from native space
 */
public static void reportStatus(TaskReporter reporter) throws IOException {
  assertNativeLibraryLoaded();
  synchronized (reporter) {
    final byte[] statusBytes = JNIUpdateStatus();
    final DataInputBuffer ib = new DataInputBuffer();
    ib.reset(statusBytes, statusBytes.length);
    final FloatWritable progress = new FloatWritable();
    progress.readFields(ib);
    reporter.setProgress(progress.get());
    final Text status = new Text();
    status.readFields(ib);
    if (status.getLength() > 0) {
      reporter.setStatus(status.toString());
    }
    final IntWritable numCounters = new IntWritable();
    numCounters.readFields(ib);
    if (numCounters.get() == 0) {
      return;
    }
    final Text group = new Text();
    final Text name = new Text();
    final LongWritable amount = new LongWritable();
    for (int i = 0; i < numCounters.get(); i++) {
      group.readFields(ib);
      name.readFields(ib);
      amount.readFields(ib);
      reporter.incrCounter(group.toString(), name.toString(), amount.get());
    }
  }
}
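The layout consumed above is a float progress value, a status string, a counter count, and then (group, name, amount) triples. A minimal encoder sketch for that layout, useful for exercising reportStatus without the native library; the class name, counter group, and counter name below are hypothetical, not part of the Hadoop code:

import java.io.IOException;
import java.util.Arrays;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.FloatWritable;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;

// Hypothetical encoder mirroring the field order reportStatus() reads.
public final class StatusBytesSketch {
  public static byte[] encode() throws IOException {
    DataOutputBuffer out = new DataOutputBuffer();
    new FloatWritable(0.5f).write(out);          // progress
    new Text("map 50% done").write(out);         // status message (may be empty)
    new IntWritable(1).write(out);               // number of counters that follow
    new Text("example.group").write(out);        // counter group (hypothetical)
    new Text("RECORDS_READ").write(out);         // counter name (hypothetical)
    new LongWritable(1000L).write(out);          // counter amount
    // Copy only the valid prefix; getData() may return a larger backing array.
    return Arrays.copyOf(out.getData(), out.getLength());
  }
}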
Use of org.apache.hadoop.io.DataInputBuffer in project hadoop by apache.
The class TestCopyListingFileStatus, method testCopyListingFileStatusSerialization.
@Test
public void testCopyListingFileStatusSerialization() throws Exception {
  CopyListingFileStatus src = new CopyListingFileStatus(4344L, false, 2, 512 << 20, 1234L, 5678L,
      new FsPermission((short) 0512), "dingo", "yaks", new Path("hdfs://localhost:4344"));
  DataOutputBuffer dob = new DataOutputBuffer();
  src.write(dob);
  DataInputBuffer dib = new DataInputBuffer();
  dib.reset(dob.getData(), 0, dob.getLength());
  CopyListingFileStatus dst = new CopyListingFileStatus();
  dst.readFields(dib);
  assertEquals(src, dst);
}
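The same write/reset/readFields idiom works for any Writable. A small generic helper, offered here as a sketch rather than anything taken from the Hadoop test code:

import java.io.IOException;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.Writable;

public final class WritableRoundTrip {
  // Serialize src into a fresh buffer and populate dst from those bytes.
  public static <T extends Writable> T roundTrip(Writable src, T dst) throws IOException {
    DataOutputBuffer dob = new DataOutputBuffer();
    src.write(dob);
    DataInputBuffer dib = new DataInputBuffer();
    dib.reset(dob.getData(), 0, dob.getLength());  // bound by getLength(), not getData().length
    dst.readFields(dib);
    return dst;
  }
}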
Use of org.apache.hadoop.io.DataInputBuffer in project hadoop by apache.
The class TestGridmixRecord, method checkSpec.
static void checkSpec(GridmixKey a, GridmixKey b) throws Exception {
  final Random r = new Random();
  final long s = r.nextLong();
  r.setSeed(s);
  LOG.info("spec: " + s);
  final DataInputBuffer in = new DataInputBuffer();
  final DataOutputBuffer out = new DataOutputBuffer();
  a.setType(GridmixKey.REDUCE_SPEC);
  b.setType(GridmixKey.REDUCE_SPEC);
  for (int i = 0; i < 100; ++i) {
    final int in_rec = r.nextInt(Integer.MAX_VALUE);
    a.setReduceInputRecords(in_rec);
    final int out_rec = r.nextInt(Integer.MAX_VALUE);
    a.setReduceOutputRecords(out_rec);
    final int out_bytes = r.nextInt(Integer.MAX_VALUE);
    a.setReduceOutputBytes(out_bytes);
    final int min = WritableUtils.getVIntSize(in_rec) + WritableUtils.getVIntSize(out_rec)
        + WritableUtils.getVIntSize(out_bytes) + WritableUtils.getVIntSize(0);
    // meta + vint min
    assertEquals(min + 2, a.fixedBytes());
    final int size = r.nextInt(1024) + a.fixedBytes() + 1;
    setSerialize(a, r.nextLong(), size, out);
    assertEquals(size, out.getLength());
    assertTrue(a.equals(a));
    assertEquals(0, a.compareTo(a));
    in.reset(out.getData(), 0, out.getLength());
    b.readFields(in);
    assertEquals(size, b.getSize());
    assertEquals(in_rec, b.getReduceInputRecords());
    assertEquals(out_rec, b.getReduceOutputRecords());
    assertEquals(out_bytes, b.getReduceOutputBytes());
    assertTrue(a.equals(b));
    assertEquals(0, a.compareTo(b));
    assertEquals(a.hashCode(), b.hashCode());
  }
}
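The min computation above depends on how Hadoop's variable-length integer encoding sizes values. A few illustrative calls (the sample values are chosen here for demonstration only):

import org.apache.hadoop.io.WritableUtils;

public final class VIntSizes {
  public static void main(String[] args) {
    // Values in [-112, 127] encode in a single byte; larger values use a
    // marker byte followed by the payload bytes.
    System.out.println(WritableUtils.getVIntSize(0));                 // 1
    System.out.println(WritableUtils.getVIntSize(127));               // 1
    System.out.println(WritableUtils.getVIntSize(128));               // 2
    System.out.println(WritableUtils.getVIntSize(Integer.MAX_VALUE)); // 5
  }
}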
Use of org.apache.hadoop.io.DataInputBuffer in project hadoop by apache.
The class TestGridmixRecord, method randomReplayTest.
static void randomReplayTest(GridmixRecord x, GridmixRecord y, int min, int max) throws Exception {
  final Random r = new Random();
  final long seed = r.nextLong();
  r.setSeed(seed);
  LOG.info("randReplay: " + seed);
  final DataOutputBuffer out1 = new DataOutputBuffer();
  for (int i = min; i < max; ++i) {
    final int s = out1.getLength();
    x.setSeed(r.nextLong());
    x.setSize(i);
    x.write(out1);
    assertEquals(i, out1.getLength() - s);
  }
  final DataInputBuffer in = new DataInputBuffer();
  in.reset(out1.getData(), 0, out1.getLength());
  final DataOutputBuffer out2 = new DataOutputBuffer();
  // deserialize written records, write to separate buffer
  for (int i = min; i < max; ++i) {
    final int s = in.getPosition();
    y.readFields(in);
    assertEquals(i, in.getPosition() - s);
    y.write(out2);
  }
  // verify written contents match
  assertEquals(out1.getLength(), out2.getLength());
  // assumes that writes will grow buffer deterministically
  assertEquals("Bad test", out1.getData().length, out2.getData().length);
  assertArrayEquals(out1.getData(), out2.getData());
}
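The replay loop above knows how many records to expect and checks each record's size via getPosition() deltas. When the record count is not known in advance, the same buffer can instead be drained by position. A minimal, self-contained sketch using Text records (not taken from the Gridmix tests):

import java.io.IOException;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.Text;

public final class DrainByPosition {
  public static void main(String[] args) throws IOException {
    DataOutputBuffer out = new DataOutputBuffer();
    for (String s : new String[] {"alpha", "beta", "gamma"}) {
      new Text(s).write(out);                      // records written back to back
    }
    DataInputBuffer in = new DataInputBuffer();
    in.reset(out.getData(), 0, out.getLength());   // wrap only the valid prefix
    Text record = new Text();
    while (in.getPosition() < in.getLength()) {    // stop at the logical end, not the array end
      record.readFields(in);
      System.out.println(record);
    }
  }
}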
Use of org.apache.hadoop.io.DataInputBuffer in project hive by apache.
The class ContainerRunnerImpl, method submitWork.
@Override
public SubmitWorkResponseProto submitWork(SubmitWorkRequestProto request) throws IOException {
  LlapTokenInfo tokenInfo = null;
  try {
    tokenInfo = LlapTokenChecker.getTokenInfo(clusterId);
  } catch (SecurityException ex) {
    logSecurityErrorRarely(null);
    throw ex;
  }
  SignableVertexSpec vertex = extractVertexSpec(request, tokenInfo);
  TezEvent initialEvent = extractInitialEvent(request, tokenInfo);
  TezTaskAttemptID attemptId = Converters.createTaskAttemptId(vertex.getQueryIdentifier(),
      vertex.getVertexIndex(), request.getFragmentNumber(), request.getAttemptNumber());
  String fragmentIdString = attemptId.toString();
  if (LOG.isInfoEnabled()) {
    LOG.info("Queueing container for execution: fragemendId={}, {}", fragmentIdString,
        stringifySubmitRequest(request, vertex));
  }
  QueryIdentifierProto qIdProto = vertex.getQueryIdentifier();
  HistoryLogger.logFragmentStart(qIdProto.getApplicationIdString(), request.getContainerIdString(),
      localAddress.get().getHostName(),
      constructUniqueQueryId(vertex.getHiveQueryId(), qIdProto.getDagIndex()),
      qIdProto.getDagIndex(), vertex.getVertexName(), request.getFragmentNumber(),
      request.getAttemptNumber());
  // This is the start of container-annotated logging.
  final String dagId = attemptId.getTaskID().getVertexID().getDAGId().toString();
  final String queryId = vertex.getHiveQueryId();
  final String fragmentId = LlapTezUtils.stripAttemptPrefix(fragmentIdString);
  MDC.put("dagId", dagId);
  MDC.put("queryId", queryId);
  MDC.put("fragmentId", fragmentId);
  // TODO: Ideally we want tez to use CallableWithMdc that retains the MDC for threads created in
  // thread pool. For now, we will push both dagId and queryId into NDC and the custom thread
  // pool that we use for task execution and llap io (StatsRecordingThreadPool) will pop them
  // using reflection and update the MDC.
  NDC.push(dagId);
  NDC.push(queryId);
  NDC.push(fragmentId);
  Scheduler.SubmissionState submissionState;
  SubmitWorkResponseProto.Builder responseBuilder = SubmitWorkResponseProto.newBuilder();
  try {
    Map<String, String> env = new HashMap<>();
    // TODO What else is required in this environment map.
    env.putAll(localEnv);
    env.put(ApplicationConstants.Environment.USER.name(), vertex.getUser());
    TezTaskAttemptID taskAttemptId = TezTaskAttemptID.fromString(fragmentIdString);
    int dagIdentifier = taskAttemptId.getTaskID().getVertexID().getDAGId().getId();
    QueryIdentifier queryIdentifier =
        new QueryIdentifier(qIdProto.getApplicationIdString(), dagIdentifier);
    Credentials credentials = new Credentials();
    DataInputBuffer dib = new DataInputBuffer();
    byte[] tokenBytes = request.getCredentialsBinary().toByteArray();
    dib.reset(tokenBytes, tokenBytes.length);
    credentials.readTokenStorageStream(dib);
    Token<JobTokenIdentifier> jobToken = TokenCache.getSessionToken(credentials);
    LlapNodeId amNodeId = LlapNodeId.getInstance(request.getAmHost(), request.getAmPort());
    QueryFragmentInfo fragmentInfo = queryTracker.registerFragment(queryIdentifier,
        qIdProto.getApplicationIdString(), dagId, vertex.getDagName(), vertex.getHiveQueryId(),
        dagIdentifier, vertex.getVertexName(), request.getFragmentNumber(),
        request.getAttemptNumber(), vertex.getUser(), vertex, jobToken, fragmentIdString,
        tokenInfo, amNodeId);
    String[] localDirs = fragmentInfo.getLocalDirs();
    Preconditions.checkNotNull(localDirs);
    if (LOG.isDebugEnabled()) {
      LOG.debug("Dirs are: " + Arrays.toString(localDirs));
    }
    // May need to setup localDir for re-localization, which is usually setup as Environment.PWD.
    // Used for re-localization, to add the user specified configuration (conf_pb_binary_stream)
    Configuration callableConf = new Configuration(getConfig());
    UserGroupInformation fsTaskUgi = fsUgiFactory == null ? null : fsUgiFactory.createUgi();
    boolean isGuaranteed = request.hasIsGuaranteed() && request.getIsGuaranteed();
    WmFragmentCounters wmCounters =
        new WmFragmentCounters(FragmentCountersMap.getCountersForFragment(fragmentId));
    TaskRunnerCallable callable = new TaskRunnerCallable(request, fragmentInfo, callableConf,
        new ExecutionContextImpl(localAddress.get().getHostName()), env, credentials,
        memoryPerExecutor, amReporter, confParams, metrics, killedTaskHandler, this,
        tezHadoopShim, attemptId, vertex, initialEvent, fsTaskUgi, completionListener,
        socketFactory, isGuaranteed, wmCounters);
    submissionState = executorService.schedule(callable);
    if (LOG.isInfoEnabled()) {
      LOG.info("SubmissionState for {} : {} ", fragmentIdString, submissionState);
    }
    if (submissionState.equals(Scheduler.SubmissionState.REJECTED)) {
      // Stop tracking the fragment and re-throw the error.
      fragmentComplete(fragmentInfo);
      return responseBuilder.setSubmissionState(SubmissionStateProto.valueOf(submissionState.name()))
          .build();
    }
    if (metrics != null) {
      metrics.incrExecutorTotalRequestsHandled();
    }
  } finally {
    MDC.clear();
    NDC.clear();
  }
  return responseBuilder.setUniqueNodeId(daemonId.getUniqueNodeIdInCluster())
      .setSubmissionState(SubmissionStateProto.valueOf(submissionState.name())).build();
}
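On the decode side above, the credentials arrive as raw bytes in the request and are rehydrated with readTokenStorageStream over a DataInputBuffer. The matching encode side is not shown in this snippet; a sketch of how such bytes could be produced, assuming a protobuf-generated ByteString field like the one submitWork() reads:

import java.io.IOException;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.security.Credentials;
import com.google.protobuf.ByteString;

public final class CredentialsEncodeSketch {
  // Pack Credentials into the same token-storage format that submitWork() decodes.
  public static ByteString toByteString(Credentials credentials) throws IOException {
    DataOutputBuffer dob = new DataOutputBuffer();
    credentials.writeTokenStorageToStream(dob);   // counterpart of readTokenStorageStream()
    return ByteString.copyFrom(dob.getData(), 0, dob.getLength());
  }
}

The resulting ByteString would then be set on the request builder's credentials-binary field before submission.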