
Example 81 with DataInputBuffer

use of org.apache.hadoop.io.DataInputBuffer in project hadoop by apache.

the class NativeRuntime method reportStatus.

/**
   * Get the status report from native space
   */
public static void reportStatus(TaskReporter reporter) throws IOException {
    assertNativeLibraryLoaded();
    synchronized (reporter) {
        final byte[] statusBytes = JNIUpdateStatus();
        final DataInputBuffer ib = new DataInputBuffer();
        ib.reset(statusBytes, statusBytes.length);
        final FloatWritable progress = new FloatWritable();
        progress.readFields(ib);
        reporter.setProgress(progress.get());
        final Text status = new Text();
        status.readFields(ib);
        if (status.getLength() > 0) {
            reporter.setStatus(status.toString());
        }
        final IntWritable numCounters = new IntWritable();
        numCounters.readFields(ib);
        if (numCounters.get() == 0) {
            return;
        }
        final Text group = new Text();
        final Text name = new Text();
        final LongWritable amount = new LongWritable();
        for (int i = 0; i < numCounters.get(); i++) {
            group.readFields(ib);
            name.readFields(ib);
            amount.readFields(ib);
            reporter.incrCounter(group.toString(), name.toString(), amount.get());
        }
    }
}
Also used: FloatWritable(org.apache.hadoop.io.FloatWritable), DataInputBuffer(org.apache.hadoop.io.DataInputBuffer), Text(org.apache.hadoop.io.Text), LongWritable(org.apache.hadoop.io.LongWritable), IntWritable(org.apache.hadoop.io.IntWritable)
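
The read order in reportStatus() implies the wire format that the native side of JNIUpdateStatus() must emit: a FloatWritable progress value, a Text status string, an IntWritable counter count, then one (group, name, amount) triple per counter. Below is a minimal Java sketch of an encoder for that layout, for illustration only; the real bytes are produced in native code, and encodeStatus is a hypothetical helper, not part of Hadoop.

import java.io.IOException;
import java.util.Arrays;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.FloatWritable;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;

public static byte[] encodeStatus(float progress, String status, String[] groups, String[] names, long[] amounts) throws IOException {
    // Mirror the read order in reportStatus(): progress, status, counter count
    final DataOutputBuffer out = new DataOutputBuffer();
    new FloatWritable(progress).write(out);
    new Text(status).write(out);
    new IntWritable(groups.length).write(out);
    // Then one (group, name, amount) triple per counter
    for (int i = 0; i < groups.length; i++) {
        new Text(groups[i]).write(out);
        new Text(names[i]).write(out);
        new LongWritable(amounts[i]).write(out);
    }
    // getData() returns the whole backing array; trim to the written length
    return Arrays.copyOf(out.getData(), out.getLength());
}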

Example 82 with DataInputBuffer

use of org.apache.hadoop.io.DataInputBuffer in project hadoop by apache.

the class TestCopyListingFileStatus method testCopyListingFileStatusSerialization.

@Test
public void testCopyListingFileStatusSerialization() throws Exception {
    CopyListingFileStatus src = new CopyListingFileStatus(4344L, false, 2, 512 << 20, 1234L, 5678L, new FsPermission((short) 0512), "dingo", "yaks", new Path("hdfs://localhost:4344"));
    DataOutputBuffer dob = new DataOutputBuffer();
    src.write(dob);
    DataInputBuffer dib = new DataInputBuffer();
    dib.reset(dob.getData(), 0, dob.getLength());
    CopyListingFileStatus dst = new CopyListingFileStatus();
    dst.readFields(dib);
    assertEquals(src, dst);
}
Also used: Path(org.apache.hadoop.fs.Path), DataInputBuffer(org.apache.hadoop.io.DataInputBuffer), DataOutputBuffer(org.apache.hadoop.io.DataOutputBuffer), FsPermission(org.apache.hadoop.fs.permission.FsPermission), Test(org.junit.Test)
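
Examples 82 through 84 all rest on the same round-trip idiom: serialize a Writable into a DataOutputBuffer, wrap the written bytes with DataInputBuffer.reset(), and deserialize into a fresh instance. A minimal generic sketch of that idiom (roundTrip is a hypothetical helper, not a Hadoop API):

import java.io.IOException;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.Writable;

public static <T extends Writable> T roundTrip(T src, T dst) throws IOException {
    final DataOutputBuffer out = new DataOutputBuffer();
    // Serialize the source object
    src.write(out);
    final DataInputBuffer in = new DataInputBuffer();
    // Point the input buffer at exactly the bytes just written
    in.reset(out.getData(), 0, out.getLength());
    // Populate the destination object from those bytes
    dst.readFields(in);
    return dst;
}

A test then only needs to assert that src and dst compare equal, which is exactly what testCopyListingFileStatusSerialization does.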

Example 83 with DataInputBuffer

use of org.apache.hadoop.io.DataInputBuffer in project hadoop by apache.

the class TestGridmixRecord method checkSpec.

static void checkSpec(GridmixKey a, GridmixKey b) throws Exception {
    final Random r = new Random();
    final long s = r.nextLong();
    r.setSeed(s);
    LOG.info("spec: " + s);
    final DataInputBuffer in = new DataInputBuffer();
    final DataOutputBuffer out = new DataOutputBuffer();
    a.setType(GridmixKey.REDUCE_SPEC);
    b.setType(GridmixKey.REDUCE_SPEC);
    for (int i = 0; i < 100; ++i) {
        final int in_rec = r.nextInt(Integer.MAX_VALUE);
        a.setReduceInputRecords(in_rec);
        final int out_rec = r.nextInt(Integer.MAX_VALUE);
        a.setReduceOutputRecords(out_rec);
        final int out_bytes = r.nextInt(Integer.MAX_VALUE);
        a.setReduceOutputBytes(out_bytes);
        final int min = WritableUtils.getVIntSize(in_rec) + WritableUtils.getVIntSize(out_rec) + WritableUtils.getVIntSize(out_bytes) + WritableUtils.getVIntSize(0);
        // meta + vint min
        assertEquals(min + 2, a.fixedBytes());
        final int size = r.nextInt(1024) + a.fixedBytes() + 1;
        setSerialize(a, r.nextLong(), size, out);
        assertEquals(size, out.getLength());
        assertTrue(a.equals(a));
        assertEquals(0, a.compareTo(a));
        in.reset(out.getData(), 0, out.getLength());
        b.readFields(in);
        assertEquals(size, b.getSize());
        assertEquals(in_rec, b.getReduceInputRecords());
        assertEquals(out_rec, b.getReduceOutputRecords());
        assertEquals(out_bytes, b.getReduceOutputBytes());
        assertTrue(a.equals(b));
        assertEquals(0, a.compareTo(b));
        assertEquals(a.hashCode(), b.hashCode());
    }
}
Also used: DataInputBuffer(org.apache.hadoop.io.DataInputBuffer), Random(java.util.Random), DataOutputBuffer(org.apache.hadoop.io.DataOutputBuffer)
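
The min + 2 assertion depends on Hadoop's variable-length integer encoding: WritableUtils.getVIntSize() reports 1 byte for values in [-112, 127] and 1 + n bytes for a value needing n payload bytes. A few illustrative calls (standard WritableUtils behavior, not taken from the test):

WritableUtils.getVIntSize(0);                  // 1
WritableUtils.getVIntSize(127);                // 1
WritableUtils.getVIntSize(128);                // 2
WritableUtils.getVIntSize(Integer.MAX_VALUE);  // 5

So min is the combined vint size of the three counter values plus a zero vint, and, per the "meta + vint min" comment, fixedBytes() adds 2 bytes of record metadata on top.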

Example 84 with DataInputBuffer

use of org.apache.hadoop.io.DataInputBuffer in project hadoop by apache.

the class TestGridmixRecord method randomReplayTest.

static void randomReplayTest(GridmixRecord x, GridmixRecord y, int min, int max) throws Exception {
    final Random r = new Random();
    final long seed = r.nextLong();
    r.setSeed(seed);
    LOG.info("randReplay: " + seed);
    final DataOutputBuffer out1 = new DataOutputBuffer();
    for (int i = min; i < max; ++i) {
        final int s = out1.getLength();
        x.setSeed(r.nextLong());
        x.setSize(i);
        x.write(out1);
        assertEquals(i, out1.getLength() - s);
    }
    final DataInputBuffer in = new DataInputBuffer();
    in.reset(out1.getData(), 0, out1.getLength());
    final DataOutputBuffer out2 = new DataOutputBuffer();
    // deserialize written records, write to separate buffer
    for (int i = min; i < max; ++i) {
        final int s = in.getPosition();
        y.readFields(in);
        assertEquals(i, in.getPosition() - s);
        y.write(out2);
    }
    // verify written contents match
    assertEquals(out1.getLength(), out2.getLength());
    // assumes that writes will grow buffer deterministically
    assertEquals("Bad test", out1.getData().length, out2.getData().length);
    assertArrayEquals(out1.getData(), out2.getData());
}
Also used: DataInputBuffer(org.apache.hadoop.io.DataInputBuffer), Random(java.util.Random), DataOutputBuffer(org.apache.hadoop.io.DataOutputBuffer)

Example 85 with DataInputBuffer

use of org.apache.hadoop.io.DataInputBuffer in project hive by apache.

the class ContainerRunnerImpl method submitWork.

@Override
public SubmitWorkResponseProto submitWork(SubmitWorkRequestProto request) throws IOException {
    LlapTokenInfo tokenInfo = null;
    try {
        tokenInfo = LlapTokenChecker.getTokenInfo(clusterId);
    } catch (SecurityException ex) {
        logSecurityErrorRarely(null);
        throw ex;
    }
    SignableVertexSpec vertex = extractVertexSpec(request, tokenInfo);
    TezEvent initialEvent = extractInitialEvent(request, tokenInfo);
    TezTaskAttemptID attemptId = Converters.createTaskAttemptId(vertex.getQueryIdentifier(), vertex.getVertexIndex(), request.getFragmentNumber(), request.getAttemptNumber());
    String fragmentIdString = attemptId.toString();
    if (LOG.isInfoEnabled()) {
        LOG.info("Queueing container for execution: fragemendId={}, {}", fragmentIdString, stringifySubmitRequest(request, vertex));
    }
    QueryIdentifierProto qIdProto = vertex.getQueryIdentifier();
    HistoryLogger.logFragmentStart(qIdProto.getApplicationIdString(), request.getContainerIdString(), localAddress.get().getHostName(), constructUniqueQueryId(vertex.getHiveQueryId(), qIdProto.getDagIndex()), qIdProto.getDagIndex(), vertex.getVertexName(), request.getFragmentNumber(), request.getAttemptNumber());
    // This is the start of container-annotated logging.
    final String dagId = attemptId.getTaskID().getVertexID().getDAGId().toString();
    final String queryId = vertex.getHiveQueryId();
    final String fragmentId = LlapTezUtils.stripAttemptPrefix(fragmentIdString);
    MDC.put("dagId", dagId);
    MDC.put("queryId", queryId);
    MDC.put("fragmentId", fragmentId);
    // TODO: Ideally we want tez to use CallableWithMdc that retains the MDC for threads created in
    // thread pool. For now, we will push both dagId and queryId into NDC and the custom thread
    // pool that we use for task execution and llap io (StatsRecordingThreadPool) will pop them
    // using reflection and update the MDC.
    NDC.push(dagId);
    NDC.push(queryId);
    NDC.push(fragmentId);
    Scheduler.SubmissionState submissionState;
    SubmitWorkResponseProto.Builder responseBuilder = SubmitWorkResponseProto.newBuilder();
    try {
        Map<String, String> env = new HashMap<>();
        // TODO What else is required in this environment map.
        env.putAll(localEnv);
        env.put(ApplicationConstants.Environment.USER.name(), vertex.getUser());
        TezTaskAttemptID taskAttemptId = TezTaskAttemptID.fromString(fragmentIdString);
        int dagIdentifier = taskAttemptId.getTaskID().getVertexID().getDAGId().getId();
        QueryIdentifier queryIdentifier = new QueryIdentifier(qIdProto.getApplicationIdString(), dagIdentifier);
        Credentials credentials = new Credentials();
        DataInputBuffer dib = new DataInputBuffer();
        byte[] tokenBytes = request.getCredentialsBinary().toByteArray();
        dib.reset(tokenBytes, tokenBytes.length);
        credentials.readTokenStorageStream(dib);
        Token<JobTokenIdentifier> jobToken = TokenCache.getSessionToken(credentials);
        LlapNodeId amNodeId = LlapNodeId.getInstance(request.getAmHost(), request.getAmPort());
        QueryFragmentInfo fragmentInfo = queryTracker.registerFragment(queryIdentifier, qIdProto.getApplicationIdString(), dagId, vertex.getDagName(), vertex.getHiveQueryId(), dagIdentifier, vertex.getVertexName(), request.getFragmentNumber(), request.getAttemptNumber(), vertex.getUser(), vertex, jobToken, fragmentIdString, tokenInfo, amNodeId);
        String[] localDirs = fragmentInfo.getLocalDirs();
        Preconditions.checkNotNull(localDirs);
        if (LOG.isDebugEnabled()) {
            LOG.debug("Dirs are: " + Arrays.toString(localDirs));
        }
        // May need to setup localDir for re-localization, which is usually setup as Environment.PWD.
        // Used for re-localization, to add the user specified configuration (conf_pb_binary_stream)
        Configuration callableConf = new Configuration(getConfig());
        UserGroupInformation fsTaskUgi = fsUgiFactory == null ? null : fsUgiFactory.createUgi();
        boolean isGuaranteed = request.hasIsGuaranteed() && request.getIsGuaranteed();
        WmFragmentCounters wmCounters = new WmFragmentCounters(FragmentCountersMap.getCountersForFragment(fragmentId));
        TaskRunnerCallable callable = new TaskRunnerCallable(request, fragmentInfo, callableConf, new ExecutionContextImpl(localAddress.get().getHostName()), env, credentials, memoryPerExecutor, amReporter, confParams, metrics, killedTaskHandler, this, tezHadoopShim, attemptId, vertex, initialEvent, fsTaskUgi, completionListener, socketFactory, isGuaranteed, wmCounters);
        submissionState = executorService.schedule(callable);
        if (LOG.isInfoEnabled()) {
            LOG.info("SubmissionState for {} : {} ", fragmentIdString, submissionState);
        }
        if (submissionState.equals(Scheduler.SubmissionState.REJECTED)) {
            // Stop tracking the fragment and re-throw the error.
            fragmentComplete(fragmentInfo);
            return responseBuilder.setSubmissionState(SubmissionStateProto.valueOf(submissionState.name())).build();
        }
        if (metrics != null) {
            metrics.incrExecutorTotalRequestsHandled();
        }
    } finally {
        MDC.clear();
        NDC.clear();
    }
    return responseBuilder.setUniqueNodeId(daemonId.getUniqueNodeIdInCluster()).setSubmissionState(SubmissionStateProto.valueOf(submissionState.name())).build();
}
Also used: LlapTokenInfo(org.apache.hadoop.hive.llap.daemon.impl.LlapTokenChecker.LlapTokenInfo), Configuration(org.apache.hadoop.conf.Configuration), TezConfiguration(org.apache.tez.dag.api.TezConfiguration), HashMap(java.util.HashMap), ByteString(com.google.protobuf.ByteString), UserGroupInformation(org.apache.hadoop.security.UserGroupInformation), WmFragmentCounters(org.apache.hadoop.hive.llap.counters.WmFragmentCounters), ExecutionContextImpl(org.apache.tez.runtime.api.impl.ExecutionContextImpl), JobTokenIdentifier(org.apache.tez.common.security.JobTokenIdentifier), LlapNodeId(org.apache.hadoop.hive.llap.LlapNodeId), DataInputBuffer(org.apache.hadoop.io.DataInputBuffer), SignableVertexSpec(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SignableVertexSpec), QueryIdentifierProto(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto), SubmitWorkResponseProto(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkResponseProto), NotTezEvent(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.NotTezEvent), TezEvent(org.apache.tez.runtime.api.impl.TezEvent), Credentials(org.apache.hadoop.security.Credentials), TezTaskAttemptID(org.apache.tez.dag.records.TezTaskAttemptID)
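
submitWork() decodes the request's credentialsBinary field back into a Credentials object through a DataInputBuffer and readTokenStorageStream(). A minimal sketch of the matching encode step a caller would perform (assumed for illustration; serializeCredentials is a hypothetical helper):

import java.io.IOException;
import com.google.protobuf.ByteString;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.security.Credentials;

public static ByteString serializeCredentials(Credentials credentials) throws IOException {
    final DataOutputBuffer dob = new DataOutputBuffer();
    // Credentials writes its tokens and secret keys to any DataOutputStream
    credentials.writeTokenStorageToStream(dob);
    // Wrap only the written prefix of the backing array
    return ByteString.copyFrom(dob.getData(), 0, dob.getLength());
}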

Aggregations

DataInputBuffer (org.apache.hadoop.io.DataInputBuffer): 112
Test (org.junit.Test): 49
DataOutputBuffer (org.apache.hadoop.io.DataOutputBuffer): 45
IOException (java.io.IOException): 24
Text (org.apache.hadoop.io.Text): 20
Path (org.apache.hadoop.fs.Path): 16
Configuration (org.apache.hadoop.conf.Configuration): 13
IntWritable (org.apache.hadoop.io.IntWritable): 11
Random (java.util.Random): 10
DataInputStream (java.io.DataInputStream): 9
BufferedInputStream (java.io.BufferedInputStream): 8
HashMap (java.util.HashMap): 8
DataOutputStream (java.io.DataOutputStream): 6
LongWritable (org.apache.hadoop.io.LongWritable): 6
SerializationFactory (org.apache.hadoop.io.serializer.SerializationFactory): 6
IFile (org.apache.tez.runtime.library.common.sort.impl.IFile): 6
BufferedOutputStream (java.io.BufferedOutputStream): 5
BytesWritable (org.apache.hadoop.io.BytesWritable): 5
FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream): 4
Credentials (org.apache.hadoop.security.Credentials): 4