
Example 56 with DataOutputBuffer

use of org.apache.hadoop.io.DataOutputBuffer in project hadoop by apache.

the class QuorumOutputStream method flushAndSync.

@Override
protected void flushAndSync(boolean durable) throws IOException {
    int numReadyBytes = buf.countReadyBytes();
    if (numReadyBytes > 0) {
        int numReadyTxns = buf.countReadyTxns();
        long firstTxToFlush = buf.getFirstReadyTxId();
        assert numReadyTxns > 0;
        // Copy from our double-buffer into a new byte array. This is for
        // two reasons:
        // 1) The IPC code has no way of specifying to send only a slice of
        //    a larger array.
        // 2) because the calls to the underlying nodes are asynchronous, we
        //    need a defensive copy to avoid accidentally mutating the buffer
        //    before it is sent.
        DataOutputBuffer bufToSend = new DataOutputBuffer(numReadyBytes);
        buf.flushTo(bufToSend);
        assert bufToSend.getLength() == numReadyBytes;
        byte[] data = bufToSend.getData();
        assert data.length == bufToSend.getLength();
        QuorumCall<AsyncLogger, Void> qcall = loggers.sendEdits(segmentTxId, firstTxToFlush, numReadyTxns, data);
        loggers.waitForWriteQuorum(qcall, writeTimeoutMs, "sendEdits");
        // Since we successfully wrote this batch, let the loggers know. Any future
        // RPCs will thus let the loggers know of the most recent transaction, even
        // if a logger has fallen behind.
        loggers.setCommittedTxId(firstTxToFlush + numReadyTxns - 1);
        // Without this follow-up empty send, the committed TxId could be one
        // batch stale on the Journal Nodes
        if (updateCommittedTxId) {
            QuorumCall<AsyncLogger, Void> fakeCall = loggers.sendEdits(segmentTxId, firstTxToFlush, 0, new byte[0]);
            loggers.waitForWriteQuorum(fakeCall, writeTimeoutMs, "sendEdits");
        }
    }
}
Also used : DataOutputBuffer(org.apache.hadoop.io.DataOutputBuffer)
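The defensive-copy idiom used for bufToSend can be reproduced in isolation. The snippet below is a minimal, hypothetical sketch (the class name CopySliceSketch and the sample payload are not from the example above); it relies only on getData()/getLength() plus Arrays.copyOf to produce an exact-length byte[] that later buffer mutations cannot affect.

import java.io.IOException;
import java.util.Arrays;
import org.apache.hadoop.io.DataOutputBuffer;

public class CopySliceSketch {
    public static void main(String[] args) throws IOException {
        DataOutputBuffer buf = new DataOutputBuffer();
        // Pretend these longs are serialized edit-log transactions.
        buf.writeLong(1L);
        buf.writeLong(2L);
        // getData() exposes the backing array, which may be longer than the
        // valid region; copy only getLength() bytes before handing the data
        // to an asynchronous consumer, as flushAndSync does with bufToSend.
        byte[] data = Arrays.copyOf(buf.getData(), buf.getLength());
        System.out.println(data.length); // 16
    }
}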

Example 57 with DataOutputBuffer

use of org.apache.hadoop.io.DataOutputBuffer in project hbase by apache.

the class TestCompressionTest method nativeCodecTest.

/**
   * Verify CompressionTest.testCompression() on a native codec.
   */
private void nativeCodecTest(String codecName, String libName, String codecClassName) {
    if (isCompressionAvailable(codecClassName)) {
        try {
            if (libName != null) {
                System.loadLibrary(libName);
            }
            try {
                Configuration conf = new Configuration();
                CompressionCodec codec = (CompressionCodec) ReflectionUtils.newInstance(conf.getClassByName(codecClassName), conf);
                DataOutputBuffer compressedDataBuffer = new DataOutputBuffer();
                CompressionOutputStream deflateFilter = codec.createOutputStream(compressedDataBuffer);
                byte[] data = new byte[1024];
                DataOutputStream deflateOut = new DataOutputStream(new BufferedOutputStream(deflateFilter));
                deflateOut.write(data, 0, data.length);
                deflateOut.flush();
                deflateFilter.finish();
                // Codec class, codec nativelib and Hadoop nativelib with codec JNIs are present
                assertTrue(CompressionTest.testCompression(codecName));
            } catch (UnsatisfiedLinkError e) {
                // Hadoop nativelib does not have codec JNIs.
                // cannot assert the codec here because the current logic of
                // CompressionTest checks only classloading, not the codec
                // usage.
                LOG.debug("No JNI for codec '" + codecName + "' " + e.getMessage());
            } catch (Exception e) {
                LOG.error(codecName, e);
            }
        } catch (UnsatisfiedLinkError e) {
            // nativelib is not available
            LOG.debug("Native lib not available: " + codecName);
            assertFalse(CompressionTest.testCompression(codecName));
        }
    } else {
        // Compression Codec class is not available
        LOG.debug("Codec class not available: " + codecName);
        assertFalse(CompressionTest.testCompression(codecName));
    }
}
Also used : CompressionOutputStream(org.apache.hadoop.io.compress.CompressionOutputStream) Configuration(org.apache.hadoop.conf.Configuration) DataOutputStream(java.io.DataOutputStream) DataOutputBuffer(org.apache.hadoop.io.DataOutputBuffer) CompressionCodec(org.apache.hadoop.io.compress.CompressionCodec) BufferedOutputStream(java.io.BufferedOutputStream) IOException(java.io.IOException)
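For comparison, a codec round trip that needs no native library can be sketched with DefaultCodec, which falls back to java.util.zip. This is a minimal, hypothetical example (the class name CodecRoundTripSketch and the sample payload are assumptions, not part of the test above); it compresses into a DataOutputBuffer and then decompresses only the valid region of that buffer.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.CompressionOutputStream;
import org.apache.hadoop.io.compress.DefaultCodec;
import org.apache.hadoop.util.ReflectionUtils;

public class CodecRoundTripSketch {
    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        CompressionCodec codec = ReflectionUtils.newInstance(DefaultCodec.class, conf);

        // Compress into an in-memory DataOutputBuffer, as the test does.
        DataOutputBuffer compressed = new DataOutputBuffer();
        CompressionOutputStream out = codec.createOutputStream(compressed);
        byte[] original = "hello hello hello hello".getBytes("UTF-8");
        out.write(original);
        out.finish();
        out.close();

        // Decompress only the valid region of the buffer.
        InputStream in = codec.createInputStream(
                new ByteArrayInputStream(compressed.getData(), 0, compressed.getLength()));
        ByteArrayOutputStream restored = new ByteArrayOutputStream();
        byte[] chunk = new byte[128];
        int n;
        while ((n = in.read(chunk)) != -1) {
            restored.write(chunk, 0, n);
        }
        System.out.println(new String(restored.toByteArray(), "UTF-8"));
    }
}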

Example 58 with DataOutputBuffer

use of org.apache.hadoop.io.DataOutputBuffer in project hadoop by apache.

the class TestBlockToken method testCraftedProtobufBlockTokenIdentifier.

public void testCraftedProtobufBlockTokenIdentifier(BlockTokenIdentifier identifier, boolean expectIOE, boolean expectRTE) throws IOException {
    DataOutputBuffer dob = new DataOutputBuffer(4096);
    DataInputBuffer dib = new DataInputBuffer();
    identifier.writeProtobuf(dob);
    byte[] identBytes = Arrays.copyOf(dob.getData(), dob.getLength());
    BlockTokenIdentifier legacyToken = new BlockTokenIdentifier();
    BlockTokenIdentifier protobufToken = new BlockTokenIdentifier();
    BlockTokenIdentifier readToken = new BlockTokenIdentifier();
    boolean invalidLegacyMessage = false;
    try {
        dib.reset(identBytes, identBytes.length);
        legacyToken.readFieldsLegacy(dib);
    } catch (IOException e) {
        if (!expectIOE) {
            fail("Received IOException but it was not expected.");
        }
        invalidLegacyMessage = true;
    } catch (RuntimeException e) {
        if (!expectRTE) {
            fail("Received RuntimeException but it was not expected.");
        }
        invalidLegacyMessage = true;
    }
    assertTrue(invalidLegacyMessage);
    dib.reset(identBytes, identBytes.length);
    protobufToken.readFieldsProtobuf(dib);
    dib.reset(identBytes, identBytes.length);
    readToken.readFieldsProtobuf(dib);
    assertEquals(protobufToken, readToken);
}
Also used : DataInputBuffer(org.apache.hadoop.io.DataInputBuffer) DataOutputBuffer(org.apache.hadoop.io.DataOutputBuffer) IOException(java.io.IOException)
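The write-then-reset pattern in this test is the standard way to round-trip serialized bytes through memory. A minimal sketch, assuming org.apache.hadoop.io.Text as the payload instead of a BlockTokenIdentifier (the class name RoundTripSketch is hypothetical):

import java.io.IOException;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.Text;

public class RoundTripSketch {
    public static void main(String[] args) throws IOException {
        // Serialize a Writable into the in-memory output buffer.
        DataOutputBuffer out = new DataOutputBuffer();
        new Text("block-token").write(out);

        // Rewind the bytes into a DataInputBuffer; reset() can be called
        // repeatedly to re-read the same bytes, as the test does above.
        DataInputBuffer in = new DataInputBuffer();
        in.reset(out.getData(), out.getLength());

        Text copy = new Text();
        copy.readFields(in);
        System.out.println(copy); // block-token
    }
}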

Example 59 with DataOutputBuffer

use of org.apache.hadoop.io.DataOutputBuffer in project hadoop by apache.

the class QJMTestUtil method createTxnData.

public static byte[] createTxnData(int startTxn, int numTxns) throws Exception {
    DataOutputBuffer buf = new DataOutputBuffer();
    FSEditLogOp.Writer writer = new FSEditLogOp.Writer(buf);
    for (long txid = startTxn; txid < startTxn + numTxns; txid++) {
        FSEditLogOp op = NameNodeAdapter.createMkdirOp("tx " + txid);
        op.setTransactionId(txid);
        writer.writeOp(op);
    }
    return Arrays.copyOf(buf.getData(), buf.getLength());
}
Also used : DataOutputBuffer(org.apache.hadoop.io.DataOutputBuffer) FSEditLogOp(org.apache.hadoop.hdfs.server.namenode.FSEditLogOp)

Example 60 with DataOutputBuffer

use of org.apache.hadoop.io.DataOutputBuffer in project hadoop by apache.

the class QJMTestUtil method createGabageTxns.

/**
   * Generate byte array representing a set of GarbageMkdirOp
   */
public static byte[] createGabageTxns(long startTxId, int numTxns) throws IOException {
    DataOutputBuffer buf = new DataOutputBuffer();
    FSEditLogOp.Writer writer = new FSEditLogOp.Writer(buf);
    for (long txid = startTxId; txid < startTxId + numTxns; txid++) {
        FSEditLogOp op = new TestEditLog.GarbageMkdirOp();
        op.setTransactionId(txid);
        writer.writeOp(op);
    }
    return Arrays.copyOf(buf.getData(), buf.getLength());
}
Also used : DataOutputBuffer(org.apache.hadoop.io.DataOutputBuffer) FSEditLogOp(org.apache.hadoop.hdfs.server.namenode.FSEditLogOp)

Aggregations

DataOutputBuffer (org.apache.hadoop.io.DataOutputBuffer) 132
Test (org.junit.Test) 48
Credentials (org.apache.hadoop.security.Credentials) 37
ByteBuffer (java.nio.ByteBuffer) 36
DataInputBuffer (org.apache.hadoop.io.DataInputBuffer) 36
IOException (java.io.IOException) 34
Configuration (org.apache.hadoop.conf.Configuration) 25
Token (org.apache.hadoop.security.token.Token) 25
Path (org.apache.hadoop.fs.Path) 21
HashMap (java.util.HashMap) 20
ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId) 20
ContainerLaunchContext (org.apache.hadoop.yarn.api.records.ContainerLaunchContext) 18
LocalResource (org.apache.hadoop.yarn.api.records.LocalResource) 16
YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration) 16
Random (java.util.Random) 15
DataInputStream (java.io.DataInputStream) 14
Text (org.apache.hadoop.io.Text) 14
ArrayList (java.util.ArrayList) 13
Map (java.util.Map) 10
FileSystem (org.apache.hadoop.fs.FileSystem) 10