
Example 6 with DataInputBuffer

Use of org.apache.hadoop.io.DataInputBuffer in the Apache Hadoop project.

From class SerializationTestUtil, method testSerialization.

/**
 * A utility that tests serialization/deserialization.
 * @param <K> the class of the item
 * @param conf configuration to use; "io.serializations" is read to
 *             determine the serialization
 * @param before the item to serialize and deserialize
 * @return the deserialized item
 */
public static <K> K testSerialization(Configuration conf, K before) throws Exception {
    SerializationFactory factory = new SerializationFactory(conf);
    Serializer<K> serializer = factory.getSerializer(GenericsUtil.getClass(before));
    Deserializer<K> deserializer = factory.getDeserializer(GenericsUtil.getClass(before));
    DataOutputBuffer out = new DataOutputBuffer();
    serializer.open(out);
    serializer.serialize(before);
    serializer.close();
    DataInputBuffer in = new DataInputBuffer();
    in.reset(out.getData(), out.getLength());
    deserializer.open(in);
    K after = deserializer.deserialize(null);
    deserializer.close();
    return after;
}
Also used: DataInputBuffer (org.apache.hadoop.io.DataInputBuffer), DataOutputBuffer (org.apache.hadoop.io.DataOutputBuffer)
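
A minimal sketch of a call site for this helper, assuming the default io.serializations value (which registers WritableSerialization) and Text as the value type. SerializationTestUtil ships in Hadoop's test sources, so this assumes it is on the classpath; the surrounding class is hypothetical.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.serializer.SerializationTestUtil;

public class TestSerializationExample {
    public static void main(String[] args) throws Exception {
        // The default Configuration already lists WritableSerialization
        // under "io.serializations", so a Writable such as Text round-trips.
        Configuration conf = new Configuration();
        Text before = new Text("round-trip me");
        Text after = SerializationTestUtil.testSerialization(conf, before);
        // 'after' is a distinct object holding the same bytes as 'before'.
        System.out.println(before.equals(after));  // expected: true
    }
}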

Example 7 with DataInputBuffer

Use of org.apache.hadoop.io.DataInputBuffer in the Apache Hadoop project.

From class TestWritableSerialization, method testWritableComparatorJavaSerialization.

@Test
@SuppressWarnings({ "rawtypes", "unchecked" })
public void testWritableComparatorJavaSerialization() throws Exception {
    Serialization ser = new JavaSerialization();
    Serializer<TestWC> serializer = ser.getSerializer(TestWC.class);
    DataOutputBuffer dob = new DataOutputBuffer();
    serializer.open(dob);
    TestWC orig = new TestWC(0);
    serializer.serialize(orig);
    serializer.close();
    Deserializer<TestWC> deserializer = ser.getDeserializer(TestWC.class);
    DataInputBuffer dib = new DataInputBuffer();
    dib.reset(dob.getData(), 0, dob.getLength());
    deserializer.open(dib);
    TestWC deser = deserializer.deserialize(null);
    deserializer.close();
    assertEquals(orig, deser);
}
Also used: DataInputBuffer (org.apache.hadoop.io.DataInputBuffer), DataOutputBuffer (org.apache.hadoop.io.DataOutputBuffer), Test (org.junit.Test)
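
This test constructs JavaSerialization directly rather than going through SerializationFactory, because JavaSerialization is not in the default io.serializations list. A hedged sketch of enabling it through the factory; the property value below is an assumption about what your configuration should contain, not a Hadoop default:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.serializer.SerializationFactory;
import org.apache.hadoop.io.serializer.Serializer;

public class EnableJavaSerialization {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Register JavaSerialization after the Writable serialization;
        // the factory returns the first serialization that accepts the class.
        conf.set("io.serializations",
            "org.apache.hadoop.io.serializer.WritableSerialization,"
            + "org.apache.hadoop.io.serializer.JavaSerialization");
        SerializationFactory factory = new SerializationFactory(conf);
        // String is not a Writable, so the factory falls through to
        // JavaSerialization, which accepts anything Serializable.
        Serializer<String> serializer = factory.getSerializer(String.class);
        System.out.println(serializer != null);  // expected: true
    }
}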

Example 8 with DataInputBuffer

Use of org.apache.hadoop.io.DataInputBuffer in the Apache Hadoop project.

From class TestViewFsFileStatusHdfs, method testFileStatusSerialziation.

@Test
public void testFileStatusSerialziation() throws IOException, URISyntaxException {
    long len = fileSystemTestHelper.createFile(fHdfs, testfilename);
    FileStatus stat = vfs.getFileStatus(new Path(testfilename));
    assertEquals(len, stat.getLen());
    // check serialization/deserialization
    DataOutputBuffer dob = new DataOutputBuffer();
    stat.write(dob);
    DataInputBuffer dib = new DataInputBuffer();
    dib.reset(dob.getData(), 0, dob.getLength());
    FileStatus deSer = new FileStatus();
    deSer.readFields(dib);
    assertEquals(len, deSer.getLen());
}
Also used: Path (org.apache.hadoop.fs.Path), FileStatus (org.apache.hadoop.fs.FileStatus), DataInputBuffer (org.apache.hadoop.io.DataInputBuffer), DataOutputBuffer (org.apache.hadoop.io.DataOutputBuffer), Test (org.junit.Test)
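
The write/readFields sequence in this test is the general round-trip pattern for any Writable. A hypothetical helper distilling it (roundTrip is illustrative, not a Hadoop API):

import java.io.IOException;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.Writable;

public final class WritableRoundTrip {
    private WritableRoundTrip() {}

    // Serializes 'src' into an in-memory buffer, then populates 'dst'
    // from those bytes. Returns 'dst' for convenience.
    public static <W extends Writable> W roundTrip(W src, W dst) throws IOException {
        DataOutputBuffer out = new DataOutputBuffer();
        src.write(out);
        DataInputBuffer in = new DataInputBuffer();
        // reset() wraps the output buffer's backing array without copying.
        in.reset(out.getData(), 0, out.getLength());
        dst.readFields(in);
        return dst;
    }
}

With such a helper, the serialization half of the test above would reduce to FileStatus deSer = WritableRoundTrip.roundTrip(stat, new FileStatus()).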

Example 9 with DataInputBuffer

Use of org.apache.hadoop.io.DataInputBuffer in the Apache Hadoop project.

From class TestLz4CompressorDecompressor, method testCompressorDecopressorLogicWithCompressionStreams.

// Tests the compress/decompress round trip through the CompressionOutputStream/CompressionInputStream API.
@Test
public void testCompressorDecopressorLogicWithCompressionStreams() {
    DataOutputStream deflateOut = null;
    DataInputStream inflateIn = null;
    int BYTE_SIZE = 1024 * 100;
    byte[] bytes = generate(BYTE_SIZE);
    int bufferSize = 262144;
    int compressionOverhead = (bufferSize / 6) + 32;
    try {
        DataOutputBuffer compressedDataBuffer = new DataOutputBuffer();
        CompressionOutputStream deflateFilter = new BlockCompressorStream(compressedDataBuffer, new Lz4Compressor(bufferSize), bufferSize, compressionOverhead);
        deflateOut = new DataOutputStream(new BufferedOutputStream(deflateFilter));
        deflateOut.write(bytes, 0, bytes.length);
        deflateOut.flush();
        deflateFilter.finish();
        DataInputBuffer deCompressedDataBuffer = new DataInputBuffer();
        deCompressedDataBuffer.reset(compressedDataBuffer.getData(), 0, compressedDataBuffer.getLength());
        CompressionInputStream inflateFilter = new BlockDecompressorStream(deCompressedDataBuffer, new Lz4Decompressor(bufferSize), bufferSize);
        inflateIn = new DataInputStream(new BufferedInputStream(inflateFilter));
        byte[] result = new byte[BYTE_SIZE];
        // readFully guarantees the entire decompressed payload is read
        inflateIn.readFully(result);
        assertArrayEquals("original array does not equal the compressed/decompressed array", bytes, result);
    } catch (IOException e) {
        fail("testLz4CompressorDecopressorLogicWithCompressionStreams ex error !!!");
    } finally {
        try {
            if (deflateOut != null)
                deflateOut.close();
            if (inflateIn != null)
                inflateIn.close();
        } catch (Exception e) {
            // ignore failures while closing the streams
        }
    }
}
Also used: CompressionOutputStream (org.apache.hadoop.io.compress.CompressionOutputStream), Lz4Compressor (org.apache.hadoop.io.compress.lz4.Lz4Compressor), CompressionInputStream (org.apache.hadoop.io.compress.CompressionInputStream), DataOutputStream (java.io.DataOutputStream), BlockDecompressorStream (org.apache.hadoop.io.compress.BlockDecompressorStream), IOException (java.io.IOException), DataInputStream (java.io.DataInputStream), DataInputBuffer (org.apache.hadoop.io.DataInputBuffer), Lz4Decompressor (org.apache.hadoop.io.compress.lz4.Lz4Decompressor), BlockCompressorStream (org.apache.hadoop.io.compress.BlockCompressorStream), BufferedInputStream (java.io.BufferedInputStream), DataOutputBuffer (org.apache.hadoop.io.DataOutputBuffer), BufferedOutputStream (java.io.BufferedOutputStream), Test (org.junit.Test)
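
The same LZ4 round trip is usually driven through the codec facade rather than by wiring BlockCompressorStream and BlockDecompressorStream by hand. A sketch using Lz4Codec, under the assumption that your Hadoop build supports LZ4 (older releases require the native hadoop library for this codec):

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.nio.charset.StandardCharsets;
import java.util.Arrays;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.compress.CompressionInputStream;
import org.apache.hadoop.io.compress.CompressionOutputStream;
import org.apache.hadoop.io.compress.Lz4Codec;

public class Lz4CodecRoundTrip {
    public static void main(String[] args) throws Exception {
        Lz4Codec codec = new Lz4Codec();
        codec.setConf(new Configuration());  // buffer sizes come from the conf
        byte[] original = "bytes worth compressing".getBytes(StandardCharsets.UTF_8);

        // Compress into an in-memory buffer.
        ByteArrayOutputStream compressed = new ByteArrayOutputStream();
        CompressionOutputStream out = codec.createOutputStream(compressed);
        out.write(original);
        out.finish();
        out.close();

        // Decompress and collect everything that comes back.
        CompressionInputStream in =
            codec.createInputStream(new ByteArrayInputStream(compressed.toByteArray()));
        ByteArrayOutputStream restored = new ByteArrayOutputStream();
        IOUtils.copyBytes(in, restored, 4096, true);  // also closes both streams

        System.out.println(Arrays.equals(original, restored.toByteArray()));  // true
    }
}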

Example 10 with DataInputBuffer

Use of org.apache.hadoop.io.DataInputBuffer in the Apache Hadoop project.

From class TestDelegationToken, method testDelegationTokenIdentiferSerializationRoundTrip.

private boolean testDelegationTokenIdentiferSerializationRoundTrip(Text owner, Text renewer, Text realUser) throws IOException {
    TestDelegationTokenIdentifier dtid = new TestDelegationTokenIdentifier(owner, renewer, realUser);
    DataOutputBuffer out = new DataOutputBuffer();
    dtid.writeImpl(out);
    DataInputBuffer in = new DataInputBuffer();
    in.reset(out.getData(), out.getLength());
    try {
        TestDelegationTokenIdentifier dtid2 = new TestDelegationTokenIdentifier();
        dtid2.readFields(in);
        assertTrue(dtid.equals(dtid2));
        return true;
    } catch (IOException e) {
        return false;
    }
}
Also used: DataInputBuffer (org.apache.hadoop.io.DataInputBuffer), DataOutputBuffer (org.apache.hadoop.io.DataOutputBuffer), IOException (java.io.IOException)
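
A hypothetical caller from within the same test class (the Text values are illustrative); since the helper returns false only when readFields throws, both variants are expected to return true:

// Hypothetical invocations inside TestDelegationToken.
Text owner = new Text("owner");
Text renewer = new Text("renewer");
// Identifier with all three fields populated.
assertTrue(testDelegationTokenIdentiferSerializationRoundTrip(
    owner, renewer, new Text("realUser")));
// Identifier with an empty real user should round-trip just as well.
assertTrue(testDelegationTokenIdentiferSerializationRoundTrip(
    owner, renewer, new Text()));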

Aggregations

DataInputBuffer (org.apache.hadoop.io.DataInputBuffer): 68 uses
Test (org.junit.Test): 37 uses
DataOutputBuffer (org.apache.hadoop.io.DataOutputBuffer): 36 uses
IOException (java.io.IOException): 16 uses
Text (org.apache.hadoop.io.Text): 10 uses
BufferedInputStream (java.io.BufferedInputStream): 8 uses
DataInputStream (java.io.DataInputStream): 8 uses
Random (java.util.Random): 8 uses
Configuration (org.apache.hadoop.conf.Configuration): 7 uses
Path (org.apache.hadoop.fs.Path): 7 uses
DataOutputStream (java.io.DataOutputStream): 6 uses
BufferedOutputStream (java.io.BufferedOutputStream): 5 uses
ApplicationAttemptId (org.apache.hadoop.yarn.api.records.ApplicationAttemptId): 5 uses
FileSystem (org.apache.hadoop.fs.FileSystem): 4 uses
BytesWritable (org.apache.hadoop.io.BytesWritable): 4 uses
InputStream (java.io.InputStream): 3 uses
HashMap (java.util.HashMap): 3 uses
RandomDatum (org.apache.hadoop.io.RandomDatum): 3 uses
InvalidToken (org.apache.hadoop.security.token.SecretManager.InvalidToken): 3 uses
ContainerId (org.apache.hadoop.yarn.api.records.ContainerId): 3 uses