
Example 11 with DataOutputStream

Use of java.io.DataOutputStream in project kafka by apache: class SimpleRecordTest, method testCompressedIterationWithNullValue.

@Test(expected = InvalidRecordException.class)
public void testCompressedIterationWithNullValue() throws Exception {
    ByteBuffer buffer = ByteBuffer.allocate(128);
    DataOutputStream out = new DataOutputStream(new ByteBufferOutputStream(buffer));
    // Write a compressed (GZIP) entry whose key and value are both null.
    LogEntry.writeHeader(out, 0L, Record.RECORD_OVERHEAD_V1);
    Record.write(out, Record.CURRENT_MAGIC_VALUE, 1L, null, null, CompressionType.GZIP, TimestampType.CREATE_TIME);
    buffer.flip();
    MemoryRecords records = MemoryRecords.readableRecords(buffer);
    // The wrapper of a compressed entry must carry a value, so iterating should
    // raise the expected InvalidRecordException before fail() is ever reached.
    for (Record record : records.records()) {
        fail("Iteration should have caused invalid record error");
    }
}
Also used: ByteBufferOutputStream (org.apache.kafka.common.utils.ByteBufferOutputStream), DataOutputStream (java.io.DataOutputStream), ByteBuffer (java.nio.ByteBuffer), Test (org.junit.Test)
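
The test drives a DataOutputStream over Kafka's ByteBufferOutputStream and then flips the buffer to read the bytes back. The same write-then-flip pattern can be reproduced with nothing but the JDK; in the sketch below, BufferBackedOutputStream is a hypothetical stand-in for Kafka's adapter class, not its actual implementation:

import java.io.DataOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.nio.ByteBuffer;

public class BufferBackedOutputStream extends OutputStream {
    private final ByteBuffer buffer;

    public BufferBackedOutputStream(ByteBuffer buffer) {
        this.buffer = buffer;
    }

    @Override
    public void write(int b) throws IOException {
        buffer.put((byte) b); // throws BufferOverflowException if the buffer is full
    }

    public static void main(String[] args) throws IOException {
        ByteBuffer buffer = ByteBuffer.allocate(128);
        DataOutputStream out = new DataOutputStream(new BufferBackedOutputStream(buffer));
        out.writeLong(0L);   // e.g. an offset field
        out.writeInt(42);    // e.g. a size field
        out.flush();
        buffer.flip();       // switch the buffer from writing to reading
        System.out.println(buffer.getLong()); // 0
        System.out.println(buffer.getInt());  // 42
    }
}

Both DataOutputStream and ByteBuffer are big-endian by default, which is why the values written through the stream read back correctly through the buffer.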

Example 12 with DataOutputStream

Use of java.io.DataOutputStream in project hbase by apache: class TestChecksum, method testAllChecksumTypes.

/**
   * Test all checksum types by writing and reading back blocks.
   */
@Test
public void testAllChecksumTypes() throws IOException {
    List<ChecksumType> cktypes = new ArrayList<>(Arrays.asList(ChecksumType.values()));
    for (ChecksumType cktype : cktypes) {
        Path path = new Path(TEST_UTIL.getDataTestDir(), "checksum" + cktype.getName());
        FSDataOutputStream os = fs.create(path);
        HFileContext meta = new HFileContextBuilder().withChecksumType(cktype).build();
        HFileBlock.Writer hbw = new HFileBlock.Writer(null, meta);
        DataOutputStream dos = hbw.startWriting(BlockType.DATA);
        for (int i = 0; i < 1000; ++i) {
            dos.writeInt(i);
        }
        hbw.writeHeaderAndData(os);
        int totalSize = hbw.getOnDiskSizeWithHeader();
        os.close();
        // Use hbase checksums.
        assertTrue(hfs.useHBaseChecksum());
        FSDataInputStreamWrapper is = new FSDataInputStreamWrapper(fs, path);
        meta = new HFileContextBuilder().withHBaseCheckSum(true).build();
        HFileBlock.FSReader hbr = new HFileBlock.FSReaderImpl(is, totalSize, (HFileSystem) fs, path, meta);
        HFileBlock b = hbr.readBlockData(0, -1, false);
        ByteBuff data = b.getBufferWithoutHeader();
        for (int i = 0; i < 1000; i++) {
            assertEquals(i, data.getInt());
        }
        // The buffer should now be exhausted, so one more read must underflow.
        boolean exceptionThrown = false;
        try {
            data.getInt();
        } catch (BufferUnderflowException e) {
            exceptionThrown = true;
        }
        assertTrue(exceptionThrown);
        assertEquals(0, HFile.getChecksumFailuresCount());
    }
}
Also used: Path (org.apache.hadoop.fs.Path), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream), DataOutputStream (java.io.DataOutputStream), ArrayList (java.util.ArrayList), ChecksumType (org.apache.hadoop.hbase.util.ChecksumType), FSDataInputStreamWrapper (org.apache.hadoop.hbase.io.FSDataInputStreamWrapper), ByteBuff (org.apache.hadoop.hbase.nio.ByteBuff), BufferUnderflowException (java.nio.BufferUnderflowException), Test (org.junit.Test)
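
The HBase test exercises checksum verification end to end through HFileBlock. The underlying idea, compute a checksum over the bytes the DataOutputStream produced and verify it again on read, can be shown with the JDK alone; this sketch uses java.util.zip.CRC32 (one of the ChecksumType options) and illustrative names throughout:

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.zip.CRC32;

public class ChecksumSketch {
    public static void main(String[] args) throws IOException {
        // Write the "block" payload, as the HFileBlock writer would.
        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        DataOutputStream dos = new DataOutputStream(baos);
        for (int i = 0; i < 1000; i++) {
            dos.writeInt(i);
        }
        byte[] block = baos.toByteArray();

        // The writer computes a CRC32 over the payload and stores it alongside the block.
        CRC32 writerCrc = new CRC32();
        writerCrc.update(block, 0, block.length);
        long stored = writerCrc.getValue();

        // A reader recomputes the checksum and compares it against the stored value.
        CRC32 readerCrc = new CRC32();
        readerCrc.update(block, 0, block.length);
        if (readerCrc.getValue() != stored) {
            throw new IOException("checksum failure");
        }
        System.out.println("checksum ok: " + stored);
    }
}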

Example 13 with DataOutputStream

Use of java.io.DataOutputStream in project hbase by apache: class TestPrefixTreeEncoding, method testSeekBeforeWithFixedData.

@Test
public void testSeekBeforeWithFixedData() throws Exception {
    formatRowNum = true;
    PrefixTreeCodec encoder = new PrefixTreeCodec();
    int batchId = numBatchesWritten++;
    HFileContext meta = new HFileContextBuilder().withHBaseCheckSum(false).withIncludesMvcc(false).withIncludesTags(includesTag).withCompression(Algorithm.NONE).build();
    HFileBlockEncodingContext blkEncodingCtx = new HFileBlockDefaultEncodingContext(DataBlockEncoding.PREFIX_TREE, new byte[0], meta);
    ByteArrayOutputStream baosInMemory = new ByteArrayOutputStream();
    DataOutputStream userDataStream = new DataOutputStream(baosInMemory);
    generateFixedTestData(kvset, batchId, false, includesTag, encoder, blkEncodingCtx, userDataStream);
    EncodedSeeker seeker = encoder.createSeeker(CellComparator.COMPARATOR, encoder.newDataBlockDecodingContext(meta));
    byte[] onDiskBytes = baosInMemory.toByteArray();
    ByteBuffer readBuffer = ByteBuffer.wrap(onDiskBytes, DataBlockEncoding.ID_SIZE, onDiskBytes.length - DataBlockEncoding.ID_SIZE);
    seeker.setCurrentBuffer(new SingleByteBuff(readBuffer));
    // Seek before the first keyvalue.
    Cell seekKey = CellUtil.createFirstDeleteFamilyCellOnRow(getRowKey(batchId, 0), CF_BYTES);
    seeker.seekToKeyInBlock(seekKey, true);
    assertNull(seeker.getCell());
    // Seek before the middle keyvalue.
    seekKey = CellUtil.createFirstDeleteFamilyCellOnRow(getRowKey(batchId, NUM_ROWS_PER_BATCH / 3), CF_BYTES);
    seeker.seekToKeyInBlock(seekKey, true);
    assertNotNull(seeker.getCell());
    assertArrayEquals(getRowKey(batchId, NUM_ROWS_PER_BATCH / 3 - 1), CellUtil.cloneRow(seeker.getCell()));
    // Seek before the last keyvalue.
    seekKey = CellUtil.createFirstDeleteFamilyCellOnRow(Bytes.toBytes("zzzz"), CF_BYTES);
    seeker.seekToKeyInBlock(seekKey, true);
    assertNotNull(seeker.getCell());
    assertArrayEquals(getRowKey(batchId, NUM_ROWS_PER_BATCH - 1), CellUtil.cloneRow(seeker.getCell()));
}
Also used: EncodedSeeker (org.apache.hadoop.hbase.io.encoding.DataBlockEncoder.EncodedSeeker), DataOutputStream (java.io.DataOutputStream), HFileContextBuilder (org.apache.hadoop.hbase.io.hfile.HFileContextBuilder), ByteArrayOutputStream (java.io.ByteArrayOutputStream), ByteBuffer (java.nio.ByteBuffer), HFileContext (org.apache.hadoop.hbase.io.hfile.HFileContext), PrefixTreeCodec (org.apache.hadoop.hbase.codec.prefixtree.PrefixTreeCodec), SingleByteBuff (org.apache.hadoop.hbase.nio.SingleByteBuff), Cell (org.apache.hadoop.hbase.Cell), Test (org.junit.Test)
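
Note how the test wraps onDiskBytes with an offset so the seeker never sees the short encoding ID that prefixes the encoded block. Wrapping a sub-range of an array this way is a plain java.nio idiom; a minimal sketch, where the local ID_SIZE constant merely mirrors DataBlockEncoding.ID_SIZE and the values written are arbitrary:

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.nio.ByteBuffer;

public class SkipIdPrefix {
    static final int ID_SIZE = 2; // mirrors DataBlockEncoding.ID_SIZE

    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        DataOutputStream dos = new DataOutputStream(baos);
        dos.writeShort(7);      // 2-byte encoding ID prefix
        dos.writeInt(123456);   // encoded payload starts here
        byte[] onDiskBytes = baos.toByteArray();

        // Expose only the payload: position past the ID, length minus the ID.
        ByteBuffer readBuffer = ByteBuffer.wrap(onDiskBytes, ID_SIZE, onDiskBytes.length - ID_SIZE);
        System.out.println(readBuffer.getInt()); // 123456
    }
}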

Example 14 with DataOutputStream

Use of java.io.DataOutputStream in project hbase by apache: class TestHFileDataBlockEncoder, method writeBlock.

private void writeBlock(List<Cell> kvs, HFileContext fileContext, boolean useTags) throws IOException {
    HFileBlockEncodingContext context = new HFileBlockDefaultEncodingContext(blockEncoder.getDataBlockEncoding(), HConstants.HFILEBLOCK_DUMMY_HEADER, fileContext);
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    // Reserve space for the block header before streaming the encoded cells.
    baos.write(HConstants.HFILEBLOCK_DUMMY_HEADER);
    DataOutputStream dos = new DataOutputStream(baos);
    blockEncoder.startBlockEncoding(context, dos);
    for (Cell kv : kvs) {
        blockEncoder.encode(kv, context, dos);
    }
}
Also used: DataOutputStream (java.io.DataOutputStream), HFileBlockDefaultEncodingContext (org.apache.hadoop.hbase.io.encoding.HFileBlockDefaultEncodingContext), HFileBlockEncodingContext (org.apache.hadoop.hbase.io.encoding.HFileBlockEncodingContext), ByteArrayOutputStream (org.apache.hadoop.hbase.io.ByteArrayOutputStream), Cell (org.apache.hadoop.hbase.Cell)
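
writeBlock emits HConstants.HFILEBLOCK_DUMMY_HEADER before any encoded cells, reserving space that the block writer later fills with real header fields. A JDK-only sketch of that reserve-then-patch pattern, with a made-up 4-byte header standing in for the real one:

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.nio.ByteBuffer;

public class DummyHeaderSketch {
    static final byte[] DUMMY_HEADER = new byte[4]; // placeholder for the real header

    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        baos.write(DUMMY_HEADER); // reserve header space up front
        DataOutputStream dos = new DataOutputStream(baos);
        for (int i = 0; i < 10; i++) {
            dos.writeInt(i); // stand-in for the encoded cells
        }
        byte[] block = baos.toByteArray();

        // Patch the reserved header with the payload size, now that it is known.
        int payloadSize = block.length - DUMMY_HEADER.length;
        ByteBuffer.wrap(block, 0, DUMMY_HEADER.length).putInt(payloadSize);
        System.out.println("payload bytes: " + payloadSize);
    }
}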

Example 15 with DataOutputStream

Use of java.io.DataOutputStream in project hbase by apache: class TestHFileDataBlockEncoder, method createBlockOnDisk.

private HFileBlock createBlockOnDisk(List<KeyValue> kvs, HFileBlock block, boolean useTags) throws IOException {
    HFileBlockEncodingContext context = new HFileBlockDefaultEncodingContext(blockEncoder.getDataBlockEncoding(), HConstants.HFILEBLOCK_DUMMY_HEADER, block.getHFileContext());
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    baos.write(block.getDummyHeaderForVersion());
    DataOutputStream dos = new DataOutputStream(baos);
    blockEncoder.startBlockEncoding(context, dos);
    for (KeyValue kv : kvs) {
        blockEncoder.encode(kv, context, dos);
    }
    blockEncoder.endBlockEncoding(context, dos, baos.getBuffer(), BlockType.DATA);
    byte[] encodedBytes = baos.toByteArray();
    // The block size excludes the dummy header that was reserved up front.
    int size = encodedBytes.length - block.getDummyHeaderForVersion().length;
    return new HFileBlock(context.getBlockType(), size, size, -1, ByteBuffer.wrap(encodedBytes), HFileBlock.FILL_HEADER, 0, block.getOnDiskDataSizeWithHeader(), -1, block.getHFileContext());
}
Also used: KeyValue (org.apache.hadoop.hbase.KeyValue), DataOutputStream (java.io.DataOutputStream), HFileBlockDefaultEncodingContext (org.apache.hadoop.hbase.io.encoding.HFileBlockDefaultEncodingContext), HFileBlockEncodingContext (org.apache.hadoop.hbase.io.encoding.HFileBlockEncodingContext), ByteArrayOutputStream (org.apache.hadoop.hbase.io.ByteArrayOutputStream)

Aggregations

DataOutputStream (java.io.DataOutputStream): 2957
ByteArrayOutputStream (java.io.ByteArrayOutputStream): 1309
IOException (java.io.IOException): 1019
Test (org.junit.Test): 633
DataInputStream (java.io.DataInputStream): 611
FileOutputStream (java.io.FileOutputStream): 426
ByteArrayInputStream (java.io.ByteArrayInputStream): 409
File (java.io.File): 279
BufferedOutputStream (java.io.BufferedOutputStream): 227
UnitTest (org.apache.geode.test.junit.categories.UnitTest): 172
URL (java.net.URL): 149
InputStreamReader (java.io.InputStreamReader): 144
BufferedReader (java.io.BufferedReader): 140
Path (org.apache.hadoop.fs.Path): 137
DataInput (java.io.DataInput): 124
ArrayList (java.util.ArrayList): 122
HttpURLConnection (java.net.HttpURLConnection): 121
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 117
FileInputStream (java.io.FileInputStream): 107
InputStream (java.io.InputStream): 107
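
The aggregation counts reflect how these classes travel together: DataInputStream reads back, field for field and in the same order, whatever a DataOutputStream wrote. A minimal round trip for reference:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class RoundTrip {
    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        try (DataOutputStream out = new DataOutputStream(baos)) {
            out.writeInt(42);
            out.writeLong(7L);
            out.writeUTF("hello");
        }
        // Read the fields back in exactly the order they were written.
        try (DataInputStream in = new DataInputStream(new ByteArrayInputStream(baos.toByteArray()))) {
            System.out.println(in.readInt());  // 42
            System.out.println(in.readLong()); // 7
            System.out.println(in.readUTF());  // hello
        }
    }
}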