Example 96 with DataInputBuffer

Use of org.apache.hadoop.io.DataInputBuffer in the Apache Tez project, from the class TestIFile, method testWritingEmptyKeyValues.

@Test(timeout = 5000)
// Write empty key value pairs
public void testWritingEmptyKeyValues() throws IOException {
    DataInputBuffer key = new DataInputBuffer();
    DataInputBuffer value = new DataInputBuffer();
    IFile.Writer writer = new IFile.Writer(defaultConf, localFs, outputPath, null, null, null, null, null);
    writer.append(key, value);
    writer.append(key, value);
    writer.append(key, value);
    writer.append(key, value);
    writer.close();
    IFile.Reader reader = new Reader(localFs, outputPath, null, null, null, false, -1, 1024);
    DataInputBuffer keyIn = new DataInputBuffer();
    DataInputBuffer valIn = new DataInputBuffer();
    int records = 0;
    while (reader.nextRawKey(keyIn)) {
        reader.nextRawValue(valIn);
        records++;
        assertEquals(0, keyIn.getLength());
        assertEquals(0, valIn.getLength());
    }
    assertTrue("Number of records read does not match", (records == 4));
    reader.close();
}
Also used : DataInputBuffer(org.apache.hadoop.io.DataInputBuffer) Reader(org.apache.tez.runtime.library.common.sort.impl.IFile.Reader) InMemoryReader(org.apache.tez.runtime.library.common.shuffle.orderedgrouped.InMemoryReader) Writer(org.apache.tez.runtime.library.common.sort.impl.IFile.Writer) InMemoryWriter(org.apache.tez.runtime.library.common.shuffle.orderedgrouped.InMemoryWriter) Test(org.junit.Test)
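The test relies on the fact that a freshly constructed DataInputBuffer spans zero bytes, so appending it as both key and value produces legitimate zero-length records. A minimal, self-contained sketch of that behavior using only the plain Hadoop API (no Tez required; the class and variable names here are illustrative):

import org.apache.hadoop.io.DataInputBuffer;

public class EmptyBufferDemo {
    public static void main(String[] args) throws Exception {
        // A new DataInputBuffer wraps an empty byte range.
        DataInputBuffer empty = new DataInputBuffer();
        System.out.println(empty.getLength()); // prints 0

        // reset(byte[], int) points the buffer at an existing array
        // without copying it.
        DataInputBuffer filled = new DataInputBuffer();
        byte[] data = { 1, 2, 3, 4 };
        filled.reset(data, data.length);
        System.out.println(filled.getLength()); // prints 4
        System.out.println(filled.readByte());  // prints 1
    }
}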

Example 97 with DataInputBuffer

Use of org.apache.hadoop.io.DataInputBuffer in the Apache SystemML project, from the class MatrixBlock, method readDenseBlock.

private void readDenseBlock(DataInput in) throws IOException, DMLRuntimeException {
    // allocate block
    if (!allocateDenseBlock(false))
        denseBlock.reset(rlen, clen);
    DenseBlock a = getDenseBlock();
    long nnz = 0;
    if (in instanceof MatrixBlockDataInput) {
        // fast deserialize
        MatrixBlockDataInput mbin = (MatrixBlockDataInput) in;
        for (int i = 0; i < a.numBlocks(); i++) nnz += mbin.readDoubleArray(a.size(i), a.valuesAt(i));
    } else if (in instanceof DataInputBuffer && MRJobConfiguration.USE_BINARYBLOCK_SERIALIZATION) {
        // workaround because sequencefile.reader.next(key, value) does not yet support serialization framework
        DataInputBuffer din = (DataInputBuffer) in;
        try (FastBufferedDataInputStream mbin = new FastBufferedDataInputStream(din)) {
            for (int i = 0; i < a.numBlocks(); i++) nnz += mbin.readDoubleArray(a.size(i), a.valuesAt(i));
        }
    } else {
        // default deserialize
        for (int i = 0; i < rlen; i++) {
            double[] avals = a.values(i);
            int aix = a.pos(i);
            for (int j = 0; j < clen; j++) nnz += ((avals[aix + j] = in.readDouble()) != 0) ? 1 : 0;
        }
    }
    nonZeros = nnz;
}
Also used : DataInputBuffer(org.apache.hadoop.io.DataInputBuffer) FastBufferedDataInputStream(org.apache.sysml.runtime.util.FastBufferedDataInputStream)
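The final branch is the generic fallback that works against any DataInput, one readDouble() call per cell. A hedged sketch of that loop pulled out as a standalone helper (the readDoubles name and class are hypothetical, not SystemML API):

import java.io.DataInput;
import java.io.IOException;

public final class DenseReadUtil {
    // Fills dest from in, one double per call, and returns the number
    // of non-zero values read, mirroring the fallback branch above.
    static long readDoubles(DataInput in, double[] dest) throws IOException {
        long nnz = 0;
        for (int i = 0; i < dest.length; i++)
            nnz += ((dest[i] = in.readDouble()) != 0) ? 1 : 0;
        return nnz;
    }
}

The DataInputBuffer branch above instead wraps the buffer in FastBufferedDataInputStream, presumably so whole double arrays can be read in batches rather than paying per-call stream overhead.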

Example 98 with DataInputBuffer

Use of org.apache.hadoop.io.DataInputBuffer in the Apache Accumulo project, from the class TabletStateChangeIterator, method parseMigrations.

private Set<KeyExtent> parseMigrations(String migrations) {
    if (migrations == null)
        return Collections.emptySet();
    try {
        Set<KeyExtent> result = new HashSet<>();
        DataInputBuffer buffer = new DataInputBuffer();
        byte[] data = Base64.getDecoder().decode(migrations);
        // point the buffer at the decoded bytes without copying them
        buffer.reset(data, data.length);
        // available() reports the bytes not yet consumed, so this loop
        // deserializes every KeyExtent packed into the blob
        while (buffer.available() > 0) {
            KeyExtent extent = new KeyExtent();
            extent.readFields(buffer);
            result.add(extent);
        }
        return result;
    } catch (Exception ex) {
        throw new RuntimeException(ex);
    }
}
Also used : DataInputBuffer(org.apache.hadoop.io.DataInputBuffer) KeyExtent(org.apache.accumulo.core.data.impl.KeyExtent) IOException(java.io.IOException) BadLocationStateException(org.apache.accumulo.server.master.state.TabletLocationState.BadLocationStateException) HashSet(java.util.HashSet)
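The write side of this pattern is symmetric: serialize each Writable into a DataOutputBuffer, trim the backing array to getLength(), and Base64-encode the result. A minimal sketch under those assumptions (the MigrationCodec class is hypothetical; any Writable, such as the KeyExtent above, fits the wildcard):

import java.util.Arrays;
import java.util.Base64;
import java.util.Collection;

import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.Writable;

public final class MigrationCodec {
    static String encode(Collection<? extends Writable> items) throws Exception {
        DataOutputBuffer out = new DataOutputBuffer();
        for (Writable w : items)
            w.write(out);
        // getData() returns the backing array, which is typically longer
        // than the written content, so trim to getLength() before encoding.
        byte[] data = Arrays.copyOf(out.getData(), out.getLength());
        return Base64.getEncoder().encodeToString(data);
    }
}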

Example 99 with DataInputBuffer

Use of org.apache.hadoop.io.DataInputBuffer in the Apache Accumulo project, from the class MergeInfoTest, method readWrite.

private static MergeInfo readWrite(MergeInfo info) throws Exception {
    DataOutputBuffer buffer = new DataOutputBuffer();
    info.write(buffer);
    DataInputBuffer in = new DataInputBuffer();
    in.reset(buffer.getData(), 0, buffer.getLength());
    MergeInfo info2 = new MergeInfo();
    info2.readFields(in);
    assertEquals(info.getExtent(), info2.getExtent());
    assertEquals(info.getState(), info2.getState());
    assertEquals(info.getOperation(), info2.getOperation());
    return info2;
}
Also used : DataInputBuffer(org.apache.hadoop.io.DataInputBuffer) DataOutputBuffer(org.apache.hadoop.io.DataOutputBuffer)
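This write-then-read round trip is the standard way to unit-test any Writable, and it generalizes directly. A sketch of a reusable helper (hypothetical, not part of Accumulo):

import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.Writable;

public final class WritableTestUtil {
    // Serializes source into a byte buffer, then deserializes those
    // bytes into target and returns it.
    static <T extends Writable> T roundTrip(T source, T target) throws Exception {
        DataOutputBuffer out = new DataOutputBuffer();
        source.write(out);
        DataInputBuffer in = new DataInputBuffer();
        // Point the input buffer at exactly the written span.
        in.reset(out.getData(), 0, out.getLength());
        target.readFields(in);
        return target;
    }
}

With this helper, readWrite above reduces to roundTrip(info, new MergeInfo()) followed by the three assertions.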

Example 100 with DataInputBuffer

Use of org.apache.hadoop.io.DataInputBuffer in the Apache Accumulo project, from the class MergeStats, method main.

public static void main(String[] args) throws Exception {
    ClientOpts opts = new ClientOpts();
    opts.parseArgs(MergeStats.class.getName(), args);
    Connector conn = opts.getConnector();
    Map<String, String> tableIdMap = conn.tableOperations().tableIdMap();
    for (Entry<String, String> entry : tableIdMap.entrySet()) {
        final String table = entry.getKey(), tableId = entry.getValue();
        String path = ZooUtil.getRoot(conn.getInstance().getInstanceID()) + Constants.ZTABLES + "/" + tableId + "/merge";
        MergeInfo info = new MergeInfo();
        if (ZooReaderWriter.getInstance().exists(path)) {
            byte[] data = ZooReaderWriter.getInstance().getData(path, new Stat());
            DataInputBuffer in = new DataInputBuffer();
            in.reset(data, data.length);
            info.readFields(in);
        }
        System.out.println(String.format("%25s  %10s %10s %s", table, info.getState(), info.getOperation(), info.getExtent()));
    }
}
Also used : Connector(org.apache.accumulo.core.client.Connector) MergeInfo(org.apache.accumulo.server.master.state.MergeInfo) Stat(org.apache.zookeeper.data.Stat) DataInputBuffer(org.apache.hadoop.io.DataInputBuffer) ClientOpts(org.apache.accumulo.server.cli.ClientOpts)
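Note that this example calls the two-argument in.reset(data, data.length), while Example 99 uses the three-argument in.reset(data, 0, length). The two-argument overload is shorthand for a zero start offset, so both point the buffer at the same byte span.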

Aggregations

Classes most often used together with DataInputBuffer across all examples, with usage counts:

DataInputBuffer (org.apache.hadoop.io.DataInputBuffer): 112
Test (org.junit.Test): 49
DataOutputBuffer (org.apache.hadoop.io.DataOutputBuffer): 45
IOException (java.io.IOException): 24
Text (org.apache.hadoop.io.Text): 20
Path (org.apache.hadoop.fs.Path): 16
Configuration (org.apache.hadoop.conf.Configuration): 13
IntWritable (org.apache.hadoop.io.IntWritable): 11
Random (java.util.Random): 10
DataInputStream (java.io.DataInputStream): 9
BufferedInputStream (java.io.BufferedInputStream): 8
HashMap (java.util.HashMap): 8
DataOutputStream (java.io.DataOutputStream): 6
LongWritable (org.apache.hadoop.io.LongWritable): 6
SerializationFactory (org.apache.hadoop.io.serializer.SerializationFactory): 6
IFile (org.apache.tez.runtime.library.common.sort.impl.IFile): 6
BufferedOutputStream (java.io.BufferedOutputStream): 5
BytesWritable (org.apache.hadoop.io.BytesWritable): 5
FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream): 4
Credentials (org.apache.hadoop.security.Credentials): 4