
Example 26 with BytesRefArrayWritable

Use of org.apache.hadoop.hive.serde2.columnar.BytesRefArrayWritable in project hive by apache.

Class PerformTestRCFileAndSeqFile, method writeSeqenceFileTest.

private void writeSeqenceFileTest(FileSystem fs, int rowCount, Path file, int columnNum, CompressionCodec codec) throws IOException {
    byte[][] columnRandom;
    resetRandomGenerators();
    BytesRefArrayWritable bytes = new BytesRefArrayWritable(columnNum);
    columnRandom = new byte[columnNum][];
    for (int i = 0; i < columnNum; i++) {
        BytesRefWritable cu = new BytesRefWritable();
        bytes.set(i, cu);
    }
    // zero length key is not allowed by block compress writer, so we use a byte
    // writable
    ByteWritable key = new ByteWritable();
    SequenceFile.Writer seqWriter = SequenceFile.createWriter(fs, conf, file, ByteWritable.class, BytesRefArrayWritable.class, CompressionType.BLOCK, codec);
    for (int i = 0; i < rowCount; i++) {
        nextRandomRow(columnRandom, bytes);
        seqWriter.append(key, bytes);
    }
    seqWriter.close();
}
Also used: BytesRefArrayWritable (org.apache.hadoop.hive.serde2.columnar.BytesRefArrayWritable), SequenceFile (org.apache.hadoop.io.SequenceFile), ByteWritable (org.apache.hadoop.hive.serde2.io.ByteWritable), BytesRefWritable (org.apache.hadoop.hive.serde2.columnar.BytesRefWritable)
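
For context, a minimal sketch of reading such a sequence file back, assuming the same fs, file, and conf the writer above used; the row-counting loop is only illustrative and is not part of the test.

SequenceFile.Reader reader = new SequenceFile.Reader(fs, file, conf);
// The key/value classes must match what the writer appended above.
ByteWritable readKey = new ByteWritable();
BytesRefArrayWritable readValue = new BytesRefArrayWritable();
int rows = 0;
while (reader.next(readKey, readValue)) {
    // readValue holds columnNum byte ranges, one per column of the row.
    rows++;
}
reader.close();
System.out.println("rows read: " + rows);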

Example 27 with BytesRefArrayWritable

Use of org.apache.hadoop.hive.serde2.columnar.BytesRefArrayWritable in project hive by apache.

Class RCFileMapReduceOutputFormat, method getRecordWriter.

/* (non-Javadoc)
  * @see org.apache.hadoop.mapreduce.lib.output.FileOutputFormat#getRecordWriter(org.apache.hadoop.mapreduce.TaskAttemptContext)
  */
@Override
public org.apache.hadoop.mapreduce.RecordWriter<WritableComparable<?>, BytesRefArrayWritable> getRecordWriter(TaskAttemptContext task) throws IOException, InterruptedException {
    // FileOutputFormat.getWorkOutputPath takes TaskInputOutputContext instead of
    // TaskAttemptContext, so can't use that here
    FileOutputCommitter committer = (FileOutputCommitter) getOutputCommitter(task);
    Path outputPath = committer.getWorkPath();
    FileSystem fs = outputPath.getFileSystem(task.getConfiguration());
    if (!fs.exists(outputPath)) {
        fs.mkdirs(outputPath);
    }
    Path file = getDefaultWorkFile(task, "");
    CompressionCodec codec = null;
    if (getCompressOutput(task)) {
        Class<?> codecClass = getOutputCompressorClass(task, DefaultCodec.class);
        codec = (CompressionCodec) ReflectionUtils.newInstance(codecClass, task.getConfiguration());
    }
    final RCFile.Writer out = new RCFile.Writer(fs, task.getConfiguration(), file, task, codec);
    return new RecordWriter<WritableComparable<?>, BytesRefArrayWritable>() {

        /* (non-Javadoc)
      * @see org.apache.hadoop.mapreduce.RecordWriter#write(java.lang.Object, java.lang.Object)
      */
        @Override
        public void write(WritableComparable<?> key, BytesRefArrayWritable value) throws IOException {
            out.append(value);
        }

        /* (non-Javadoc)
      * @see org.apache.hadoop.mapreduce.RecordWriter#close(org.apache.hadoop.mapreduce.TaskAttemptContext)
      */
        @Override
        public void close(TaskAttemptContext task) throws IOException, InterruptedException {
            out.close();
        }
    };
}
Also used: Path (org.apache.hadoop.fs.Path), RCFile (org.apache.hadoop.hive.ql.io.RCFile), RecordWriter (org.apache.hadoop.mapreduce.RecordWriter), BytesRefArrayWritable (org.apache.hadoop.hive.serde2.columnar.BytesRefArrayWritable), WritableComparable (org.apache.hadoop.io.WritableComparable), FileOutputCommitter (org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter), FileSystem (org.apache.hadoop.fs.FileSystem), TaskAttemptContext (org.apache.hadoop.mapreduce.TaskAttemptContext), CompressionCodec (org.apache.hadoop.io.compress.CompressionCodec)
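
A hedged driver-side sketch of wiring this output format into a MapReduce job. The job name, output path, and eight-column count are assumptions, and setColumnNumber is assumed to be the format's static helper for declaring the column count the underlying RCFile.Writer needs before the first append; the key class is NullWritable because the write() above ignores the key.

Job job = Job.getInstance(conf, "rcfile-write");
job.setOutputFormatClass(RCFileMapReduceOutputFormat.class);
job.setOutputKeyClass(NullWritable.class);
job.setOutputValueClass(BytesRefArrayWritable.class);
// Assumed helper: stores the column count in the job configuration for RCFile.Writer.
RCFileMapReduceOutputFormat.setColumnNumber(job.getConfiguration(), 8);
FileOutputFormat.setOutputPath(job, new Path("/tmp/rcfile-out"));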

Example 28 with BytesRefArrayWritable

Use of org.apache.hadoop.hive.serde2.columnar.BytesRefArrayWritable in project hive by apache.

Class RCFileMapReduceRecordReader, method initialize.

@Override
public void initialize(InputSplit split, TaskAttemptContext context) throws IOException, InterruptedException {
    FileSplit fSplit = (FileSplit) split;
    Path path = fSplit.getPath();
    Configuration conf = context.getConfiguration();
    this.in = new RCFile.Reader(path.getFileSystem(conf), path, conf);
    this.end = fSplit.getStart() + fSplit.getLength();
    if (fSplit.getStart() > in.getPosition()) {
        in.sync(fSplit.getStart());
    }
    this.start = in.getPosition();
    more = start < end;
    key = new LongWritable();
    value = new BytesRefArrayWritable();
}
Also used: Path (org.apache.hadoop.fs.Path), RCFile (org.apache.hadoop.hive.ql.io.RCFile), Configuration (org.apache.hadoop.conf.Configuration), BytesRefArrayWritable (org.apache.hadoop.hive.serde2.columnar.BytesRefArrayWritable), Reader (org.apache.hadoop.hive.ql.io.RCFile.Reader), LongWritable (org.apache.hadoop.io.LongWritable), FileSplit (org.apache.hadoop.mapreduce.lib.input.FileSplit)
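
A hedged sketch of a mapper consuming the (LongWritable, BytesRefArrayWritable) pairs this reader produces; the mapper name, the single decoded column, and the UTF-8 encoding are illustrative assumptions, not part of the Hive source.

public static class FirstColumnMapper
        extends Mapper<LongWritable, BytesRefArrayWritable, Text, LongWritable> {

    @Override
    protected void map(LongWritable key, BytesRefArrayWritable value, Context context)
            throws IOException, InterruptedException {
        // Each entry of the BytesRefArrayWritable is one column of the current row.
        BytesRefWritable firstColumn = value.get(0);
        String decoded = new String(firstColumn.getData(), firstColumn.getStart(),
                firstColumn.getLength(), StandardCharsets.UTF_8);
        context.write(new Text(decoded), new LongWritable(1L));
    }
}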

Example 29 with BytesRefArrayWritable

Use of org.apache.hadoop.hive.serde2.columnar.BytesRefArrayWritable in project hive by apache.

Class RCFileCat, method run.

@Override
public int run(String[] args) throws Exception {
    long start = 0L;
    long length = -1L;
    int recordCount = 0;
    long startT = System.currentTimeMillis();
    boolean verbose = false;
    boolean columnSizes = false;
    boolean pretty = false;
    boolean fileSizes = false;
    // get options from arguments
    if (args.length < 1 || args.length > 3) {
        printUsage(null);
        return -1;
    }
    Path fileName = null;
    for (int i = 0; i < args.length; i++) {
        String arg = args[i];
        if (arg.startsWith("--start=")) {
            start = Long.parseLong(arg.substring("--start=".length()));
        } else if (arg.startsWith("--length=")) {
            length = Long.parseLong(arg.substring("--length=".length()));
        } else if (arg.equals("--verbose")) {
            verbose = true;
        } else if (arg.equals("--column-sizes")) {
            columnSizes = true;
        } else if (arg.equals("--column-sizes-pretty")) {
            columnSizes = true;
            pretty = true;
        } else if (arg.equals("--file-sizes")) {
            fileSizes = true;
        } else if (fileName == null) {
            fileName = new Path(arg);
        } else {
            printUsage(null);
            return -1;
        }
    }
    setupBufferedOutput();
    FileSystem fs = FileSystem.get(fileName.toUri(), conf);
    long fileLen = fs.getFileStatus(fileName).getLen();
    if (start < 0) {
        start = 0;
    }
    if (start > fileLen) {
        return 0;
    }
    if (length < 0 || (start + length) > fileLen) {
        length = fileLen - start;
    }
    // share the code with RecordReader.
    FileSplit split = new FileSplit(fileName, start, length, new JobConf(conf));
    RCFileRecordReader recordReader = new RCFileRecordReader(conf, split);
    if (columnSizes || fileSizes) {
        // Print out the un/compressed sizes of each column
        long[] compressedColumnSizes = null;
        long[] uncompressedColumnSizes = null;
        // un/compressed sizes of file and no. of rows
        long rowNo = 0;
        long uncompressedFileSize = 0;
        long compressedFileSize = 0;
        // Skip from block to block since we only need the header
        while (recordReader.nextBlock()) {
            // Get the sizes from the key buffer and aggregate
            KeyBuffer keyBuffer = recordReader.getKeyBuffer();
            if (uncompressedColumnSizes == null) {
                uncompressedColumnSizes = new long[keyBuffer.getColumnNumber()];
            }
            if (compressedColumnSizes == null) {
                compressedColumnSizes = new long[keyBuffer.getColumnNumber()];
            }
            for (int i = 0; i < keyBuffer.getColumnNumber(); i++) {
                uncompressedColumnSizes[i] += keyBuffer.getEachColumnUncompressedValueLen()[i];
                compressedColumnSizes[i] += keyBuffer.getEachColumnValueLen()[i];
            }
            rowNo += keyBuffer.getNumberRows();
        }
        if (columnSizes && uncompressedColumnSizes != null && compressedColumnSizes != null) {
            // otherwise print it out as if it were a row
            for (int i = 0; i < uncompressedColumnSizes.length; i++) {
                if (pretty) {
                    System.out.println("Column " + i + ": Uncompressed size: " + uncompressedColumnSizes[i] + " Compressed size: " + compressedColumnSizes[i]);
                } else {
                    System.out.print(i + TAB + uncompressedColumnSizes[i] + TAB + compressedColumnSizes[i] + NEWLINE);
                }
            }
        }
        if (fileSizes) {
            if (uncompressedColumnSizes != null && compressedColumnSizes != null) {
                for (int i = 0; i < uncompressedColumnSizes.length; i++) {
                    uncompressedFileSize += uncompressedColumnSizes[i];
                    compressedFileSize += compressedColumnSizes[i];
                }
            }
            System.out.print("File size (uncompressed): " + uncompressedFileSize + ". File size (compressed): " + compressedFileSize + ". Number of rows: " + rowNo + "." + NEWLINE);
        }
        System.out.flush();
        return 0;
    }
    LongWritable key = new LongWritable();
    BytesRefArrayWritable value = new BytesRefArrayWritable();
    // extra capacity in case we overrun, to avoid resizing
    StringBuilder buf = new StringBuilder(STRING_BUFFER_SIZE);
    while (recordReader.next(key, value)) {
        printRecord(value, buf);
        recordCount++;
        if (verbose && (recordCount % RECORD_PRINT_INTERVAL) == 0) {
            long now = System.currentTimeMillis();
            System.err.println("Read " + recordCount / 1024 + "k records");
            System.err.println("Read " + ((recordReader.getPos() / (1024L * 1024L))) + "MB");
            System.err.printf("Input scan rate %.2f MB/s\n", (recordReader.getPos() * 1.0 / (now - startT)) / 1024.0);
        }
        if (buf.length() > STRING_BUFFER_FLUSH_SIZE) {
            System.out.print(buf.toString());
            buf.setLength(0);
        }
    }
    // print out last part of buffer
    System.out.print(buf.toString());
    System.out.flush();
    return 0;
}
Also used: Path (org.apache.hadoop.fs.Path), BytesRefArrayWritable (org.apache.hadoop.hive.serde2.columnar.BytesRefArrayWritable), RCFileRecordReader (org.apache.hadoop.hive.ql.io.RCFileRecordReader), KeyBuffer (org.apache.hadoop.hive.ql.io.RCFile.KeyBuffer), FileSplit (org.apache.hadoop.mapred.FileSplit), FileSystem (org.apache.hadoop.fs.FileSystem), LongWritable (org.apache.hadoop.io.LongWritable), JobConf (org.apache.hadoop.mapred.JobConf)
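
A minimal sketch of driving this tool programmatically through ToolRunner, mirroring the command-line flags parsed above. The file path is an assumption, and it assumes RCFileCat is a Hadoop Tool, which the run(String[]) override suggests.

// Summarize per-column and whole-file sizes for one RCFile; the path is illustrative.
String[] catArgs = new String[] { "--column-sizes-pretty", "--file-sizes",
        "/tmp/warehouse/t1/000000_0" };
int exitCode = ToolRunner.run(new Configuration(), new RCFileCat(), catArgs);
System.exit(exitCode);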

Example 30 with BytesRefArrayWritable

Use of org.apache.hadoop.hive.serde2.columnar.BytesRefArrayWritable in project hive by apache.

Class TestRCFileCat, method write.

private void write(RCFile.Writer writer, byte[][] record) throws IOException {
    BytesRefArrayWritable bytes = new BytesRefArrayWritable(record.length);
    for (int i = 0; i < record.length; i++) {
        BytesRefWritable cu = new BytesRefWritable(record[i], 0, record[i].length);
        bytes.set(i, cu);
    }
    writer.append(bytes);
}
Also used: BytesRefArrayWritable (org.apache.hadoop.hive.serde2.columnar.BytesRefArrayWritable), BytesRefWritable (org.apache.hadoop.hive.serde2.columnar.BytesRefWritable)
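
A hedged sketch of exercising this helper: declare the column count on the Configuration, open an RCFile.Writer, and append one three-column row. The local path and column values are assumptions for illustration.

Configuration conf = new Configuration();
// RCFile.Writer reads the expected column count from the configuration.
conf.setInt(RCFile.COLUMN_NUMBER_CONF_STR, 3);
FileSystem fs = FileSystem.getLocal(conf);
Path file = new Path("/tmp/test-rcfilecat/000000_0");
RCFile.Writer writer = new RCFile.Writer(fs, conf, file);
byte[][] record = { "1".getBytes(StandardCharsets.UTF_8),
        "alice".getBytes(StandardCharsets.UTF_8),
        "3.14".getBytes(StandardCharsets.UTF_8) };
write(writer, record);
writer.close();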

Aggregations

BytesRefArrayWritable (org.apache.hadoop.hive.serde2.columnar.BytesRefArrayWritable): 28
BytesRefWritable (org.apache.hadoop.hive.serde2.columnar.BytesRefWritable): 17
Configuration (org.apache.hadoop.conf.Configuration): 13
LongWritable (org.apache.hadoop.io.LongWritable): 12
Path (org.apache.hadoop.fs.Path): 11
Test (org.junit.Test): 11
StructObjectInspector (org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector): 10
Properties (java.util.Properties): 7
RecordReader (org.apache.hadoop.mapred.RecordReader): 7
DefaultCodec (org.apache.hadoop.io.compress.DefaultCodec): 5
FileSystem (org.apache.hadoop.fs.FileSystem): 4
RCFile (org.apache.hadoop.hive.ql.io.RCFile): 4
ObjectInspector (org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector): 4
JobConf (org.apache.hadoop.mapred.JobConf): 4
IOException (java.io.IOException): 3
ColumnarSerDe (org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe): 3
ByteWritable (org.apache.hadoop.hive.serde2.io.ByteWritable): 3
SimpleMapEqualComparer (org.apache.hadoop.hive.serde2.objectinspector.SimpleMapEqualComparer): 3
CompressionCodec (org.apache.hadoop.io.compress.CompressionCodec): 3
Random (java.util.Random): 2