Use of org.apache.hadoop.hive.ql.io.RCFileRecordReader in project hive by apache.
The class RCFileCat, method run.
@Override
public int run(String[] args) throws Exception {
  long start = 0L;
  long length = -1L;
  int recordCount = 0;
  long startT = System.currentTimeMillis();
  boolean verbose = false;
  boolean columnSizes = false;
  boolean pretty = false;
  boolean fileSizes = false;
  // get options from arguments
  if (args.length < 1 || args.length > 3) {
    printUsage(null);
    return -1;
  }
  Path fileName = null;
  for (int i = 0; i < args.length; i++) {
    String arg = args[i];
    if (arg.startsWith("--start=")) {
      start = Long.parseLong(arg.substring("--start=".length()));
    } else if (arg.startsWith("--length=")) {
      length = Long.parseLong(arg.substring("--length=".length()));
    } else if (arg.equals("--verbose")) {
      verbose = true;
    } else if (arg.equals("--column-sizes")) {
      columnSizes = true;
    } else if (arg.equals("--column-sizes-pretty")) {
      columnSizes = true;
      pretty = true;
    } else if (arg.equals("--file-sizes")) {
      fileSizes = true;
    } else if (fileName == null) {
      fileName = new Path(arg);
    } else {
      printUsage(null);
      return -1;
    }
  }
  setupBufferedOutput();
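  // Resolve the filesystem for the target path and clamp the requested
  // [start, start + length) window to the file's actual extent.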
  FileSystem fs = FileSystem.get(fileName.toUri(), conf);
  long fileLen = fs.getFileStatus(fileName).getLen();
  if (start < 0) {
    start = 0;
  }
  if (start > fileLen) {
    return 0;
  }
  if (length < 0 || (start + length) > fileLen) {
    length = fileLen - start;
  }
  // Build a FileSplit so the RecordReader code path can be reused.
  FileSplit split = new FileSplit(fileName, start, length, new JobConf(conf));
  RCFileRecordReader recordReader = new RCFileRecordReader(conf, split);
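  // Two output modes follow: aggregate size statistics for --column-sizes /
  // --file-sizes, or the default record-cat path further down.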
  if (columnSizes || fileSizes) {
    // Print out the un/compressed sizes of each column
    long[] compressedColumnSizes = null;
    long[] uncompressedColumnSizes = null;
    // un/compressed sizes of file and no. of rows
    long rowNo = 0;
    long uncompressedFileSize = 0;
    long compressedFileSize = 0;
    // Skip from block to block since we only need the header
    while (recordReader.nextBlock()) {
      // Get the sizes from the key buffer and aggregate
      KeyBuffer keyBuffer = recordReader.getKeyBuffer();
      if (uncompressedColumnSizes == null) {
        uncompressedColumnSizes = new long[keyBuffer.getColumnNumber()];
      }
      if (compressedColumnSizes == null) {
        compressedColumnSizes = new long[keyBuffer.getColumnNumber()];
      }
      for (int i = 0; i < keyBuffer.getColumnNumber(); i++) {
        uncompressedColumnSizes[i] += keyBuffer.getEachColumnUncompressedValueLen()[i];
        compressedColumnSizes[i] += keyBuffer.getEachColumnValueLen()[i];
      }
      rowNo += keyBuffer.getNumberRows();
    }
    if (columnSizes && uncompressedColumnSizes != null && compressedColumnSizes != null) {
      // Print each column's sizes, pretty-printed or as a tab-separated row
      for (int i = 0; i < uncompressedColumnSizes.length; i++) {
        if (pretty) {
          System.out.println("Column " + i + ": Uncompressed size: " + uncompressedColumnSizes[i] + " Compressed size: " + compressedColumnSizes[i]);
        } else {
          System.out.print(i + TAB + uncompressedColumnSizes[i] + TAB + compressedColumnSizes[i] + NEWLINE);
        }
      }
    }
    if (fileSizes) {
      if (uncompressedColumnSizes != null && compressedColumnSizes != null) {
        for (int i = 0; i < uncompressedColumnSizes.length; i++) {
          uncompressedFileSize += uncompressedColumnSizes[i];
          compressedFileSize += compressedColumnSizes[i];
        }
      }
      System.out.print("File size (uncompressed): " + uncompressedFileSize + ". File size (compressed): " + compressedFileSize + ". Number of rows: " + rowNo + "." + NEWLINE);
    }
    System.out.flush();
    return 0;
  }
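  // Default mode: cat every record, buffering output and, when --verbose is
  // set, reporting progress and scan rate to stderr.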
  LongWritable key = new LongWritable();
  BytesRefArrayWritable value = new BytesRefArrayWritable();
  // extra capacity in case we overrun, to avoid resizing
  StringBuilder buf = new StringBuilder(STRING_BUFFER_SIZE);
  while (recordReader.next(key, value)) {
    printRecord(value, buf);
    recordCount++;
    if (verbose && (recordCount % RECORD_PRINT_INTERVAL) == 0) {
      long now = System.currentTimeMillis();
      System.err.println("Read " + recordCount / 1024 + "k records");
      System.err.println("Read " + ((recordReader.getPos() / (1024L * 1024L))) + "MB");
      System.err.printf("Input scan rate %.2f MB/s\n", (recordReader.getPos() * 1.0 / (now - startT)) / 1024.0);
    }
    if (buf.length() > STRING_BUFFER_FLUSH_SIZE) {
      System.out.print(buf.toString());
      buf.setLength(0);
    }
  }
  // print out last part of buffer
  System.out.print(buf.toString());
  System.out.flush();
  return 0;
}
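For context: run(String[]) matches Hadoop's Tool contract (the @Override and the inherited conf field suggest RCFileCat extends Configured and implements Tool), so a driver along the following lines could invoke it through org.apache.hadoop.util.ToolRunner. This is a minimal sketch, not code from the Hive project; the flag and file path are placeholders.

// Hypothetical driver, assuming RCFileCat implements org.apache.hadoop.util.Tool;
// the flag and path below are placeholders.
public static void main(String[] args) throws Exception {
  int exitCode = ToolRunner.run(new Configuration(), new RCFileCat(),
      new String[] { "--column-sizes-pretty", "/tmp/data.rcfile" });
  System.exit(exitCode);
}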
Use of org.apache.hadoop.hive.ql.io.RCFileRecordReader in project DataX by alibaba.
The class DFSUtil, method rcFileStartRead.
public void rcFileStartRead(String sourceRcFilePath, Configuration readerSliceConfig, RecordSender recordSender, TaskPluginCollector taskPluginCollector) {
  LOG.info(String.format("Start reading rcfile [%s].", sourceRcFilePath));
  List<ColumnEntry> column = UnstructuredStorageReaderUtil.getListColumnEntry(readerSliceConfig, com.alibaba.datax.plugin.unstructuredstorage.reader.Key.COLUMN);
  // warn: no default value '\N'
  String nullFormat = readerSliceConfig.getString(com.alibaba.datax.plugin.unstructuredstorage.reader.Key.NULL_FORMAT);
  Path rcFilePath = new Path(sourceRcFilePath);
  FileSystem fs = null;
  RCFileRecordReader recordReader = null;
  try {
    fs = FileSystem.get(rcFilePath.toUri(), hadoopConf);
    long fileLen = fs.getFileStatus(rcFilePath).getLen();
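    // A single split spanning the whole file; no preferred hosts are supplied.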
    FileSplit split = new FileSplit(rcFilePath, 0, fileLen, (String[]) null);
    recordReader = new RCFileRecordReader(hadoopConf, split);
    LongWritable key = new LongWritable();
    BytesRefArrayWritable value = new BytesRefArrayWritable();
    Text txt = new Text();
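    // Each row arrives as a BytesRefArrayWritable of column byte ranges; one
    // reusable Text decodes each column's bytes into a String.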
    while (recordReader.next(key, value)) {
      String[] sourceLine = new String[value.size()];
      txt.clear();
      for (int i = 0; i < value.size(); i++) {
        BytesRefWritable v = value.get(i);
        txt.set(v.getData(), v.getStart(), v.getLength());
        sourceLine[i] = txt.toString();
      }
      UnstructuredStorageReaderUtil.transportOneRecord(recordSender, column, sourceLine, nullFormat, taskPluginCollector);
    }
  } catch (IOException e) {
    String message = String.format("Error while reading file [%s]", sourceRcFilePath);
    LOG.error(message);
    throw DataXException.asDataXException(HdfsReaderErrorCode.READ_RCFILE_ERROR, message, e);
  } finally {
    try {
      if (recordReader != null) {
        recordReader.close();
        LOG.info("Finally, closed RCFileRecordReader.");
      }
    } catch (IOException e) {
      LOG.warn(String.format("finally: failed to close RCFileRecordReader, %s", e.getMessage()));
    }
  }
}
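Both snippets read an existing RCFile, so to exercise them end to end you need a file to point them at. The sketch below writes a tiny two-column RCFile with RCFile.Writer from the same org.apache.hadoop.hive.ql.io package. It is illustrative rather than taken from either project: the path and cell values are placeholders, and it relies on RCFileOutputFormat.setColumnNumber publishing the column count into the Configuration before the writer is constructed.

// Illustrative only: write a small two-column RCFile that the readers above
// could then consume. Path and cell values are placeholders.
// Imports (not shown, matching the snippet style above):
// org.apache.hadoop.hive.ql.io.RCFile, org.apache.hadoop.hive.ql.io.RCFileOutputFormat,
// org.apache.hadoop.hive.serde2.columnar.BytesRefArrayWritable,
// org.apache.hadoop.hive.serde2.columnar.BytesRefWritable, java.nio.charset.StandardCharsets.
Configuration conf = new Configuration();
Path path = new Path("/tmp/demo.rcfile");
FileSystem fs = FileSystem.get(path.toUri(), conf);
// The writer picks the column count up from the configuration.
RCFileOutputFormat.setColumnNumber(conf, 2);
RCFile.Writer writer = new RCFile.Writer(fs, conf, path);
for (int row = 0; row < 3; row++) {
  BytesRefArrayWritable cols = new BytesRefArrayWritable(2);
  cols.set(0, new BytesRefWritable(("id-" + row).getBytes(StandardCharsets.UTF_8)));
  cols.set(1, new BytesRefWritable(("val-" + row).getBytes(StandardCharsets.UTF_8)));
  writer.append(cols);
}
writer.close();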