Use of org.apache.parquet.hadoop.metadata.CompressionCodecName in project parquet-mr by apache.
The class ParquetMetadataCommand, method printColumnChunk:
private void printColumnChunk(Logger console, int width, ColumnChunkMetaData column, MessageType schema) {
  String[] path = column.getPath().toArray();
  PrimitiveType type = primitive(schema, path);
  Preconditions.checkNotNull(type);
  ColumnDescriptor desc = schema.getColumnDescription(path);
  long size = column.getTotalSize();
  long count = column.getValueCount();
  float perValue = ((float) size) / count;
  CompressionCodecName codec = column.getCodec();
  Set<Encoding> encodings = column.getEncodings();
  EncodingStats encodingStats = column.getEncodingStats();
  String encodingSummary = encodingStats == null
      ? encodingsAsString(encodings, desc)
      : encodingStatsAsString(encodingStats);
  Statistics stats = column.getStatistics();
  String name = column.getPath().toDotString();
  PrimitiveType.PrimitiveTypeName typeName = type.getPrimitiveTypeName();
  if (typeName == PrimitiveType.PrimitiveTypeName.FIXED_LEN_BYTE_ARRAY) {
    console.info(String.format("%-" + width + "s FIXED[%d] %s %-7s %-9d %-8s %-7s %s",
        name, type.getTypeLength(), shortCodec(codec), encodingSummary, count, humanReadable(perValue),
        stats == null || !stats.isNumNullsSet() ? "" : String.valueOf(stats.getNumNulls()),
        minMaxAsString(stats, type.getOriginalType())));
  } else {
    console.info(String.format("%-" + width + "s %-9s %s %-7s %-9d %-10s %-7s %s",
        name, typeName, shortCodec(codec), encodingSummary, count, humanReadable(perValue),
        stats == null || !stats.isNumNullsSet() ? "" : String.valueOf(stats.getNumNulls()),
        minMaxAsString(stats, type.getOriginalType())));
  }
}
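Each ColumnChunkMetaData entry above comes straight from the file footer. A minimal sketch of reading the same per-chunk codec information out of a footer, assuming a hypothetical local file named data.parquet and a default Hadoop Configuration:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.parquet.hadoop.ParquetFileReader;
import org.apache.parquet.hadoop.metadata.BlockMetaData;
import org.apache.parquet.hadoop.metadata.ColumnChunkMetaData;
import org.apache.parquet.hadoop.util.HadoopInputFile;

public class CodecInspector {
  public static void main(String[] args) throws Exception {
    Path file = new Path("data.parquet"); // hypothetical input file
    try (ParquetFileReader reader =
        ParquetFileReader.open(HadoopInputFile.fromPath(file, new Configuration()))) {
      // Walk every row group and report each column chunk's codec and size,
      // the same metadata the printer above formats for the console.
      for (BlockMetaData block : reader.getFooter().getBlocks()) {
        for (ColumnChunkMetaData column : block.getColumns()) {
          System.out.printf("%s: codec=%s, values=%d, bytes=%d%n",
              column.getPath().toDotString(), column.getCodec(),
              column.getValueCount(), column.getTotalSize());
        }
      }
    }
  }
}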
Use of org.apache.parquet.hadoop.metadata.CompressionCodecName in project parquet-mr by apache.
The class TestDirectCodecFactory, method compressionCodecs:
@Test
public void compressionCodecs() throws Exception {
  final int[] sizes = { 4 * 1024, 1 * 1024 * 1024 };
  final boolean[] comp = { true, false };
  Set<CompressionCodecName> codecsToSkip = new HashSet<>();
  // not distributed because it is GPL
  codecsToSkip.add(LZO);
  // not distributed in the default version of Hadoop
  codecsToSkip.add(LZ4);
  // not distributed in the default version of Hadoop
  codecsToSkip.add(ZSTD);
  for (final int size : sizes) {
    for (final boolean useOnHeapComp : comp) {
      for (final Decompression decomp : Decompression.values()) {
        for (final CompressionCodecName codec : CompressionCodecName.values()) {
          if (codecsToSkip.contains(codec)) {
            continue;
          }
          test(size, codec, useOnHeapComp, decomp);
        }
      }
    }
  }
}
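The test iterates CompressionCodecName.values(); the enum itself also carries the file extension and backing Hadoop codec class for each entry. A small sketch listing them:

import org.apache.parquet.hadoop.metadata.CompressionCodecName;

public class ListCodecs {
  public static void main(String[] args) {
    for (CompressionCodecName codec : CompressionCodecName.values()) {
      // UNCOMPRESSED has an empty extension and no backing Hadoop codec class
      System.out.printf("%-12s ext=%-8s hadoopClass=%s%n",
          codec, codec.getExtension(), codec.getHadoopCompressionCodecClassName());
    }
  }
}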
Use of org.apache.parquet.hadoop.metadata.CompressionCodecName in project parquet-mr by apache.
The class ParquetOutputFormat, method getRecordWriter:
/**
 * {@inheritDoc}
 */
@Override
public RecordWriter<Void, T> getRecordWriter(TaskAttemptContext taskAttemptContext)
    throws IOException, InterruptedException {
  final Configuration conf = getConfiguration(taskAttemptContext);
  CompressionCodecName codec = getCodec(taskAttemptContext);
  String extension = codec.getExtension() + ".parquet";
  Path file = getDefaultWorkFile(taskAttemptContext, extension);
  return getRecordWriter(conf, file, codec);
}
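The codec resolved by getCodec(...) is normally set on the job before submission. A hedged sketch of that setup, with JobSetup a hypothetical helper class:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Job;
import org.apache.parquet.hadoop.ParquetOutputFormat;
import org.apache.parquet.hadoop.metadata.CompressionCodecName;

public class JobSetup {
  public static Job newParquetJob() throws Exception {
    Job job = Job.getInstance(new Configuration());
    job.setOutputFormatClass(ParquetOutputFormat.class);
    // Select the Parquet-level codec; getRecordWriter(...) above then names
    // output files with the matching extension, e.g. ".snappy.parquet".
    ParquetOutputFormat.setCompression(job, CompressionCodecName.SNAPPY);
    return job;
  }
}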
Use of org.apache.parquet.hadoop.metadata.CompressionCodecName in project parquet-mr by apache.
The class CodecConfig, method getHadoopCompressionCodec:
private CompressionCodecName getHadoopCompressionCodec() {
  CompressionCodecName codec;
  try {
    // find the right codec
    Class<?> codecClass = getHadoopOutputCompressorClass(
        CompressionCodecName.UNCOMPRESSED.getHadoopCompressionCodecClass());
    LOG.info("Compression set through hadoop codec: {}", codecClass.getName());
    codec = CompressionCodecName.fromCompressionCodec(codecClass);
  } catch (CompressionCodecNotSupportedException e) {
    LOG.warn("codec defined in hadoop config is not supported by parquet [{}] and will use UNCOMPRESSED",
        e.getCodecClass().getName(), e);
    codec = CompressionCodecName.UNCOMPRESSED;
  } catch (IllegalArgumentException e) {
    LOG.warn("codec class not found: {}", e.getMessage(), e);
    codec = CompressionCodecName.UNCOMPRESSED;
  }
  return codec;
}
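This fallback maps a Hadoop output-compression class onto Parquet's enum. A minimal sketch of that mapping, assuming the standard Hadoop GzipCodec is on the classpath:

import org.apache.hadoop.io.compress.GzipCodec;
import org.apache.parquet.hadoop.metadata.CompressionCodecName;

public class HadoopCodecMapping {
  public static void main(String[] args) {
    // Resolves a Hadoop codec class to Parquet's enum; throws
    // CompressionCodecNotSupportedException for classes Parquet cannot handle,
    // which is exactly the case getHadoopCompressionCodec() catches above.
    CompressionCodecName codec = CompressionCodecName.fromCompressionCodec(GzipCodec.class);
    System.out.println(codec); // GZIP
  }
}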
Use of org.apache.parquet.hadoop.metadata.CompressionCodecName in project drill by axbaretto.
The class TestFileGenerator, method generateParquetFile:
public static void generateParquetFile(String filename, ParquetTestProperties props) throws Exception {
  int currentBooleanByte = 0;
  WrapAroundCounter booleanBitCounter = new WrapAroundCounter(7);
  Configuration configuration = new Configuration();
  configuration.set(FileSystem.FS_DEFAULT_NAME_KEY, "file:///");
  // "message m { required int32 integer; required int64 integer64; required boolean b; required float f; required double d;}"
  FileSystem fs = FileSystem.get(configuration);
  Path path = new Path(filename);
  if (fs.exists(path)) {
    fs.delete(path, false);
  }
  String messageSchema = "message m {";
  for (FieldInfo fieldInfo : props.fields.values()) {
    messageSchema += " required " + fieldInfo.parquetType + " " + fieldInfo.name + ";";
  }
  // remove the last semicolon, java really needs a join method for strings...
  // TODO - nvm apparently it requires a semicolon after every field decl, might want to file a bug
  // messageSchema = messageSchema.substring(schemaType, messageSchema.length() - 1);
  messageSchema += "}";
  MessageType schema = MessageTypeParser.parseMessageType(messageSchema);
  CompressionCodecName codec = CompressionCodecName.UNCOMPRESSED;
  ParquetFileWriter w = new ParquetFileWriter(configuration, schema, path);
  w.start();
  HashMap<String, Integer> columnValuesWritten = new HashMap<>();
  int valsWritten;
  for (int k = 0; k < props.numberRowGroups; k++) {
    w.startBlock(props.recordsPerRowGroup);
    currentBooleanByte = 0;
    booleanBitCounter.reset();
    for (FieldInfo fieldInfo : props.fields.values()) {
      if (!columnValuesWritten.containsKey(fieldInfo.name)) {
        columnValuesWritten.put(fieldInfo.name, 0);
        valsWritten = 0;
      } else {
        valsWritten = columnValuesWritten.get(fieldInfo.name);
      }
      String[] path1 = { fieldInfo.name };
      ColumnDescriptor c1 = schema.getColumnDescription(path1);
      w.startColumn(c1, props.recordsPerRowGroup, codec);
      final int valsPerPage = (int) Math.ceil(props.recordsPerRowGroup / (float) fieldInfo.numberOfPages);
      // 1 MB
      final int PAGE_SIZE = 1024 * 1024;
      byte[] bytes;
      RunLengthBitPackingHybridValuesWriter defLevels = new RunLengthBitPackingHybridValuesWriter(
          MAX_EXPECTED_BIT_WIDTH_FOR_DEFINITION_LEVELS, valsPerPage, PAGE_SIZE, new DirectByteBufferAllocator());
      RunLengthBitPackingHybridValuesWriter repLevels = new RunLengthBitPackingHybridValuesWriter(
          MAX_EXPECTED_BIT_WIDTH_FOR_DEFINITION_LEVELS, valsPerPage, PAGE_SIZE, new DirectByteBufferAllocator());
      // for variable length binary fields
      int bytesNeededToEncodeLength = 4;
      if (fieldInfo.bitLength > 0) {
        bytes = new byte[(int) Math.ceil(valsPerPage * fieldInfo.bitLength / 8.0)];
      } else {
        // the 3 * 4 bytes at the end account for storing a 4-byte length with each value
        int totalValLength = ((byte[]) fieldInfo.values[0]).length + ((byte[]) fieldInfo.values[1]).length
            + ((byte[]) fieldInfo.values[2]).length + 3 * bytesNeededToEncodeLength;
        // used when the number of values in this row group is not divisible by 3
        int leftOverBytes = 0;
        if (valsPerPage % 3 > 0) {
          leftOverBytes += ((byte[]) fieldInfo.values[1]).length + bytesNeededToEncodeLength;
        }
        if (valsPerPage % 3 > 1) {
          leftOverBytes += ((byte[]) fieldInfo.values[2]).length + bytesNeededToEncodeLength;
        }
        bytes = new byte[valsPerPage / 3 * totalValLength + leftOverBytes];
      }
      int bytesPerPage = (int) (valsPerPage * (fieldInfo.bitLength / 8.0));
      int bytesWritten = 0;
      for (int z = 0; z < fieldInfo.numberOfPages; z++, bytesWritten = 0) {
        for (int i = 0; i < valsPerPage; i++) {
          repLevels.writeInteger(0);
          defLevels.writeInteger(1);
          // System.out.print(i + ", " + (i % 25 == 0 ? "\n gen " + fieldInfo.name + ": " : ""));
          if (fieldInfo.values[0] instanceof Boolean) {
            bytes[currentBooleanByte] |= bitFields[booleanBitCounter.val]
                & ((boolean) fieldInfo.values[valsWritten % 3] ? allBitsTrue : allBitsFalse);
            booleanBitCounter.increment();
            if (booleanBitCounter.val == 0) {
              currentBooleanByte++;
            }
            valsWritten++;
            if (currentBooleanByte > bytesPerPage) {
              break;
            }
          } else {
            if (fieldInfo.values[valsWritten % 3] instanceof byte[]) {
              System.arraycopy(ByteArrayUtil.toByta(((byte[]) fieldInfo.values[valsWritten % 3]).length),
                  0, bytes, bytesWritten, bytesNeededToEncodeLength);
              System.arraycopy(fieldInfo.values[valsWritten % 3], 0, bytes,
                  bytesWritten + bytesNeededToEncodeLength, ((byte[]) fieldInfo.values[valsWritten % 3]).length);
              bytesWritten += ((byte[]) fieldInfo.values[valsWritten % 3]).length + bytesNeededToEncodeLength;
            } else {
              System.arraycopy(ByteArrayUtil.toByta(fieldInfo.values[valsWritten % 3]), 0, bytes,
                  i * (fieldInfo.bitLength / 8), fieldInfo.bitLength / 8);
            }
            valsWritten++;
          }
        }
        byte[] fullPage = new byte[2 * 4 * valsPerPage + bytes.length];
        byte[] repLevelBytes = repLevels.getBytes().toByteArray();
        byte[] defLevelBytes = defLevels.getBytes().toByteArray();
        System.arraycopy(bytes, 0, fullPage, 0, bytes.length);
        System.arraycopy(repLevelBytes, 0, fullPage, bytes.length, repLevelBytes.length);
        System.arraycopy(defLevelBytes, 0, fullPage, bytes.length + repLevelBytes.length, defLevelBytes.length);
        w.writeDataPage((props.recordsPerRowGroup / fieldInfo.numberOfPages), fullPage.length,
            BytesInput.from(fullPage), RLE, RLE, PLAIN);
        currentBooleanByte = 0;
      }
      w.endColumn();
      columnValuesWritten.remove(fieldInfo.name);
      columnValuesWritten.put(fieldInfo.name, valsWritten);
    }
    w.endBlock();
  }
  w.end(new HashMap<String, String>());
  logger.debug("Finished generating parquet file {}", path.getName());
}
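A hedged sketch of verifying a file produced this way by reading its footer back; the filename generated.parquet is hypothetical, and the Configuration mirrors the local-filesystem setup used above:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.parquet.hadoop.ParquetFileReader;
import org.apache.parquet.hadoop.metadata.ParquetMetadata;
import org.apache.parquet.hadoop.util.HadoopInputFile;

public class VerifyGeneratedFile {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set("fs.defaultFS", "file:///");
    try (ParquetFileReader reader = ParquetFileReader.open(
        HadoopInputFile.fromPath(new Path("generated.parquet"), conf))) {
      // The footer should report props.numberRowGroups row groups and
      // numberRowGroups * recordsPerRowGroup total rows.
      ParquetMetadata footer = reader.getFooter();
      System.out.println("row groups: " + footer.getBlocks().size());
      System.out.println("total rows: " + reader.getRecordCount());
    }
  }
}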