
Example 6 with BloomFilter

Use of org.apache.hudi.common.bloom.BloomFilter in project hudi by apache.

The class HiveTestUtil, method generateParquetDataWithSchema:

private static void generateParquetDataWithSchema(Path filePath, Schema schema) throws IOException {
    // Convert the Avro schema to a Parquet schema and attach a simple bloom filter to the write support.
    org.apache.parquet.schema.MessageType parquetSchema = new AvroSchemaConverter().convert(schema);
    BloomFilter filter = BloomFilterFactory.createBloomFilter(1000, 0.0001, -1, BloomFilterTypeCode.SIMPLE.name());
    HoodieAvroWriteSupport writeSupport = new HoodieAvroWriteSupport(parquetSchema, schema, Option.of(filter));
    ParquetWriter writer = new ParquetWriter(filePath, writeSupport, CompressionCodecName.GZIP,
        120 * 1024 * 1024, ParquetWriter.DEFAULT_PAGE_SIZE, ParquetWriter.DEFAULT_PAGE_SIZE,
        ParquetWriter.DEFAULT_IS_DICTIONARY_ENABLED, ParquetWriter.DEFAULT_IS_VALIDATING_ENABLED,
        ParquetWriter.DEFAULT_WRITER_VERSION, fileSystem.getConf());
    // Write the generated test records; any IOException fails the test.
    List<IndexedRecord> testRecords = SchemaTestUtil.generateTestRecordsForSchema(schema);
    testRecords.forEach(s -> {
        try {
            writer.write(s);
        } catch (IOException e) {
            fail("IOException while writing test records as parquet: " + e);
        }
    });
    writer.close();
}
Also used: AvroSchemaConverter(org.apache.parquet.avro.AvroSchemaConverter) IndexedRecord(org.apache.avro.generic.IndexedRecord) ParquetWriter(org.apache.parquet.hadoop.ParquetWriter) HoodieAvroWriteSupport(org.apache.hudi.avro.HoodieAvroWriteSupport) IOException(java.io.IOException) BloomFilter(org.apache.hudi.common.bloom.BloomFilter)
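
The four arguments to BloomFilterFactory.createBloomFilter above are the expected number of entries, the target false positive rate, the maximum number of entries (only relevant for dynamic filters, hence -1 for SIMPLE), and the filter type code. As a rough sketch of how such a filter is used directly, assuming the add/mightContain/serializeToString methods on org.apache.hudi.common.bloom.BloomFilter:

// Sketch only: build a SIMPLE bloom filter and probe it for record keys.
BloomFilter filter = BloomFilterFactory.createBloomFilter(
    1000,                               // expected number of entries
    0.0001,                             // target false positive rate
    -1,                                 // max entries, used only by dynamic filters
    BloomFilterTypeCode.SIMPLE.name()); // SIMPLE, or DYNAMIC_V0 for a dynamically growing filter
filter.add("record-key-1");
boolean maybePresent = filter.mightContain("record-key-1");   // true (a bloom filter can only say "maybe")
boolean likelyAbsent = filter.mightContain("record-key-2");   // usually false; never a false negative
String serialized = filter.serializeToString();               // the form stored in file footers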

Example 7 with BloomFilter

Use of org.apache.hudi.common.bloom.BloomFilter in project hudi by apache.

The class TestCluster, method generateParquetData:

@SuppressWarnings({ "unchecked", "deprecation" })
private void generateParquetData(Path filePath, boolean isParquetSchemaSimple) throws IOException, URISyntaxException {
    // Pick the simple or evolved test schema, convert it to a Parquet schema, and attach a simple bloom filter.
    Schema schema = (isParquetSchemaSimple ? SchemaTestUtil.getSimpleSchema() : SchemaTestUtil.getEvolvedSchema());
    org.apache.parquet.schema.MessageType parquetSchema = new AvroSchemaConverter().convert(schema);
    BloomFilter filter = BloomFilterFactory.createBloomFilter(1000, 0.0001, -1, BloomFilterTypeCode.SIMPLE.name());
    HoodieAvroWriteSupport writeSupport = new HoodieAvroWriteSupport(parquetSchema, schema, Option.of(filter));
    ParquetWriter writer = new ParquetWriter(filePath, writeSupport, CompressionCodecName.GZIP,
        120 * 1024 * 1024, ParquetWriter.DEFAULT_PAGE_SIZE, ParquetWriter.DEFAULT_PAGE_SIZE,
        ParquetWriter.DEFAULT_IS_DICTIONARY_ENABLED, ParquetWriter.DEFAULT_IS_VALIDATING_ENABLED,
        ParquetWriter.DEFAULT_WRITER_VERSION, dfsCluster.getFileSystem().getConf());
    // Write the matching set of generated test records; any IOException fails the test.
    List<IndexedRecord> testRecords = (isParquetSchemaSimple ? SchemaTestUtil.generateTestRecords(0, 100) : SchemaTestUtil.generateEvolvedTestRecords(100, 100));
    testRecords.forEach(s -> {
        try {
            writer.write(s);
        } catch (IOException e) {
            fail("IOException while writing test records as parquet: " + e);
        }
    });
    writer.close();
}
Also used: AvroSchemaConverter(org.apache.parquet.avro.AvroSchemaConverter) IndexedRecord(org.apache.avro.generic.IndexedRecord) ParquetWriter(org.apache.parquet.hadoop.ParquetWriter) Schema(org.apache.avro.Schema) HoodieAvroWriteSupport(org.apache.hudi.avro.HoodieAvroWriteSupport) IOException(java.io.IOException) BloomFilter(org.apache.hudi.common.bloom.BloomFilter)
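
Once a file has been written this way, the serialized filter sits in the Parquet footer's key/value metadata. A minimal sketch of reading it back, assuming the footer key string below matches the constant HoodieAvroWriteSupport writes and that BloomFilterFactory exposes a fromString(serialized, typeCode) factory:

// Sketch only: rehydrate the bloom filter from the footer of the file written above.
org.apache.parquet.hadoop.metadata.ParquetMetadata footer =
    org.apache.parquet.hadoop.ParquetFileReader.readFooter(dfsCluster.getFileSystem().getConf(), filePath);
java.util.Map<String, String> keyValueMetadata = footer.getFileMetaData().getKeyValueMetaData();
String serializedFilter = keyValueMetadata.get("org.apache.hudi.bloomfilter"); // assumed footer key
if (serializedFilter != null) {
    BloomFilter filter = BloomFilterFactory.fromString(serializedFilter, BloomFilterTypeCode.SIMPLE.name());
    boolean mayContain = filter.mightContain("some-record-key");
}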

Example 8 with BloomFilter

Use of org.apache.hudi.common.bloom.BloomFilter in project hudi by apache.

The class HoodieFileWriterFactory, method newHFileFileWriter:

private static <T extends HoodieRecordPayload, R extends IndexedRecord> HoodieFileWriter<R> newHFileFileWriter(
        String instantTime, Path path, HoodieWriteConfig config, Schema schema, HoodieTable hoodieTable,
        TaskContextSupplier taskContextSupplier) throws IOException {
    BloomFilter filter = createBloomFilter(config);
    HoodieHFileConfig hfileConfig = new HoodieHFileConfig(hoodieTable.getHadoopConf(),
        config.getHFileCompressionAlgorithm(), config.getHFileBlockSize(), config.getHFileMaxFileSize(),
        HoodieHFileReader.KEY_FIELD_NAME, PREFETCH_ON_OPEN, CACHE_DATA_IN_L1, DROP_BEHIND_CACHE_COMPACTION,
        filter, HFILE_COMPARATOR);
    return new HoodieHFileWriter<>(instantTime, path, hfileConfig, schema, taskContextSupplier, config.populateMetaFields());
}
Also used: BloomFilter(org.apache.hudi.common.bloom.BloomFilter)
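
The createBloomFilter(config) helper called above is not part of this snippet; conceptually it forwards the bloom filter settings from HoodieWriteConfig to BloomFilterFactory. A hedged sketch of an equivalent helper (the HoodieWriteConfig getter names here are assumptions and should be checked against your Hudi version):

// Sketch only: an equivalent of the createBloomFilter(config) helper used above.
private static BloomFilter createBloomFilterSketch(HoodieWriteConfig config) {
    return BloomFilterFactory.createBloomFilter(
        config.getBloomFilterNumEntries(),           // expected entries per file (assumed getter)
        config.getBloomFilterFPP(),                  // target false positive probability (assumed getter)
        config.getDynamicBloomFilterMaxNumEntries(), // cap for dynamic filters (assumed getter)
        config.getBloomFilterType());                // SIMPLE or DYNAMIC_V0 type code (assumed getter)
}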

Example 9 with BloomFilter

Use of org.apache.hudi.common.bloom.BloomFilter in project hudi by apache.

The class HoodieFileWriterFactory, method newOrcFileWriter:

private static <T extends HoodieRecordPayload, R extends IndexedRecord> HoodieFileWriter<R> newOrcFileWriter(
        String instantTime, Path path, HoodieWriteConfig config, Schema schema, HoodieTable hoodieTable,
        TaskContextSupplier taskContextSupplier) throws IOException {
    BloomFilter filter = createBloomFilter(config);
    HoodieOrcConfig orcConfig = new HoodieOrcConfig(hoodieTable.getHadoopConf(), config.getOrcCompressionCodec(),
        config.getOrcStripeSize(), config.getOrcBlockSize(), config.getOrcMaxFileSize(), filter);
    return new HoodieOrcWriter<>(instantTime, path, orcConfig, schema, taskContextSupplier);
}
Also used: BloomFilter(org.apache.hudi.common.bloom.BloomFilter)

Example 10 with BloomFilter

Use of org.apache.hudi.common.bloom.BloomFilter in project hudi by apache.

The class HoodieOrcWriter, method close:

@Override
public void close() throws IOException {
    // Flush any rows still buffered in the current batch before finalizing the file.
    if (batch.size != 0) {
        writer.addRowBatch(batch);
        batch.reset();
    }
    if (orcConfig.useBloomFilter()) {
        // Persist the serialized bloom filter, and the record key range when known, as ORC user metadata.
        final BloomFilter bloomFilter = orcConfig.getBloomFilter();
        writer.addUserMetadata(HOODIE_AVRO_BLOOM_FILTER_METADATA_KEY, ByteBuffer.wrap(bloomFilter.serializeToString().getBytes()));
        if (minRecordKey != null && maxRecordKey != null) {
            writer.addUserMetadata(HOODIE_MIN_RECORD_KEY_FOOTER, ByteBuffer.wrap(minRecordKey.getBytes()));
            writer.addUserMetadata(HOODIE_MAX_RECORD_KEY_FOOTER, ByteBuffer.wrap(maxRecordKey.getBytes()));
        }
        // Dynamic bloom filters also record their type code so readers can deserialize them correctly.
        if (bloomFilter.getBloomFilterTypeCode().name().contains(HoodieDynamicBoundedBloomFilter.TYPE_CODE_PREFIX)) {
            writer.addUserMetadata(HOODIE_BLOOM_FILTER_TYPE_CODE, ByteBuffer.wrap(bloomFilter.getBloomFilterTypeCode().name().getBytes()));
        }
    }
    // The Avro schema is always stored so the file can be read back as Hoodie records.
    writer.addUserMetadata(HoodieOrcConfig.AVRO_SCHEMA_METADATA_KEY, ByteBuffer.wrap(avroSchema.toString().getBytes()));
    writer.close();
}
Also used: HoodieDynamicBoundedBloomFilter(org.apache.hudi.common.bloom.HoodieDynamicBoundedBloomFilter) BloomFilter(org.apache.hudi.common.bloom.BloomFilter)
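
On the read side, the user metadata written in close() can be recovered from the ORC reader. A minimal sketch, assuming the standard org.apache.orc reader API and that the key constants resolve to the same strings used by the writer above:

// Sketch only: read back the bloom filter and record key range from the ORC user metadata.
// path and conf (the file location and Hadoop configuration) are assumed to be available.
org.apache.orc.Reader reader = org.apache.orc.OrcFile.createReader(path, org.apache.orc.OrcFile.readerOptions(conf));
if (reader.hasMetadataValue(HOODIE_AVRO_BLOOM_FILTER_METADATA_KEY)) {
    String serialized = StandardCharsets.UTF_8.decode(
        reader.getMetadataValue(HOODIE_AVRO_BLOOM_FILTER_METADATA_KEY)).toString();
    // SIMPLE is assumed here; a robust reader would consult HOODIE_BLOOM_FILTER_TYPE_CODE first.
    BloomFilter bloomFilter = BloomFilterFactory.fromString(serialized, BloomFilterTypeCode.SIMPLE.name());
    String minKey = StandardCharsets.UTF_8.decode(reader.getMetadataValue(HOODIE_MIN_RECORD_KEY_FOOTER)).toString();
    String maxKey = StandardCharsets.UTF_8.decode(reader.getMetadataValue(HOODIE_MAX_RECORD_KEY_FOOTER)).toString();
}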

Aggregations

BloomFilter (org.apache.hudi.common.bloom.BloomFilter): 30
HoodieRecord (org.apache.hudi.common.model.HoodieRecord): 13
Path (org.apache.hadoop.fs.Path): 11
ArrayList (java.util.ArrayList): 9
GenericRecord (org.apache.avro.generic.GenericRecord): 9
HoodieWriteConfig (org.apache.hudi.config.HoodieWriteConfig): 9
ParameterizedTest (org.junit.jupiter.params.ParameterizedTest): 8
IOException (java.io.IOException): 7
Schema (org.apache.avro.Schema): 7
HoodieAvroRecord (org.apache.hudi.common.model.HoodieAvroRecord): 7
HoodieKey (org.apache.hudi.common.model.HoodieKey): 7
RawTripTestPayload (org.apache.hudi.common.testutils.RawTripTestPayload): 7
AvroSchemaConverter (org.apache.parquet.avro.AvroSchemaConverter): 6
IndexedRecord (org.apache.avro.generic.IndexedRecord): 5
HoodieAvroWriteSupport (org.apache.hudi.avro.HoodieAvroWriteSupport): 5
ParquetWriter (org.apache.parquet.hadoop.ParquetWriter): 5
MethodSource (org.junit.jupiter.params.provider.MethodSource): 5
HashMap (java.util.HashMap): 4
FileStatus (org.apache.hadoop.fs.FileStatus): 4
Test (org.junit.jupiter.api.Test): 4