
Example 1 with HoodieSparkTable

Use of org.apache.hudi.table.HoodieSparkTable in project hudi by apache.

From the class TestUpsertPartitioner, method testUpsertPartitionerWithSmallFileHandlingWithInflightCompactionWithCanIndexLogFiles.

@Test
public void testUpsertPartitionerWithSmallFileHandlingWithInflightCompactionWithCanIndexLogFiles() throws Exception {
    // Note: this is used because it is the same partition path used in CompactionTestUtils.createCompactionPlan()
    final String testPartitionPath = DEFAULT_PARTITION_PATHS[0];
    HoodieWriteConfig config = makeHoodieClientConfigBuilder()
        .withCompactionConfig(HoodieCompactionConfig.newBuilder().compactionSmallFileSize(1024).build())
        .withIndexConfig(HoodieIndexConfig.newBuilder()
            .withIndexType(HoodieIndex.IndexType.HBASE)
            .withHBaseIndexConfig(HoodieHBaseIndexConfig.newBuilder().build())
            .build())
        .build();
    // This will generate the initial commits and create a compaction plan that includes the file groups created as part of it
    HoodieCompactionPlan plan = CompactionTestUtils.createCompactionPlan(metaClient, "001", "002", 1, true, false);
    FileCreateUtils.createRequestedCompactionCommit(basePath, "002", plan);
    // Simulate one more commit so that inflight compaction is considered when building file groups in file system view
    FileCreateUtils.createBaseFile(basePath, testPartitionPath, "003", "2", 1);
    FileCreateUtils.createCommit(basePath, "003");
    // Partitioner will attempt to assign inserts to file groups including base file created by inflight compaction
    metaClient = HoodieTableMetaClient.reload(metaClient);
    HoodieTestDataGenerator dataGenerator = new HoodieTestDataGenerator(new String[] { testPartitionPath });
    List<HoodieRecord> insertRecords = dataGenerator.generateInserts("004", 100);
    WorkloadProfile profile = new WorkloadProfile(buildProfile(jsc.parallelize(insertRecords)));
    HoodieSparkTable table = HoodieSparkTable.create(config, context, metaClient);
    SparkUpsertDeltaCommitPartitioner partitioner = new SparkUpsertDeltaCommitPartitioner(profile, context, table, config);
    assertEquals(1, partitioner.numPartitions(), "Should have 1 partitions");
    assertEquals(BucketType.UPDATE, partitioner.getBucketInfo(0).bucketType, "Bucket 0 is UPDATE");
    assertEquals("2", partitioner.getBucketInfo(0).fileIdPrefix, "Should be assigned to only file id not pending compaction which is 2");
}
Also used : WorkloadProfile(org.apache.hudi.table.WorkloadProfile) HoodieCompactionPlan(org.apache.hudi.avro.model.HoodieCompactionPlan) HoodieRecord(org.apache.hudi.common.model.HoodieRecord) HoodieWriteConfig(org.apache.hudi.config.HoodieWriteConfig) SparkUpsertDeltaCommitPartitioner(org.apache.hudi.table.action.deltacommit.SparkUpsertDeltaCommitPartitioner) HoodieTestDataGenerator(org.apache.hudi.common.testutils.HoodieTestDataGenerator) HoodieSparkTable(org.apache.hudi.table.HoodieSparkTable) Test(org.junit.jupiter.api.Test)
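
For orientation, a minimal sketch (not part of the test above) of how the partitioner's full bucket assignment could be inspected, using only numPartitions() and getBucketInfo(int) as exercised by the assertions; the logging itself is purely illustrative:

for (int i = 0; i < partitioner.numPartitions(); i++) {
    // bucketType and fileIdPrefix are the same fields the assertions above read
    System.out.println("bucket " + i
        + ": type=" + partitioner.getBucketInfo(i).bucketType
        + ", fileId=" + partitioner.getBucketInfo(i).fileIdPrefix);
}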

Example 2 with HoodieSparkTable

Use of org.apache.hudi.table.HoodieSparkTable in project hudi by apache.

From the class TestUpsertPartitioner, method testUpsertPartitionerWithSmallFileHandlingAndClusteringPlan.

@Test
public void testUpsertPartitionerWithSmallFileHandlingAndClusteringPlan() throws Exception {
    final String testPartitionPath = DEFAULT_PARTITION_PATHS[0];
    // create a HoodieWriteConfig with both inline and async clustering disabled
    HoodieWriteConfig config = makeHoodieClientConfigBuilder()
        .withCompactionConfig(HoodieCompactionConfig.newBuilder().build())
        .withClusteringConfig(HoodieClusteringConfig.newBuilder()
            .withInlineClustering(false)
            .withAsyncClustering(false)
            .build())
        .withStorageConfig(HoodieStorageConfig.newBuilder()
            .hfileMaxFileSize(1000 * 1024)
            .parquetMaxFileSize(1000 * 1024)
            .build())
        .build();
    // create a file slice with instantTime 001 and build a clustering plan that includes this 001 file slice
    HoodieClusteringPlan clusteringPlan = ClusteringTestUtils.createClusteringPlan(metaClient, "001", "1");
    // create requested replace commit
    HoodieRequestedReplaceMetadata requestedReplaceMetadata = HoodieRequestedReplaceMetadata.newBuilder().setClusteringPlan(clusteringPlan).setOperationType(WriteOperationType.CLUSTER.name()).build();
    FileCreateUtils.createRequestedReplaceCommit(basePath, "002", Option.of(requestedReplaceMetadata));
    // create file slice 003
    FileCreateUtils.createBaseFile(basePath, testPartitionPath, "003", "3", 1);
    FileCreateUtils.createCommit(basePath, "003");
    metaClient = HoodieTableMetaClient.reload(metaClient);
    // generate new data to be ingested
    HoodieTestDataGenerator dataGenerator = new HoodieTestDataGenerator(new String[] { testPartitionPath });
    List<HoodieRecord> insertRecords = dataGenerator.generateInserts("004", 100);
    WorkloadProfile profile = new WorkloadProfile(buildProfile(jsc.parallelize(insertRecords)));
    HoodieSparkTable table = HoodieSparkTable.create(config, context, metaClient);
    // create UpsertPartitioner
    UpsertPartitioner partitioner = new UpsertPartitioner(profile, context, table, config);
    // At this point we have file slice 1 and file slice 3, and file slice 1 is contained in the pending clustering plan,
    // so only file slice 3 can be used for ingestion.
    assertEquals(1, partitioner.smallFiles.size(), "Should have 1 small file to be ingested.");
}
Also used : WorkloadProfile(org.apache.hudi.table.WorkloadProfile) HoodieRecord(org.apache.hudi.common.model.HoodieRecord) HoodieWriteConfig(org.apache.hudi.config.HoodieWriteConfig) HoodieRequestedReplaceMetadata(org.apache.hudi.avro.model.HoodieRequestedReplaceMetadata) HoodieTestDataGenerator(org.apache.hudi.common.testutils.HoodieTestDataGenerator) HoodieClusteringPlan(org.apache.hudi.avro.model.HoodieClusteringPlan) HoodieSparkTable(org.apache.hudi.table.HoodieSparkTable) Test(org.junit.jupiter.api.Test)
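
As a hedged follow-up to the last assertion, and assuming SmallFile exposes a public location field of type HoodieRecordLocation (as other Hudi partitioner code in the same package does), one could also pin down which file group survived small-file selection:

// Assumption: partitioner.smallFiles holds SmallFile entries whose location
// (a HoodieRecordLocation) is accessible from the test package.
assertEquals("3", partitioner.smallFiles.get(0).location.getFileId(),
    "Only the file group outside the pending clustering plan should be picked as a small file");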

Example 3 with HoodieSparkTable

Use of org.apache.hudi.table.HoodieSparkTable in project hudi by apache.

From the class TestArchivedCommitsCommand, method init.

@BeforeEach
public void init() throws Exception {
    HoodieCLI.conf = hadoopConf();
    // Create table and connect
    String tableName = tableName();
    tablePath = tablePath(tableName);
    new TableCommand().createTable(tablePath, tableName, "COPY_ON_WRITE", "", 1, "org.apache.hudi.common.model.HoodieAvroPayload");
    HoodieTableMetaClient metaClient = HoodieCLI.getTableMetaClient();
    // Generate archive
    HoodieWriteConfig cfg = HoodieWriteConfig.newBuilder()
        .withPath(tablePath)
        .withSchema(HoodieTestCommitMetadataGenerator.TRIP_EXAMPLE_SCHEMA)
        .withParallelism(2, 2)
        .withCompactionConfig(HoodieCompactionConfig.newBuilder().retainCommits(1).archiveCommitsWith(2, 3).build())
        .withFileSystemViewConfig(FileSystemViewStorageConfig.newBuilder().withRemoteServerPort(timelineServicePort).build())
        .forTable("test-trip-table")
        .build();
    // Create six commits
    for (int i = 100; i < 106; i++) {
        String timestamp = String.valueOf(i);
        // Requested Compaction
        HoodieTestCommitMetadataGenerator.createCompactionAuxiliaryMetadata(tablePath, new HoodieInstant(HoodieInstant.State.REQUESTED, HoodieTimeline.COMPACTION_ACTION, timestamp), hadoopConf());
        // Inflight Compaction
        HoodieTestCommitMetadataGenerator.createCompactionAuxiliaryMetadata(tablePath, new HoodieInstant(HoodieInstant.State.INFLIGHT, HoodieTimeline.COMPACTION_ACTION, timestamp), hadoopConf());
        HoodieTestCommitMetadataGenerator.createCommitFileWithMetadata(tablePath, timestamp, hadoopConf());
    }
    // Simulate a compaction commit in metadata table timeline
    // so the archival in data table can happen
    HoodieTestUtils.createCompactionCommitInMetadataTable(hadoopConf(), metaClient.getFs(), tablePath, "105");
    metaClient = HoodieTableMetaClient.reload(metaClient);
    // reload the timeline and get all the commits before archive
    metaClient.getActiveTimeline().reload().getAllCommitsTimeline().filterCompletedInstants();
    // archive
    HoodieSparkTable table = HoodieSparkTable.create(cfg, context(), metaClient);
    HoodieTimelineArchiver archiver = new HoodieTimelineArchiver(cfg, table);
    archiver.archiveIfRequired(context());
}
Also used : HoodieTableMetaClient(org.apache.hudi.common.table.HoodieTableMetaClient) HoodieInstant(org.apache.hudi.common.table.timeline.HoodieInstant) HoodieTimelineArchiver(org.apache.hudi.client.HoodieTimelineArchiver) HoodieWriteConfig(org.apache.hudi.config.HoodieWriteConfig) HoodieSparkTable(org.apache.hudi.table.HoodieSparkTable) BeforeEach(org.junit.jupiter.api.BeforeEach)
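
As a hedged sanity check that is not part of the original setup, the effect of archiveIfRequired() could be observed by reloading the meta client and counting what is left on the active timeline; with retainCommits(1) and archiveCommitsWith(2, 3) above, most of the six commits should have moved to the archived timeline:

// Illustrative only: count the completed commits that survived archival.
metaClient = HoodieTableMetaClient.reload(metaClient);
int remaining = metaClient.getActiveTimeline().getAllCommitsTimeline()
    .filterCompletedInstants().countInstants();
System.out.println("completed commits remaining after archival: " + remaining);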

Example 4 with HoodieSparkTable

Use of org.apache.hudi.table.HoodieSparkTable in project hudi by apache.

From the class TestHoodieClientOnCopyOnWriteStorage, method testCommitWritesRelativePaths.

/**
 * Test to ensure commit metadata points to valid files.
 */
@ParameterizedTest
@MethodSource("populateMetaFieldsParams")
public void testCommitWritesRelativePaths(boolean populateMetaFields) throws Exception {
    HoodieWriteConfig.Builder cfgBuilder = getConfigBuilder().withAutoCommit(false);
    addConfigsForPopulateMetaFields(cfgBuilder, populateMetaFields);
    try (SparkRDDWriteClient client = getHoodieWriteClient(cfgBuilder.build())) {
        HoodieTableMetaClient metaClient = HoodieTableMetaClient.builder().setConf(hadoopConf).setBasePath(basePath).build();
        HoodieSparkTable table = HoodieSparkTable.create(cfgBuilder.build(), context, metaClient);
        String instantTime = "000";
        client.startCommitWithTime(instantTime);
        List<HoodieRecord> records = dataGen.generateInserts(instantTime, 200);
        JavaRDD<HoodieRecord> writeRecords = jsc.parallelize(records, 1);
        JavaRDD<WriteStatus> result = client.bulkInsert(writeRecords, instantTime);
        assertTrue(client.commit(instantTime, result), "Commit should succeed");
        assertTrue(testTable.commitExists(instantTime), "After explicit commit, commit file should be created");
        // Get base file paths from commit metadata
        String actionType = metaClient.getCommitActionType();
        HoodieInstant commitInstant = new HoodieInstant(false, actionType, instantTime);
        HoodieTimeline commitTimeline = metaClient.getCommitTimeline().filterCompletedInstants();
        HoodieCommitMetadata commitMetadata = HoodieCommitMetadata.fromBytes(commitTimeline.getInstantDetails(commitInstant).get(), HoodieCommitMetadata.class);
        String basePath = table.getMetaClient().getBasePath();
        Collection<String> commitPathNames = commitMetadata.getFileIdAndFullPaths(basePath).values();
        // Read from commit file
        try (FSDataInputStream inputStream = fs.open(testTable.getCommitFilePath(instantTime))) {
            String everything = FileIOUtils.readAsUTFString(inputStream);
            HoodieCommitMetadata metadata = HoodieCommitMetadata.fromJsonString(everything, HoodieCommitMetadata.class);
            HashMap<String, String> paths = metadata.getFileIdAndFullPaths(basePath);
            // Compare values in both to make sure they are equal.
            for (String pathName : paths.values()) {
                assertTrue(commitPathNames.contains(pathName));
            }
        }
    }
}
Also used : HoodieInstant(org.apache.hudi.common.table.timeline.HoodieInstant) SparkRDDWriteClient(org.apache.hudi.client.SparkRDDWriteClient) HoodieRecord(org.apache.hudi.common.model.HoodieRecord) HoodieTimeline(org.apache.hudi.common.table.timeline.HoodieTimeline) HoodieWriteConfig(org.apache.hudi.config.HoodieWriteConfig) HoodieTableMetaClient(org.apache.hudi.common.table.HoodieTableMetaClient) HoodieCommitMetadata(org.apache.hudi.common.model.HoodieCommitMetadata) FSDataInputStream(org.apache.hadoop.fs.FSDataInputStream) WriteStatus(org.apache.hudi.client.WriteStatus) HoodieSparkTable(org.apache.hudi.table.HoodieSparkTable) ParameterizedTest(org.junit.jupiter.params.ParameterizedTest) MethodSource(org.junit.jupiter.params.provider.MethodSource)
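
A hedged extension of the path check above: since the test's point is that commit metadata resolves to valid files, each full path returned by getFileIdAndFullPaths() could additionally be checked for existence with the same fs handle the test already uses to open the commit file (Path here is org.apache.hadoop.fs.Path):

// Not part of the original test; an extra existence check over the same paths.
for (String pathName : commitPathNames) {
    assertTrue(fs.exists(new Path(pathName)),
        "Path referenced in commit metadata should exist: " + pathName);
}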

Example 5 with HoodieSparkTable

Use of org.apache.hudi.table.HoodieSparkTable in project hudi by apache.

From the class TestUpdateSchemaEvolution, method assertSchemaEvolutionOnUpdateResult.

private void assertSchemaEvolutionOnUpdateResult(WriteStatus insertResult, HoodieSparkTable updateTable, List<HoodieRecord> updateRecords, String assertMsg, boolean isAssertThrow, Class expectedExceptionType) {
    jsc.parallelize(Arrays.asList(1)).map(x -> {
        Executable executable = () -> {
            HoodieMergeHandle mergeHandle = new HoodieMergeHandle(
                updateTable.getConfig(), "101", updateTable, updateRecords.iterator(),
                updateRecords.get(0).getPartitionPath(), insertResult.getFileId(), supplier, Option.empty());
            List<GenericRecord> oldRecords = BaseFileUtils.getInstance(updateTable.getBaseFileFormat())
                .readAvroRecords(updateTable.getHadoopConf(),
                    new Path(updateTable.getConfig().getBasePath() + "/" + insertResult.getStat().getPath()),
                    mergeHandle.getWriterSchemaWithMetaFields());
            for (GenericRecord rec : oldRecords) {
                mergeHandle.write(rec);
            }
            mergeHandle.close();
        };
        if (isAssertThrow) {
            assertThrows(expectedExceptionType, executable, assertMsg);
        } else {
            assertDoesNotThrow(executable, assertMsg);
        }
        return 1;
    }).collect();
}
Also used : Assertions.assertThrows(org.junit.jupiter.api.Assertions.assertThrows) BeforeEach(org.junit.jupiter.api.BeforeEach) Arrays(java.util.Arrays) BaseFileUtils(org.apache.hudi.common.util.BaseFileUtils) ParquetDecodingException(org.apache.parquet.io.ParquetDecodingException) HoodieUpsertException(org.apache.hudi.exception.HoodieUpsertException) Option(org.apache.hudi.common.util.Option) Function(java.util.function.Function) HoodieClientTestHarness(org.apache.hudi.testutils.HoodieClientTestHarness) ArrayList(java.util.ArrayList) HoodieSparkTable(org.apache.hudi.table.HoodieSparkTable) HoodieMergeHandle(org.apache.hudi.io.HoodieMergeHandle) Map(java.util.Map) Path(org.apache.hadoop.fs.Path) HoodieTimeline(org.apache.hudi.common.table.timeline.HoodieTimeline) SchemaTestUtil.getSchemaFromResource(org.apache.hudi.common.testutils.SchemaTestUtil.getSchemaFromResource) HoodieRecord(org.apache.hudi.common.model.HoodieRecord) GenericRecord(org.apache.avro.generic.GenericRecord) Schema(org.apache.avro.Schema) HoodieWriteConfig(org.apache.hudi.config.HoodieWriteConfig) RawTripTestPayload(org.apache.hudi.common.testutils.RawTripTestPayload) HoodieCreateHandle(org.apache.hudi.io.HoodieCreateHandle) InvalidRecordException(org.apache.parquet.io.InvalidRecordException) IOException(java.io.IOException) Collectors(java.util.stream.Collectors) HoodieAvroRecord(org.apache.hudi.common.model.HoodieAvroRecord) Test(org.junit.jupiter.api.Test) AfterEach(org.junit.jupiter.api.AfterEach) List(java.util.List) HoodieRecordLocation(org.apache.hudi.common.model.HoodieRecordLocation) FileSystemViewStorageConfig(org.apache.hudi.common.table.view.FileSystemViewStorageConfig) Executable(org.junit.jupiter.api.function.Executable) HoodieKey(org.apache.hudi.common.model.HoodieKey) HoodieTestUtils(org.apache.hudi.common.testutils.HoodieTestUtils) Assertions.assertDoesNotThrow(org.junit.jupiter.api.Assertions.assertDoesNotThrow) FSUtils(org.apache.hudi.common.fs.FSUtils) Path(org.apache.hadoop.fs.Path) HoodieMergeHandle(org.apache.hudi.io.HoodieMergeHandle) Executable(org.junit.jupiter.api.function.Executable) GenericRecord(org.apache.avro.generic.GenericRecord)
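
Because the helper above is private and its call sites are not shown here, the following is a hedged usage sketch; insertResult, evolvedTable, compatibleRecords and incompatibleRecords are hypothetical names standing in for whatever the surrounding test methods actually build:

// Hypothetical invocations mirroring the helper's signature.
// A backwards-compatible schema change is expected to merge cleanly:
assertSchemaEvolutionOnUpdateResult(insertResult, evolvedTable, compatibleRecords,
    "A compatible schema change should not break the merge", false, null);
// An incompatible change is expected to surface a read-side failure such as ParquetDecodingException:
assertSchemaEvolutionOnUpdateResult(insertResult, evolvedTable, incompatibleRecords,
    "An incompatible schema change should fail the merge", true, ParquetDecodingException.class);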

Aggregations

HoodieSparkTable (org.apache.hudi.table.HoodieSparkTable): 24
HoodieWriteConfig (org.apache.hudi.config.HoodieWriteConfig): 22
HoodieRecord (org.apache.hudi.common.model.HoodieRecord): 17
Test (org.junit.jupiter.api.Test): 14
Map (java.util.Map): 9
Arrays (java.util.Arrays): 7
HashMap (java.util.HashMap): 7
HoodieTableMetaClient (org.apache.hudi.common.table.HoodieTableMetaClient): 7
ParameterizedTest (org.junit.jupiter.params.ParameterizedTest): 7
IOException (java.io.IOException): 6
List (java.util.List): 6
Schema (org.apache.avro.Schema): 6
HoodieAvroRecord (org.apache.hudi.common.model.HoodieAvroRecord): 6
Option (org.apache.hudi.common.util.Option): 6
ArrayList (java.util.ArrayList): 5
Collectors (java.util.stream.Collectors): 5
Path (org.apache.hadoop.fs.Path): 5
HoodieKey (org.apache.hudi.common.model.HoodieKey): 5
HoodieTestDataGenerator (org.apache.hudi.common.testutils.HoodieTestDataGenerator): 5
RawTripTestPayload (org.apache.hudi.common.testutils.RawTripTestPayload): 5