
Example 1 with HoodieTableType

Use of org.apache.hudi.common.model.HoodieTableType in project hudi by apache.

From the class TestSparkHoodieHBaseIndex, method testSimpleTagLocationAndUpdate.

@ParameterizedTest
@EnumSource(HoodieTableType.class)
public void testSimpleTagLocationAndUpdate(HoodieTableType tableType) throws Exception {
    metaClient = HoodieTestUtils.init(hadoopConf, basePath, tableType);
    final String newCommitTime = "001";
    final int numRecords = 10;
    List<HoodieRecord> records = dataGen.generateInserts(newCommitTime, numRecords);
    JavaRDD<HoodieRecord> writeRecords = jsc().parallelize(records, 1);
    // Load to memory
    HoodieWriteConfig config = getConfig();
    SparkHoodieHBaseIndex index = new SparkHoodieHBaseIndex(config);
    try (SparkRDDWriteClient writeClient = getHoodieWriteClient(config)) {
        metaClient = HoodieTableMetaClient.reload(metaClient);
        HoodieTable hoodieTable = HoodieSparkTable.create(config, context, metaClient);
        // Test tagLocation without any entries in index
        JavaRDD<HoodieRecord> records1 = tagLocation(index, writeRecords, hoodieTable);
        assertEquals(0, records1.filter(record -> record.isCurrentLocationKnown()).count());
        // Insert the generated records
        writeClient.startCommitWithTime(newCommitTime);
        JavaRDD<WriteStatus> writeStatuses = writeClient.upsert(writeRecords, newCommitTime);
        assertNoWriteErrors(writeStatuses.collect());
        // Now tagLocation for these records; the HBase index should not tag them since the commit has not completed yet
        JavaRDD<HoodieRecord> records2 = tagLocation(index, writeRecords, hoodieTable);
        assertEquals(0, records2.filter(record -> record.isCurrentLocationKnown()).count());
        // Now commit this & update location of records inserted and validate no errors
        writeClient.commit(newCommitTime, writeStatuses);
        // Now tagLocation for these records, hbaseIndex should tag them correctly
        metaClient = HoodieTableMetaClient.reload(metaClient);
        hoodieTable = HoodieSparkTable.create(config, context, metaClient);
        List<HoodieRecord> records3 = tagLocation(index, writeRecords, hoodieTable).collect();
        assertEquals(numRecords, records3.stream().filter(record -> record.isCurrentLocationKnown()).count());
        assertEquals(numRecords, records3.stream().map(record -> record.getKey().getRecordKey()).distinct().count());
        assertEquals(numRecords, records3.stream().filter(record -> (record.getCurrentLocation() != null && record.getCurrentLocation().getInstantTime().equals(newCommitTime))).distinct().count());
    }
}
Also used : HoodieTable(org.apache.hudi.table.HoodieTable) BeforeEach(org.junit.jupiter.api.BeforeEach) Arrays(java.util.Arrays) Result(org.apache.hadoop.hbase.client.Result) HoodieTestDataGenerator(org.apache.hudi.common.testutils.HoodieTestDataGenerator) AfterAll(org.junit.jupiter.api.AfterAll) HoodieTableType(org.apache.hudi.common.model.HoodieTableType) BeforeAll(org.junit.jupiter.api.BeforeAll) Configuration(org.apache.hadoop.conf.Configuration) Map(java.util.Map) HoodieStorageConfig(org.apache.hudi.config.HoodieStorageConfig) Path(org.apache.hadoop.fs.Path) HoodieSparkEngineContext(org.apache.hudi.client.common.HoodieSparkEngineContext) Tag(org.junit.jupiter.api.Tag) Get(org.apache.hadoop.hbase.client.Get) UUID(java.util.UUID) Tuple2(scala.Tuple2) Collectors(java.util.stream.Collectors) HoodieIndex(org.apache.hudi.index.HoodieIndex) Test(org.junit.jupiter.api.Test) List(java.util.List) HBaseConfiguration(org.apache.hadoop.hbase.HBaseConfiguration) HBaseTestingUtility(org.apache.hadoop.hbase.HBaseTestingUtility) HoodieWriteStat(org.apache.hudi.common.model.HoodieWriteStat) Assertions.assertTrue(org.junit.jupiter.api.Assertions.assertTrue) Mockito.atMost(org.mockito.Mockito.atMost) Mockito.mock(org.mockito.Mockito.mock) ArgumentMatchers.any(org.mockito.ArgumentMatchers.any) Option(org.apache.hudi.common.util.Option) EnumSource(org.junit.jupiter.params.provider.EnumSource) HashMap(java.util.HashMap) HoodieSparkTable(org.apache.hudi.table.HoodieSparkTable) HTable(org.apache.hadoop.hbase.client.HTable) HoodieTableMetaClient(org.apache.hudi.common.table.HoodieTableMetaClient) Assertions.assertEquals(org.junit.jupiter.api.Assertions.assertEquals) EmptyHoodieRecordPayload(org.apache.hudi.common.model.EmptyHoodieRecordPayload) LinkedList(java.util.LinkedList) JavaRDD(org.apache.spark.api.java.JavaRDD) Bytes(org.apache.hadoop.hbase.util.Bytes) HoodieRecord(org.apache.hudi.common.model.HoodieRecord) TableName(org.apache.hadoop.hbase.TableName) TestMethodOrder(org.junit.jupiter.api.TestMethodOrder) Assertions.assertNoWriteErrors(org.apache.hudi.testutils.Assertions.assertNoWriteErrors) HoodieWriteConfig(org.apache.hudi.config.HoodieWriteConfig) Put(org.apache.hadoop.hbase.client.Put) HoodieHBaseIndexConfig(org.apache.hudi.config.HoodieHBaseIndexConfig) IOException(java.io.IOException) Mockito.times(org.mockito.Mockito.times) Mockito.when(org.mockito.Mockito.when) HoodieAvroRecord(org.apache.hudi.common.model.HoodieAvroRecord) MethodOrderer(org.junit.jupiter.api.MethodOrderer) Mockito.verify(org.mockito.Mockito.verify) HoodieCompactionConfig(org.apache.hudi.config.HoodieCompactionConfig) WriteStatus(org.apache.hudi.client.WriteStatus) HoodieRecordPayload(org.apache.hudi.common.model.HoodieRecordPayload) AfterEach(org.junit.jupiter.api.AfterEach) ParameterizedTest(org.junit.jupiter.params.ParameterizedTest) SparkRDDWriteClient(org.apache.hudi.client.SparkRDDWriteClient) SparkClientFunctionalTestHarness(org.apache.hudi.testutils.SparkClientFunctionalTestHarness) Connection(org.apache.hadoop.hbase.client.Connection) HoodieIndexConfig(org.apache.hudi.config.HoodieIndexConfig) HoodieKey(org.apache.hudi.common.model.HoodieKey) HoodieTestUtils(org.apache.hudi.common.testutils.HoodieTestUtils) SparkRDDWriteClient(org.apache.hudi.client.SparkRDDWriteClient) HoodieRecord(org.apache.hudi.common.model.HoodieRecord) HoodieTable(org.apache.hudi.table.HoodieTable) HoodieWriteConfig(org.apache.hudi.config.HoodieWriteConfig) WriteStatus(org.apache.hudi.client.WriteStatus) 
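The essence of this example is the JUnit 5 pattern of running one test body per HoodieTableType constant. Below is a minimal sketch of just that pattern; the initTable helper is hypothetical and merely stands in for the HoodieTestUtils.init(hadoopConf, basePath, tableType) call used above.

import org.apache.hudi.common.model.HoodieTableType;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.EnumSource;

public class TableTypeParameterizationSketch {

    @ParameterizedTest
    @EnumSource(HoodieTableType.class) // runs once per enum constant: COPY_ON_WRITE, MERGE_ON_READ
    public void runsForEveryTableType(HoodieTableType tableType) {
        // Hypothetical setup helper; the real test calls HoodieTestUtils.init(hadoopConf, basePath, tableType)
        initTable(tableType);
    }

    private void initTable(HoodieTableType tableType) {
        // Placeholder for per-table-type initialization
        System.out.println("Initializing a " + tableType.name() + " table");
    }
}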

Example 2 with HoodieTableType

Use of org.apache.hudi.common.model.HoodieTableType in project hudi by apache.

From the class HoodieTableSource, method getStreamInputFormat.

private InputFormat<RowData, ?> getStreamInputFormat() {
    // if table does not exist, use schema from the DDL
    Schema tableAvroSchema = this.metaClient == null ? inferSchemaFromDdl() : getTableAvroSchema();
    final DataType rowDataType = AvroSchemaConverter.convertToDataType(tableAvroSchema);
    final RowType rowType = (RowType) rowDataType.getLogicalType();
    final RowType requiredRowType = (RowType) getProducedDataType().notNull().getLogicalType();
    final String queryType = this.conf.getString(FlinkOptions.QUERY_TYPE);
    if (FlinkOptions.QUERY_TYPE_SNAPSHOT.equals(queryType)) {
        final HoodieTableType tableType = HoodieTableType.valueOf(this.conf.getString(FlinkOptions.TABLE_TYPE));
        boolean emitDelete = tableType == HoodieTableType.MERGE_ON_READ;
        return mergeOnReadInputFormat(rowType, requiredRowType, tableAvroSchema, rowDataType, Collections.emptyList(), emitDelete);
    }
    String errMsg = String.format("Invalid query type : '%s', options ['%s'] are supported now", queryType, FlinkOptions.QUERY_TYPE_SNAPSHOT);
    throw new HoodieException(errMsg);
}
Also used : ResolvedSchema(org.apache.flink.table.catalog.ResolvedSchema) Schema(org.apache.avro.Schema) HoodieTableType(org.apache.hudi.common.model.HoodieTableType) DataType(org.apache.flink.table.types.DataType) RowType(org.apache.flink.table.types.logical.RowType) HoodieException(org.apache.hudi.exception.HoodieException)
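The snippet resolves the table type from a configuration string and branches on it. Below is a minimal standalone sketch of that resolution, assuming only that FlinkOptions.TABLE_TYPE carries the enum name as a string, as the valueOf call above implies.

import org.apache.hudi.common.model.HoodieTableType;

public class TableTypeFromConfigSketch {
    public static void main(String[] args) {
        // Stand-in for this.conf.getString(FlinkOptions.TABLE_TYPE) in the example above
        String configuredType = "MERGE_ON_READ";
        HoodieTableType tableType = HoodieTableType.valueOf(configuredType);

        // Mirrors the branch above: deletes are only emitted for MERGE_ON_READ tables
        boolean emitDelete = tableType == HoodieTableType.MERGE_ON_READ;
        System.out.println(tableType + " -> emitDelete=" + emitDelete);
    }
}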

Example 3 with HoodieTableType

Use of org.apache.hudi.common.model.HoodieTableType in project hudi by apache.

From the class TestHoodieBackedMetadata, method testMetadataTableWithPendingCompaction.

/**
 * Tests that the metadata table stays consistent while a compaction on it is pending (inflight), and that a subsequent write retries the compaction.
 */
@ParameterizedTest
@ValueSource(booleans = { true, false })
public void testMetadataTableWithPendingCompaction(boolean simulateFailedCompaction) throws Exception {
    HoodieTableType tableType = COPY_ON_WRITE;
    init(tableType, false);
    writeConfig = getWriteConfigBuilder(true, true, false).withMetadataConfig(HoodieMetadataConfig.newBuilder().enable(true).enableFullScan(true).enableMetrics(false).withMaxNumDeltaCommitsBeforeCompaction(3).build()).build();
    initWriteConfigAndMetatableWriter(writeConfig, true);
    doWriteOperation(testTable, "0000001", INSERT);
    // Create an inflight compaction in the metadata table.
    // It is not easy to create an inflight compaction there directly, so we let the compaction succeed and then rename its completed instant file away (see below).
    // This next write is expected to trigger the metadata table compaction.
    String commitInstant = "0000002";
    doWriteOperation(testTable, commitInstant, INSERT);
    doWriteOperation(testTable, "0000003", INSERT);
    HoodieTableMetadata tableMetadata = metadata(writeConfig, context);
    String metadataCompactionInstant = commitInstant + "001";
    assertTrue(tableMetadata.getLatestCompactionTime().isPresent());
    assertEquals(tableMetadata.getLatestCompactionTime().get(), metadataCompactionInstant);
    validateMetadata(testTable);
    // Fetch the compaction commit file and rename it to a temp file. The completed compaction meta file carries serialized info that the table
    // interprets for future upserts, so we rename it to a temp name here and rename it back to the original name later.
    java.nio.file.Path parentPath = Paths.get(metadataTableBasePath, HoodieTableMetaClient.METAFOLDER_NAME);
    java.nio.file.Path metaFilePath = parentPath.resolve(metadataCompactionInstant + HoodieTimeline.COMMIT_EXTENSION);
    java.nio.file.Path tempFilePath = FileCreateUtils.renameFileToTemp(metaFilePath, metadataCompactionInstant);
    metaClient.reloadActiveTimeline();
    testTable = HoodieMetadataTestTable.of(metaClient, metadataWriter);
    // This validation exercises the code path where a compaction is inflight in the metadata table; metadata-based file listing should still match
    // non-metadata-based file listing.
    validateMetadata(testTable);
    if (simulateFailedCompaction) {
        // this should retry the compaction in metadata table.
        doWriteOperation(testTable, "0000004", INSERT);
    } else {
        // let the compaction succeed in metadata and validation should succeed.
        FileCreateUtils.renameTempToMetaFile(tempFilePath, metaFilePath);
    }
    validateMetadata(testTable);
    // add a few more writes and validate
    doWriteOperation(testTable, "0000005", INSERT);
    doWriteOperation(testTable, "0000006", UPSERT);
    validateMetadata(testTable);
    if (simulateFailedCompaction) {
        // trigger another compaction failure.
        metadataCompactionInstant = "0000005001";
        tableMetadata = metadata(writeConfig, context);
        assertTrue(tableMetadata.getLatestCompactionTime().isPresent());
        assertEquals(tableMetadata.getLatestCompactionTime().get(), metadataCompactionInstant);
        // As above, rename the completed compaction meta file to a temp name to simulate a pending compaction in the metadata table.
        parentPath = Paths.get(metadataTableBasePath, HoodieTableMetaClient.METAFOLDER_NAME);
        metaFilePath = parentPath.resolve(metadataCompactionInstant + HoodieTimeline.COMMIT_EXTENSION);
        tempFilePath = FileCreateUtils.renameFileToTemp(metaFilePath, metadataCompactionInstant);
        validateMetadata(testTable);
        // this should retry the failed compaction in metadata table.
        doWriteOperation(testTable, "0000007", INSERT);
        validateMetadata(testTable);
        // add a few more writes and validate
        doWriteOperation(testTable, "0000008", INSERT);
        doWriteOperation(testTable, "0000009", UPSERT);
        validateMetadata(testTable);
    }
}
Also used : HoodieTableType(org.apache.hudi.common.model.HoodieTableType) HoodieTableMetadata(org.apache.hudi.metadata.HoodieTableMetadata) ValueSource(org.junit.jupiter.params.provider.ValueSource) ParameterizedTest(org.junit.jupiter.params.ParameterizedTest)
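Two conventions drive this test: the metadata table compaction instant is named by suffixing the triggering commit time with "001", and a completed instant is turned back into a pending one by renaming its meta file out of the timeline folder. Below is a minimal sketch of the path construction only; the base path is hypothetical, and the values ".hoodie" and ".commit" are assumed to match HoodieTableMetaClient.METAFOLDER_NAME and HoodieTimeline.COMMIT_EXTENSION.

import java.nio.file.Path;
import java.nio.file.Paths;

public class PendingCompactionPathSketch {
    public static void main(String[] args) {
        String metadataTableBasePath = "/tmp/table/.hoodie/metadata"; // hypothetical metadata table base path
        String metaFolderName = ".hoodie";   // assumed value of HoodieTableMetaClient.METAFOLDER_NAME
        String commitExtension = ".commit";  // assumed value of HoodieTimeline.COMMIT_EXTENSION

        String commitInstant = "0000002";
        // Naming convention used by the test: triggering commit time + "001"
        String metadataCompactionInstant = commitInstant + "001";

        Path parentPath = Paths.get(metadataTableBasePath, metaFolderName);
        Path metaFilePath = parentPath.resolve(metadataCompactionInstant + commitExtension);
        System.out.println(metaFilePath); // /tmp/table/.hoodie/metadata/.hoodie/0000002001.commit
    }
}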

Example 4 with HoodieTableType

Use of org.apache.hudi.common.model.HoodieTableType in project hudi by apache.

From the class TestHoodieBackedMetadata, method testManualRollbacks.

/**
 * Tests that manual rollbacks work correctly and that enough timeline history is maintained on the metadata table
 * timeline.
 */
@ParameterizedTest
@ValueSource(booleans = { true, false })
public void testManualRollbacks(final boolean populateMetaFields) throws Exception {
    HoodieTableType tableType = COPY_ON_WRITE;
    init(tableType, false);
    // Setting to archive more aggressively on the Metadata Table than the Dataset
    final int maxDeltaCommitsBeforeCompaction = 4;
    final int minArchiveCommitsMetadata = 2;
    final int minArchiveCommitsDataset = 4;
    writeConfig = getWriteConfigBuilder(true, true, false).withMetadataConfig(HoodieMetadataConfig.newBuilder().enable(true).archiveCommitsWith(minArchiveCommitsMetadata, minArchiveCommitsMetadata + 1).retainCommits(1).withMaxNumDeltaCommitsBeforeCompaction(maxDeltaCommitsBeforeCompaction).withPopulateMetaFields(populateMetaFields).build()).withCompactionConfig(HoodieCompactionConfig.newBuilder().archiveCommitsWith(minArchiveCommitsDataset, minArchiveCommitsDataset + 1).retainCommits(1).retainFileVersions(1).withAutoClean(false).withAsyncClean(true).build()).build();
    initWriteConfigAndMetatableWriter(writeConfig, true);
    doWriteInsertAndUpsert(testTable, "000001", "000002", false);
    for (int i = 3; i < 10; i++) {
        doWriteOperation(testTable, "00000" + i);
        archiveDataTable(writeConfig, metaClient);
    }
    validateMetadata(testTable);
    // We can only roll back commits whose deltacommits have not been archived yet.
    int numRollbacks = 0;
    boolean exceptionRaised = false;
    List<HoodieInstant> allInstants = metaClient.reloadActiveTimeline().getCommitsTimeline().getReverseOrderedInstants().collect(Collectors.toList());
    for (HoodieInstant instantToRollback : allInstants) {
        try {
            testTable.doRollback(instantToRollback.getTimestamp(), String.valueOf(Time.now()));
            validateMetadata(testTable);
            ++numRollbacks;
        } catch (HoodieMetadataException e) {
            exceptionRaised = true;
            break;
        }
    }
    assertTrue(exceptionRaised, "Rollback of archived instants should fail");
    // Since each rollback also creates a deltacommit, we can only expect to roll back about half of the original
    // instants that were present before the rollbacks started.
    assertTrue(numRollbacks >= Math.max(minArchiveCommitsDataset, minArchiveCommitsMetadata) / 2, "Rollbacks of non archived instants should work");
}
Also used : HoodieInstant(org.apache.hudi.common.table.timeline.HoodieInstant) HoodieMetadataException(org.apache.hudi.exception.HoodieMetadataException) HoodieTableType(org.apache.hudi.common.model.HoodieTableType) ValueSource(org.junit.jupiter.params.provider.ValueSource) ParameterizedTest(org.junit.jupiter.params.ParameterizedTest)
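The control flow of the rollback phase is independent of Hudi: roll back commits newest-first and expect a failure once the loop reaches instants whose history has been archived. Below is a stripped-down sketch of that loop, with the Hudi calls replaced by a hypothetical Table interface.

import java.util.List;

public class RollbackLoopSketch {

    // Hypothetical stand-ins for the Hudi test helpers used in the example above.
    interface Table {
        List<String> reverseOrderedCommitTimes();             // newest first
        void doRollback(String commitTime) throws Exception;  // throws once history is archived
    }

    static int rollbackUntilFailure(Table table) {
        int numRollbacks = 0;
        boolean exceptionRaised = false;
        for (String commitTime : table.reverseOrderedCommitTimes()) {
            try {
                table.doRollback(commitTime);
                ++numRollbacks;
            } catch (Exception e) {
                // Expected once we reach commits whose deltacommits were archived.
                exceptionRaised = true;
                break;
            }
        }
        if (!exceptionRaised) {
            throw new IllegalStateException("Rollback of archived instants should fail");
        }
        return numRollbacks;
    }
}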

Example 5 with HoodieTableType

Use of org.apache.hudi.common.model.HoodieTableType in project hudi by apache.

From the class TestHoodieDeltaStreamerWithMultiWriter, method runJobsInParallel.

private void runJobsInParallel(String tableBasePath, HoodieTableType tableType, int totalRecords, HoodieDeltaStreamer ingestionJob, HoodieDeltaStreamer.Config cfgIngestionJob, HoodieDeltaStreamer backfillJob, HoodieDeltaStreamer.Config cfgBackfillJob, boolean expectConflict, String jobId) throws Exception {
    ExecutorService service = Executors.newFixedThreadPool(2);
    HoodieTableMetaClient meta = HoodieTableMetaClient.builder().setConf(hadoopConf()).setBasePath(tableBasePath).build();
    HoodieTimeline timeline = meta.getActiveTimeline().getCommitsTimeline().filterCompletedInstants();
    String lastSuccessfulCommit = timeline.lastInstant().get().getTimestamp();
    // Condition for parallel ingestion job
    Function<Boolean, Boolean> conditionForRegularIngestion = (r) -> {
        if (tableType.equals(HoodieTableType.MERGE_ON_READ)) {
            TestHoodieDeltaStreamer.TestHelpers.assertAtleastNDeltaCommitsAfterCommit(3, lastSuccessfulCommit, tableBasePath, fs());
        } else {
            TestHoodieDeltaStreamer.TestHelpers.assertAtleastNCompactionCommitsAfterCommit(3, lastSuccessfulCommit, tableBasePath, fs());
        }
        TestHoodieDeltaStreamer.TestHelpers.assertRecordCount(totalRecords, tableBasePath + "/*/*.parquet", sqlContext());
        TestHoodieDeltaStreamer.TestHelpers.assertDistanceCount(totalRecords, tableBasePath + "/*/*.parquet", sqlContext());
        return true;
    };
    AtomicBoolean continuousFailed = new AtomicBoolean(false);
    AtomicBoolean backfillFailed = new AtomicBoolean(false);
    try {
        Future regularIngestionJobFuture = service.submit(() -> {
            try {
                deltaStreamerTestRunner(ingestionJob, cfgIngestionJob, conditionForRegularIngestion, jobId);
            } catch (Throwable ex) {
                continuousFailed.set(true);
                LOG.error("Continuous job failed " + ex.getMessage());
                throw new RuntimeException(ex);
            }
        });
        Future backfillJobFuture = service.submit(() -> {
            try {
                // Trigger the backfill only after at least one requested entry has been added to the timeline by the continuous job. Otherwise, there is a chance
                // that the backfill completes even before the continuous job starts.
                awaitCondition(new GetCommitsAfterInstant(tableBasePath, lastSuccessfulCommit));
                backfillJob.sync();
            } catch (Throwable ex) {
                LOG.error("Backfilling job failed " + ex.getMessage());
                backfillFailed.set(true);
                throw new RuntimeException(ex);
            }
        });
        backfillJobFuture.get();
        regularIngestionJobFuture.get();
        if (expectConflict) {
            Assertions.fail("Failed to handle concurrent writes");
        }
    } catch (Exception e) {
        /*
         * Need to perform getMessage().contains since the exception coming
         * from {@link org.apache.hudi.utilities.deltastreamer.HoodieDeltaStreamer.DeltaSyncService} gets wrapped many times into RuntimeExceptions.
         */
        if (expectConflict && e.getCause().getMessage().contains(ConcurrentModificationException.class.getName())) {
            // expected ConcurrentModificationException since ingestion & backfill will have overlapping writes
            if (backfillFailed.get()) {
                // if backfill job failed, shutdown the continuous job.
                LOG.warn("Calling shutdown on ingestion job since the backfill job has failed for " + jobId);
                ingestionJob.shutdownGracefully();
            }
        } else {
            LOG.error("Conflict happened, but not expected " + e.getCause().getMessage());
            throw e;
        }
    }
}
Also used : SourceConfigs(org.apache.hudi.utilities.testutils.sources.config.SourceConfigs) BULK_INSERT_SORT_MODE(org.apache.hudi.config.HoodieWriteConfig.BULK_INSERT_SORT_MODE) FileSystem(org.apache.hadoop.fs.FileSystem) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) CHECKPOINT_KEY(org.apache.hudi.utilities.deltastreamer.HoodieDeltaStreamer.CHECKPOINT_KEY) HoodieDeltaStreamer(org.apache.hudi.utilities.deltastreamer.HoodieDeltaStreamer) EnumSource(org.junit.jupiter.params.provider.EnumSource) Disabled(org.junit.jupiter.api.Disabled) Function(java.util.function.Function) Logger(org.apache.log4j.Logger) INSERT_PARALLELISM_VALUE(org.apache.hudi.config.HoodieWriteConfig.INSERT_PARALLELISM_VALUE) HoodieTableType(org.apache.hudi.common.model.HoodieTableType) Future(java.util.concurrent.Future) HoodieTableMetaClient(org.apache.hudi.common.table.HoodieTableMetaClient) Tag(org.junit.jupiter.api.Tag) URI(java.net.URI) HoodieTimeline(org.apache.hudi.common.table.timeline.HoodieTimeline) ExecutorService(java.util.concurrent.ExecutorService) BULKINSERT_PARALLELISM_VALUE(org.apache.hudi.config.HoodieWriteConfig.BULKINSERT_PARALLELISM_VALUE) HoodieDeltaStreamerTestBase.prepareInitialConfigs(org.apache.hudi.utilities.functional.HoodieDeltaStreamerTestBase.prepareInitialConfigs) HoodieDeltaStreamerTestBase.defaultSchemaProviderClassName(org.apache.hudi.utilities.functional.HoodieDeltaStreamerTestBase.defaultSchemaProviderClassName) TypedProperties(org.apache.hudi.common.config.TypedProperties) LockConfiguration(org.apache.hudi.common.config.LockConfiguration) HoodieDeltaStreamerTestBase.addCommitToTimeline(org.apache.hudi.utilities.functional.HoodieDeltaStreamerTestBase.addCommitToTimeline) HoodieCommitMetadata(org.apache.hudi.common.model.HoodieCommitMetadata) UtilitiesTestBase(org.apache.hudi.utilities.testutils.UtilitiesTestBase) IOException(java.io.IOException) BulkInsertSortMode(org.apache.hudi.execution.bulkinsert.BulkInsertSortMode) Executors(java.util.concurrent.Executors) HoodieCompactionConfig(org.apache.hudi.config.HoodieCompactionConfig) UPSERT_PARALLELISM_VALUE(org.apache.hudi.config.HoodieWriteConfig.UPSERT_PARALLELISM_VALUE) ParameterizedTest(org.junit.jupiter.params.ParameterizedTest) List(java.util.List) Paths(java.nio.file.Paths) SparkClientFunctionalTestHarness(org.apache.hudi.testutils.SparkClientFunctionalTestHarness) Assertions(org.junit.jupiter.api.Assertions) TestHoodieDeltaStreamer.deltaStreamerTestRunner(org.apache.hudi.utilities.functional.TestHoodieDeltaStreamer.deltaStreamerTestRunner) WriteOperationType(org.apache.hudi.common.model.WriteOperationType) FINALIZE_WRITE_PARALLELISM_VALUE(org.apache.hudi.config.HoodieWriteConfig.FINALIZE_WRITE_PARALLELISM_VALUE) LogManager(org.apache.log4j.LogManager) TestDataSource(org.apache.hudi.utilities.sources.TestDataSource) Collections(java.util.Collections) ConcurrentModificationException(java.util.ConcurrentModificationException) PROPS_FILENAME_TEST_MULTI_WRITER(org.apache.hudi.utilities.functional.HoodieDeltaStreamerTestBase.PROPS_FILENAME_TEST_MULTI_WRITER) ConcurrentModificationException(java.util.ConcurrentModificationException) HoodieTimeline(org.apache.hudi.common.table.timeline.HoodieTimeline) IOException(java.io.IOException) ConcurrentModificationException(java.util.ConcurrentModificationException) HoodieTableMetaClient(org.apache.hudi.common.table.HoodieTableMetaClient) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) ExecutorService(java.util.concurrent.ExecutorService) Future(java.util.concurrent.Future) 
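Stripped of the Hudi specifics, the test orchestrates two concurrent jobs on an ExecutorService and then decides whether a failure was the expected write conflict. Below is a minimal sketch of that orchestration, with the delta streamer jobs replaced by hypothetical Runnables.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.atomic.AtomicBoolean;

public class ParallelJobsSketch {

    static void runInParallel(Runnable ingestionJob, Runnable backfillJob, boolean expectConflict) throws Exception {
        ExecutorService service = Executors.newFixedThreadPool(2);
        AtomicBoolean backfillFailed = new AtomicBoolean(false);
        try {
            Future<?> ingestionFuture = service.submit(ingestionJob);
            Future<?> backfillFuture = service.submit(() -> {
                try {
                    backfillJob.run();
                } catch (RuntimeException ex) {
                    backfillFailed.set(true);
                    throw ex;
                }
            });
            backfillFuture.get();
            ingestionFuture.get();
            if (expectConflict) {
                throw new AssertionError("Expected the jobs to conflict, but both completed");
            }
        } catch (java.util.concurrent.ExecutionException e) {
            if (!expectConflict) {
                throw e; // unexpected failure
            }
            // Expected path: the concurrent writers conflicted; inspect e.getCause() as the test above does.
        } finally {
            service.shutdownNow();
        }
    }
}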

Aggregations

HoodieTableType (org.apache.hudi.common.model.HoodieTableType): 15 usages
ParameterizedTest (org.junit.jupiter.params.ParameterizedTest): 11 usages
Test (org.junit.jupiter.api.Test): 6 usages
WriteStatus (org.apache.hudi.client.WriteStatus): 3 usages
HoodieRecord (org.apache.hudi.common.model.HoodieRecord): 3 usages
HoodieTableMetaClient (org.apache.hudi.common.table.HoodieTableMetaClient): 3 usages
HoodieTestDataGenerator (org.apache.hudi.common.testutils.HoodieTestDataGenerator): 3 usages
HoodieTable (org.apache.hudi.table.HoodieTable): 3 usages
IOException (java.io.IOException): 2 usages
List (java.util.List): 2 usages
Schema (org.apache.avro.Schema): 2 usages
ResolvedSchema (org.apache.flink.table.catalog.ResolvedSchema): 2 usages
DataType (org.apache.flink.table.types.DataType): 2 usages
RowType (org.apache.flink.table.types.logical.RowType): 2 usages
Path (org.apache.hadoop.fs.Path): 2 usages
SparkRDDWriteClient (org.apache.hudi.client.SparkRDDWriteClient): 2 usages
HoodieCommitMetadata (org.apache.hudi.common.model.HoodieCommitMetadata): 2 usages
HoodieFileGroup (org.apache.hudi.common.model.HoodieFileGroup): 2 usages
SyncableFileSystemView (org.apache.hudi.common.table.view.SyncableFileSystemView): 2 usages
HoodieTableMetadata (org.apache.hudi.metadata.HoodieTableMetadata): 2 usages