
Example 1 with DataWriter

Use of org.apache.spark.sql.connector.write.DataWriter in project hudi by apache.

From class TestHoodieDataSourceInternalBatchWrite, method testDataSourceWriterInternal. The test writes several batches of random rows through Hudi's internal batch write support, commits them, reads the table back, and verifies the row contents, the write statuses, and the extra commit metadata.
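For context, DataWriter is the per-task write handle in Spark's DataSource V2 API: each task obtains its own writer from a DataWriterFactory, writes rows, and produces a WriterCommitMessage that the driver later passes to BatchWrite#commit. A minimal sketch of that protocol, which the test below drives by hand; writerFactory, rows, partitionId, and taskId are hypothetical placeholders, not names from the Hudi test:

// Hedged sketch of the DataSource V2 per-task write protocol.
static WriterCommitMessage writeTask(DataWriterFactory writerFactory, Iterator<InternalRow> rows, int partitionId, long taskId) throws IOException {
    DataWriter<InternalRow> writer = writerFactory.createWriter(partitionId, taskId);
    try {
        while (rows.hasNext()) {
            // each record is buffered or flushed by the writer implementation
            writer.write(rows.next());
        }
        // task-level commit; the driver collects one message per task
        return writer.commit();
    } catch (IOException e) {
        // discard any partially written output for this task
        writer.abort();
        throw e;
    } finally {
        writer.close();
    }
}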

private void testDataSourceWriterInternal(Map<String, String> extraMetadata, Map<String, String> expectedExtraMetadata, boolean populateMetaFields) throws Exception {
    // init config and table
    HoodieWriteConfig cfg = getWriteConfig(populateMetaFields);
    HoodieTable table = HoodieSparkTable.create(cfg, context, metaClient);
    String instantTime = "001";
    // init writer
    HoodieDataSourceInternalBatchWrite dataSourceInternalBatchWrite = new HoodieDataSourceInternalBatchWrite(instantTime, cfg, STRUCT_TYPE, sqlContext.sparkSession(), hadoopConf, extraMetadata, populateMetaFields, false);
    DataWriter<InternalRow> writer = dataSourceInternalBatchWrite.createBatchWriterFactory(null).createWriter(0, RANDOM.nextLong());
    // build absolute glob paths used later to read the written partitions back
    String[] partitionPaths = HoodieTestDataGenerator.DEFAULT_PARTITION_PATHS;
    List<String> partitionPathsAbs = new ArrayList<>();
    for (String partitionPath : partitionPaths) {
        partitionPathsAbs.add(basePath + "/" + partitionPath + "/*");
    }
    // write `batches` batches of `size` random rows each, cycling through the default partitions
    int size = 10 + RANDOM.nextInt(1000);
    int batches = 5;
    Dataset<Row> totalInputRows = null;
    for (int j = 0; j < batches; j++) {
        String partitionPath = HoodieTestDataGenerator.DEFAULT_PARTITION_PATHS[j % 3];
        Dataset<Row> inputRows = getRandomRows(sqlContext, size, partitionPath, false);
        writeRows(inputRows, writer);
        if (totalInputRows == null) {
            totalInputRows = inputRows;
        } else {
            totalInputRows = totalInputRows.union(inputRows);
        }
    }
    // task-level commit, then driver-level commit to finalize the instant on the timeline
    HoodieWriterCommitMessage commitMetadata = (HoodieWriterCommitMessage) writer.commit();
    List<HoodieWriterCommitMessage> commitMessages = new ArrayList<>();
    commitMessages.add(commitMetadata);
    dataSourceInternalBatchWrite.commit(commitMessages.toArray(new HoodieWriterCommitMessage[0]));
    metaClient.reloadActiveTimeline();
    Dataset<Row> result = HoodieClientTestUtils.read(jsc, basePath, sqlContext, metaClient.getFs(), partitionPathsAbs.toArray(new String[0]));
    // verify output
    assertOutput(totalInputRows, result, instantTime, Option.empty(), populateMetaFields);
    assertWriteStatuses(commitMessages.get(0).getWriteStatuses(), batches, size, Option.empty(), Option.empty());
    // verify extra metadata
    Option<HoodieCommitMetadata> commitMetadataOption = HoodieClientTestUtils.getCommitMetadataForLatestInstant(metaClient);
    assertTrue(commitMetadataOption.isPresent());
    Map<String, String> actualExtraMetadata = new HashMap<>();
    // exclude the schema entry Hudi adds internally, then compare the rest
    commitMetadataOption.get().getExtraMetadata().entrySet().stream()
        .filter(entry -> !entry.getKey().equals(HoodieCommitMetadata.SCHEMA_KEY))
        .forEach(entry -> actualExtraMetadata.put(entry.getKey(), entry.getValue()));
    assertEquals(expectedExtraMetadata, actualExtraMetadata);
}
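The writeRows helper is inherited from HoodieBulkInsertInternalWriterTestBase (listed below). A plausible minimal sketch, assuming toInternalRows(Dataset<Row>, ExpressionEncoder) from SparkDatasetTestUtils converts the Dataset into a List<InternalRow> using the shared ENCODER:

// Sketch of the base-class helper: convert the Dataset to Catalyst rows
// and feed them to the task-level writer one by one.
private void writeRows(Dataset<Row> inputRows, DataWriter<InternalRow> writer) throws Exception {
    List<InternalRow> internalRows = toInternalRows(inputRows, ENCODER);
    for (InternalRow internalRow : internalRows) {
        writer.write(internalRow);
    }
}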
Also used:

HoodieTable (org.apache.hudi.table.HoodieTable)
InternalRow (org.apache.spark.sql.catalyst.InternalRow)
Arrays (java.util.Arrays)
Dataset (org.apache.spark.sql.Dataset)
HoodieTestDataGenerator (org.apache.hudi.common.testutils.HoodieTestDataGenerator)
Option (org.apache.hudi.common.util.Option)
HashMap (java.util.HashMap)
DataWriter (org.apache.spark.sql.connector.write.DataWriter)
Disabled (org.junit.jupiter.api.Disabled)
DataSourceWriteOptions (org.apache.hudi.DataSourceWriteOptions)
ArrayList (java.util.ArrayList)
HoodieSparkTable (org.apache.hudi.table.HoodieSparkTable)
Map (java.util.Map)
Assertions.assertEquals (org.junit.jupiter.api.Assertions.assertEquals)
MethodSource (org.junit.jupiter.params.provider.MethodSource)
ENCODER (org.apache.hudi.testutils.SparkDatasetTestUtils.ENCODER)
HoodieWriteConfig (org.apache.hudi.config.HoodieWriteConfig)
HoodieCommitMetadata (org.apache.hudi.common.model.HoodieCommitMetadata)
Row (org.apache.spark.sql.Row)
STRUCT_TYPE (org.apache.hudi.testutils.SparkDatasetTestUtils.STRUCT_TYPE)
Arguments (org.junit.jupiter.params.provider.Arguments)
Test (org.junit.jupiter.api.Test)
ParameterizedTest (org.junit.jupiter.params.ParameterizedTest)
List (java.util.List)
Stream (java.util.stream.Stream)
SparkDatasetTestUtils.toInternalRows (org.apache.hudi.testutils.SparkDatasetTestUtils.toInternalRows)
HoodieBulkInsertInternalWriterTestBase (org.apache.hudi.internal.HoodieBulkInsertInternalWriterTestBase)
Assertions.assertTrue (org.junit.jupiter.api.Assertions.assertTrue)
SparkDatasetTestUtils.getRandomRows (org.apache.hudi.testutils.SparkDatasetTestUtils.getRandomRows)
HoodieClientTestUtils (org.apache.hudi.testutils.HoodieClientTestUtils)
Collections (java.util.Collections)

Aggregations

ArrayList (java.util.ArrayList): 1
Arrays (java.util.Arrays): 1
Collections (java.util.Collections): 1
HashMap (java.util.HashMap): 1
List (java.util.List): 1
Map (java.util.Map): 1
Stream (java.util.stream.Stream): 1
DataSourceWriteOptions (org.apache.hudi.DataSourceWriteOptions): 1
HoodieCommitMetadata (org.apache.hudi.common.model.HoodieCommitMetadata): 1
HoodieTestDataGenerator (org.apache.hudi.common.testutils.HoodieTestDataGenerator): 1
Option (org.apache.hudi.common.util.Option): 1
HoodieWriteConfig (org.apache.hudi.config.HoodieWriteConfig): 1
HoodieBulkInsertInternalWriterTestBase (org.apache.hudi.internal.HoodieBulkInsertInternalWriterTestBase): 1
HoodieSparkTable (org.apache.hudi.table.HoodieSparkTable): 1
HoodieTable (org.apache.hudi.table.HoodieTable): 1
HoodieClientTestUtils (org.apache.hudi.testutils.HoodieClientTestUtils): 1
ENCODER (org.apache.hudi.testutils.SparkDatasetTestUtils.ENCODER): 1
STRUCT_TYPE (org.apache.hudi.testutils.SparkDatasetTestUtils.STRUCT_TYPE): 1
SparkDatasetTestUtils.getRandomRows (org.apache.hudi.testutils.SparkDatasetTestUtils.getRandomRows): 1
SparkDatasetTestUtils.toInternalRows (org.apache.hudi.testutils.SparkDatasetTestUtils.toInternalRows): 1