
Example 1 with HoodieFileStatus

Use of org.apache.hudi.avro.model.HoodieFileStatus in project hudi by apache.

From class TestCleaner, method testKeepLatestFileVersions.

/**
 * Test Hudi COW Table Cleaner - Keep the latest file versions policy.
 */
@ParameterizedTest
@ValueSource(booleans = { false, true })
public void testKeepLatestFileVersions(Boolean enableBootstrapSourceClean) throws Exception {
    HoodieWriteConfig config = HoodieWriteConfig.newBuilder()
        .withPath(basePath)
        .withMetadataConfig(HoodieMetadataConfig.newBuilder().withAssumeDatePartitioning(true).build())
        .withCompactionConfig(HoodieCompactionConfig.newBuilder()
            .withCleanBootstrapBaseFileEnabled(enableBootstrapSourceClean)
            .withCleanerPolicy(HoodieCleaningPolicy.KEEP_LATEST_FILE_VERSIONS)
            .retainFileVersions(1)
            .build())
        .build();
    HoodieTableMetadataWriter metadataWriter = SparkHoodieBackedTableMetadataWriter.create(hadoopConf, config, context);
    HoodieTestTable testTable = HoodieMetadataTestTable.of(metaClient, metadataWriter);
    final String p0 = "2020/01/01";
    final String p1 = "2020/01/02";
    final Map<String, List<BootstrapFileMapping>> bootstrapMapping = enableBootstrapSourceClean ? generateBootstrapIndexAndSourceData(p0, p1) : null;
    // make 1 commit, with 1 file per partition
    final String file1P0C0 = enableBootstrapSourceClean ? bootstrapMapping.get(p0).get(0).getFileId() : UUID.randomUUID().toString();
    final String file1P1C0 = enableBootstrapSourceClean ? bootstrapMapping.get(p1).get(0).getFileId() : UUID.randomUUID().toString();
    Map<String, List<Pair<String, Integer>>> c1PartitionToFilesNameLengthMap = new HashMap<>();
    c1PartitionToFilesNameLengthMap.put(p0, Collections.singletonList(Pair.of(file1P0C0, 100)));
    c1PartitionToFilesNameLengthMap.put(p1, Collections.singletonList(Pair.of(file1P1C0, 200)));
    testTable.doWriteOperation("00000000000001", WriteOperationType.INSERT, Arrays.asList(p0, p1), c1PartitionToFilesNameLengthMap, false, false);
    List<HoodieCleanStat> hoodieCleanStatsOne = runCleaner(config);
    assertEquals(0, hoodieCleanStatsOne.size(), "Must not clean any files");
    assertTrue(testTable.baseFileExists(p0, "00000000000001", file1P0C0));
    assertTrue(testTable.baseFileExists(p1, "00000000000001", file1P1C0));
    // make next commit, with 1 insert & 1 update per partition
    final String file2P0C1 = UUID.randomUUID().toString();
    final String file2P1C1 = UUID.randomUUID().toString();
    Map<String, List<Pair<String, Integer>>> c2PartitionToFilesNameLengthMap = new HashMap<>();
    c2PartitionToFilesNameLengthMap.put(p0, Arrays.asList(Pair.of(file1P0C0, 101), Pair.of(file2P0C1, 100)));
    c2PartitionToFilesNameLengthMap.put(p1, Arrays.asList(Pair.of(file1P1C0, 201), Pair.of(file2P1C1, 200)));
    testTable.doWriteOperation("00000000000002", WriteOperationType.UPSERT, Collections.emptyList(), c2PartitionToFilesNameLengthMap, false, false);
    // when enableBootstrapSourceClean is set, the bootstrap base file is deleted in the same clean
    List<HoodieCleanStat> hoodieCleanStatsTwo = runCleaner(config, 1);
    HoodieCleanStat cleanStat = getCleanStat(hoodieCleanStatsTwo, p0);
    assertEquals(enableBootstrapSourceClean ? 2 : 1,
        cleanStat.getSuccessDeleteFiles().size()
            + (cleanStat.getSuccessDeleteBootstrapBaseFiles() == null
                ? 0 : cleanStat.getSuccessDeleteBootstrapBaseFiles().size()),
        "Must clean at least 1 file");
    if (enableBootstrapSourceClean) {
        HoodieFileStatus fstatus = bootstrapMapping.get(p0).get(0).getBootstrapFileStatus();
        // This ensures full path is recorded in metadata.
        assertTrue(cleanStat.getSuccessDeleteBootstrapBaseFiles().contains(fstatus.getPath().getUri()),
            "Successful delete files were " + cleanStat.getSuccessDeleteBootstrapBaseFiles()
                + " but did not contain " + fstatus.getPath().getUri());
        assertFalse(Files.exists(Paths.get(bootstrapMapping.get(p0).get(0).getBootstrapFileStatus().getPath().getUri())));
    }
    cleanStat = getCleanStat(hoodieCleanStatsTwo, p1);
    assertTrue(testTable.baseFileExists(p0, "00000000000002", file2P0C1));
    assertTrue(testTable.baseFileExists(p1, "00000000000002", file2P1C1));
    assertFalse(testTable.baseFileExists(p0, "00000000000001", file1P0C0));
    assertFalse(testTable.baseFileExists(p1, "00000000000001", file1P1C0));
    assertEquals(enableBootstrapSourceClean ? 2 : 1,
        cleanStat.getSuccessDeleteFiles().size()
            + (cleanStat.getSuccessDeleteBootstrapBaseFiles() == null
                ? 0 : cleanStat.getSuccessDeleteBootstrapBaseFiles().size()),
        "Must clean at least 1 file");
    if (enableBootstrapSourceClean) {
        HoodieFileStatus fstatus = bootstrapMapping.get(p1).get(0).getBootstrapFileStatus();
        // This ensures full path is recorded in metadata.
        assertTrue(cleanStat.getSuccessDeleteBootstrapBaseFiles().contains(fstatus.getPath().getUri()),
            "Successful delete files were " + cleanStat.getSuccessDeleteBootstrapBaseFiles()
                + " but did not contain " + fstatus.getPath().getUri());
        assertFalse(Files.exists(Paths.get(bootstrapMapping.get(p1).get(0).getBootstrapFileStatus().getPath().getUri())));
    }
    // make next commit, with 2 updates to existing files, and 1 insert
    final String file3P0C2 = UUID.randomUUID().toString();
    Map<String, List<Pair<String, Integer>>> c3PartitionToFilesNameLengthMap = new HashMap<>();
    c3PartitionToFilesNameLengthMap.put(p0, Arrays.asList(Pair.of(file1P0C0, 102), Pair.of(file2P0C1, 101), Pair.of(file3P0C2, 100)));
    testTable.doWriteOperation("00000000000003", WriteOperationType.UPSERT, Collections.emptyList(), c3PartitionToFilesNameLengthMap, false, false);
    List<HoodieCleanStat> hoodieCleanStatsThree = runCleaner(config, 3);
    assertEquals(2, getCleanStat(hoodieCleanStatsThree, p0).getSuccessDeleteFiles().size(), "Must clean two files");
    assertFalse(testTable.baseFileExists(p0, "00000000000002", file1P0C0));
    assertFalse(testTable.baseFileExists(p0, "00000000000002", file2P0C1));
    assertTrue(testTable.baseFileExists(p0, "00000000000003", file3P0C2));
    // No cleaning of a partially written file that has no commit.
    testTable.forCommit("00000000000004").withBaseFilesInPartition(p0, file3P0C2);
    List<HoodieCleanStat> hoodieCleanStatsFour = runCleaner(config);
    assertEquals(0, hoodieCleanStatsFour.size(), "Must not clean any files");
    assertTrue(testTable.baseFileExists(p0, "00000000000003", file3P0C2));
}
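
For reference, a minimal sketch (not part of the test) of the cleaner configuration this test exercises, built from the same builder calls shown above; the table path and the retained-version count are illustrative placeholders.

import org.apache.hudi.common.model.HoodieCleaningPolicy;
import org.apache.hudi.config.HoodieCompactionConfig;
import org.apache.hudi.config.HoodieWriteConfig;

// Keep only the latest N file versions per file group; older base files become
// candidates for deletion on the next clean.
HoodieWriteConfig cleanerConfig = HoodieWriteConfig.newBuilder()
    .withPath("/tmp/hudi_table") // placeholder path
    .withCompactionConfig(HoodieCompactionConfig.newBuilder()
        .withCleanerPolicy(HoodieCleaningPolicy.KEEP_LATEST_FILE_VERSIONS)
        .retainFileVersions(2) // placeholder retention count
        .build())
    .build();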

Example 2 with HoodieFileStatus

Use of org.apache.hudi.avro.model.HoodieFileStatus in project hudi by apache.

From class TestBootstrapUtils, method testAllLeafFoldersWithFiles.

@Test
public void testAllLeafFoldersWithFiles() throws IOException {
    // All directories including marker dirs.
    List<String> folders = Arrays.asList("2016/04/15", "2016/05/16", "2016/05/17");
    folders.forEach(f -> {
        try {
            metaClient.getFs().mkdirs(new Path(new Path(basePath), f));
        } catch (IOException e) {
            throw new HoodieException(e);
        }
    });
    // Files inside partitions and marker directories
    List<String> files = Stream.of(
            "2016/04/15/1_1-0-1_20190528120000", "2016/04/15/2_1-0-1_20190528120000",
            "2016/05/16/3_1-0-1_20190528120000", "2016/05/16/4_1-0-1_20190528120000",
            "2016/04/17/5_1-0-1_20190528120000", "2016/04/17/6_1-0-1_20190528120000")
        .map(file -> file + metaClient.getTableConfig().getBaseFileFormat().getFileExtension())
        .collect(Collectors.toList());
    files.forEach(f -> {
        try {
            metaClient.getFs().create(new Path(new Path(basePath), f));
        } catch (IOException e) {
            throw new HoodieException(e);
        }
    });
    List<Pair<String, List<HoodieFileStatus>>> collected =
        BootstrapUtils.getAllLeafFoldersWithFiles(metaClient, metaClient.getFs(), basePath, context);
    assertEquals(3, collected.size());
    collected.stream().forEach(k -> {
        assertEquals(2, k.getRight().size());
    });
    // Simulate reading from un-partitioned dataset
    collected = BootstrapUtils.getAllLeafFoldersWithFiles(metaClient, metaClient.getFs(), basePath + "/" + folders.get(0), context);
    assertEquals(1, collected.size());
    collected.stream().forEach(k -> {
        assertEquals(2, k.getRight().size());
    });
}
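
A hedged usage sketch of the API this test exercises: the returned HoodieFileStatus entries can be resolved back to Hadoop Paths with FileStatusUtils.toPath, the same conversion the later examples use. It assumes metaClient, basePath and context are set up as in the test, inside a method that declares throws IOException.

import java.util.List;
import org.apache.hadoop.fs.Path;
import org.apache.hudi.avro.model.HoodieFileStatus;
import org.apache.hudi.common.bootstrap.FileStatusUtils;
import org.apache.hudi.common.util.collection.Pair;
import org.apache.hudi.table.action.bootstrap.BootstrapUtils;

// List every leaf partition folder and print the full path of each file in it.
List<Pair<String, List<HoodieFileStatus>>> leaves =
    BootstrapUtils.getAllLeafFoldersWithFiles(metaClient, metaClient.getFs(), basePath, context);
for (Pair<String, List<HoodieFileStatus>> leaf : leaves) {
    String partition = leaf.getKey();
    for (HoodieFileStatus status : leaf.getValue()) {
        Path fullPath = FileStatusUtils.toPath(status.getPath());
        System.out.println(partition + " -> " + fullPath);
    }
}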

Example 3 with HoodieFileStatus

Use of org.apache.hudi.avro.model.HoodieFileStatus in project hudi by apache.

From class TestOrcBootstrap, method generateInputBatch.

private static JavaRDD<HoodieRecord> generateInputBatch(JavaSparkContext jsc, List<Pair<String, List<HoodieFileStatus>>> partitionPaths, Schema writerSchema) {
    List<Pair<String, Path>> fullFilePathsWithPartition = partitionPaths.stream()
        .flatMap(p -> p.getValue().stream()
            .map(x -> Pair.of(p.getKey(), FileStatusUtils.toPath(x.getPath()))))
        .collect(Collectors.toList());
    return jsc.parallelize(fullFilePathsWithPartition.stream().flatMap(p -> {
        try {
            Configuration conf = jsc.hadoopConfiguration();
            AvroReadSupport.setAvroReadSchema(conf, writerSchema);
            Reader orcReader = OrcFile.createReader(p.getValue(), new OrcFile.ReaderOptions(jsc.hadoopConfiguration()));
            RecordReader recordReader = orcReader.rows();
            TypeDescription orcSchema = orcReader.getSchema();
            Schema avroSchema = AvroOrcUtils.createAvroSchemaWithDefaultValue(orcSchema, "test_orc_record", null, true);
            Iterator<GenericRecord> recIterator = new OrcReaderIterator(recordReader, avroSchema, orcSchema);
            return StreamSupport.stream(Spliterators.spliteratorUnknownSize(recIterator, 0), false).map(gr -> {
                try {
                    String key = gr.get("_row_key").toString();
                    String pPath = p.getKey();
                    return new HoodieAvroRecord<>(new HoodieKey(key, pPath), new RawTripTestPayload(gr.toString(), key, pPath, HoodieTestDataGenerator.TRIP_EXAMPLE_SCHEMA));
                } catch (IOException e) {
                    throw new HoodieIOException(e.getMessage(), e);
                }
            });
        } catch (IOException ioe) {
            throw new HoodieIOException(ioe.getMessage(), ioe);
        }
    }).collect(Collectors.toList()));
}
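
One detail worth isolating: the method bridges a plain Iterator (OrcReaderIterator) into the Stream pipeline via Spliterators. A self-contained sketch of that standard java.util pattern, with an illustrative string list standing in for the ORC records:

import java.util.Arrays;
import java.util.Iterator;
import java.util.List;
import java.util.Spliterators;
import java.util.stream.Collectors;
import java.util.stream.StreamSupport;

// Turn any Iterator into a sequential Stream, as generateInputBatch does with
// its OrcReaderIterator.
Iterator<String> it = Arrays.asList("a", "b", "c").iterator();
List<String> collected = StreamSupport.stream(
        Spliterators.spliteratorUnknownSize(it, 0), false)
    .collect(Collectors.toList());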

Example 4 with HoodieFileStatus

Use of org.apache.hudi.avro.model.HoodieFileStatus in project hudi by apache.

From class SparkFullBootstrapDataProviderBase, method generateInputRecords.

@Override
public JavaRDD<HoodieRecord> generateInputRecords(String tableName, String sourceBasePath, List<Pair<String, List<HoodieFileStatus>>> partitionPathsWithFiles) {
    String[] filePaths = partitionPathsWithFiles.stream()
        .map(Pair::getValue)
        .flatMap(f -> f.stream().map(fs -> FileStatusUtils.toPath(fs.getPath()).toString()))
        .toArray(String[]::new);
    Dataset inputDataset = sparkSession.read().format(getFormat()).load(filePaths);
    try {
        KeyGenerator keyGenerator = HoodieSparkKeyGeneratorFactory.createKeyGenerator(props);
        String structName = tableName + "_record";
        String namespace = "hoodie." + tableName;
        RDD<GenericRecord> genericRecords = HoodieSparkUtils.createRdd(inputDataset, structName, namespace, false, Option.empty());
        return genericRecords.toJavaRDD().map(gr -> {
            String orderingVal = HoodieAvroUtils.getNestedFieldValAsString(gr,
                props.getString("hoodie.datasource.write.precombine.field"), false,
                props.getBoolean(KeyGeneratorOptions.KEYGENERATOR_CONSISTENT_LOGICAL_TIMESTAMP_ENABLED.key(),
                    Boolean.parseBoolean(KeyGeneratorOptions.KEYGENERATOR_CONSISTENT_LOGICAL_TIMESTAMP_ENABLED.defaultValue())));
            try {
                return DataSourceUtils.createHoodieRecord(gr, orderingVal, keyGenerator.getKey(gr), props.getString("hoodie.datasource.write.payload.class"));
            } catch (IOException ioe) {
                throw new HoodieIOException(ioe.getMessage(), ioe);
            }
        });
    } catch (IOException ioe) {
        throw new HoodieIOException(ioe.getMessage(), ioe);
    }
}
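
The two property keys read above are consulted verbatim from props; a hedged sketch of how a caller might populate them (the "ts" field and the payload class value are placeholders, not mandated by this method):

import org.apache.hudi.common.config.TypedProperties;

// TypedProperties extends java.util.Properties; these are the keys
// generateInputRecords reads.
TypedProperties props = new TypedProperties();
props.setProperty("hoodie.datasource.write.precombine.field", "ts");
props.setProperty("hoodie.datasource.write.payload.class",
    "org.apache.hudi.common.model.OverwriteWithLatestAvroPayload");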

Example 5 with HoodieFileStatus

Use of org.apache.hudi.avro.model.HoodieFileStatus in project hudi by apache.

From class BootstrapUtils, method getAllLeafFoldersWithFiles.

/**
 * Returns leaf folders with files under a path.
 * @param metaClient Hoodie table meta client
 * @param fs file system
 * @param basePathStr base path to scan under
 * @param context Hoodie engine context
 * @return list of partition paths with the files under them.
 * @throws IOException when listing the file system fails
 */
public static List<Pair<String, List<HoodieFileStatus>>> getAllLeafFoldersWithFiles(HoodieTableMetaClient metaClient, FileSystem fs, String basePathStr, HoodieEngineContext context) throws IOException {
    final Path basePath = new Path(basePathStr);
    final String baseFileExtension = metaClient.getTableConfig().getBaseFileFormat().getFileExtension();
    final Map<Integer, List<String>> levelToPartitions = new HashMap<>();
    final Map<String, List<HoodieFileStatus>> partitionToFiles = new HashMap<>();
    PathFilter filePathFilter = getFilePathFilter(baseFileExtension);
    PathFilter metaPathFilter = getExcludeMetaPathFilter();
    FileStatus[] topLevelStatuses = fs.listStatus(basePath);
    List<String> subDirectories = new ArrayList<>();
    List<Pair<HoodieFileStatus, Pair<Integer, String>>> result = new ArrayList<>();
    for (FileStatus topLevelStatus : topLevelStatuses) {
        if (topLevelStatus.isFile() && filePathFilter.accept(topLevelStatus.getPath())) {
            String relativePath = FSUtils.getRelativePartitionPath(basePath, topLevelStatus.getPath().getParent());
            Integer level = (int) relativePath.chars().filter(ch -> ch == '/').count();
            HoodieFileStatus hoodieFileStatus = FileStatusUtils.fromFileStatus(topLevelStatus);
            result.add(Pair.of(hoodieFileStatus, Pair.of(level, relativePath)));
        } else if (topLevelStatus.isDirectory() && metaPathFilter.accept(topLevelStatus.getPath())) {
            subDirectories.add(topLevelStatus.getPath().toString());
        }
    }
    if (subDirectories.size() > 0) {
        result.addAll(context.flatMap(subDirectories, directory -> {
            PathFilter pathFilter = getFilePathFilter(baseFileExtension);
            Path path = new Path(directory);
            FileSystem fileSystem = path.getFileSystem(new Configuration());
            RemoteIterator<LocatedFileStatus> itr = fileSystem.listFiles(path, true);
            List<Pair<HoodieFileStatus, Pair<Integer, String>>> res = new ArrayList<>();
            while (itr.hasNext()) {
                FileStatus status = itr.next();
                if (pathFilter.accept(status.getPath())) {
                    String relativePath = FSUtils.getRelativePartitionPath(new Path(basePathStr), status.getPath().getParent());
                    Integer level = (int) relativePath.chars().filter(ch -> ch == '/').count();
                    HoodieFileStatus hoodieFileStatus = FileStatusUtils.fromFileStatus(status);
                    res.add(Pair.of(hoodieFileStatus, Pair.of(level, relativePath)));
                }
            }
            return res.stream();
        }, subDirectories.size()));
    }
    result.forEach(val -> {
        String relativePath = val.getRight().getRight();
        List<HoodieFileStatus> statusList = partitionToFiles.get(relativePath);
        if (null == statusList) {
            Integer level = val.getRight().getLeft();
            List<String> dirs = levelToPartitions.get(level);
            if (null == dirs) {
                dirs = new ArrayList<>();
                levelToPartitions.put(level, dirs);
            }
            dirs.add(relativePath);
            statusList = new ArrayList<>();
            partitionToFiles.put(relativePath, statusList);
        }
        statusList.add(val.getLeft());
    });
    OptionalInt maxLevelOpt = levelToPartitions.keySet().stream().mapToInt(x -> x).max();
    int maxLevel = maxLevelOpt.orElse(-1);
    return maxLevel >= 0 ? levelToPartitions.get(maxLevel).stream().map(d -> Pair.of(d, partitionToFiles.get(d))).collect(Collectors.toList()) : new ArrayList<>();
}
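
The level bookkeeping above boils down to counting '/' separators in the relative partition path and returning only folders at the deepest observed level; a tiny worked sketch:

// "2016/04/15" contains two separators, so it sits at level 2. With all
// partitions at level 2, only those folders are returned by the method above.
String relativePath = "2016/04/15";
int level = (int) relativePath.chars().filter(ch -> ch == '/').count();
// level == 2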

Aggregations

HoodieFileStatus (org.apache.hudi.avro.model.HoodieFileStatus): 13
List (java.util.List): 12
IOException (java.io.IOException): 11
Pair (org.apache.hudi.common.util.collection.Pair): 11
Collectors (java.util.stream.Collectors): 10
Map (java.util.Map): 9
Option (org.apache.hudi.common.util.Option): 8
HoodieWriteConfig (org.apache.hudi.config.HoodieWriteConfig): 8
HoodieIOException (org.apache.hudi.exception.HoodieIOException): 8
ArrayList (java.util.ArrayList): 7
FullRecordBootstrapDataProvider (org.apache.hudi.client.bootstrap.FullRecordBootstrapDataProvider): 7
HoodieSparkEngineContext (org.apache.hudi.client.common.HoodieSparkEngineContext): 7
TypedProperties (org.apache.hudi.common.config.TypedProperties): 7
FSUtils (org.apache.hudi.common.fs.FSUtils): 7
HoodieRecord (org.apache.hudi.common.model.HoodieRecord): 7
JavaRDD (org.apache.spark.api.java.JavaRDD): 7
Instant (java.time.Instant): 6
Arrays (java.util.Arrays): 6
Iterator (java.util.Iterator): 6
Path (org.apache.hadoop.fs.Path): 6