Example 6 with HoodieRollbackPlan

Use of org.apache.hudi.avro.model.HoodieRollbackPlan in the apache/hudi project.

From the class BaseHoodieWriteClient, the method getPendingRollbackInfos:

/**
 * Fetch a map of pending commits to be rolled back, keyed by the commit time of the instant to roll back.
 * @param metaClient instance of {@link HoodieTableMetaClient} to use.
 * @param ignoreCompactionAndClusteringInstants when true, skip rollback plans that target compaction or clustering instants.
 * @return map from the commit time of the instant to roll back to its pending rollback instant and {@link HoodieRollbackPlan}, wrapped in {@link HoodiePendingRollbackInfo}.
 */
protected Map<String, Option<HoodiePendingRollbackInfo>> getPendingRollbackInfos(HoodieTableMetaClient metaClient, boolean ignoreCompactionAndClusteringInstants) {
    List<HoodieInstant> instants = metaClient.getActiveTimeline().filterPendingRollbackTimeline().getInstants().collect(Collectors.toList());
    Map<String, Option<HoodiePendingRollbackInfo>> infoMap = new HashMap<>();
    for (HoodieInstant instant : instants) {
        try {
            HoodieRollbackPlan rollbackPlan = RollbackUtils.getRollbackPlan(metaClient, instant);
            String action = rollbackPlan.getInstantToRollback().getAction();
            if (ignoreCompactionAndClusteringInstants) {
                if (!HoodieTimeline.COMPACTION_ACTION.equals(action)) {
                    boolean isClustering = HoodieTimeline.REPLACE_COMMIT_ACTION.equals(action)
                        && ClusteringUtils.getClusteringPlan(metaClient,
                            new HoodieInstant(true, rollbackPlan.getInstantToRollback().getAction(),
                                rollbackPlan.getInstantToRollback().getCommitTime())).isPresent();
                    if (!isClustering) {
                        String instantToRollback = rollbackPlan.getInstantToRollback().getCommitTime();
                        infoMap.putIfAbsent(instantToRollback, Option.of(new HoodiePendingRollbackInfo(instant, rollbackPlan)));
                    }
                }
            } else {
                infoMap.putIfAbsent(rollbackPlan.getInstantToRollback().getCommitTime(), Option.of(new HoodiePendingRollbackInfo(instant, rollbackPlan)));
            }
        } catch (IOException e) {
            LOG.warn("Fetching rollback plan failed for " + infoMap + ", skip the plan", e);
        }
    }
    return infoMap;
}
Also used: HoodieInstant (org.apache.hudi.common.table.timeline.HoodieInstant), HoodieRollbackPlan (org.apache.hudi.avro.model.HoodieRollbackPlan), HashMap (java.util.HashMap), LinkedHashMap (java.util.LinkedHashMap), Option (org.apache.hudi.common.util.Option), IOException (java.io.IOException), HoodieIOException (org.apache.hudi.exception.HoodieIOException), HoodiePendingRollbackInfo (org.apache.hudi.common.HoodiePendingRollbackInfo)
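
For orientation, here is a minimal hedged sketch of how a caller inside a write client subclass might consume the map returned above. The scheduleRollbackFor helper is purely illustrative, and the getRollbackPlan() accessor on HoodiePendingRollbackInfo is an assumption; the rest reuses names that appear in the snippet.

// Hypothetical caller sketch: reuse an already-pending rollback plan for a failed
// commit if one exists, otherwise schedule a fresh rollback. Assumes this lives in
// a BaseHoodieWriteClient subclass so getPendingRollbackInfos is accessible.
protected void rollbackFailedCommit(HoodieTableMetaClient metaClient, String failedCommitTime) {
    // Skip plans that target compaction/clustering instants, mirroring the flag above.
    Map<String, Option<HoodiePendingRollbackInfo>> pending = getPendingRollbackInfos(metaClient, true);
    Option<HoodiePendingRollbackInfo> pendingRollback = pending.getOrDefault(failedCommitTime, Option.empty());
    if (pendingRollback.isPresent()) {
        // A rollback was already requested for this commit; reuse its plan so the
        // original rollback instant is honored. (getRollbackPlan() is assumed here.)
        HoodieRollbackPlan plan = pendingRollback.get().getRollbackPlan();
        LOG.info("Reusing pending rollback plan targeting " + plan.getInstantToRollback().getCommitTime());
    } else {
        // No pending rollback exists; schedule a new one (illustrative helper).
        scheduleRollbackFor(failedCommitTime);
    }
}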

Example 7 with HoodieRollbackPlan

Use of org.apache.hudi.avro.model.HoodieRollbackPlan in the apache/hudi project.

From the class TestCopyOnWriteRollbackActionExecutor, the method performRollbackAndValidate:

private void performRollbackAndValidate(boolean isUsingMarkers, HoodieWriteConfig cfg, HoodieTable table, List<FileSlice> firstPartitionCommit2FileSlices, List<FileSlice> secondPartitionCommit2FileSlices) throws IOException {
    // 2. rollback
    HoodieInstant commitInstant;
    if (isUsingMarkers) {
        commitInstant = table.getActiveTimeline().getCommitTimeline().filterInflights().lastInstant().get();
    } else {
        commitInstant = table.getCompletedCommitTimeline().lastInstant().get();
    }
    BaseRollbackPlanActionExecutor copyOnWriteRollbackPlanActionExecutor =
        new BaseRollbackPlanActionExecutor(context, table.getConfig(), table, "003", commitInstant,
            false, table.getConfig().shouldRollbackUsingMarkers());
    HoodieRollbackPlan hoodieRollbackPlan = (HoodieRollbackPlan) copyOnWriteRollbackPlanActionExecutor.execute().get();
    CopyOnWriteRollbackActionExecutor copyOnWriteRollbackActionExecutor =
        new CopyOnWriteRollbackActionExecutor(context, cfg, table, "003", commitInstant, false, false);
    Map<String, HoodieRollbackPartitionMetadata> rollbackMetadata =
        copyOnWriteRollbackActionExecutor.execute().getPartitionMetadata();
    // 3. assert the rollback stat
    assertEquals(2, rollbackMetadata.size());
    for (Map.Entry<String, HoodieRollbackPartitionMetadata> entry : rollbackMetadata.entrySet()) {
        HoodieRollbackPartitionMetadata meta = entry.getValue();
        assertTrue(meta.getFailedDeleteFiles() == null || meta.getFailedDeleteFiles().size() == 0);
        assertTrue(meta.getSuccessDeleteFiles() == null || meta.getSuccessDeleteFiles().size() == 1);
    }
    // 4. assert filegroup after rollback, and compare to the rollbackstat
    // assert the first partition file group and file slice
    List<HoodieFileGroup> firstPartitionRollBack1FileGroups = table.getFileSystemView().getAllFileGroups(DEFAULT_FIRST_PARTITION_PATH).collect(Collectors.toList());
    assertEquals(1, firstPartitionRollBack1FileGroups.size());
    List<FileSlice> firstPartitionRollBack1FileSlices = firstPartitionRollBack1FileGroups.get(0).getAllFileSlices().collect(Collectors.toList());
    assertEquals(1, firstPartitionRollBack1FileSlices.size());
    firstPartitionCommit2FileSlices.removeAll(firstPartitionRollBack1FileSlices);
    assertEquals(1, firstPartitionCommit2FileSlices.size());
    assertEquals(firstPartitionCommit2FileSlices.get(0).getBaseFile().get().getPath(),
        this.fs.getScheme() + ":" + rollbackMetadata.get(DEFAULT_FIRST_PARTITION_PATH).getSuccessDeleteFiles().get(0));
    // assert the second partition file group and file slice
    List<HoodieFileGroup> secondPartitionRollBack1FileGroups = table.getFileSystemView().getAllFileGroups(DEFAULT_SECOND_PARTITION_PATH).collect(Collectors.toList());
    assertEquals(1, secondPartitionRollBack1FileGroups.size());
    List<FileSlice> secondPartitionRollBack1FileSlices = secondPartitionRollBack1FileGroups.get(0).getAllFileSlices().collect(Collectors.toList());
    assertEquals(1, secondPartitionRollBack1FileSlices.size());
    // assert the second partition rollback file equals rollBack1SecondPartitionStat
    secondPartitionCommit2FileSlices.removeAll(secondPartitionRollBack1FileSlices);
    assertEquals(1, secondPartitionCommit2FileSlices.size());
    assertEquals(secondPartitionCommit2FileSlices.get(0).getBaseFile().get().getPath(),
        this.fs.getScheme() + ":" + rollbackMetadata.get(DEFAULT_SECOND_PARTITION_PATH).getSuccessDeleteFiles().get(0));
    assertFalse(WriteMarkersFactory.get(cfg.getMarkersType(), table, commitInstant.getTimestamp()).doesMarkerDirExist());
}
Also used: HoodieInstant (org.apache.hudi.common.table.timeline.HoodieInstant), HoodieRollbackPlan (org.apache.hudi.avro.model.HoodieRollbackPlan), FileSlice (org.apache.hudi.common.model.FileSlice), HoodieRollbackPartitionMetadata (org.apache.hudi.avro.model.HoodieRollbackPartitionMetadata), HoodieFileGroup (org.apache.hudi.common.model.HoodieFileGroup), Map (java.util.Map)
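
As a hedged follow-up, the same schedule-then-execute pattern can be condensed as below. The extra assertions on the plan's target instant are illustrative additions under the same test setup (context, cfg, table, commitInstant), not part of the original test.

// Condensed sketch of the plan-then-execute flow used above (test setup assumed).
BaseRollbackPlanActionExecutor planExecutor = new BaseRollbackPlanActionExecutor(
    context, table.getConfig(), table, "003", commitInstant, false,
    table.getConfig().shouldRollbackUsingMarkers());
HoodieRollbackPlan plan = (HoodieRollbackPlan) planExecutor.execute().get();
// The scheduled plan should point back at the instant we asked to roll back.
assertEquals(commitInstant.getTimestamp(), plan.getInstantToRollback().getCommitTime());
assertEquals(commitInstant.getAction(), plan.getInstantToRollback().getAction());
// Only then perform the rollback and inspect the per-partition metadata.
Map<String, HoodieRollbackPartitionMetadata> partitionMetadata =
    new CopyOnWriteRollbackActionExecutor(context, cfg, table, "003", commitInstant, false, false)
        .execute().getPartitionMetadata();
assertFalse(partitionMetadata.isEmpty());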

Aggregations

HoodieRollbackPlan (org.apache.hudi.avro.model.HoodieRollbackPlan): 7 usages
HoodieInstant (org.apache.hudi.common.table.timeline.HoodieInstant): 7 usages
IOException (java.io.IOException): 4 usages
HoodieIOException (org.apache.hudi.exception.HoodieIOException): 4 usages
HashMap (java.util.HashMap): 3 usages
Option (org.apache.hudi.common.util.Option): 3 usages
HoodieWriteConfig (org.apache.hudi.config.HoodieWriteConfig): 3 usages
Collections (java.util.Collections): 2 usages
LinkedHashMap (java.util.LinkedHashMap): 2 usages
List (java.util.List): 2 usages
Map (java.util.Map): 2 usages
Collectors (java.util.stream.Collectors): 2 usages
HoodieInstantInfo (org.apache.hudi.avro.model.HoodieInstantInfo): 2 usages
HoodieRollbackMetadata (org.apache.hudi.avro.model.HoodieRollbackMetadata): 2 usages
TransactionManager (org.apache.hudi.client.transaction.TransactionManager): 2 usages
HoodiePendingRollbackInfo (org.apache.hudi.common.HoodiePendingRollbackInfo): 2 usages
HoodieRollbackStat (org.apache.hudi.common.HoodieRollbackStat): 2 usages
HoodieEngineContext (org.apache.hudi.common.engine.HoodieEngineContext): 2 usages
HoodieTestTable (org.apache.hudi.common.testutils.HoodieTestTable): 2 usages
HoodieTable (org.apache.hudi.table.HoodieTable): 2 usages