Use of org.apache.hudi.common.table.timeline.HoodieTimeline.COMPACTION_ACTION in project hudi by apache.
In class CompactionAdminClient, the method unscheduleCompactionFileId:
/**
 * Removes a fileId from pending compaction. Removes the associated compaction operation and renames the delta
 * files that were generated for that file-id after the compaction operation was scheduled.
 *
 * This operation MUST be executed with compactions and writers turned OFF.
 *
 * @param fgId FileGroupId to be unscheduled
 * @param skipValidation Skip validation
 * @param dryRun Dry-run mode: report the rename operations without executing them
 */
public List<RenameOpResult> unscheduleCompactionFileId(HoodieFileGroupId fgId, boolean skipValidation, boolean dryRun)
    throws Exception {
  HoodieTableMetaClient metaClient = createMetaClient(false);
  List<Pair<HoodieLogFile, HoodieLogFile>> renameActions =
      getRenamingActionsForUnschedulingCompactionForFileId(metaClient, fgId, Option.empty(), skipValidation);
  List<RenameOpResult> res = runRenamingOps(metaClient, renameActions, 1, dryRun);
  if (!dryRun && !res.isEmpty() && res.get(0).isExecuted() && res.get(0).isSuccess()) {
    // Ready to remove this file-id from the compaction request
    Pair<String, HoodieCompactionOperation> compactionOperationWithInstant =
        CompactionUtils.getAllPendingCompactionOperations(metaClient).get(fgId);
    HoodieCompactionPlan plan = CompactionUtils.getCompactionPlan(metaClient, compactionOperationWithInstant.getKey());
    // Keep every operation except the one targeting this exact file group (matching both file-id and
    // partition path). Joining the negated comparisons with && instead would drop every operation that
    // shares either the file-id or the partition path, which is too aggressive.
    List<HoodieCompactionOperation> newOps = plan.getOperations().stream()
        .filter(op -> !op.getFileId().equals(fgId.getFileId()) || !op.getPartitionPath().equals(fgId.getPartitionPath()))
        .collect(Collectors.toList());
    HoodieCompactionPlan newPlan =
        HoodieCompactionPlan.newBuilder().setOperations(newOps).setExtraMetadata(plan.getExtraMetadata()).build();
    HoodieInstant inflight = new HoodieInstant(State.INFLIGHT, COMPACTION_ACTION, compactionOperationWithInstant.getLeft());
    Path inflightPath = new Path(metaClient.getMetaPath(), inflight.getFileName());
    if (metaClient.getFs().exists(inflightPath)) {
      // Revert to REQUESTED if the compaction has already transitioned to INFLIGHT
      metaClient.getActiveTimeline().revertCompactionInflightToRequested(inflight);
    }
    // Overwrite the compaction plan with the updated operation list
    metaClient.getActiveTimeline().saveToCompactionRequested(
        new HoodieInstant(State.REQUESTED, COMPACTION_ACTION, compactionOperationWithInstant.getLeft()),
        TimelineMetadataUtils.serializeCompactionPlan(newPlan), true);
  }
  return res;
}
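
The call pattern the javadoc implies is: run once in dry-run mode to inspect the pending renames, then run for real. A minimal usage sketch, assuming a CompactionAdminClient built from an engine context and a table base path (constructor details vary by Hudi version); context, basePath, the partition path, and the file id below are placeholders, not values from the source:

// Hypothetical invocation; fgId identifies the file group to pull out of the pending plan.
HoodieFileGroupId fgId = new HoodieFileGroupId("2021/01/01", "fg-0001");
try (CompactionAdminClient admin = new CompactionAdminClient(context, basePath)) {
  // Dry run first: report the delta-file renames that would be performed
  admin.unscheduleCompactionFileId(fgId, false, true).forEach(r -> System.out.println(r));
  // Then execute for real (compactions and writers must be stopped)
  admin.unscheduleCompactionFileId(fgId, false, false);
}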
Use of org.apache.hudi.common.table.timeline.HoodieTimeline.COMPACTION_ACTION in project hudi by apache.
In class ConcurrentOperation, the method init:
private void init(HoodieInstant instant) {
  if (this.metadataWrapper.isAvroMetadata()) {
    // Instant metadata was read back from the timeline as Avro records
    switch (getInstantActionType()) {
      case COMPACTION_ACTION:
        // A compaction mutates every file id listed in its plan
        this.operationType = WriteOperationType.COMPACT;
        this.mutatedFileIds = this.metadataWrapper.getMetadataFromTimeline().getHoodieCompactionPlan()
            .getOperations().stream().map(op -> op.getFileId()).collect(Collectors.toSet());
        break;
      case COMMIT_ACTION:
      case DELTA_COMMIT_ACTION:
        this.mutatedFileIds = CommitUtils.getFileIdWithoutSuffixAndRelativePathsFromSpecificRecord(
            this.metadataWrapper.getMetadataFromTimeline().getHoodieCommitMetadata().getPartitionToWriteStats()).keySet();
        this.operationType = WriteOperationType.fromValue(
            this.metadataWrapper.getMetadataFromTimeline().getHoodieCommitMetadata().getOperationType());
        break;
      case REPLACE_COMMIT_ACTION:
        if (instant.isCompleted()) {
          this.mutatedFileIds = CommitUtils.getFileIdWithoutSuffixAndRelativePathsFromSpecificRecord(
              this.metadataWrapper.getMetadataFromTimeline().getHoodieReplaceCommitMetadata().getPartitionToWriteStats()).keySet();
          this.operationType = WriteOperationType.fromValue(
              this.metadataWrapper.getMetadataFromTimeline().getHoodieReplaceCommitMetadata().getOperationType());
        } else {
          // Pending clustering: derive the mutated file ids from the requested clustering plan
          HoodieRequestedReplaceMetadata requestedReplaceMetadata =
              this.metadataWrapper.getMetadataFromTimeline().getHoodieRequestedReplaceMetadata();
          this.mutatedFileIds = requestedReplaceMetadata.getClusteringPlan().getInputGroups().stream()
              .flatMap(ig -> ig.getSlices().stream()).map(file -> file.getFileId()).collect(Collectors.toSet());
          this.operationType = WriteOperationType.CLUSTER;
        }
        break;
      default:
        throw new IllegalArgumentException("Unsupported Action Type " + getInstantActionType());
    }
  } else {
    // Metadata is available as an in-memory HoodieCommitMetadata object
    switch (getInstantActionType()) {
      case COMMIT_ACTION:
      case DELTA_COMMIT_ACTION:
        this.mutatedFileIds = CommitUtils.getFileIdWithoutSuffixAndRelativePaths(
            this.metadataWrapper.getCommitMetadata().getPartitionToWriteStats()).keySet();
        this.operationType = this.metadataWrapper.getCommitMetadata().getOperationType();
        break;
      default:
        throw new IllegalArgumentException("Unsupported Action Type " + getInstantActionType());
    }
  }
}
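
The mutatedFileIds set computed above is what optimistic concurrency control compares across simultaneous writers: two operations conflict when they touch a common file group. A minimal sketch of that check, mirroring the set-intersection idea behind Hudi's conflict resolution strategies (the method name and signature here are illustrative, not Hudi API):

import java.util.HashSet;
import java.util.Set;

// Illustrative helper: two concurrent operations conflict iff their mutated
// file-id sets overlap.
static boolean writesConflict(Set<String> thisWrite, Set<String> otherWrite) {
  Set<String> overlap = new HashSet<>(thisWrite); // copy so neither input set is mutated
  overlap.retainAll(otherWrite);                  // set intersection
  return !overlap.isEmpty();
}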
Use of org.apache.hudi.common.table.timeline.HoodieTimeline.COMPACTION_ACTION in project hudi by apache.
In class TestIncrementalFSViewSync, the method unscheduleCompaction:
/**
 * Unschedules a compaction instant and validates the incremental file-system view.
 *
 * @param view Hoodie view
 * @param compactionInstantTime Compaction instant to be removed
 * @param newLastInstant Expected last instant after removal
 * @param newBaseInstant Expected base instant of the latest file slices after removal
 */
private void unscheduleCompaction(SyncableFileSystemView view, String compactionInstantTime, String newLastInstant,
    String newBaseInstant) throws IOException {
  HoodieInstant instant = new HoodieInstant(State.REQUESTED, COMPACTION_ACTION, compactionInstantTime);
  boolean deleted = metaClient.getFs().delete(new Path(metaClient.getMetaPath(), instant.getFileName()), false);
  ValidationUtils.checkArgument(deleted, "Unable to delete compaction instant.");
  view.sync();
  assertEquals(newLastInstant, view.getLastInstant().get().getTimestamp());
  partitions.forEach(p ->
      view.getLatestFileSlices(p).forEach(fs -> assertEquals(newBaseInstant, fs.getBaseInstantTime())));
}
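
As a hypothetical invocation of the helper above: if commits "001" and "002" are complete and a compaction was requested at "003", deleting the requested meta-file should roll the view back, making "002" the last instant again and re-anchoring the latest file slices on base instant "002" (the instant times below are illustrative):

// Timeline assumed for illustration: commits 001, 002 completed; compaction requested at 003.
unscheduleCompaction(view, "003", "002", "002");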