use of org.apache.hudi.avro.model.HoodieInstantInfo in project hudi by apache.
the class HoodieTestTable method getRollbackMetadata.
public HoodieRollbackMetadata getRollbackMetadata(String instantTimeToDelete, Map<String, List<String>> partitionToFilesMeta) throws Exception {
  HoodieRollbackMetadata rollbackMetadata = new HoodieRollbackMetadata();
  rollbackMetadata.setCommitsRollback(Collections.singletonList(instantTimeToDelete));
  rollbackMetadata.setStartRollbackTime(instantTimeToDelete);
  Map<String, HoodieRollbackPartitionMetadata> partitionMetadataMap = new HashMap<>();
  for (Map.Entry<String, List<String>> entry : partitionToFilesMeta.entrySet()) {
    HoodieRollbackPartitionMetadata rollbackPartitionMetadata = new HoodieRollbackPartitionMetadata();
    rollbackPartitionMetadata.setPartitionPath(entry.getKey());
    rollbackPartitionMetadata.setSuccessDeleteFiles(entry.getValue());
    rollbackPartitionMetadata.setFailedDeleteFiles(new ArrayList<>());
    long rollbackLogFileSize = 50 + RANDOM.nextInt(500);
    String fileId = UUID.randomUUID().toString();
    String logFileName = logFileName(instantTimeToDelete, fileId, 0);
    FileCreateUtils.createLogFile(basePath, entry.getKey(), instantTimeToDelete, fileId, 0, (int) rollbackLogFileSize);
    rollbackPartitionMetadata.setRollbackLogFiles(createImmutableMap(logFileName, rollbackLogFileSize));
    partitionMetadataMap.put(entry.getKey(), rollbackPartitionMetadata);
  }
  rollbackMetadata.setPartitionMetadata(partitionMetadataMap);
  rollbackMetadata.setInstantsRollback(Collections.singletonList(new HoodieInstantInfo(instantTimeToDelete, HoodieTimeline.ROLLBACK_ACTION)));
  return rollbackMetadata;
}
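A minimal usage sketch of this helper, assuming a test that already holds a HoodieTestTable named testTable; the partition path, file name, and assertions below are illustrative, and the getters are the Avro-generated counterparts of the setters used above.

Map<String, List<String>> partitionToFilesMeta = new HashMap<>();
partitionToFilesMeta.put("2016/05/01", Arrays.asList("file-1.parquet"));

HoodieRollbackMetadata metadata = testTable.getRollbackMetadata("20160501010101", partitionToFilesMeta);

// Avro-generated getters mirror the setters used in the helper.
assertEquals(Collections.singletonList("20160501010101"), metadata.getCommitsRollback());
assertEquals(1, metadata.getPartitionMetadata().size());
assertTrue(metadata.getPartitionMetadata().get("2016/05/01").getFailedDeleteFiles().isEmpty());
assertEquals(HoodieTimeline.ROLLBACK_ACTION, metadata.getInstantsRollback().get(0).getAction());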
use of org.apache.hudi.avro.model.HoodieInstantInfo in project hudi by apache.
the class BaseRestoreActionExecutor method getInstantsToRollback.
private List<HoodieInstant> getInstantsToRollback(HoodieInstant restoreInstant) throws IOException {
  List<HoodieInstant> instantsToRollback = new ArrayList<>();
  HoodieRestorePlan restorePlan = RestoreUtils.getRestorePlan(table.getMetaClient(), restoreInstant);
  for (HoodieInstantInfo instantInfo : restorePlan.getInstantsToRollback()) {
    // If the restore crashed mid-way, some commits may already have been rolled back while others were not,
    // so commits that were fully rolled back in a previous attempt can be ignored here.
    Option<HoodieInstant> rollbackInstantOpt = table.getActiveTimeline().getWriteTimeline()
        .filter(instant -> instant.getTimestamp().equals(instantInfo.getCommitTime())
            && instant.getAction().equals(instantInfo.getAction()))
        .firstInstant();
    if (rollbackInstantOpt.isPresent()) {
      instantsToRollback.add(rollbackInstantOpt.get());
    } else {
      LOG.warn("Ignoring already rolledback instant " + instantInfo.toString());
    }
  }
  return instantsToRollback;
}
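For reference, HoodieInstantInfo is a small Avro value object pairing a commit time with an action; a minimal sketch of the accessors this loop relies on, with an illustrative timestamp and action:

HoodieInstantInfo info = new HoodieInstantInfo("20160501010101", HoodieTimeline.COMMIT_ACTION);
String commitTime = info.getCommitTime();  // "20160501010101"
String action = info.getAction();          // the commit action string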
use of org.apache.hudi.avro.model.HoodieInstantInfo in project hudi by apache.
the class TestClientRollback method testFailedRollbackCommit.
/**
 * Test cases for the effects of rolling back completed/inflight commits.
 */
@Test
public void testFailedRollbackCommit() throws Exception {
  // Let's create some commit files and base files
  final String p1 = "2016/05/01";
  final String p2 = "2016/05/02";
  final String p3 = "2016/05/06";
  final String commitTime1 = "20160501010101";
  final String commitTime2 = "20160502020601";
  final String commitTime3 = "20160506030611";
  Map<String, String> partitionAndFileId1 = new HashMap<String, String>() {
    {
      put(p1, "id11");
      put(p2, "id12");
      put(p3, "id13");
    }
  };
  Map<String, String> partitionAndFileId2 = new HashMap<String, String>() {
    {
      put(p1, "id21");
      put(p2, "id22");
      put(p3, "id23");
    }
  };
  Map<String, String> partitionAndFileId3 = new HashMap<String, String>() {
    {
      put(p1, "id31");
      put(p2, "id32");
      put(p3, "id33");
    }
  };
  HoodieTestTable testTable = HoodieTestTable.of(metaClient)
      .withPartitionMetaFiles(p1, p2, p3)
      .addCommit(commitTime1)
      .withBaseFilesInPartitions(partitionAndFileId1)
      .addCommit(commitTime2)
      .withBaseFilesInPartitions(partitionAndFileId2)
      .addInflightCommit(commitTime3)
      .withBaseFilesInPartitions(partitionAndFileId3);
  HoodieWriteConfig config = HoodieWriteConfig.newBuilder()
      .withPath(basePath)
      .withRollbackUsingMarkers(false)
      .withCompactionConfig(HoodieCompactionConfig.newBuilder()
          .withFailedWritesCleaningPolicy(HoodieFailedWritesCleaningPolicy.LAZY).build())
      .withIndexConfig(HoodieIndexConfig.newBuilder().withIndexType(HoodieIndex.IndexType.INMEMORY).build())
      .build();
  try (SparkRDDWriteClient client = getHoodieWriteClient(config)) {
    // Rollback commit3
    client.rollback(commitTime3);
    assertFalse(testTable.inflightCommitExists(commitTime3));
    assertFalse(testTable.baseFilesExist(partitionAndFileId3, commitTime3));
    assertTrue(testTable.baseFilesExist(partitionAndFileId2, commitTime2));
    metaClient.reloadActiveTimeline();
    List<HoodieInstant> rollbackInstants = metaClient.getActiveTimeline().getRollbackTimeline().getInstants().collect(Collectors.toList());
    assertEquals(rollbackInstants.size(), 1);
    HoodieInstant rollbackInstant = rollbackInstants.get(0);
    // delete the completed rollback meta file and retry the rollback
    FileCreateUtils.deleteRollbackCommit(basePath, rollbackInstant.getTimestamp());
    // recreate the actual commit files so that we can retry the rollback
    testTable.addInflightCommit(commitTime3).withBaseFilesInPartitions(partitionAndFileId3);
    // retry rolling back the commit again
    client.rollback(commitTime3);
    // verify there are no extra rollback instants
    metaClient.reloadActiveTimeline();
    rollbackInstants = metaClient.getActiveTimeline().getRollbackTimeline().getInstants().collect(Collectors.toList());
    assertEquals(rollbackInstants.size(), 1);
    assertEquals(rollbackInstants.get(0), rollbackInstant);
    final String commitTime4 = "20160507040601";
    final String commitTime5 = "20160507050611";
    // add an inflight compaction, then roll it back
    testTable.addInflightCompaction(commitTime4, new HoodieCommitMetadata());
    HoodieRollbackPlan rollbackPlan = new HoodieRollbackPlan();
    rollbackPlan.setRollbackRequests(Collections.emptyList());
    rollbackPlan.setInstantToRollback(new HoodieInstantInfo(commitTime4, HoodieTimeline.COMPACTION_ACTION));
    testTable.addRequestedRollback(commitTime5, rollbackPlan);
    // the compaction instants should be excluded
    metaClient.reloadActiveTimeline();
    assertEquals(0, client.getPendingRollbackInfos(metaClient).size());
    // verify there are no extra rollback instants
    client.rollback(commitTime4);
    metaClient.reloadActiveTimeline();
    rollbackInstants = metaClient.reloadActiveTimeline().getRollbackTimeline().getInstants().collect(Collectors.toList());
    assertEquals(2, rollbackInstants.size());
  }
}
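Both ways of building a HoodieRollbackPlan appear on this page: the setter style in the test above and the all-args constructor in BaseRollbackPlanActionExecutor further below. A minimal sketch contrasting them, with the timestamp and plan-version constant taken from the surrounding examples:

// Setter style, as in the test above.
HoodieRollbackPlan planViaSetters = new HoodieRollbackPlan();
planViaSetters.setInstantToRollback(new HoodieInstantInfo("20160507040601", HoodieTimeline.COMPACTION_ACTION));
planViaSetters.setRollbackRequests(Collections.emptyList());

// All-args style, as in BaseRollbackPlanActionExecutor.requestRollback below.
HoodieRollbackPlan planViaConstructor = new HoodieRollbackPlan(
    new HoodieInstantInfo("20160507040601", HoodieTimeline.COMPACTION_ACTION),
    Collections.emptyList(),
    LATEST_ROLLBACK_PLAN_VERSION);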
use of org.apache.hudi.avro.model.HoodieInstantInfo in project hudi by apache.
the class RestorePlanActionExecutor method execute.
@Override
public Option<HoodieRestorePlan> execute() {
  final HoodieInstant restoreInstant = new HoodieInstant(HoodieInstant.State.REQUESTED, HoodieTimeline.RESTORE_ACTION, instantTime);
  try {
    // Rollback pending clustering instants first, before other instants (see HUDI-3362)
    List<HoodieInstant> pendingClusteringInstantsToRollback = table.getActiveTimeline().filterPendingReplaceTimeline()
        .filter(instant -> ClusteringUtils.isPendingClusteringInstant(table.getMetaClient(), instant))
        .getReverseOrderedInstants()
        .filter(instant -> HoodieActiveTimeline.GREATER_THAN.test(instant.getTimestamp(), restoreInstantTime))
        .collect(Collectors.toList());
    // Get all the commits on the timeline after the provided commit time
    List<HoodieInstant> commitInstantsToRollback = table.getActiveTimeline().getWriteTimeline()
        .getReverseOrderedInstants()
        .filter(instant -> HoodieActiveTimeline.GREATER_THAN.test(instant.getTimestamp(), restoreInstantTime))
        .filter(instant -> !pendingClusteringInstantsToRollback.contains(instant))
        .collect(Collectors.toList());
    // Combine both lists: first roll back pending clustering, then roll back all other commits
    List<HoodieInstantInfo> instantsToRollback = Stream.concat(pendingClusteringInstantsToRollback.stream(), commitInstantsToRollback.stream())
        .map(entry -> new HoodieInstantInfo(entry.getTimestamp(), entry.getAction()))
        .collect(Collectors.toList());
    HoodieRestorePlan restorePlan = new HoodieRestorePlan(instantsToRollback, LATEST_RESTORE_PLAN_VERSION);
    table.getActiveTimeline().saveToRestoreRequested(restoreInstant, TimelineMetadataUtils.serializeRestorePlan(restorePlan));
    table.getMetaClient().reloadActiveTimeline();
    LOG.info("Requesting Restore with instant time " + restoreInstant);
    return Option.of(restorePlan);
  } catch (IOException e) {
    LOG.error("Got exception when saving restore requested file", e);
    throw new HoodieIOException(e.getMessage(), e);
  }
}
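The plan written here is what BaseRestoreActionExecutor.getInstantsToRollback (shown earlier on this page) reads back. A minimal sketch of that round trip; instantsAfterRestorePoint is a hypothetical list of HoodieInstants standing in for the filtered timeline above:

// Writing side (this executor): each instant after the restore point becomes a HoodieInstantInfo.
List<HoodieInstantInfo> infos = instantsAfterRestorePoint.stream()
    .map(i -> new HoodieInstantInfo(i.getTimestamp(), i.getAction()))
    .collect(Collectors.toList());
HoodieRestorePlan plan = new HoodieRestorePlan(infos, LATEST_RESTORE_PLAN_VERSION);

// Reading side (BaseRestoreActionExecutor above): each info is resolved back to a timeline instant,
// matching on both commit time and action.
for (HoodieInstantInfo info : plan.getInstantsToRollback()) {
  // info.getCommitTime() and info.getAction() drive the timeline lookup
}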
use of org.apache.hudi.avro.model.HoodieInstantInfo in project hudi by apache.
the class BaseRollbackPlanActionExecutor method requestRollback.
/**
 * Creates a rollback plan if there are files to be rolled back, and stores it in the instant file.
 * The rollback plan contains absolute file paths.
 *
 * @param startRollbackTime rollback instant time
 * @return the rollback plan, if generated
 */
protected Option<HoodieRollbackPlan> requestRollback(String startRollbackTime) {
  final HoodieInstant rollbackInstant = new HoodieInstant(HoodieInstant.State.REQUESTED, HoodieTimeline.ROLLBACK_ACTION, startRollbackTime);
  try {
    List<HoodieRollbackRequest> rollbackRequests = new ArrayList<>();
    if (!instantToRollback.isRequested()) {
      rollbackRequests.addAll(getRollbackStrategy().getRollbackRequests(instantToRollback));
    }
    HoodieRollbackPlan rollbackPlan = new HoodieRollbackPlan(
        new HoodieInstantInfo(instantToRollback.getTimestamp(), instantToRollback.getAction()),
        rollbackRequests, LATEST_ROLLBACK_PLAN_VERSION);
    if (!skipTimelinePublish) {
      if (table.getRollbackTimeline().filterInflightsAndRequested().containsInstant(rollbackInstant.getTimestamp())) {
        LOG.warn("Request Rollback found with instant time " + rollbackInstant + ", hence skipping scheduling rollback");
      } else {
        table.getActiveTimeline().saveToRollbackRequested(rollbackInstant, TimelineMetadataUtils.serializeRollbackPlan(rollbackPlan));
        table.getMetaClient().reloadActiveTimeline();
        LOG.info("Requesting Rollback with instant time " + rollbackInstant);
      }
    }
    return Option.of(rollbackPlan);
  } catch (IOException e) {
    LOG.error("Got exception when saving rollback requested file", e);
    throw new HoodieIOException(e.getMessage(), e);
  }
}
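Once the requested rollback file is written, the plan's target can later be read back through the Avro getters. A minimal sketch, assuming a rollbackPlan obtained as above (how the plan is deserialized from the timeline is not shown on this page):

HoodieInstantInfo target = rollbackPlan.getInstantToRollback();
String commitToRollback = target.getCommitTime();
if (HoodieTimeline.COMPACTION_ACTION.equals(target.getAction())) {
  // pending rollbacks of compaction instants are treated specially; the test above expects
  // them to be excluded from client.getPendingRollbackInfos(metaClient)
}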