Usage example of org.apache.hudi.common.table.timeline.versioning.TimelineLayoutVersion in the Apache Hudi project:
method testLoadingInstantsFromFiles of class TestHoodieActiveTimeline.
@Test
public void testLoadingInstantsFromFiles() throws IOException {
  // Four commits ("1", "3", "5", "8") that will be driven through
  // requested -> inflight -> completed, plus one commit ("9") left inflight.
  HoodieInstant instant1 = new HoodieInstant(State.REQUESTED, HoodieTimeline.COMMIT_ACTION, "1");
  HoodieInstant instant2 = new HoodieInstant(State.REQUESTED, HoodieTimeline.COMMIT_ACTION, "3");
  HoodieInstant instant3 = new HoodieInstant(State.REQUESTED, HoodieTimeline.COMMIT_ACTION, "5");
  HoodieInstant instant4 = new HoodieInstant(State.REQUESTED, HoodieTimeline.COMMIT_ACTION, "8");
  // Completed counterparts used as the expected values in the assertions below.
  HoodieInstant instant1Complete = new HoodieInstant(false, HoodieTimeline.COMMIT_ACTION, "1");
  HoodieInstant instant2Complete = new HoodieInstant(false, HoodieTimeline.COMMIT_ACTION, "3");
  HoodieInstant instant3Complete = new HoodieInstant(false, HoodieTimeline.COMMIT_ACTION, "5");
  HoodieInstant instant4Complete = new HoodieInstant(false, HoodieTimeline.COMMIT_ACTION, "8");
  HoodieInstant instant5 = new HoodieInstant(true, HoodieTimeline.COMMIT_ACTION, "9");
  timeline = new HoodieActiveTimeline(metaClient);
  // Drive each of the first four commits through the full lifecycle:
  // requested -> inflight -> completed.
  for (HoodieInstant requested : new HoodieInstant[] {instant1, instant2, instant3, instant4}) {
    timeline.createNewInstant(requested);
    timeline.transitionRequestedToInflight(requested, Option.empty());
    timeline.saveAsComplete(new HoodieInstant(true, requested.getAction(), requested.getTimestamp()), Option.empty());
  }
  // Commit "9" stays inflight (created but never completed).
  timeline.createNewInstant(instant5);
  timeline = timeline.reload();
  assertEquals(5, timeline.countInstants(), "Total instants should be 5");
  assertStreamEquals(Stream.of(instant1Complete, instant2Complete, instant3Complete, instant4Complete, instant5), timeline.getInstants(), "Check the instants stream");
  assertStreamEquals(Stream.of(instant1Complete, instant2Complete, instant3Complete, instant4Complete, instant5), timeline.getCommitTimeline().getInstants(), "Check the instants stream");
  assertStreamEquals(Stream.of(instant1Complete, instant2Complete, instant3Complete, instant4Complete), timeline.getCommitTimeline().filterCompletedInstants().getInstants(), "Check the instants stream");
  assertStreamEquals(Stream.of(instant5), timeline.getCommitTimeline().filterPendingExcludingCompaction().getInstants(), "Check the instants stream");
  // Backwards compatibility testing for reading compaction plans: re-init the
  // table with the legacy timeline layout (VERSION_0) and write a compaction
  // plan through an old-version timeline.
  metaClient = HoodieTableMetaClient.withPropertyBuilder().fromMetaClient(metaClient).setTimelineLayoutVersion(VERSION_0).initTable(metaClient.getHadoopConf(), metaClient.getBasePath());
  HoodieInstant instant6 = new HoodieInstant(State.REQUESTED, HoodieTimeline.COMPACTION_ACTION, "9");
  byte[] dummy = new byte[5];
  HoodieActiveTimeline oldTimeline = new HoodieActiveTimeline(HoodieTableMetaClient.builder().setConf(metaClient.getHadoopConf()).setBasePath(metaClient.getBasePath()).setLoadActiveTimelineOnLoad(true).setConsistencyGuardConfig(metaClient.getConsistencyGuardConfig()).setFileSystemRetryConfig(metaClient.getFileSystemRetryConfig()).setLayoutVersion(Option.of(new TimelineLayoutVersion(VERSION_0))).build());
  // Old Timeline writes both to aux and timeline folder
  oldTimeline.saveToCompactionRequested(instant6, Option.of(dummy));
  // Now use latest timeline version
  timeline = timeline.reload();
  // Ensure aux file is present
  assertTrue(metaClient.getFs().exists(new Path(metaClient.getMetaAuxiliaryPath(), instant6.getFileName())));
  // Read 5 bytes
  assertEquals(5, timeline.readCompactionPlanAsBytes(instant6).get().length);
  // Delete auxiliary file to mimic future release where we stop writing to aux.
  // Use the non-deprecated delete(Path, boolean) overload; false = non-recursive
  // since the target is a single file.
  metaClient.getFs().delete(new Path(metaClient.getMetaAuxiliaryPath(), instant6.getFileName()), false);
  // Ensure requested instant is not present in aux
  assertFalse(metaClient.getFs().exists(new Path(metaClient.getMetaAuxiliaryPath(), instant6.getFileName())));
  // Now read compaction plan again which should not throw exception
  assertEquals(5, timeline.readCompactionPlanAsBytes(instant6).get().length);
}
Usage example of org.apache.hudi.common.table.timeline.versioning.TimelineLayoutVersion in the Apache Hudi project:
method upgradeOrDowngradeTable of class SparkMain.
/**
 * Upgrades or downgrades the Hudi table at the given base path to the requested table version.
 *
 * @param jsc instance of {@link JavaSparkContext} to use.
 * @param basePath base path of the dataset.
 * @param toVersion target {@link HoodieTableVersion} name to upgrade/downgrade to.
 * @return 0 on success, -1 on failure (the failure cause is logged, not rethrown).
 */
protected static int upgradeOrDowngradeTable(JavaSparkContext jsc, String basePath, String toVersion) {
  boolean rollbackUsingMarkers = Boolean.parseBoolean(HoodieWriteConfig.ROLLBACK_USING_MARKERS_ENABLE.defaultValue());
  HoodieWriteConfig writeConfig = getWriteConfig(basePath, rollbackUsingMarkers);
  // Build a meta client without eagerly loading the active timeline; the
  // upgrade/downgrade machinery loads what it needs itself.
  HoodieTableMetaClient tableMetaClient = HoodieTableMetaClient.builder()
      .setConf(jsc.hadoopConfiguration())
      .setBasePath(writeConfig.getBasePath())
      .setLoadActiveTimelineOnLoad(false)
      .setConsistencyGuardConfig(writeConfig.getConsistencyGuardConfig())
      .setLayoutVersion(Option.of(new TimelineLayoutVersion(writeConfig.getTimelineLayoutVersion())))
      .setFileSystemRetryConfig(writeConfig.getFileSystemRetryConfig())
      .build();
  try {
    UpgradeDowngrade upgradeDowngrade =
        new UpgradeDowngrade(tableMetaClient, writeConfig, new HoodieSparkEngineContext(jsc), SparkUpgradeDowngradeHelper.getInstance());
    upgradeDowngrade.run(HoodieTableVersion.valueOf(toVersion), null);
    LOG.info(String.format("Table at \"%s\" upgraded / downgraded to version \"%s\".", basePath, toVersion));
    return 0;
  } catch (Exception e) {
    // CLI boundary: report failure via exit code, keep the cause in the log.
    LOG.warn(String.format("Failed: Could not upgrade/downgrade table at \"%s\" to version \"%s\".", basePath, toVersion), e);
    return -1;
  }
}
Aggregations