Use of org.apache.hudi.common.util.HoodieTimer in project hudi by apache.
The class BaseRollbackActionExecutor, method runRollback.
private HoodieRollbackMetadata runRollback(HoodieTable<T, I, K, O> table, HoodieInstant rollbackInstant, HoodieRollbackPlan rollbackPlan) {
  ValidationUtils.checkArgument(rollbackInstant.getState().equals(HoodieInstant.State.REQUESTED)
      || rollbackInstant.getState().equals(HoodieInstant.State.INFLIGHT));
  final HoodieTimer timer = new HoodieTimer();
  timer.startTimer();
  final HoodieInstant inflightInstant = rollbackInstant.isRequested()
      ? table.getActiveTimeline().transitionRollbackRequestedToInflight(rollbackInstant)
      : rollbackInstant;
  HoodieTimer rollbackTimer = new HoodieTimer().startTimer();
  List<HoodieRollbackStat> stats = doRollbackAndGetStats(rollbackPlan);
  HoodieRollbackMetadata rollbackMetadata = TimelineMetadataUtils.convertRollbackMetadata(
      instantTime, Option.of(rollbackTimer.endTimer()), Collections.singletonList(instantToRollback), stats);
  if (!skipTimelinePublish) {
    finishRollback(inflightInstant, rollbackMetadata);
  }
  // Finally, remove the markers post rollback.
  WriteMarkersFactory.get(config.getMarkersType(), table, instantToRollback.getTimestamp())
      .quietDeleteMarkerDir(context, config.getMarkersDeleteParallelism());
  return rollbackMetadata;
}
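All of these call sites follow the same measurement pattern: construct a HoodieTimer, start it, do the work, and read the elapsed milliseconds back from endTimer(). A minimal standalone sketch of just that pattern (the class name and the sleep are illustrative, not from the Hudi sources):

import org.apache.hudi.common.util.HoodieTimer;

public class TimerPatternSketch {
  public static void main(String[] args) throws InterruptedException {
    // startTimer() returns the timer itself, so construction and start can be chained.
    HoodieTimer timer = new HoodieTimer().startTimer();
    Thread.sleep(50); // stand-in for the work being measured
    long elapsedMs = timer.endTimer(); // elapsed time in milliseconds
    System.out.println("Took " + elapsedMs + " ms");
  }
}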
Use of org.apache.hudi.common.util.HoodieTimer in project hudi by apache.
The class CopyOnWriteRollbackActionExecutor, method executeRollback.
@Override
protected List<HoodieRollbackStat> executeRollback(HoodieRollbackPlan hoodieRollbackPlan) {
  HoodieTimer rollbackTimer = new HoodieTimer();
  rollbackTimer.startTimer();
  List<HoodieRollbackStat> stats = new ArrayList<>();
  HoodieActiveTimeline activeTimeline = table.getActiveTimeline();
  HoodieInstant resolvedInstant = instantToRollback;
  if (instantToRollback.isCompleted()) {
    LOG.info("Unpublishing instant " + instantToRollback);
    resolvedInstant = activeTimeline.revertToInflight(instantToRollback);
    // reload meta-client to reflect latest timeline status
    table.getMetaClient().reloadActiveTimeline();
  }
  // deleting the timeline file
  if (!resolvedInstant.isRequested()) {
    // delete all the data files for this commit
    LOG.info("Clean out all base files generated for commit: " + resolvedInstant);
    stats = executeRollback(resolvedInstant, hoodieRollbackPlan);
  }
  dropBootstrapIndexIfNeeded(instantToRollback);
  // Delete Inflight instant if enabled
  deleteInflightAndRequestedInstant(deleteInstants, activeTimeline, resolvedInstant);
  LOG.info("Time(in ms) taken to finish rollback " + rollbackTimer.endTimer());
  return stats;
}
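Note how the rollback only deletes data files once the instant has been moved past the REQUESTED state: a COMPLETED instant is first reverted to INFLIGHT, and a still-REQUESTED instant has produced no base files to clean. A small sketch of those state checks in isolation (the instant below is constructed by hand for illustration; in the executor it comes from the active timeline):

import org.apache.hudi.common.table.timeline.HoodieInstant;
import org.apache.hudi.common.table.timeline.HoodieTimeline;

public class RollbackStateGateSketch {
  public static void main(String[] args) {
    // Illustrative COMPLETED commit instant with a made-up timestamp.
    HoodieInstant instant = new HoodieInstant(HoodieInstant.State.COMPLETED, HoodieTimeline.COMMIT_ACTION, "20220101000000");
    // A COMPLETED instant gets reverted to INFLIGHT before its files are cleaned;
    // only a non-REQUESTED instant can have base files worth deleting.
    System.out.println("completed=" + instant.isCompleted() + ", requested=" + instant.isRequested());
  }
}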
Use of org.apache.hudi.common.util.HoodieTimer in project hudi by apache.
The class HoodieKeyLookupHandle, method getBloomFilter.
private BloomFilter getBloomFilter() {
  BloomFilter bloomFilter = null;
  HoodieTimer timer = new HoodieTimer().startTimer();
  try {
    if (config.isMetadataBloomFilterIndexEnabled()) {
      bloomFilter = hoodieTable.getMetadataTable().getBloomFilter(partitionPathFileIDPair.getLeft(), partitionPathFileIDPair.getRight())
          .orElseThrow(() -> new HoodieIndexException("BloomFilter missing for " + partitionPathFileIDPair.getRight()));
    } else {
      try (HoodieFileReader reader = createNewFileReader()) {
        bloomFilter = reader.readBloomFilter();
      }
    }
  } catch (IOException e) {
    throw new HoodieIndexException(String.format("Error reading bloom filter from %s", getPartitionPathFileIDPair()), e);
  }
  LOG.info(String.format("Read bloom filter from %s in %d ms", partitionPathFileIDPair, timer.endTimer()));
  return bloomFilter;
}
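The filter returned here is what the key lookup later probes with candidate record keys: a hit means the key may be in the file, a miss means it definitely is not. A hedged sketch of that probe using a locally built filter instead of one read from a base file or the metadata table (the sizes and keys are made up):

import org.apache.hudi.common.bloom.BloomFilter;
import org.apache.hudi.common.bloom.BloomFilterFactory;
import org.apache.hudi.common.bloom.BloomFilterTypeCode;

public class BloomProbeSketch {
  public static void main(String[] args) {
    // Build a small filter locally; getBloomFilter() above reads a persisted one instead.
    BloomFilter filter = BloomFilterFactory.createBloomFilter(1000, 0.000001, -1, BloomFilterTypeCode.SIMPLE.name());
    filter.add("key-001");
    // Bloom filters can return false positives but never false negatives.
    System.out.println(filter.mightContain("key-001")); // true
    System.out.println(filter.mightContain("key-999")); // almost certainly false
  }
}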
Use of org.apache.hudi.common.util.HoodieTimer in project hudi by apache.
The class HoodieIndexUtils, method filterKeysFromFile.
/**
 * Given a list of row keys and one file, return only row keys existing in that file.
 *
 * @param filePath            - File to filter keys from
 * @param candidateRecordKeys - Candidate keys to filter
 * @param configuration       - Hadoop configuration used to open the file reader
 * @return List of candidate keys that are available in the file
 */
public static List<String> filterKeysFromFile(Path filePath, List<String> candidateRecordKeys, Configuration configuration) throws HoodieIndexException {
  ValidationUtils.checkArgument(FSUtils.isBaseFile(filePath));
  List<String> foundRecordKeys = new ArrayList<>();
  try {
    // Load all rowKeys from the file, to double-confirm
    if (!candidateRecordKeys.isEmpty()) {
      HoodieTimer timer = new HoodieTimer().startTimer();
      HoodieFileReader fileReader = HoodieFileReaderFactory.getFileReader(configuration, filePath);
      Set<String> fileRowKeys = fileReader.filterRowKeys(new TreeSet<>(candidateRecordKeys));
      foundRecordKeys.addAll(fileRowKeys);
      LOG.info(String.format("Checked keys against file %s, in %d ms. #candidates (%d) #found (%d)",
          filePath, timer.endTimer(), candidateRecordKeys.size(), foundRecordKeys.size()));
      if (LOG.isDebugEnabled()) {
        LOG.debug("Keys matching for file " + filePath + " => " + foundRecordKeys);
      }
    }
  } catch (Exception e) {
    throw new HoodieIndexException("Error checking candidate keys against file.", e);
  }
  return foundRecordKeys;
}
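Because filterKeysFromFile is a public static helper, it can be called directly once a base file and candidate keys are at hand. A hypothetical invocation (the path and keys below are invented for illustration and must point at a real Hudi base file to succeed):

import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hudi.index.HoodieIndexUtils;

public class FilterKeysSketch {
  public static void main(String[] args) {
    Configuration hadoopConf = new Configuration();
    // Invented path; FSUtils.isBaseFile() requires a real base file name here.
    Path baseFile = new Path("/data/hudi/2022/01/01/f1e2d3c4_1-0-1_20220101000000.parquet");
    List<String> candidates = Arrays.asList("uuid-1", "uuid-2", "uuid-3");
    List<String> found = HoodieIndexUtils.filterKeysFromFile(baseFile, candidates, hadoopConf);
    System.out.println("Found " + found.size() + " of " + candidates.size() + " candidate keys");
  }
}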
Use of org.apache.hudi.common.util.HoodieTimer in project hudi by apache.
The class AbstractTableFileSystemView, method resetFileGroupsReplaced.
/**
 * Get the replacing instant for each file group by looking at all completed REPLACE instants.
 */
private void resetFileGroupsReplaced(HoodieTimeline timeline) {
  HoodieTimer hoodieTimer = new HoodieTimer();
  hoodieTimer.startTimer();
  // for each REPLACE instant, get map of (partitionPath -> deleteFileGroup)
  HoodieTimeline replacedTimeline = timeline.getCompletedReplaceTimeline();
  Stream<Map.Entry<HoodieFileGroupId, HoodieInstant>> resultStream = replacedTimeline.getInstants().flatMap(instant -> {
    try {
      HoodieReplaceCommitMetadata replaceMetadata = HoodieReplaceCommitMetadata.fromBytes(
          metaClient.getActiveTimeline().getInstantDetails(instant).get(), HoodieReplaceCommitMetadata.class);
      // get replace instant mapping for each partition, fileId
      return replaceMetadata.getPartitionToReplaceFileIds().entrySet().stream()
          .flatMap(entry -> entry.getValue().stream()
              .map(e -> new AbstractMap.SimpleEntry<>(new HoodieFileGroupId(entry.getKey(), e), instant)));
    } catch (HoodieIOException ex) {
      if (ex.getIOException() instanceof FileNotFoundException) {
        // The replace instant may have been deleted by the archiver, in which case
        // getInstantDetails throws FileNotFoundException; catch it here and continue.
        LOG.warn(ex.getMessage());
        return Stream.empty();
      } else {
        throw ex;
      }
    } catch (IOException e) {
      throw new HoodieIOException("error reading commit metadata for " + instant, e);
    }
  });
  Map<HoodieFileGroupId, HoodieInstant> replacedFileGroups = resultStream.collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
  resetReplacedFileGroups(replacedFileGroups);
  LOG.info("Took " + hoodieTimer.endTimer() + " ms to read " + replacedTimeline.countInstants()
      + " instants, " + replacedFileGroups.size() + " replaced file groups");
}
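The core of the method is the flatMap that turns each replace commit's (partitionPath -> replaced fileIds) map into individual (HoodieFileGroupId -> instant) entries. The same flattening, reduced to a standalone sketch with invented partitions and file IDs (a String timestamp stands in for the HoodieInstant):

import java.util.AbstractMap;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import org.apache.hudi.common.model.HoodieFileGroupId;

public class ReplacedFileGroupsSketch {
  public static void main(String[] args) {
    Map<String, List<String>> partitionToReplacedFileIds = new HashMap<>();
    partitionToReplacedFileIds.put("2022/01/01", Arrays.asList("fg-1", "fg-2"));
    partitionToReplacedFileIds.put("2022/01/02", Arrays.asList("fg-3"));
    String replaceInstantTime = "20220102120000"; // stand-in for the replace HoodieInstant
    Map<HoodieFileGroupId, String> replaced = partitionToReplacedFileIds.entrySet().stream()
        .flatMap(entry -> entry.getValue().stream()
            .map(fileId -> new AbstractMap.SimpleEntry<>(new HoodieFileGroupId(entry.getKey(), fileId), replaceInstantTime)))
        .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
    replaced.forEach((fileGroupId, instantTime) -> System.out.println(fileGroupId + " replaced at " + instantTime));
  }
}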