Use of org.apache.hudi.exception.HoodieIOException in the Apache Hudi project:
class BaseRollbackActionExecutor, method finishRollback.
/**
 * Completes a rollback: persists the rollback metadata and transitions the rollback
 * instant from INFLIGHT to COMPLETE on the active timeline.
 *
 * @param inflightInstant  the inflight rollback instant being completed
 * @param rollbackMetadata metadata describing the commits that were rolled back
 * @throws HoodieIOException if serializing or writing the rollback metadata fails
 */
protected void finishRollback(HoodieInstant inflightInstant, HoodieRollbackMetadata rollbackMetadata) throws HoodieIOException {
try {
// Acquire the table-level transaction lock unless the caller already holds it
// (skipLocking == true means lock management is the caller's responsibility).
if (!skipLocking) {
this.txnManager.beginTransaction(Option.empty(), Option.empty());
}
// Metadata table must be updated before the timeline transition makes the
// rollback visible as complete.
writeTableMetadata(rollbackMetadata);
table.getActiveTimeline().transitionRollbackInflightToComplete(inflightInstant, TimelineMetadataUtils.serializeRollbackMetadata(rollbackMetadata));
LOG.info("Rollback of Commits " + rollbackMetadata.getCommitsRollback() + " is complete");
} catch (IOException e) {
throw new HoodieIOException("Error executing rollback at instant " + instantTime, e);
} finally {
// Release the lock on both success and failure paths, but only if we took it.
if (!skipLocking) {
this.txnManager.endTransaction(Option.empty());
}
}
}
Use of org.apache.hudi.exception.HoodieIOException in the Apache Hudi project:
class BaseRollbackPlanActionExecutor, method requestRollback.
/**
* Creates a Rollback plan if there are files to be rolledback and stores them in instant file.
* Rollback Plan contains absolute file paths.
*
* @param startRollbackTime Rollback Instant Time
* @return Rollback Plan if generated
*/
/**
 * Creates a Rollback plan if there are files to be rolledback and stores them in instant file.
 * Rollback Plan contains absolute file paths.
 *
 * @param startRollbackTime Rollback Instant Time
 * @return Rollback Plan if generated
 */
protected Option<HoodieRollbackPlan> requestRollback(String startRollbackTime) {
  final HoodieInstant requestedInstant =
      new HoodieInstant(HoodieInstant.State.REQUESTED, HoodieTimeline.ROLLBACK_ACTION, startRollbackTime);
  try {
    // If the instant being rolled back never progressed past REQUESTED, there is
    // nothing on storage to undo, so the request list stays empty.
    final List<HoodieRollbackRequest> requests = instantToRollback.isRequested()
        ? new ArrayList<>()
        : new ArrayList<>(getRollbackStrategy().getRollbackRequests(instantToRollback));
    final HoodieRollbackPlan plan = new HoodieRollbackPlan(
        new HoodieInstantInfo(instantToRollback.getTimestamp(), instantToRollback.getAction()),
        requests, LATEST_ROLLBACK_PLAN_VERSION);
    if (!skipTimelinePublish) {
      final boolean alreadyScheduled = table.getRollbackTimeline()
          .filterInflightsAndRequested()
          .containsInstant(requestedInstant.getTimestamp());
      if (alreadyScheduled) {
        LOG.warn("Request Rollback found with instant time " + requestedInstant + ", hence skipping scheduling rollback");
      } else {
        // Persist the serialized plan in the requested-rollback instant file and
        // refresh the cached timeline so the new instant is visible.
        table.getActiveTimeline().saveToRollbackRequested(requestedInstant, TimelineMetadataUtils.serializeRollbackPlan(plan));
        table.getMetaClient().reloadActiveTimeline();
        LOG.info("Requesting Rollback with instant time " + requestedInstant);
      }
    }
    return Option.of(plan);
  } catch (IOException e) {
    LOG.error("Got exception when saving rollback requested file", e);
    throw new HoodieIOException(e.getMessage(), e);
  }
}
Use of org.apache.hudi.exception.HoodieIOException in the Apache Hudi project:
class HoodieTestDataGenerator, method generateUniqueUpdatesStream.
/**
* Generates deduped updates of keys previously inserted, randomly distributed across the keys above.
*
* @param instantTime Commit Timestamp
* @param n Number of unique records
* @return stream of hoodie record updates
*/
/**
 * Generates deduped updates of keys previously inserted, randomly distributed across the keys above.
 *
 * @param instantTime Commit Timestamp
 * @param n           Number of unique records
 * @param schemaStr   schema string keying the pool of previously generated keys
 * @return stream of hoodie record updates
 * @throws IllegalArgumentException if n exceeds the number of available keys
 * @throws HoodieIOException        if random value generation fails
 */
public Stream<HoodieRecord> generateUniqueUpdatesStream(String instantTime, Integer n, String schemaStr) {
  final Set<KeyPartition> used = new HashSet<>();
  int numExistingKeys = numKeysBySchema.getOrDefault(schemaStr, 0);
  Map<Integer, KeyPartition> existingKeys = existingKeysBySchema.get(schemaStr);
  if (n > numExistingKeys) {
    throw new IllegalArgumentException("Requested unique updates is greater than number of available keys");
  }
  return IntStream.range(0, n).boxed().map(i -> {
    // BUGFIX: the previous rand.nextInt(numExistingKeys - 1) could never pick the last
    // key as the initial choice (sampling bias) and needed a special case for a single
    // key; nextInt(numExistingKeys) samples uniformly and handles numExistingKeys == 1.
    int index = rand.nextInt(numExistingKeys);
    KeyPartition kp = existingKeys.get(index);
    // Linear-probe forward (with wrap-around) to the next key not already used in this batch.
    while (used.contains(kp)) {
      index = (index + 1) % numExistingKeys;
      kp = existingKeys.get(index);
    }
    logger.debug("key getting updated: " + kp.key.getRecordKey());
    used.add(kp);
    try {
      return new HoodieAvroRecord(kp.key, generateRandomValueAsPerSchema(schemaStr, kp.key, instantTime, false));
    } catch (IOException e) {
      throw new HoodieIOException(e.getMessage(), e);
    }
  });
}
Use of org.apache.hudi.exception.HoodieIOException in the Apache Hudi project:
class HoodieTestDataGenerator, method generateUniqueDeleteRecordStream.
/**
* Generates deduped delete records previously inserted, randomly distributed across the keys above.
*
* @param instantTime Commit Timestamp
* @param n Number of unique records
* @return stream of hoodie records for delete
*/
/**
 * Generates deduped delete records previously inserted, randomly distributed across the keys above.
 * Each chosen key is removed from the pool so it cannot be selected again.
 *
 * @param instantTime Commit Timestamp
 * @param n Number of unique records
 * @return stream of hoodie records for delete
 */
public Stream<HoodieRecord> generateUniqueDeleteRecordStream(String instantTime, Integer n) {
final Set<KeyPartition> used = new HashSet<>();
Map<Integer, KeyPartition> existingKeys = existingKeysBySchema.get(TRIP_EXAMPLE_SCHEMA);
Integer numExistingKeys = numKeysBySchema.get(TRIP_EXAMPLE_SCHEMA);
if (n > numExistingKeys) {
throw new IllegalArgumentException("Requested unique deletes is greater than number of available keys");
}
List<HoodieRecord> result = new ArrayList<>();
for (int i = 0; i < n; i++) {
int index = rand.nextInt(numExistingKeys);
// Probe forward with wrap-around until we land on a still-present key.
while (!existingKeys.containsKey(index)) {
index = (index + 1) % numExistingKeys;
}
// swap chosen index with last index and remove last entry.
// NOTE: when index == numExistingKeys - 1, get(numExistingKeys - 1) returns null
// after the remove above, so the put/remove pair nets out to a plain removal —
// the key range stays contiguous [0, numExistingKeys - 1) either way.
KeyPartition kp = existingKeys.remove(index);
existingKeys.put(index, existingKeys.get(numExistingKeys - 1));
existingKeys.remove(numExistingKeys - 1);
numExistingKeys--;
used.add(kp);
try {
result.add(new HoodieAvroRecord(kp.key, generateRandomDeleteValue(kp.key, instantTime)));
} catch (IOException e) {
throw new HoodieIOException(e.getMessage(), e);
}
}
// Persist the shrunken pool size so subsequent generators see the deletions.
numKeysBySchema.put(TRIP_EXAMPLE_SCHEMA, numExistingKeys);
return result.stream();
}
Use of org.apache.hudi.exception.HoodieIOException in the Apache Hudi project:
class HoodieTestDataGenerator, method createMetadataFile.
/**
 * Writes the given content bytes to a metadata file under the table's metafolder,
 * overwriting any existing file with the same name.
 *
 * @param f             file name relative to the metafolder
 * @param basePath      table base path
 * @param configuration Hadoop configuration used to resolve the filesystem
 * @param content       raw bytes to write
 * @throws HoodieIOException if the file cannot be created, written, or closed
 */
private static void createMetadataFile(String f, String basePath, Configuration configuration, byte[] content) {
  Path commitFile = new Path(basePath + "/" + HoodieTableMetaClient.METAFOLDER_NAME + "/" + f);
  // try-with-resources replaces the manual finally/close dance; a close() failure is
  // still surfaced as HoodieIOException, and no longer masks an earlier write failure.
  try (FSDataOutputStream os = FSUtils.getFs(basePath, configuration).create(commitFile, true)) {
    // Write empty commit metadata
    os.write(content);
  } catch (IOException ioe) {
    throw new HoodieIOException(ioe.getMessage(), ioe);
  }
}
Aggregations