Search in sources :

Example 71 with HoodieIOException

use of org.apache.hudi.exception.HoodieIOException in project hudi by apache.

Source: class BaseRollbackActionExecutor, method finishRollback.

/**
 * Completes an in-flight rollback: persists the rollback metadata and transitions the
 * rollback instant on the active timeline from inflight to completed.
 *
 * <p>When locking is enabled (i.e. {@code skipLocking} is false), the whole operation is
 * wrapped in a transaction that is always ended in the {@code finally} block, even on failure.
 *
 * @param inflightInstant  the inflight rollback instant to transition to completed
 * @param rollbackMetadata metadata describing the rollback to persist
 * @throws HoodieIOException if serializing or persisting the rollback metadata fails
 */
protected void finishRollback(HoodieInstant inflightInstant, HoodieRollbackMetadata rollbackMetadata) throws HoodieIOException {
    // Hoist the lock decision so begin/end are guaranteed to agree.
    final boolean useTransaction = !skipLocking;
    try {
        if (useTransaction) {
            this.txnManager.beginTransaction(Option.empty(), Option.empty());
        }
        writeTableMetadata(rollbackMetadata);
        table.getActiveTimeline()
            .transitionRollbackInflightToComplete(inflightInstant, TimelineMetadataUtils.serializeRollbackMetadata(rollbackMetadata));
        LOG.info("Rollback of Commits " + rollbackMetadata.getCommitsRollback() + " is complete");
    } catch (IOException e) {
        throw new HoodieIOException("Error executing rollback at instant " + instantTime, e);
    } finally {
        if (useTransaction) {
            this.txnManager.endTransaction(Option.empty());
        }
    }
}
Also used : HoodieIOException(org.apache.hudi.exception.HoodieIOException) IOException(java.io.IOException) HoodieIOException(org.apache.hudi.exception.HoodieIOException)

Example 72 with HoodieIOException

use of org.apache.hudi.exception.HoodieIOException in project hudi by apache.

Source: class BaseRollbackPlanActionExecutor, method requestRollback.

/**
 * Creates a rollback plan (with absolute file paths) for the files to be rolled back and,
 * unless timeline publishing is skipped, stores it in a requested-rollback instant file.
 *
 * @param startRollbackTime Rollback Instant Time
 * @return Rollback Plan if generated
 * @throws HoodieIOException if the plan cannot be serialized or saved to the timeline
 */
protected Option<HoodieRollbackPlan> requestRollback(String startRollbackTime) {
    final HoodieInstant rollbackInstant = new HoodieInstant(HoodieInstant.State.REQUESTED, HoodieTimeline.ROLLBACK_ACTION, startRollbackTime);
    try {
        // An already-requested instant needs no freshly computed rollback requests.
        List<HoodieRollbackRequest> rollbackRequests = instantToRollback.isRequested()
            ? new ArrayList<>()
            : new ArrayList<>(getRollbackStrategy().getRollbackRequests(instantToRollback));
        HoodieRollbackPlan plan = new HoodieRollbackPlan(
            new HoodieInstantInfo(instantToRollback.getTimestamp(), instantToRollback.getAction()),
            rollbackRequests,
            LATEST_ROLLBACK_PLAN_VERSION);
        if (!skipTimelinePublish) {
            boolean alreadyScheduled =
                table.getRollbackTimeline().filterInflightsAndRequested().containsInstant(rollbackInstant.getTimestamp());
            if (alreadyScheduled) {
                // A pending rollback for this instant already exists; don't schedule a duplicate.
                LOG.warn("Request Rollback found with instant time " + rollbackInstant + ", hence skipping scheduling rollback");
            } else {
                table.getActiveTimeline().saveToRollbackRequested(rollbackInstant, TimelineMetadataUtils.serializeRollbackPlan(plan));
                table.getMetaClient().reloadActiveTimeline();
                LOG.info("Requesting Rollback with instant time " + rollbackInstant);
            }
        }
        return Option.of(plan);
    } catch (IOException e) {
        LOG.error("Got exception when saving rollback requested file", e);
        throw new HoodieIOException(e.getMessage(), e);
    }
}
Also used : HoodieInstant(org.apache.hudi.common.table.timeline.HoodieInstant) HoodieIOException(org.apache.hudi.exception.HoodieIOException) HoodieRollbackPlan(org.apache.hudi.avro.model.HoodieRollbackPlan) HoodieInstantInfo(org.apache.hudi.avro.model.HoodieInstantInfo) ArrayList(java.util.ArrayList) IOException(java.io.IOException) HoodieIOException(org.apache.hudi.exception.HoodieIOException) HoodieRollbackRequest(org.apache.hudi.avro.model.HoodieRollbackRequest)

Example 73 with HoodieIOException

use of org.apache.hudi.exception.HoodieIOException in project hudi by apache.

Source: class HoodieTestDataGenerator, method generateUniqueUpdatesStream.

/**
 * Generates deduped updates of keys previously inserted, randomly distributed across the keys above.
 *
 * @param instantTime Commit Timestamp
 * @param n           Number of unique records
 * @param schemaStr   Schema whose previously inserted keys should be updated
 * @return stream of hoodie record updates
 * @throws IllegalArgumentException if {@code n} exceeds the number of keys available for the schema
 */
public Stream<HoodieRecord> generateUniqueUpdatesStream(String instantTime, Integer n, String schemaStr) {
    final Set<KeyPartition> used = new HashSet<>();
    int numExistingKeys = numKeysBySchema.getOrDefault(schemaStr, 0);
    Map<Integer, KeyPartition> existingKeys = existingKeysBySchema.get(schemaStr);
    if (n > numExistingKeys) {
        throw new IllegalArgumentException("Requested unique updates is greater than number of available keys");
    }
    return IntStream.range(0, n).boxed().map(i -> {
        // Fix: bound must be numExistingKeys, not numExistingKeys - 1 — the old
        // nextInt(numExistingKeys - 1) could never pick the last key as the initial
        // candidate, biasing the selection. nextInt(1) == 0, so this also covers the
        // single-key case without a special branch.
        int index = rand.nextInt(numExistingKeys);
        KeyPartition kp = existingKeys.get(index);
        // Find the first unused keyPartition, scanning forward (with wrap-around) from
        // the randomly chosen one. Terminates because used.size() < n <= numExistingKeys.
        while (used.contains(kp)) {
            index = (index + 1) % numExistingKeys;
            kp = existingKeys.get(index);
        }
        logger.debug("key getting updated: " + kp.key.getRecordKey());
        used.add(kp);
        try {
            return new HoodieAvroRecord(kp.key, generateRandomValueAsPerSchema(schemaStr, kp.key, instantTime, false));
        } catch (IOException e) {
            throw new HoodieIOException(e.getMessage(), e);
        }
    });
}
Also used : HoodieIOException(org.apache.hudi.exception.HoodieIOException) HoodieAvroRecord(org.apache.hudi.common.model.HoodieAvroRecord) IOException(java.io.IOException) HoodieIOException(org.apache.hudi.exception.HoodieIOException) HashSet(java.util.HashSet)

Example 74 with HoodieIOException

use of org.apache.hudi.exception.HoodieIOException in project hudi by apache.

Source: class HoodieTestDataGenerator, method generateUniqueDeleteRecordStream.

/**
 * Generates deduped delete records for keys previously inserted, randomly distributed across
 * the keys above. Each chosen key is removed from the pool of existing keys so it cannot be
 * selected again, and the remaining key count is written back to {@code numKeysBySchema}.
 *
 * @param instantTime Commit Timestamp
 * @param n           Number of unique records
 * @return stream of hoodie records for delete
 * @throws IllegalArgumentException if {@code n} exceeds the number of available keys
 */
public Stream<HoodieRecord> generateUniqueDeleteRecordStream(String instantTime, Integer n) {
    Map<Integer, KeyPartition> existingKeys = existingKeysBySchema.get(TRIP_EXAMPLE_SCHEMA);
    // Primitive counter avoids repeated Integer unboxing/boxing in the loop below.
    int numExistingKeys = numKeysBySchema.get(TRIP_EXAMPLE_SCHEMA);
    if (n > numExistingKeys) {
        throw new IllegalArgumentException("Requested unique deletes is greater than number of available keys");
    }
    // (Removed a dead `used` HashSet that was populated but never read — dedup is
    // already guaranteed by removing each chosen key from existingKeys.)
    List<HoodieRecord> result = new ArrayList<>(n);
    for (int i = 0; i < n; i++) {
        int index = rand.nextInt(numExistingKeys);
        // Defensive scan for gaps; the swap-remove below keeps the map compact in
        // [0, numExistingKeys), so this normally hits on the first probe.
        while (!existingKeys.containsKey(index)) {
            index = (index + 1) % numExistingKeys;
        }
        // Swap-remove: move the last entry into the vacated slot so indices stay compact.
        // Guarding index != lastIndex avoids transiently mapping the slot to null.
        KeyPartition kp = existingKeys.remove(index);
        int lastIndex = numExistingKeys - 1;
        if (index != lastIndex) {
            existingKeys.put(index, existingKeys.remove(lastIndex));
        }
        numExistingKeys--;
        try {
            result.add(new HoodieAvroRecord(kp.key, generateRandomDeleteValue(kp.key, instantTime)));
        } catch (IOException e) {
            throw new HoodieIOException(e.getMessage(), e);
        }
    }
    numKeysBySchema.put(TRIP_EXAMPLE_SCHEMA, numExistingKeys);
    return result.stream();
}
Also used : HoodieRecord(org.apache.hudi.common.model.HoodieRecord) ArrayList(java.util.ArrayList) IOException(java.io.IOException) HoodieIOException(org.apache.hudi.exception.HoodieIOException) HoodieIOException(org.apache.hudi.exception.HoodieIOException) HoodieAvroRecord(org.apache.hudi.common.model.HoodieAvroRecord) HashSet(java.util.HashSet)

Example 75 with HoodieIOException

use of org.apache.hudi.exception.HoodieIOException in project hudi by apache.

Source: class HoodieTestDataGenerator, method createMetadataFile.

/**
 * Creates (or overwrites) a metadata file under the table's {@code .hoodie} meta folder
 * and writes the given bytes to it.
 *
 * @param f             file name to create inside the meta folder
 * @param basePath      base path of the table
 * @param configuration Hadoop configuration used to resolve the filesystem
 * @param content       bytes to write (commit metadata payload)
 * @throws HoodieIOException if the file cannot be created, written, or closed
 */
private static void createMetadataFile(String f, String basePath, Configuration configuration, byte[] content) {
    Path commitFile = new Path(basePath + "/" + HoodieTableMetaClient.METAFOLDER_NAME + "/" + f);
    FileSystem fs = FSUtils.getFs(basePath, configuration);
    // try-with-resources replaces the manual finally-close: the stream is always closed,
    // and a close() failure no longer masks the primary write failure — any IOException
    // (create, write, or close) is wrapped uniformly below.
    try (FSDataOutputStream os = fs.create(commitFile, true)) {
        // Write empty commit metadata
        os.write(content);
    } catch (IOException ioe) {
        throw new HoodieIOException(ioe.getMessage(), ioe);
    }
}
Also used : Path(org.apache.hadoop.fs.Path) HoodieIOException(org.apache.hudi.exception.HoodieIOException) FileSystem(org.apache.hadoop.fs.FileSystem) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) IOException(java.io.IOException) HoodieIOException(org.apache.hudi.exception.HoodieIOException)

Aggregations

HoodieIOException (org.apache.hudi.exception.HoodieIOException)139 IOException (java.io.IOException)127 Path (org.apache.hadoop.fs.Path)45 List (java.util.List)31 ArrayList (java.util.ArrayList)30 Option (org.apache.hudi.common.util.Option)27 Collectors (java.util.stream.Collectors)26 HoodieInstant (org.apache.hudi.common.table.timeline.HoodieInstant)26 Pair (org.apache.hudi.common.util.collection.Pair)25 LogManager (org.apache.log4j.LogManager)25 Logger (org.apache.log4j.Logger)25 Map (java.util.Map)21 FileSystem (org.apache.hadoop.fs.FileSystem)20 GenericRecord (org.apache.avro.generic.GenericRecord)19 HashSet (java.util.HashSet)18 HoodieRecord (org.apache.hudi.common.model.HoodieRecord)18 HoodieTableMetaClient (org.apache.hudi.common.table.HoodieTableMetaClient)18 Set (java.util.Set)17 HoodieTimeline (org.apache.hudi.common.table.timeline.HoodieTimeline)17 HoodieException (org.apache.hudi.exception.HoodieException)17