Use of org.apache.hudi.exception.HoodieIOException in project hudi by apache.
Class ClusteringPlanActionExecutor, method execute().
@Override
public Option<HoodieClusteringPlan> execute() {
  Option<HoodieClusteringPlan> planOption = createClusteringPlan();
  if (planOption.isPresent()) {
    HoodieInstant clusteringInstant =
        new HoodieInstant(HoodieInstant.State.REQUESTED, HoodieTimeline.REPLACE_COMMIT_ACTION, instantTime);
    try {
      HoodieRequestedReplaceMetadata requestedReplaceMetadata = HoodieRequestedReplaceMetadata.newBuilder()
          .setOperationType(WriteOperationType.CLUSTER.name())
          .setExtraMetadata(extraMetadata.orElse(Collections.emptyMap()))
          .setClusteringPlan(planOption.get())
          .build();
      table.getActiveTimeline().saveToPendingReplaceCommit(clusteringInstant,
          TimelineMetadataUtils.serializeRequestedReplaceMetadata(requestedReplaceMetadata));
    } catch (IOException ioe) {
      throw new HoodieIOException("Exception scheduling clustering", ioe);
    }
  }
  return planOption;
}
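The pattern in this snippet is exception translation: the checked IOException raised while serializing and saving the requested replace metadata is rethrown as Hudi's unchecked HoodieIOException, so callers are not forced to declare it. A minimal, self-contained sketch of that same translation (the PlanWriter interface and persistPlan method are hypothetical stand-ins, not Hudi APIs):

import java.io.IOException;
import org.apache.hudi.exception.HoodieIOException;

public class PlanPersistenceSketch {

  // Hypothetical persistence hook standing in for saveToPendingReplaceCommit().
  interface PlanWriter {
    void write(byte[] serializedPlan) throws IOException;
  }

  // Translate the checked IOException into Hudi's unchecked HoodieIOException.
  static void persistPlan(PlanWriter writer, byte[] serializedPlan) {
    try {
      writer.write(serializedPlan);
    } catch (IOException ioe) {
      throw new HoodieIOException("Exception scheduling clustering", ioe);
    }
  }
}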
Use of org.apache.hudi.exception.HoodieIOException in project hudi by apache.
Class BaseCommitActionExecutor, method finalizeWrite().
/**
 * Finalize Write operation.
 *
 * @param instantTime Instant Time
 * @param stats Hoodie Write Stat
 */
protected void finalizeWrite(String instantTime, List<HoodieWriteStat> stats, HoodieWriteMetadata result) {
  try {
    Instant start = Instant.now();
    table.finalizeWrite(context, instantTime, stats);
    result.setFinalizeDuration(Duration.between(start, Instant.now()));
  } catch (HoodieIOException ioe) {
    throw new HoodieCommitException("Failed to complete commit " + instantTime + " due to finalize errors.", ioe);
  }
}
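Here the translation happens one layer up: a HoodieIOException thrown during finalization is re-wrapped as a HoodieCommitException at the commit boundary, and the finalize duration is recorded. A compact sketch of the same timing-plus-translation shape, assuming a hypothetical Finalizer stand-in for table.finalizeWrite:

import java.time.Duration;
import java.time.Instant;
import org.apache.hudi.exception.HoodieCommitException;
import org.apache.hudi.exception.HoodieIOException;

public class FinalizeSketch {

  // Hypothetical stand-in for table.finalizeWrite(context, instantTime, stats).
  interface Finalizer {
    void finalizeWrite(String instantTime) throws HoodieIOException;
  }

  static Duration finalizeAndTime(Finalizer finalizer, String instantTime) {
    Instant start = Instant.now();
    try {
      finalizer.finalizeWrite(instantTime);
    } catch (HoodieIOException ioe) {
      // Re-wrap at the commit boundary so callers see a commit-level failure.
      throw new HoodieCommitException("Failed to complete commit " + instantTime + " due to finalize errors.", ioe);
    }
    return Duration.between(start, Instant.now());
  }
}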
Use of org.apache.hudi.exception.HoodieIOException in project hudi by apache.
Class ScheduleCompactionActionExecutor, method execute().
@Override
public Option<HoodieCompactionPlan> execute() {
  if (!config.getWriteConcurrencyMode().supportsOptimisticConcurrencyControl()
      && !config.getFailedWritesCleanPolicy().isLazy()) {
    // TODO(yihua): this validation is removed for Java client used by kafka-connect. Need to revisit this.
    if (config.getEngineType() != EngineType.JAVA) {
      // If there are inflight writes, their instantTime must not be less than that of the compaction instant time.
      table.getActiveTimeline().getCommitsTimeline().filterPendingExcludingCompaction().firstInstant()
          .ifPresent(earliestInflight -> ValidationUtils.checkArgument(
              HoodieTimeline.compareTimestamps(earliestInflight.getTimestamp(), HoodieTimeline.GREATER_THAN, instantTime),
              "Earliest write inflight instant time must be later than compaction time. Earliest :" + earliestInflight
                  + ", Compaction scheduled at " + instantTime));
    }
    // Committed and pending compaction instants should have strictly lower timestamps.
    List<HoodieInstant> conflictingInstants = table.getActiveTimeline().getWriteTimeline()
        .filterCompletedAndCompactionInstants().getInstants()
        .filter(instant -> HoodieTimeline.compareTimestamps(instant.getTimestamp(), HoodieTimeline.GREATER_THAN_OR_EQUALS, instantTime))
        .collect(Collectors.toList());
    ValidationUtils.checkArgument(conflictingInstants.isEmpty(),
        "Following instants have timestamps >= compactionInstant (" + instantTime + ") Instants :" + conflictingInstants);
  }
  HoodieCompactionPlan plan = scheduleCompaction();
  if (plan != null && (plan.getOperations() != null) && (!plan.getOperations().isEmpty())) {
    extraMetadata.ifPresent(plan::setExtraMetadata);
    HoodieInstant compactionInstant =
        new HoodieInstant(HoodieInstant.State.REQUESTED, HoodieTimeline.COMPACTION_ACTION, instantTime);
    try {
      table.getActiveTimeline().saveToCompactionRequested(compactionInstant,
          TimelineMetadataUtils.serializeCompactionPlan(plan));
    } catch (IOException ioe) {
      throw new HoodieIOException("Exception scheduling compaction", ioe);
    }
    return Option.of(plan);
  }
  return Option.empty();
}
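Before the requested compaction instant is written, the executor rejects any completed or pending-compaction instant whose timestamp is at or beyond the proposed compaction time. A plain-Java sketch of that guard, assuming instant times are fixed-format timestamp strings so a lexicographic comparison captures the ordering (the helper names below are illustrative, not Hudi APIs):

import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;

public class CompactionGuardSketch {

  // Collect instants whose timestamp is >= the proposed compaction instant time.
  static List<String> conflictingInstants(List<String> existingInstantTimes, String compactionInstantTime) {
    return existingInstantTimes.stream()
        .filter(ts -> ts.compareTo(compactionInstantTime) >= 0)
        .collect(Collectors.toList());
  }

  public static void main(String[] args) {
    List<String> existing = Arrays.asList("20220101091010", "20220101101515");
    String compactionTime = "20220101121212";
    List<String> conflicts = conflictingInstants(existing, compactionTime);
    if (!conflicts.isEmpty()) {
      throw new IllegalArgumentException(
          "Following instants have timestamps >= compactionInstant (" + compactionTime + ") Instants :" + conflicts);
    }
    System.out.println("No conflicting instants; compaction can be scheduled at " + compactionTime);
  }
}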
Use of org.apache.hudi.exception.HoodieIOException in project hudi by apache.
Class HoodieAvroDataBlock, method serializeRecords().
@Override
protected byte[] serializeRecords(List<IndexedRecord> records) throws IOException {
  Schema schema = new Schema.Parser().parse(super.getLogBlockHeader().get(HeaderMetadataType.SCHEMA));
  GenericDatumWriter<IndexedRecord> writer = new GenericDatumWriter<>(schema);
  ByteArrayOutputStream baos = new ByteArrayOutputStream();
  DataOutputStream output = new DataOutputStream(baos);
  // 1. Write out the log block version
  output.writeInt(HoodieLogBlock.version);
  // 2. Write total number of records
  output.writeInt(records.size());
  // 3. Write the records
  for (IndexedRecord s : records) {
    ByteArrayOutputStream temp = new ByteArrayOutputStream();
    BinaryEncoder encoder = EncoderFactory.get().binaryEncoder(temp, encoderCache.get());
    encoderCache.set(encoder);
    try {
      // Encode the record into bytes
      writer.write(s, encoder);
      encoder.flush();
      // Get the size of the bytes
      int size = temp.toByteArray().length;
      // Write the record size
      output.writeInt(size);
      // Write the content
      output.write(temp.toByteArray());
    } catch (IOException e) {
      throw new HoodieIOException("IOException converting HoodieAvroDataBlock to bytes", e);
    }
  }
  encoderCache.remove();
  output.close();
  return baos.toByteArray();
}
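The layout written above is: a 4-byte log block version, a 4-byte record count, and then each record as a 4-byte length followed by its Avro binary encoding. A sketch of reading that layout back with the standard Avro decoder APIs (the class and method names here are illustrative; Hudi's actual deserialization path may differ):

import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericDatumReader;
import org.apache.avro.generic.IndexedRecord;
import org.apache.avro.io.BinaryDecoder;
import org.apache.avro.io.DecoderFactory;

public class AvroBlockReadSketch {

  // Inverse of serializeRecords(): read [version][count], then per-record [size][payload].
  static List<IndexedRecord> deserializeRecords(byte[] content, Schema schema) throws IOException {
    GenericDatumReader<IndexedRecord> reader = new GenericDatumReader<>(schema);
    DataInputStream input = new DataInputStream(new ByteArrayInputStream(content));
    int version = input.readInt();       // 1. log block version (not interpreted in this sketch)
    int recordCount = input.readInt();   // 2. total number of records
    List<IndexedRecord> records = new ArrayList<>(recordCount);
    BinaryDecoder decoder = null;
    for (int i = 0; i < recordCount; i++) {
      int size = input.readInt();        // 3a. size of this record's payload
      byte[] payload = new byte[size];
      input.readFully(payload);          // 3b. the Avro-encoded record bytes
      decoder = DecoderFactory.get().binaryDecoder(payload, decoder);
      records.add(reader.read(null, decoder));
    }
    return records;
  }
}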
Use of org.apache.hudi.exception.HoodieIOException in project hudi by apache.
Class HoodieAvroDataBlock, method getBytes().
@Deprecated
public byte[] getBytes(Schema schema) throws IOException {
  GenericDatumWriter<IndexedRecord> writer = new GenericDatumWriter<>(schema);
  ByteArrayOutputStream baos = new ByteArrayOutputStream();
  DataOutputStream output = new DataOutputStream(baos);
  // 2. Compress and write schema out
  byte[] schemaContent = compress(schema.toString());
  output.writeInt(schemaContent.length);
  output.write(schemaContent);
  List<IndexedRecord> records = new ArrayList<>();
  try (ClosableIterator<IndexedRecord> recordItr = getRecordItr()) {
    recordItr.forEachRemaining(records::add);
  }
  // 3. Write total number of records
  output.writeInt(records.size());
  // 4. Write the records
  Iterator<IndexedRecord> itr = records.iterator();
  while (itr.hasNext()) {
    IndexedRecord s = itr.next();
    ByteArrayOutputStream temp = new ByteArrayOutputStream();
    Encoder encoder = EncoderFactory.get().binaryEncoder(temp, null);
    try {
      // Encode the record into bytes
      writer.write(s, encoder);
      encoder.flush();
      // Get the size of the bytes
      int size = temp.toByteArray().length;
      // Write the record size
      output.writeInt(size);
      // Write the content
      output.write(temp.toByteArray());
      itr.remove();
    } catch (IOException e) {
      throw new HoodieIOException("IOException converting HoodieAvroDataBlock to bytes", e);
    }
  }
  output.close();
  return baos.toByteArray();
}
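getBytes prefixes the payload with the schema: a 4-byte length followed by the compressed schema JSON, before the same per-record length-plus-payload encoding. The compress helper is internal to HoodieAvroDataBlock and is not shown in this snippet; purely as an illustration, a length-prefixed compressed schema could be written and read back with java.util.zip streams as below (a hypothetical compressor, not necessarily the one Hudi uses):

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.zip.DeflaterOutputStream;
import java.util.zip.InflaterInputStream;

public class SchemaPrefixSketch {

  // Hypothetical compressor: deflate the schema JSON to bytes.
  static byte[] compress(String text) throws IOException {
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    try (DeflaterOutputStream out = new DeflaterOutputStream(baos)) {
      out.write(text.getBytes(StandardCharsets.UTF_8));
    }
    return baos.toByteArray();
  }

  // Write the schema as [length][compressed bytes], mirroring the prefix written in getBytes().
  static void writeSchemaPrefix(DataOutputStream output, String schemaJson) throws IOException {
    byte[] schemaContent = compress(schemaJson);
    output.writeInt(schemaContent.length);
    output.write(schemaContent);
  }

  // Read the prefix back: length, compressed bytes, then inflate to recover the schema JSON.
  static String readSchemaPrefix(DataInputStream input) throws IOException {
    byte[] compressed = new byte[input.readInt()];
    input.readFully(compressed);
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    try (InflaterInputStream in = new InflaterInputStream(new ByteArrayInputStream(compressed))) {
      byte[] buf = new byte[4096];
      int n;
      while ((n = in.read(buf)) != -1) {
        baos.write(buf, 0, n);
      }
    }
    return new String(baos.toByteArray(), StandardCharsets.UTF_8);
  }
}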