Use of org.apache.hudi.exception.HoodieUpsertException in project hudi by apache.
The class HoodieMergeHandle, method writeRecord.
protected boolean writeRecord(HoodieRecord<T> hoodieRecord, Option<IndexedRecord> indexedRecord, boolean isDelete, GenericRecord oldRecord) {
  Option<Map<String, String>> recordMetadata = hoodieRecord.getData().getMetadata();
  if (!partitionPath.equals(hoodieRecord.getPartitionPath())) {
    HoodieUpsertException failureEx = new HoodieUpsertException("mismatched partition path, record partition: "
        + hoodieRecord.getPartitionPath() + " but trying to insert into partition: " + partitionPath);
    writeStatus.markFailure(hoodieRecord, failureEx, recordMetadata);
    return false;
  }
  try {
    if (indexedRecord.isPresent() && !isDelete) {
      // Convert GenericRecord to GenericRecord with hoodie commit metadata in schema.
      IndexedRecord recordWithMetadataInSchema = rewriteRecord((GenericRecord) indexedRecord.get(), preserveMetadata, oldRecord);
      if (preserveMetadata && useWriterSchema) {
        // useWriterSchema will be true only in case of compaction.
        // Do not preserve FILENAME_METADATA_FIELD.
        recordWithMetadataInSchema.put(FILENAME_METADATA_FIELD_POS, newFilePath.getName());
        fileWriter.writeAvro(hoodieRecord.getRecordKey(), recordWithMetadataInSchema);
      } else {
        fileWriter.writeAvroWithMetadata(recordWithMetadataInSchema, hoodieRecord);
      }
      recordsWritten++;
    } else {
      recordsDeleted++;
    }
    writeStatus.markSuccess(hoodieRecord, recordMetadata);
    // Deflate the record payload only after recording success, so users can still
    // access the payload while the record is being marked successful.
    hoodieRecord.deflate();
    return true;
  } catch (Exception e) {
    LOG.error("Error writing record " + hoodieRecord, e);
    writeStatus.markFailure(hoodieRecord, e, recordMetadata);
  }
  return false;
}
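Note the error-handling contract here: writeRecord never throws. Every failure, including the partition-path mismatch wrapped in HoodieUpsertException, is recorded on the WriteStatus via markFailure. A minimal caller-side sketch of surfacing those per-record errors after the handle closes; this is not Hudi's own reporting code, just an illustration assuming the standard WriteStatus accessors (hasErrors, getErrors, getFileId):

import java.util.List;

import org.apache.hudi.client.WriteStatus;
import org.apache.hudi.common.model.HoodieKey;

public class WriteStatusInspector {

  // Surfaces every per-record failure that writeRecord recorded via markFailure.
  // The statuses would come from closing the merge handle.
  static void reportFailures(List<WriteStatus> statuses) {
    for (WriteStatus status : statuses) {
      if (status.hasErrors()) {
        status.getErrors().forEach((HoodieKey key, Throwable t) ->
            System.err.printf("record %s failed in file %s: %s%n",
                key, status.getFileId(), t.getMessage()));
      }
    }
  }
}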
Use of org.apache.hudi.exception.HoodieUpsertException in project hudi by apache.
The class JavaUpsertPreppedDeltaCommitActionExecutor, method execute.
@Override
public HoodieWriteMetadata<List<WriteStatus>> execute() {
  HoodieWriteMetadata<List<WriteStatus>> result = new HoodieWriteMetadata<>();
  // First group by target file id.
  HashMap<Pair<String, String>, List<HoodieRecord<T>>> recordsByFileId = new HashMap<>();
  List<HoodieRecord<T>> insertedRecords = new LinkedList<>();
  // Split records into inserts and updates.
  for (HoodieRecord<T> record : preppedInputRecords) {
    if (!record.isCurrentLocationKnown()) {
      insertedRecords.add(record);
    } else {
      Pair<String, String> fileIdPartitionPath = Pair.of(record.getCurrentLocation().getFileId(), record.getPartitionPath());
      if (!recordsByFileId.containsKey(fileIdPartitionPath)) {
        recordsByFileId.put(fileIdPartitionPath, new LinkedList<>());
      }
      recordsByFileId.get(fileIdPartitionPath).add(record);
    }
  }
  LOG.info(String.format("Total update fileIDs %s, total inserts %s for commit %s", recordsByFileId.size(), insertedRecords.size(), instantTime));
  List<WriteStatus> allWriteStatuses = new ArrayList<>();
  try {
    recordsByFileId.forEach((k, v) -> {
      HoodieAppendHandle<?, ?, ?, ?> appendHandle = new HoodieAppendHandle(config, instantTime, table, k.getRight(), k.getLeft(), v.iterator(), taskContextSupplier);
      appendHandle.doAppend();
      allWriteStatuses.addAll(appendHandle.close());
    });
    if (insertedRecords.size() > 0) {
      HoodieWriteMetadata<List<WriteStatus>> insertResult = JavaBulkInsertHelper.newInstance().bulkInsert(insertedRecords, instantTime, table, config, this, false, Option.empty());
      allWriteStatuses.addAll(insertResult.getWriteStatuses());
    }
  } catch (Throwable e) {
    if (e instanceof HoodieUpsertException) {
      throw (HoodieUpsertException) e;
    }
    throw new HoodieUpsertException("Failed to upsert for commit time " + instantTime, e);
  }
  updateIndex(allWriteStatuses, result);
  return result;
}
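The containsKey/put pair in the grouping loop can be collapsed with Map.computeIfAbsent. A standalone sketch of the same (fileId, partitionPath) grouping, assuming the Hudi record and Pair types used above; the payload bound on T is an assumption matching the older, payload-based HoodieRecord signature this snippet uses:

import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;

import org.apache.hudi.common.model.HoodieRecord;
import org.apache.hudi.common.model.HoodieRecordPayload;
import org.apache.hudi.common.util.collection.Pair;

public class GroupUpdatesByFileId {

  // Same grouping as the loop in execute(), with computeIfAbsent replacing
  // the containsKey/put pair; the resulting map is identical.
  static <T extends HoodieRecordPayload> Map<Pair<String, String>, List<HoodieRecord<T>>> groupUpdates(
      List<HoodieRecord<T>> updates) {
    Map<Pair<String, String>, List<HoodieRecord<T>>> recordsByFileId = new HashMap<>();
    for (HoodieRecord<T> record : updates) {
      Pair<String, String> fileIdPartitionPath =
          Pair.of(record.getCurrentLocation().getFileId(), record.getPartitionPath());
      recordsByFileId.computeIfAbsent(fileIdPartitionPath, k -> new LinkedList<>()).add(record);
    }
    return recordsByFileId;
  }
}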
Use of org.apache.hudi.exception.HoodieUpsertException in project hudi by apache.
The class FlinkConcatHandle, method write.
/**
 * Write old record as is w/o merging with incoming record.
 */
@Override
public void write(GenericRecord oldRecord) {
  String key = KeyGenUtils.getRecordKeyFromGenericRecord(oldRecord, keyGeneratorOpt);
  try {
    fileWriter.writeAvro(key, oldRecord);
  } catch (IOException | RuntimeException e) {
    String errMsg = String.format("Failed to write old record into new file for key %s from old file %s to new file %s with writerSchema %s",
        key, getOldFilePath(), newFilePath, writeSchemaWithMetaFields.toString(true));
    LOG.debug("Old record is " + oldRecord);
    throw new HoodieUpsertException(errMsg, e);
  }
  recordsWritten++;
}
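The catch block shows a pattern worth copying: fold every identifier a debugger would need (record key, old file, new file, writer schema) into the message before wrapping the cause. Note also that recordsWritten++ sits after the try/catch, so the counter only advances when the write actually succeeded. A standalone sketch of the message-building pattern; the writeFailure helper and its signature are hypothetical, only HoodieUpsertException and Avro's Schema.toString(boolean) are real API here:

import org.apache.avro.Schema;

import org.apache.hudi.exception.HoodieUpsertException;

public class WriteErrorContext {

  // Builds a failure with full context, mirroring FlinkConcatHandle.write;
  // writeFailure is an illustrative helper, not Hudi API.
  static HoodieUpsertException writeFailure(String key, String oldFile, String newFile,
      Schema writerSchema, Exception cause) {
    String errMsg = String.format(
        "Failed to write old record into new file for key %s from old file %s to new file %s with writerSchema %s",
        key, oldFile, newFile, writerSchema.toString(true));
    return new HoodieUpsertException(errMsg, cause);
  }
}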
Use of org.apache.hudi.exception.HoodieUpsertException in project hudi by apache.
The class BaseSparkCommitActionExecutor, method handleUpsertPartition.
@SuppressWarnings("unchecked")
protected Iterator<List<WriteStatus>> handleUpsertPartition(String instantTime, Integer partition, Iterator recordItr, Partitioner partitioner) {
  SparkHoodiePartitioner upsertPartitioner = (SparkHoodiePartitioner) partitioner;
  BucketInfo binfo = upsertPartitioner.getBucketInfo(partition);
  BucketType btype = binfo.bucketType;
  try {
    if (btype.equals(BucketType.INSERT)) {
      return handleInsert(binfo.fileIdPrefix, recordItr);
    } else if (btype.equals(BucketType.UPDATE)) {
      return handleUpdate(binfo.partitionPath, binfo.fileIdPrefix, recordItr);
    } else {
      throw new HoodieUpsertException("Unknown bucketType " + btype + " for partition :" + partition);
    }
  } catch (Throwable t) {
    String msg = "Error upserting bucketType " + btype + " for partition :" + partition;
    LOG.error(msg, t);
    throw new HoodieUpsertException(msg, t);
  }
}
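From the caller's side, the contract is that any failure, whatever its root cause, arrives as a single HoodieUpsertException naming the bucket type and partition, with the original error preserved as the cause. A minimal sketch of consuming that contract; runUpsert is a hypothetical stand-in for driving the executor, not Hudi API:

import org.apache.hudi.exception.HoodieUpsertException;

public class UpsertCaller {

  // Catches the uniform exception type thrown by handleUpsertPartition.
  // The wrapped root cause keeps the original stack trace recoverable.
  static void upsertOrReport(Runnable runUpsert) {
    try {
      runUpsert.run();
    } catch (HoodieUpsertException e) {
      System.err.println("upsert failed: " + e.getMessage());
      if (e.getCause() != null) {
        e.getCause().printStackTrace();
      }
    }
  }
}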