Use of org.apache.hudi.exception.HoodieIOException in project hudi by apache.
The class HoodieAvroDataBlock, method compress().
private static byte[] compress(String text) {
  ByteArrayOutputStream baos = new ByteArrayOutputStream();
  try {
    OutputStream out = new DeflaterOutputStream(baos);
    out.write(text.getBytes(StandardCharsets.UTF_8));
    out.close();
  } catch (IOException e) {
    throw new HoodieIOException("IOException while compressing text " + text, e);
  }
  return baos.toByteArray();
}
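The natural counterpart is a decompression step that inflates the bytes back into a UTF-8 string. Below is a minimal sketch of such a method, not necessarily the project's exact code, assuming the standard java.io and java.util.zip imports are available:

private static String decompress(byte[] bytes) {
  // Hypothetical counterpart to compress(): inflate the deflated bytes back into a UTF-8 string.
  try (InputStream in = new InflaterInputStream(new ByteArrayInputStream(bytes));
       ByteArrayOutputStream baos = new ByteArrayOutputStream()) {
    byte[] buffer = new byte[1024];
    int len;
    while ((len = in.read(buffer)) > 0) {
      baos.write(buffer, 0, len);
    }
    return new String(baos.toByteArray(), StandardCharsets.UTF_8);
  } catch (IOException e) {
    // Wrap the checked IOException in HoodieIOException, mirroring compress().
    throw new HoodieIOException("IOException while decompressing text", e);
  }
}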
Use of org.apache.hudi.exception.HoodieIOException in project hudi by apache.
The class HoodieHFileDataBlock, method serializeRecords().
@Override
protected byte[] serializeRecords(List<IndexedRecord> records) throws IOException {
  HFileContext context = new HFileContextBuilder()
      .withBlockSize(DEFAULT_BLOCK_SIZE)
      .withCompression(compressionAlgorithm.get())
      .build();
  Configuration conf = new Configuration();
  CacheConfig cacheConfig = new CacheConfig(conf);
  ByteArrayOutputStream baos = new ByteArrayOutputStream();
  FSDataOutputStream ostream = new FSDataOutputStream(baos, null);

  // Use simple incrementing counter as a key
  boolean useIntegerKey = !getRecordKey(records.get(0)).isPresent();
  // This is set here to avoid re-computing this in the loop
  int keyWidth = useIntegerKey ? (int) Math.ceil(Math.log(records.size())) + 1 : -1;

  // Serialize records into bytes
  Map<String, byte[]> sortedRecordsMap = new TreeMap<>();
  Iterator<IndexedRecord> itr = records.iterator();
  int id = 0;
  while (itr.hasNext()) {
    IndexedRecord record = itr.next();
    String recordKey;
    if (useIntegerKey) {
      recordKey = String.format("%" + keyWidth + "s", id++);
    } else {
      recordKey = getRecordKey(record).get();
    }
    final byte[] recordBytes = serializeRecord(record);
    ValidationUtils.checkState(!sortedRecordsMap.containsKey(recordKey),
        "Writing multiple records with same key not supported for " + this.getClass().getName());
    sortedRecordsMap.put(recordKey, recordBytes);
  }

  HFile.Writer writer = HFile.getWriterFactory(conf, cacheConfig)
      .withOutputStream(ostream)
      .withFileContext(context)
      .withComparator(new HoodieHBaseKVComparator())
      .create();

  // Write the records
  sortedRecordsMap.forEach((recordKey, recordBytes) -> {
    try {
      KeyValue kv = new KeyValue(recordKey.getBytes(), null, null, recordBytes);
      writer.append(kv);
    } catch (IOException e) {
      throw new HoodieIOException("IOException serializing records", e);
    }
  });

  writer.close();
  ostream.flush();
  ostream.close();
  return baos.toByteArray();
}
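The integer-key branch pads each counter value to a fixed width, presumably so that the lexicographic order of the TreeMap (and of the HFile keys) matches the numeric order of the counter. A small hypothetical illustration of that padding, with a made-up record count:

// Hypothetical illustration of the fixed-width integer keys used above.
int recordCount = 100;
int keyWidth = (int) Math.ceil(Math.log(recordCount)) + 1;  // natural log of 100 is ~4.6, so keyWidth = 6
String first = String.format("%" + keyWidth + "s", 0);      // "     0" (left-padded to 6 characters)
String last = String.format("%" + keyWidth + "s", 99);      // "    99"
// Since every key has the same width, "     0" sorts before "    99" lexicographically,
// which agrees with the numeric order of the counter.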
Use of org.apache.hudi.exception.HoodieIOException in project hudi by apache.
The class HoodieTableConfig, method modify().
private static void modify(FileSystem fs, Path metadataFolder, Properties modifyProps, BiConsumer<Properties, Properties> modifyFn) {
  Path cfgPath = new Path(metadataFolder, HOODIE_PROPERTIES_FILE);
  Path backupCfgPath = new Path(metadataFolder, HOODIE_PROPERTIES_FILE_BACKUP);
  try {
    // 0. do any recovery from prior attempts.
    recoverIfNeeded(fs, cfgPath, backupCfgPath);
    // 1. backup the existing properties.
    try (FSDataInputStream in = fs.open(cfgPath);
         FSDataOutputStream out = fs.create(backupCfgPath, false)) {
      FileIOUtils.copy(in, out);
    }
    // 2. delete the properties file; reads will go to the backup until we are done.
    fs.delete(cfgPath, false);
    // 3. read current props, upsert and save back.
    String checksum;
    try (FSDataInputStream in = fs.open(backupCfgPath);
         FSDataOutputStream out = fs.create(cfgPath, true)) {
      Properties props = new TypedProperties();
      props.load(in);
      modifyFn.accept(props, modifyProps);
      checksum = storeProperties(props, out);
    }
    // 4. verify and remove backup.
    try (FSDataInputStream in = fs.open(cfgPath)) {
      Properties props = new TypedProperties();
      props.load(in);
      if (!props.containsKey(TABLE_CHECKSUM.key()) || !props.getProperty(TABLE_CHECKSUM.key()).equals(checksum)) {
        // delete the properties file and throw an exception indicating update failure;
        // subsequent writes will recover and update, reads will go to the backup until then
        fs.delete(cfgPath, false);
        throw new HoodieIOException("Checksum property missing or does not match.");
      }
    }
    fs.delete(backupCfgPath, false);
  } catch (IOException e) {
    throw new HoodieIOException("Error updating table configs.", e);
  }
}
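As a hedged usage sketch, a caller could upsert entries into hoodie.properties through modify(); the property key, the lambda, and the surrounding variables below are illustrative assumptions, not taken from the project:

// Hypothetical caller: add or overwrite table properties via modify().
Properties updates = new TypedProperties();
updates.setProperty("hoodie.table.name", "my_table");  // illustrative key/value
modify(fs, metadataFolder, updates, (currentProps, newProps) -> currentProps.putAll(newProps));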
Use of org.apache.hudi.exception.HoodieIOException in project hudi by apache.
The class TimelineUtils, method getMetadataValue().
private static Option<String> getMetadataValue(HoodieTableMetaClient metaClient, String extraMetadataKey, HoodieInstant instant) {
  try {
    LOG.info("reading checkpoint info for:" + instant + " key: " + extraMetadataKey);
    HoodieCommitMetadata commitMetadata = HoodieCommitMetadata.fromBytes(
        metaClient.getCommitsTimeline().getInstantDetails(instant).get(), HoodieCommitMetadata.class);
    return Option.ofNullable(commitMetadata.getExtraMetadata().get(extraMetadataKey));
  } catch (IOException e) {
    throw new HoodieIOException("Unable to parse instant metadata " + instant, e);
  }
}
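For illustration, a caller might use this to resume from checkpoint information stored as extra metadata on a commit. The key name and variables below are assumptions for the sketch, not necessarily the project's:

// Hypothetical usage: read a checkpoint value stored as extra metadata on an instant.
Option<String> checkpoint = getMetadataValue(metaClient, "deltastreamer.checkpoint.key", lastInstant);
if (checkpoint.isPresent()) {
  LOG.info("Resuming from checkpoint " + checkpoint.get());
}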
Use of org.apache.hudi.exception.HoodieIOException in project hudi by apache.
The class HoodieActiveTimeline, method deleteInstantFile().
private void deleteInstantFile(HoodieInstant instant) {
  LOG.info("Deleting instant " + instant);
  Path inFlightCommitFilePath = new Path(metaClient.getMetaPath(), instant.getFileName());
  try {
    boolean result = metaClient.getFs().delete(inFlightCommitFilePath, false);
    if (result) {
      LOG.info("Removed instant " + instant);
    } else {
      throw new HoodieIOException("Could not delete instant " + instant);
    }
  } catch (IOException e) {
    throw new HoodieIOException("Could not remove inflight commit " + inFlightCommitFilePath, e);
  }
}
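As a hypothetical usage sketch (the instant construction below is illustrative only), a caller inside the timeline class could remove the file backing a pending instant like this:

// Hypothetical caller: delete the file backing an inflight commit instant.
HoodieInstant inflight = new HoodieInstant(HoodieInstant.State.INFLIGHT, HoodieTimeline.COMMIT_ACTION, "20220101000000");
deleteInstantFile(inflight);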