Use of org.apache.hudi.exception.HoodieException in project hudi by apache.
The class HoodieTableMetadataUtil, method getColumnStats:
private static Stream<HoodieRecord> getColumnStats(final String partitionPath, final String filePathWithPartition,
    HoodieTableMetaClient datasetMetaClient, List<String> columnsToIndex, boolean isDeleted) {
  final String partition = getPartition(partitionPath);
  // Strip the partition path (plus the separator) from the relative file path to get the bare file name
  final int offset = partition.equals(NON_PARTITIONED_NAME)
      ? (filePathWithPartition.startsWith("/") ? 1 : 0)
      : partition.length() + 1;
  final String fileName = filePathWithPartition.substring(offset);

  if (filePathWithPartition.endsWith(HoodieFileFormat.PARQUET.getFileExtension())) {
    final Path fullFilePath = new Path(datasetMetaClient.getBasePath(), filePathWithPartition);
    List<HoodieColumnRangeMetadata<Comparable>> columnRangeMetadataList;
    if (!isDeleted) {
      // Read per-column range statistics from the parquet footer for the columns being indexed
      columnRangeMetadataList = new ParquetUtils().readRangeFromParquetMetadata(
          datasetMetaClient.getHadoopConf(), fullFilePath, columnsToIndex);
    } else {
      // TODO we should delete records instead of stubbing them
      // For deleted files, emit empty stub ranges for each indexed column
      columnRangeMetadataList = columnsToIndex.stream()
          .map(entry -> new HoodieColumnRangeMetadata<Comparable>(fileName, entry, null, null, 0, 0, 0, 0))
          .collect(Collectors.toList());
    }
    return HoodieMetadataPayload.createColumnStatsRecords(partitionPath, columnRangeMetadataList, isDeleted);
  } else {
    throw new HoodieException("Column range index not supported for filePathWithPartition " + fileName);
  }
}
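To give a sense of how the parquet-only branch above might be guarded on the caller side, here is a minimal sketch (not Hudi code) that skips unsupported formats instead of letting the HoodieException propagate. The class name ColumnRangeCollector and the skip-on-unsupported behaviour are assumptions for illustration, and the import packages are from memory.

import java.util.Collections;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hudi.common.model.HoodieColumnRangeMetadata;
import org.apache.hudi.common.model.HoodieFileFormat;
import org.apache.hudi.common.util.ParquetUtils;

public class ColumnRangeCollector {
  // Returns per-column range metadata for parquet files; other formats yield an
  // empty list here instead of a HoodieException (illustrative choice).
  public static List<HoodieColumnRangeMetadata<Comparable>> collectColumnRanges(
      Configuration conf, Path file, List<String> columnsToIndex) {
    if (!file.getName().endsWith(HoodieFileFormat.PARQUET.getFileExtension())) {
      return Collections.emptyList();
    }
    return new ParquetUtils().readRangeFromParquetMetadata(conf, file, columnsToIndex);
  }
}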
Use of org.apache.hudi.exception.HoodieException in project hudi by apache.
The class HoodieHFileReader, method readBloomFilter:
@Override
public BloomFilter readBloomFilter() {
  Map<byte[], byte[]> fileInfo;
  try {
    fileInfo = reader.loadFileInfo();
    ByteBuffer serializedFilter = reader.getMetaBlock(KEY_BLOOM_FILTER_META_BLOCK, false);
    byte[] filterBytes = new byte[serializedFilter.remaining()];
    // read the bytes that were written
    serializedFilter.get(filterBytes);
    return BloomFilterFactory.fromString(new String(filterBytes),
        new String(fileInfo.get(KEY_BLOOM_FILTER_TYPE_CODE.getBytes())));
  } catch (IOException e) {
    throw new HoodieException("Could not read bloom filter from " + path, e);
  }
}
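A caller of readBloomFilter typically probes a record key against the returned filter. The sketch below (not Hudi code) treats a HoodieException as "filter unreadable" and conservatively assumes the key may be present; the reader instance is assumed to be constructed elsewhere and the import packages are from memory.

import org.apache.hudi.common.bloom.BloomFilter;
import org.apache.hudi.exception.HoodieException;
import org.apache.hudi.io.storage.HoodieHFileReader;

public class BloomFilterProbe {
  // Returns false only when the filter definitively rules the key out; an
  // unreadable filter is treated conservatively as "may contain".
  @SuppressWarnings("rawtypes")
  public static boolean mayContainKey(HoodieHFileReader reader, String recordKey) {
    try {
      BloomFilter filter = reader.readBloomFilter();
      return filter.mightContain(recordKey);
    } catch (HoodieException e) {
      // Could not read the bloom filter; fall back to a full key lookup upstream.
      return true;
    }
  }
}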
Use of org.apache.hudi.exception.HoodieException in project hudi by apache.
The class HoodieHFileReader, method getSchema:
@Override
public Schema getSchema() {
  if (schema == null) {
    // Lazily parse the Avro schema stored in the HFile's file-info block
    try {
      Map<byte[], byte[]> fileInfo = reader.loadFileInfo();
      schema = new Schema.Parser().parse(new String(fileInfo.get(KEY_SCHEMA.getBytes())));
    } catch (IOException e) {
      throw new HoodieException("Could not read schema of file from path", e);
    }
  }
  return schema;
}
Use of org.apache.hudi.exception.HoodieException in project hudi by apache.
The class RocksDBDAO, method addColumnFamily:
/**
* Add a new column family to store.
*
* @param columnFamilyName Column family name
*/
public void addColumnFamily(String columnFamilyName) {
  ValidationUtils.checkArgument(!closed);
  managedDescriptorMap.computeIfAbsent(columnFamilyName, colFamilyName -> {
    try {
      ColumnFamilyDescriptor descriptor = getColumnFamilyDescriptor(colFamilyName.getBytes());
      ColumnFamilyHandle handle = getRocksDB().createColumnFamily(descriptor);
      managedHandlesMap.put(colFamilyName, handle);
      return descriptor;
    } catch (RocksDBException e) {
      throw new HoodieException(e);
    }
  });
}
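A minimal end-to-end usage sketch of addColumnFamily follows. It assumes the RocksDBDAO constructor takes a label and a local storage path, and that put/get/close are available as used elsewhere in Hudi; the column family name, keys, and values are illustrative, and the import package is from memory. A RocksDBException inside the DAO surfaces as an unchecked HoodieException, so no checked handling is needed here.

import org.apache.hudi.common.util.collection.RocksDBDAO;

public class RocksDbDaoExample {
  public static void main(String[] args) {
    // Constructor arguments (label, local RocksDB path) are assumptions for illustration.
    RocksDBDAO dao = new RocksDBDAO("example-table", "/tmp/hoodie-rocksdb");
    try {
      dao.addColumnFamily("file-groups");
      dao.put("file-groups", "partition1/fg-001", "some-serializable-value");
      String value = dao.get("file-groups", "partition1/fg-001");
      System.out.println("Read back: " + value);
    } finally {
      dao.close();
    }
  }
}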
Use of org.apache.hudi.exception.HoodieException in project hudi by apache.
The class RocksDBDAO, method prefixDelete:
/**
* Perform a prefix delete, removing all entries whose key starts with the given prefix.
*
* @param columnFamilyName Column Family Name
* @param prefix Prefix Key
* @param <T> Type of value stored
*/
public <T extends Serializable> void prefixDelete(String columnFamilyName, String prefix) {
  ValidationUtils.checkArgument(!closed);
  LOG.info("Prefix DELETE (query=" + prefix + ") on " + columnFamilyName);
  final RocksIterator it = getRocksDB().newIterator(managedHandlesMap.get(columnFamilyName));
  it.seek(prefix.getBytes());
  // Find first and last keys to be deleted
  String firstEntry = null;
  String lastEntry = null;
  while (it.isValid() && new String(it.key()).startsWith(prefix)) {
    String result = new String(it.key());
    it.next();
    if (firstEntry == null) {
      firstEntry = result;
    }
    lastEntry = result;
  }
  it.close();
  if (null != firstEntry) {
    try {
      // This will not delete the last entry
      getRocksDB().deleteRange(managedHandlesMap.get(columnFamilyName), firstEntry.getBytes(), lastEntry.getBytes());
      // Delete the last entry
      getRocksDB().delete(lastEntry.getBytes());
    } catch (RocksDBException e) {
      LOG.error("Got exception performing range delete");
      throw new HoodieException(e);
    }
  }
}
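Building on the previous sketch, the prefix-delete pattern relies on keys sharing a common prefix. The example below (not Hudi code, same RocksDBDAO assumptions as before) writes three entries and drops the two that belong to one partition-like prefix in a single prefixDelete call; as the method body's comments note, the range delete excludes its upper bound, which is why the last matching key gets an extra point delete.

import org.apache.hudi.common.util.collection.RocksDBDAO;

public class PrefixDeleteExample {
  public static void main(String[] args) {
    // Constructor arguments and the "<partition>/<fileId>" key layout are illustrative only.
    RocksDBDAO dao = new RocksDBDAO("example-table", "/tmp/hoodie-rocksdb");
    try {
      dao.addColumnFamily("file-groups");
      // Keys share the partition path as a prefix so they can be dropped together.
      dao.put("file-groups", "2021/03/01/fg-001", "v1");
      dao.put("file-groups", "2021/03/01/fg-002", "v2");
      dao.put("file-groups", "2021/03/02/fg-003", "v3");
      // Remove every entry for the 2021/03/01 prefix in one call.
      dao.prefixDelete("file-groups", "2021/03/01/");
    } finally {
      dao.close();
    }
  }
}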