Use of org.apache.drill.shaded.guava.com.google.common.base.Stopwatch in project drill by apache.
In class PageReader, method readCompressedPageV2:
/**
 * Reads a compressed v2 data page in which the repetition and definition level
 * sections were excluded from compression.
 * @return decompressed Parquet page data
 * @throws IOException if the page cannot be read
 */
protected DrillBuf readCompressedPageV2() throws IOException {
  Stopwatch timer = Stopwatch.createUnstarted();
  int inputSize = pageHeader.getCompressed_page_size();
  int repLevelSize = pageHeader.data_page_header_v2.getRepetition_levels_byte_length();
  int defLevelSize = pageHeader.data_page_header_v2.getDefinition_levels_byte_length();
  int compDataOffset = repLevelSize + defLevelSize;
  int outputSize = pageHeader.uncompressed_page_size;
  long start = dataReader.getPos();
  long timeToRead;
  DrillBuf inputPageData = null;
  DrillBuf outputPageData = this.allocator.buffer(outputSize);
  try {
    timer.start();
    // Read in both the uncompressed and compressed sections
    inputPageData = dataReader.getNext(inputSize);
    timeToRead = timer.elapsed(TimeUnit.NANOSECONDS);
    this.updateStats(pageHeader, "Page Read", start, timeToRead, inputSize, inputSize);
    timer.reset();
    timer.start();
    start = dataReader.getPos();
    // Write out the uncompressed section
    // Note that the following setBytes call to read the repetition and definition level sections
    // advances readerIndex in inputPageData but not writerIndex in outputPageData.
    outputPageData.setBytes(0, inputPageData, compDataOffset);
    // Decompress from the start of compressed data to the end of the input buffer
    CompressionCodecName codecName = columnChunkMetaData.getCodec();
    BytesInputDecompressor decomp = codecFactory.getDecompressor(codecName);
    ByteBuffer input = inputPageData.nioBuffer(compDataOffset, inputSize - compDataOffset);
    ByteBuffer output = outputPageData.nioBuffer(compDataOffset, outputSize - compDataOffset);
    decomp.decompress(input, inputSize - compDataOffset, output, outputSize - compDataOffset);
    outputPageData.writerIndex(outputSize);
    timeToRead = timer.elapsed(TimeUnit.NANOSECONDS);
    if (logger.isTraceEnabled()) {
      logger.trace("Col: {} readPos: {} Uncompressed_size: {} pageData: {}",
          columnChunkMetaData.toString(), dataReader.getPos(), outputSize,
          ByteBufUtil.hexDump(outputPageData));
    }
    this.updateStats(pageHeader, "Decompress", start, timeToRead, inputSize, outputSize);
  } finally {
    if (inputPageData != null) {
      inputPageData.release();
    }
  }
  return outputPageData;
}
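The offset arithmetic above is the heart of the v2 page format: the repetition and definition level sections sit uncompressed at the front of the page, so only the tail of the input buffer is run through the decompressor. Below is a minimal standalone sketch of that arithmetic, using made-up sizes in place of the real page header fields:

public class PageV2Layout {
  public static void main(String[] args) {
    // Layout of a v2 data page: [rep levels | def levels | compressed values]
    int repLevelSize = 12;   // repetition_levels_byte_length (hypothetical value)
    int defLevelSize = 20;   // definition_levels_byte_length (hypothetical value)
    int inputSize = 1024;    // compressed_page_size: levels + compressed values
    int outputSize = 4096;   // uncompressed_page_size: levels + decompressed values

    int compDataOffset = repLevelSize + defLevelSize; // first byte of compressed data
    System.out.println("copy " + compDataOffset + " level bytes verbatim, decompress "
        + (inputSize - compDataOffset) + " bytes into " + (outputSize - compDataOffset));
  }
}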
Use of org.apache.drill.shaded.guava.com.google.common.base.Stopwatch in project drill by apache.
In class PageReader, method readPageHeader:
/**
 * Reads the next page header available in the backing input stream.
 * @throws IOException if the page header cannot be read
 */
protected void readPageHeader() throws IOException {
  long start = dataReader.getPos();
  Stopwatch timer = Stopwatch.createStarted();
  this.pageHeader = Util.readPageHeader(dataReader);
  long timeToRead = timer.elapsed(TimeUnit.NANOSECONDS);
  long pageHeaderBytes = dataReader.getPos() - start;
  this.updateStats(pageHeader, "Page Header", start, timeToRead, pageHeaderBytes, pageHeaderBytes);
  if (logger.isTraceEnabled()) {
    logger.trace("ParquetTrace,{},{},{},{},{},{},{},{}", "Page Header Read", "",
        this.parentColumnReader.parentReader.getHadoopPath(), this.columnDescriptor.toString(),
        start, 0, 0, timeToRead);
  }
}
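Because the Thrift-encoded page header is variable-length, its size is only known after the read; the method recovers it from the stream position delta. A minimal sketch of the same two measurements (elapsed time and bytes consumed), assuming a hypothetical positioned-reader interface in place of Drill's dataReader:

import java.util.concurrent.TimeUnit;
import org.apache.drill.shaded.guava.com.google.common.base.Stopwatch;

public class TimedRead {
  // Hypothetical stand-in for Drill's positioned input stream.
  interface PositionedReader {
    long getPos();
    void readNext();
  }

  static void timedRead(PositionedReader reader) {
    long start = reader.getPos();
    Stopwatch timer = Stopwatch.createStarted();
    reader.readNext();
    long timeToRead = timer.elapsed(TimeUnit.NANOSECONDS);
    long bytesRead = reader.getPos() - start; // size recovered from the position delta
    System.out.println(bytesRead + " bytes in " + timeToRead + " ns");
  }

  public static void main(String[] args) {
    long[] pos = {0};
    timedRead(new PositionedReader() {
      @Override public long getPos() { return pos[0]; }
      @Override public void readNext() { pos[0] += 57; } // pretend we consumed 57 bytes
    });
  }
}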
Use of org.apache.drill.shaded.guava.com.google.common.base.Stopwatch in project drill by apache.
In class Metadata, method readBlockMeta:
/**
 * Reads the parquet metadata from a file.
 *
 * @param path path to the metadata file
 * @param dirsOnly true when reading {@link Metadata#METADATA_DIRECTORIES_FILENAME} files,
 *                 false when reading {@link Metadata#OLD_METADATA_FILENAME} files
 * @param metaContext current metadata context
 * @param fs file system used to open the metadata file
 */
private void readBlockMeta(Path path, boolean dirsOnly, MetadataContext metaContext, FileSystem fs) {
  Stopwatch timer = logger.isDebugEnabled() ? Stopwatch.createStarted() : null;
  Path metadataParentDir = Path.getPathWithoutSchemeAndAuthority(path.getParent());
  String metadataParentDirPath = metadataParentDir.toUri().getPath();
  ObjectMapper mapper = new ObjectMapper();
  final SimpleModule serialModule = new SimpleModule();
  serialModule.addDeserializer(SchemaPath.class, new SchemaPath.De());
  serialModule.addKeyDeserializer(Metadata_V2.ColumnTypeMetadata_v2.Key.class, new Metadata_V2.ColumnTypeMetadata_v2.Key.DeSerializer());
  serialModule.addKeyDeserializer(Metadata_V3.ColumnTypeMetadata_v3.Key.class, new Metadata_V3.ColumnTypeMetadata_v3.Key.DeSerializer());
  serialModule.addKeyDeserializer(ColumnTypeMetadata_v4.Key.class, new ColumnTypeMetadata_v4.Key.DeSerializer());
  AfterburnerModule module = new AfterburnerModule();
  module.setUseOptimizedBeanDeserializer(true);
  boolean isFileMetadata = path.toString().endsWith(METADATA_FILENAME);
  boolean isSummaryFile = path.toString().endsWith(METADATA_SUMMARY_FILENAME);
  mapper.registerModule(serialModule);
  mapper.registerModule(module);
  mapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
  try (InputStream is = fs.open(path)) {
    boolean alreadyCheckedModification;
    boolean newMetadata = false;
    alreadyCheckedModification = metaContext.getStatus(metadataParentDirPath);
    if (dirsOnly) {
      parquetTableMetadataDirs = mapper.readValue(is, ParquetTableMetadataDirs.class);
      if (timer != null) {
        logger.debug("Took {} ms to read directories from directory cache file", timer.elapsed(TimeUnit.MILLISECONDS));
        timer.stop();
      }
      parquetTableMetadataDirs.updateRelativePaths(metadataParentDirPath);
      if (!alreadyCheckedModification && tableModified(parquetTableMetadataDirs.getDirectories(), path, metadataParentDir, metaContext, fs)) {
        parquetTableMetadataDirs = (createMetaFilesRecursivelyAsProcessUser(Path.getPathWithoutSchemeAndAuthority(path.getParent()), fs, true, null, true)).getRight();
        newMetadata = true;
      }
    } else {
      if (isFileMetadata) {
        parquetTableMetadata.assignFiles((mapper.readValue(is, FileMetadata.class)).getFiles());
        if (new MetadataVersion(parquetTableMetadata.getMetadataVersion()).isAtLeast(4, 0)) {
          ((ParquetTableMetadata_v4) parquetTableMetadata).updateRelativePaths(metadataParentDirPath);
        }
        if (!alreadyCheckedModification && tableModified(parquetTableMetadata.getDirectories(), path, metadataParentDir, metaContext, fs)) {
          parquetTableMetadata = (createMetaFilesRecursivelyAsProcessUser(Path.getPathWithoutSchemeAndAuthority(path.getParent()), fs, true, null, true)).getLeft();
          newMetadata = true;
        }
      } else if (isSummaryFile) {
        MetadataSummary metadataSummary = mapper.readValue(is, Metadata_V4.MetadataSummary.class);
        parquetTableMetadata = new ParquetTableMetadata_v4(metadataSummary);
      } else {
        parquetTableMetadata = mapper.readValue(is, ParquetTableMetadataBase.class);
        if (new MetadataVersion(parquetTableMetadata.getMetadataVersion()).isAtLeast(3, 0)) {
          ((Metadata_V3.ParquetTableMetadata_v3) parquetTableMetadata).updateRelativePaths(metadataParentDirPath);
        }
        if (!alreadyCheckedModification && tableModified((parquetTableMetadata.getDirectories()), path, metadataParentDir, metaContext, fs)) {
          parquetTableMetadata = (createMetaFilesRecursivelyAsProcessUser(Path.getPathWithoutSchemeAndAuthority(path.getParent()), fs, true, null, true)).getLeft();
          newMetadata = true;
        }
      }
      if (timer != null) {
        logger.debug("Took {} ms to read metadata from cache file", timer.elapsed(TimeUnit.MILLISECONDS));
        timer.stop();
      }
      if (!isSummaryFile) {
        List<? extends ParquetFileMetadata> files = parquetTableMetadata.getFiles();
        if (files != null) {
          for (ParquetFileMetadata file : files) {
            // DRILL-5009: Remove empty row groups unless it is the only row group
            List<? extends RowGroupMetadata> rowGroups = file.getRowGroups();
            if (rowGroups.size() == 1) {
              continue;
            }
            rowGroups.removeIf(r -> r.getRowCount() == 0);
          }
        }
      }
      if (newMetadata) {
        // If new metadata files were created, invalidate the existing metadata context
        metaContext.clear();
      }
    }
  } catch (IOException e) {
    logger.error("Failed to read '{}' metadata file", path, e);
    metaContext.setMetadataCacheCorrupted(true);
  }
}
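Note the guard on the first line of the method: the Stopwatch is only created when debug logging is enabled, so production runs pay no timing cost, and every site that reads the timer must null-check it first. A minimal sketch of the idiom, assuming an SLF4J logger:

import java.util.concurrent.TimeUnit;
import org.apache.drill.shaded.guava.com.google.common.base.Stopwatch;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class GuardedTimer {
  private static final Logger logger = LoggerFactory.getLogger(GuardedTimer.class);

  static void run(Runnable work) {
    // Only pay for timing when the result would actually be logged.
    Stopwatch timer = logger.isDebugEnabled() ? Stopwatch.createStarted() : null;
    work.run();
    if (timer != null) {
      logger.debug("Took {} ms", timer.elapsed(TimeUnit.MILLISECONDS));
      timer.stop();
    }
  }

  public static void main(String[] args) {
    run(() -> { /* some work to be timed */ });
  }
}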
Use of org.apache.drill.shaded.guava.com.google.common.base.Stopwatch in project drill by apache.
In class Metadata, method createMetaFilesRecursively:
/**
 * Creates the parquet metadata files for the directory at the given path and for any subdirectories.
 * Metadata cache files written to disk contain relative paths; the returned Pair of metadata contains
 * absolute paths.
 *
 * @param path path to the directory of the parquet table
 * @param fs file system
 * @param allColumnsInteresting if true, store column metadata for all the columns
 * @param columnSet set of columns for which column metadata has to be stored
 * @return a Pair of parquet metadata: the left element is the metadata for the table, the right element
 *         is the metadata for all subdirectories (if present and there are no parquet files in the
 *         {@code path} directory)
 * @throws IOException if the parquet metadata can't be serialized and written to the json file
 */
private Pair<ParquetTableMetadata_v4, ParquetTableMetadataDirs> createMetaFilesRecursively(Path path, FileSystem fs, boolean allColumnsInteresting, Set<SchemaPath> columnSet) throws IOException {
  Stopwatch timer = logger.isDebugEnabled() ? Stopwatch.createStarted() : null;
  List<ParquetFileMetadata_v4> metaDataList = Lists.newArrayList();
  List<Path> directoryList = Lists.newArrayList();
  ConcurrentHashMap<ColumnTypeMetadata_v4.Key, ColumnTypeMetadata_v4> columnTypeInfoSet = new ConcurrentHashMap<>();
  FileStatus fileStatus = fs.getFileStatus(path);
  long dirTotalRowCount = 0;
  assert fileStatus.isDirectory() : "Expected directory";
  final Map<FileStatus, FileSystem> childFiles = new LinkedHashMap<>();
  for (final FileStatus file : DrillFileSystemUtil.listAll(fs, path, false)) {
    if (file.isDirectory()) {
      ParquetTableMetadata_v4 subTableMetadata = (createMetaFilesRecursively(file.getPath(), fs, allColumnsInteresting, columnSet)).getLeft();
      ConcurrentHashMap<ColumnTypeMetadata_v4.Key, ColumnTypeMetadata_v4> subTableColumnTypeInfo = subTableMetadata.getColumnTypeInfoMap();
      metaDataList.addAll((List<ParquetFileMetadata_v4>) subTableMetadata.getFiles());
      directoryList.addAll(subTableMetadata.getDirectories());
      directoryList.add(file.getPath());
      // TODO: We need a merge method that merges two columns with the same name but different types
      if (columnTypeInfoSet.isEmpty()) {
        columnTypeInfoSet.putAll(subTableColumnTypeInfo);
      } else {
        for (ColumnTypeMetadata_v4.Key key : subTableColumnTypeInfo.keySet()) {
          ColumnTypeMetadata_v4 columnTypeMetadata_v4 = columnTypeInfoSet.get(key);
          if (columnTypeMetadata_v4 == null) {
            columnTypeMetadata_v4 = subTableColumnTypeInfo.get(key);
          } else {
            // If either null count is unknown (negative), mark the merged total null count as unknown
            if (subTableColumnTypeInfo.get(key).totalNullCount < 0 || columnTypeMetadata_v4.totalNullCount < 0) {
              columnTypeMetadata_v4.totalNullCount = NULL_COUNT_NOT_EXISTS;
            } else {
              columnTypeMetadata_v4.totalNullCount = columnTypeMetadata_v4.totalNullCount + subTableColumnTypeInfo.get(key).totalNullCount;
            }
          }
          columnTypeInfoSet.put(key, columnTypeMetadata_v4);
        }
      }
      dirTotalRowCount = dirTotalRowCount + subTableMetadata.getTotalRowCount();
    } else {
      childFiles.put(file, fs);
    }
  }
  Metadata_V4.MetadataSummary metadataSummary = new Metadata_V4.MetadataSummary(SUPPORTED_VERSIONS.last().toString(), DrillVersionInfo.getVersion(), allColumnsInteresting || columnSet == null);
  ParquetTableMetadata_v4 parquetTableMetadata = new ParquetTableMetadata_v4(metadataSummary);
  if (!childFiles.isEmpty()) {
    List<ParquetFileAndRowCountMetadata> childFileAndRowCountMetadata = getParquetFileMetadata_v4(parquetTableMetadata, childFiles, allColumnsInteresting, columnSet);
    // If the columnTypeInfoSet is empty, add the columnTypeInfo from the parquetTableMetadata
    if (columnTypeInfoSet.isEmpty()) {
      columnTypeInfoSet.putAll(parquetTableMetadata.getColumnTypeInfoMap());
    }
    for (ParquetFileAndRowCountMetadata parquetFileAndRowCountMetadata : childFileAndRowCountMetadata) {
      metaDataList.add(parquetFileAndRowCountMetadata.getFileMetadata());
      dirTotalRowCount = dirTotalRowCount + parquetFileAndRowCountMetadata.getFileRowCount();
      Map<ColumnTypeMetadata_v4.Key, Long> totalNullCountMap = parquetFileAndRowCountMetadata.getTotalNullCountMap();
      for (ColumnTypeMetadata_v4.Key column : totalNullCountMap.keySet()) {
        ColumnTypeMetadata_v4 columnTypeMetadata_v4 = columnTypeInfoSet.get(column);
        // If the column is not present in columnTypeInfoSet, get it from parquetTableMetadata
        if (columnTypeMetadata_v4 == null) {
          columnTypeMetadata_v4 = parquetTableMetadata.getColumnTypeInfoMap().get(column);
        }
        // If either null count is unknown (negative), mark the merged total null count as unknown
        if (columnTypeMetadata_v4.totalNullCount < 0 || totalNullCountMap.get(column) < 0) {
          columnTypeMetadata_v4.totalNullCount = NULL_COUNT_NOT_EXISTS;
        } else {
          columnTypeMetadata_v4.totalNullCount += totalNullCountMap.get(column);
        }
        columnTypeInfoSet.put(column, columnTypeMetadata_v4);
      }
    }
  }
  metadataSummary.directories = directoryList;
  parquetTableMetadata.assignFiles(metaDataList);
  // TODO: We need a merge method that merges two columns with the same name but different types
  if (metadataSummary.columnTypeInfo == null) {
    metadataSummary.columnTypeInfo = new ConcurrentHashMap<>();
  }
  metadataSummary.columnTypeInfo.putAll(columnTypeInfoSet);
  metadataSummary.allColumnsInteresting = allColumnsInteresting;
  metadataSummary.totalRowCount = dirTotalRowCount;
  parquetTableMetadata.metadataSummary = metadataSummary;
  for (String oldName : OLD_METADATA_FILENAMES) {
    fs.delete(new Path(path, oldName), false);
  }
  // Relative paths in the metadata are only necessary for meta cache files.
  ParquetTableMetadata_v4 metadataTableWithRelativePaths = MetadataPathUtils.createMetadataWithRelativePaths(parquetTableMetadata, path);
  writeFile(metadataTableWithRelativePaths.fileMetadata, new Path(path, METADATA_FILENAME), fs);
  writeFile(metadataTableWithRelativePaths.getSummary(), new Path(path, METADATA_SUMMARY_FILENAME), fs);
  Metadata_V4.MetadataSummary metadataSummaryWithRelativePaths = metadataTableWithRelativePaths.getSummary();
  // The directories list will be empty at leaf-level directories. For sub-directories with both files
  // and directories, only the directories will be included in the list.
  writeFile(new ParquetTableMetadataDirs(metadataSummaryWithRelativePaths.directories), new Path(path, METADATA_DIRECTORIES_FILENAME), fs);
  if (timer != null) {
    logger.debug("Creating metadata files recursively took {} ms", timer.elapsed(TimeUnit.MILLISECONDS));
    timer.stop();
  }
  return Pair.of(parquetTableMetadata, new ParquetTableMetadataDirs(directoryList));
}
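Both null-count merge sites above apply the same rule: per-column counts are summed, but a negative count means "unknown" and makes the aggregate unknown as well. A minimal sketch of that rule in isolation (the constant name mirrors the code above; the value -1 is an assumption):

public class NullCountMerge {
  static final long NULL_COUNT_NOT_EXISTS = -1; // assumed sentinel for "unknown"

  // Merge two per-column null counts; an unknown (negative) input poisons the sum.
  static long merge(long a, long b) {
    return (a < 0 || b < 0) ? NULL_COUNT_NOT_EXISTS : a + b;
  }

  public static void main(String[] args) {
    System.out.println(merge(10, 5));  // 15
    System.out.println(merge(10, -1)); // -1 (unknown)
  }
}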
Use of org.apache.drill.shaded.guava.com.google.common.base.Stopwatch in project drill by apache.
In class AssignmentCreator, method getMappings:
/**
 * Does the work of creating the mappings for this AssignmentCreator.
 * @return the minor fragment id to work units mapping
 */
private ListMultimap<Integer, T> getMappings() {
  Stopwatch watch = Stopwatch.createStarted();
  maxWork = (int) Math.ceil(units.size() / ((float) incomingEndpoints.size()));
  LinkedList<WorkEndpointListPair<T>> workList = getWorkList();
  LinkedList<WorkEndpointListPair<T>> unassignedWorkList;
  Map<DrillbitEndpoint, FragIteratorWrapper> endpointIterators = getEndpointIterators();
  // Assign up to maxCount per node based on locality.
  unassignedWorkList = assign(workList, endpointIterators, false);
  // Assign up to minCount per node in a round robin fashion.
  assignLeftovers(unassignedWorkList, endpointIterators, true);
  // Assign up to maxCount + leftovers per node based on locality.
  unassignedWorkList = assign(unassignedWorkList, endpointIterators, true);
  // Assign up to maxCount + leftovers per node in a round robin fashion.
  assignLeftovers(unassignedWorkList, endpointIterators, false);
  if (!unassignedWorkList.isEmpty()) {
    throw new DrillRuntimeException("There are still unassigned work units");
  }
  logger.debug("Took {} ms to assign {} work units to {} fragments",
      watch.elapsed(TimeUnit.MILLISECONDS), units.size(), incomingEndpoints.size());
  return mappings;
}
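The four passes alternate locality-aware assignment with round-robin top-ups until nothing is left. As an illustration only, not Drill's actual assign/assignLeftovers implementation, here is a sketch of a final round-robin pass dealing leftover work units out across minor fragment ids:

import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Deque;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class RoundRobinLeftovers {
  public static void main(String[] args) {
    Deque<String> unassigned = new ArrayDeque<>(List.of("w1", "w2", "w3", "w4", "w5"));
    int fragmentCount = 2; // number of minor fragments
    Map<Integer, List<String>> mappings = new HashMap<>();
    int next = 0;
    while (!unassigned.isEmpty()) {
      mappings.computeIfAbsent(next, k -> new ArrayList<>()).add(unassigned.poll());
      next = (next + 1) % fragmentCount; // wrap around the fragment ids
    }
    System.out.println(mappings); // e.g. {0=[w1, w3, w5], 1=[w2, w4]}
  }
}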