Use of org.apache.drill.shaded.guava.com.google.common.base.Stopwatch in project drill by apache.
Example: the class JdbcTestActionBase, method testAction.
/**
 * Executes the given JDBC action against the shared test {@code connection},
 * logs the full result set (header row plus data rows, tab-separated) and the
 * elapsed time, and asserts the expected row count.
 *
 * @param action   the JDBC action producing the {@link ResultSet} to verify
 * @param rowcount expected number of rows, or -1 to skip the row-count check
 * @throws Exception if the query or result-set traversal fails
 */
protected void testAction(JdbcAction action, long rowcount) throws Exception {
final StringBuilder sb = new StringBuilder();
int rows = 0;
Stopwatch watch = Stopwatch.createStarted();
// try-with-resources: the original leaked the ResultSet on every call
try (ResultSet r = action.getResult(connection)) {
// Column metadata is stable for the life of the ResultSet; fetch it once
// instead of once per row as before.
ResultSetMetaData md = r.getMetaData();
boolean first = true;
while (r.next()) {
rows++;
if (first) {
// Emit the header row before the first data row.
for (int i = 1; i <= md.getColumnCount(); i++) {
sb.append(md.getColumnName(i));
sb.append('\t');
}
sb.append('\n');
first = false;
}
for (int i = 1; i <= md.getColumnCount(); i++) {
sb.append(r.getObject(i));
sb.append('\t');
}
sb.append('\n');
}
}
sb.append(String.format("Query completed in %d millis.\n", watch.elapsed(TimeUnit.MILLISECONDS)));
if (rowcount != -1) {
// int widens to long; the explicit casts in the original were redundant.
Assert.assertEquals(rowcount, rows);
}
sb.append("\n\n\n");
logger.info(sb.toString());
}
Use of org.apache.drill.shaded.guava.com.google.common.base.Stopwatch in project drill by apache.
Example: the class TestJdbcDistQuery, method testQuery.
/**
 * Runs the given SQL through a fresh connection, logging the column header,
 * every row (tab-separated) and the elapsed time per iteration. On failure the
 * method sleeps briefly before propagating, giving in-flight server work time
 * to settle so the real failure is not masked by teardown noise.
 *
 * @param sql the query to execute
 * @throws Exception if connecting or executing the query fails
 */
private void testQuery(String sql) throws Exception {
final StringBuilder sb = new StringBuilder();
boolean success = false;
try (Connection c = connect()) {
// Single iteration by default; raise this constant to loop the query when
// debugging (this resolves the old TODO asking for a named iteration count).
final int iterations = 1;
boolean first = true;
for (int x = 0; x < iterations; x++) {
Stopwatch watch = Stopwatch.createStarted();
// try-with-resources: the original leaked both Statement and ResultSet.
try (Statement s = c.createStatement();
ResultSet r = s.executeQuery(sql)) {
// Metadata is stable across rows; the per-row re-fetch was redundant.
ResultSetMetaData md = r.getMetaData();
if (first) {
for (int i = 1; i <= md.getColumnCount(); i++) {
sb.append(md.getColumnName(i));
sb.append('\t');
}
sb.append('\n');
first = false;
}
while (r.next()) {
for (int i = 1; i <= md.getColumnCount(); i++) {
sb.append(r.getObject(i));
sb.append('\t');
}
sb.append('\n');
}
}
sb.append(String.format("Query completed in %d millis.\n", watch.elapsed(TimeUnit.MILLISECONDS)));
}
sb.append("\n\n\n");
success = true;
} finally {
if (!success) {
// Give the cluster a moment before the test harness tears down.
Thread.sleep(2000);
}
}
logger.info(sb.toString());
}
Use of org.apache.drill.shaded.guava.com.google.common.base.Stopwatch in project drill by apache.
Example: the class Metadata, method tableModified.
/**
* Check if the parquet metadata needs to be updated by comparing the modification time of the directories with
* the modification time of the metadata file
*
* @param directories List of directories
* @param metaFilePath path of parquet metadata cache file
* @return true if metadata needs to be updated, false otherwise
* @throws IOException if some resources are not accessible
*/
private boolean tableModified(List<Path> directories, Path metaFilePath, Path parentDir, MetadataContext metaContext, FileSystem fs) throws IOException {
// Only pay for the stopwatch when debug logging will actually use it.
Stopwatch timer = logger.isDebugEnabled() ? Stopwatch.createStarted() : null;
metaContext.setStatus(parentDir);
long cacheModifyTime = fs.getFileStatus(metaFilePath).getModificationTime();
FileStatus lastChecked = fs.getFileStatus(parentDir);
int checkedDirs = 1;
// The parent directory itself being newer than the cache file means the
// metadata is stale — no need to look at the children.
if (lastChecked.getModificationTime() > cacheModifyTime) {
return logAndStopTimer(true, lastChecked.getPath().toString(), timer, checkedDirs);
}
boolean stale = false;
// Walk the child directories and stop at the first one newer than the cache.
for (Path dir : directories) {
checkedDirs++;
metaContext.setStatus(dir);
lastChecked = fs.getFileStatus(dir);
if (lastChecked.getModificationTime() > cacheModifyTime) {
stale = true;
break;
}
}
return logAndStopTimer(stale, lastChecked.getPath().toString(), timer, checkedDirs);
}
Use of org.apache.drill.shaded.guava.com.google.common.base.Stopwatch in project drill by apache.
Example: the class Metadata, method getParquetTableMetadata.
/**
* Get the parquet metadata for the parquet files in a directory.
*
* @param path the path of the directory
* @return metadata object for an entire parquet directory structure
* @throws IOException in case of problems during accessing files
*/
/**
 * Get the parquet metadata for the parquet files in a directory.
 *
 * @param path the path of the directory (or a single file)
 * @return metadata object for an entire parquet directory structure
 * @throws IOException in case of problems during accessing files
 */
private ParquetTableMetadata_v4 getParquetTableMetadata(Path path, FileSystem fs) throws IOException {
FileStatus rootStatus = fs.getFileStatus(path);
// Stopwatch only exists when debug logging will report the timings.
Stopwatch watch = logger.isDebugEnabled() ? Stopwatch.createStarted() : null;
List<FileStatus> statuses = new ArrayList<>();
if (rootStatus.isFile()) {
statuses.add(rootStatus);
} else {
// A directory: gather every file beneath it, recursively.
statuses.addAll(DrillFileSystemUtil.listFiles(fs, path, true));
}
if (watch != null) {
logger.debug("Took {} ms to get file statuses", watch.elapsed(TimeUnit.MILLISECONDS));
watch.reset();
watch.start();
}
// Every status maps to the same FileSystem; insertion order is preserved.
Map<FileStatus, FileSystem> statusToFs = new LinkedHashMap<>();
for (FileStatus status : statuses) {
statusToFs.put(status, fs);
}
ParquetTableMetadata_v4 tableMetadata = getParquetTableMetadata(statusToFs);
if (watch != null) {
logger.debug("Took {} ms to read file metadata", watch.elapsed(TimeUnit.MILLISECONDS));
watch.stop();
}
return tableMetadata;
}
Use of org.apache.drill.shaded.guava.com.google.common.base.Stopwatch in project drill by apache.
Example: the class PageReader, method readCompressedPageV1.
/**
* Reads a compressed v1 data page or a dictionary page, both of which are compressed
* in their entirety.
* @return decompressed Parquet page data
* @throws IOException
*/
/**
 * Reads a compressed v1 data page or a dictionary page, both of which are compressed
 * in their entirety, and decompresses it into a freshly allocated buffer.
 * <p>
 * Ownership of the returned buffer passes to the caller. If reading or
 * decompression fails, the output buffer is released here so it cannot leak.
 *
 * @return decompressed Parquet page data
 * @throws IOException if reading the compressed page fails
 */
protected DrillBuf readCompressedPageV1() throws IOException {
Stopwatch timer = Stopwatch.createUnstarted();
int inputSize = pageHeader.getCompressed_page_size();
int outputSize = pageHeader.getUncompressed_page_size();
long start = dataReader.getPos();
long timeToRead;
DrillBuf inputPageData = null;
DrillBuf outputPageData = this.allocator.buffer(outputSize);
// Tracks whether we reached the end of the try block; used to release the
// output buffer on any failure path (the original leaked it on exception).
boolean success = false;
try {
timer.start();
inputPageData = dataReader.getNext(inputSize);
timeToRead = timer.elapsed(TimeUnit.NANOSECONDS);
this.updateStats(pageHeader, "Page Read", start, timeToRead, inputSize, inputSize);
timer.reset();
timer.start();
start = dataReader.getPos();
CompressionCodecName codecName = columnChunkMetaData.getCodec();
BytesInputDecompressor decomp = codecFactory.getDecompressor(codecName);
ByteBuffer input = inputPageData.nioBuffer(0, inputSize);
ByteBuffer output = outputPageData.nioBuffer(0, outputSize);
decomp.decompress(input, inputSize, output, outputSize);
// Make the written bytes visible to readers of the DrillBuf.
outputPageData.writerIndex(outputSize);
timeToRead = timer.elapsed(TimeUnit.NANOSECONDS);
if (logger.isTraceEnabled()) {
logger.trace("Col: {} readPos: {} Uncompressed_size: {} pageData: {}", columnChunkMetaData.toString(), dataReader.getPos(), outputSize, ByteBufUtil.hexDump(outputPageData));
}
this.updateStats(pageHeader, "Decompress", start, timeToRead, inputSize, outputSize);
success = true;
} finally {
// The compressed input is always ours to release once decompression is done.
if (inputPageData != null) {
inputPageData.release();
}
// On failure, nobody will receive the output buffer — release it too.
if (!success) {
outputPageData.release();
}
}
return outputPageData;
}
Aggregations