Use of org.apache.drill.shaded.guava.com.google.common.base.Stopwatch in project Drill by Apache.
Class FMPPMojo, method execute:
@Override
public void execute() throws MojoExecutionException, MojoFailureException {
  if (project == null) {
    throw new MojoExecutionException("This plugin can only be used inside a project.");
  }
  String outputPath = output.getAbsolutePath();
  if ((!output.exists() && !output.mkdirs()) || !output.isDirectory()) {
    throw new MojoFailureException("can not write to output dir: " + outputPath);
  }
  String templatesPath = templates.getAbsolutePath();
  if (!templates.exists() || !templates.isDirectory()) {
throw new MojoFailureException("templates not found in dir: " + outputPath);
  }
  // add the output directory path to the project source directories
  switch (scope) {
    case "compile":
      project.addCompileSourceRoot(outputPath);
      break;
    case "test":
      project.addTestCompileSourceRoot(outputPath);
      break;
    default:
      throw new MojoFailureException("scope must be compile or test");
  }
  final Stopwatch sw = Stopwatch.createStarted();
  try {
    getLog().info(format("Freemarker generation:\n scope: %s,\n config: %s,\n templates: %s", scope, config.getAbsolutePath(), templatesPath));
    final File tmp = Files.createTempDirectory("freemarker-tmp").toFile();
    String tmpPath = tmp.getAbsolutePath();
    final String tmpPathNormalized = tmpPath.endsWith(File.separator) ? tmpPath : tmpPath + File.separator;
    Settings settings = new Settings(new File("."));
    settings.set(Settings.NAME_SOURCE_ROOT, templatesPath);
    settings.set(Settings.NAME_OUTPUT_ROOT, tmp.getAbsolutePath());
    settings.load(config);
    settings.addProgressListener(new TerseConsoleProgressListener());
    settings.addProgressListener(new ProgressListener() {
      @Override
      public void notifyProgressEvent(Engine engine, int event, File src, int pMode, Throwable error, Object param) throws Exception {
        if (event == EVENT_END_PROCESSING_SESSION) {
          getLog().info(format("Freemarker generation took %dms", sw.elapsed(TimeUnit.MILLISECONDS)));
          sw.reset();
          Report report = moveIfChanged(tmp, tmpPathNormalized);
          if (!tmp.delete()) {
            throw new MojoFailureException(format("can not delete %s", tmp));
          }
          getLog().info(format("Incremental output update took %dms", sw.elapsed(TimeUnit.MILLISECONDS)));
          getLog().info(format("new: %d", report.newFiles));
          getLog().info(format("changed: %d", report.changedFiles));
          getLog().info(format("unchanged: %d", report.unchangedFiles));
        }
      }
    });
    if (addMavenDataLoader) {
      getLog().info("Adding maven data loader");
      settings.setEngineAttribute(MavenDataLoader.MAVEN_DATA_ATTRIBUTE, new MavenData(project));
      settings.add(Settings.NAME_DATA, format("maven: %s()", MavenDataLoader.class.getName()));
    }
    settings.execute();
  } catch (Exception e) {
    throw new MojoFailureException(MiscUtil.causeMessages(e), e);
  }
}
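The method above reuses a single watch for two phases: it reads the elapsed milliseconds after the Freemarker run and then resets the watch before measuring the output update. A minimal standalone sketch of that pattern, assuming only the Guava Stopwatch API (the sleeps stand in for real work, and plain com.google.common.base.Stopwatch behaves the same as the shaded class):

import java.util.concurrent.TimeUnit;
import org.apache.drill.shaded.guava.com.google.common.base.Stopwatch;

public class TwoPhaseTimingSketch {
  public static void main(String[] args) throws InterruptedException {
    Stopwatch sw = Stopwatch.createStarted();
    Thread.sleep(50); // stand-in for the Freemarker generation phase
    System.out.printf("generation took %dms%n", sw.elapsed(TimeUnit.MILLISECONDS));
    // reset() zeroes the elapsed time and leaves the watch stopped, so it must be
    // started again before it will measure the next phase.
    sw.reset().start();
    Thread.sleep(20); // stand-in for the incremental output update
    System.out.printf("output update took %dms%n", sw.elapsed(TimeUnit.MILLISECONDS));
  }
}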
Use of org.apache.drill.shaded.guava.com.google.common.base.Stopwatch in project Drill by Apache.
Class DefaultSqlHandler, method transform:
/**
 * Transform a RelNode into a new RelNode, targeting the provided set of traits. Also logs the
 * outcome if asked.
 *
 * @param plannerType the type of planner to use
 * @param phase the transformation phase we're running
 * @param input the original RelNode
 * @param targetTraits the traits we are targeting for output
 * @param log whether to log the planning phase
 * @return the transformed RelNode
 */
protected RelNode transform(PlannerType plannerType, PlannerPhase phase, RelNode input, RelTraitSet targetTraits, boolean log) {
  final Stopwatch watch = Stopwatch.createStarted();
  final RuleSet rules = config.getRules(phase);
  final RelTraitSet toTraits = targetTraits.simplify();
  final RelNode output;
  switch (plannerType) {
    case HEP_BOTTOM_UP:
    case HEP: {
      final HepProgramBuilder hepPgmBldr = new HepProgramBuilder();
      if (plannerType == PlannerType.HEP_BOTTOM_UP) {
        hepPgmBldr.addMatchOrder(HepMatchOrder.BOTTOM_UP);
      }
      for (RelOptRule rule : rules) {
        hepPgmBldr.addRuleInstance(rule);
      }
      // Set noDAG = true to avoid caching problems which lead to incorrect Drill work.
      final HepPlanner planner = new HepPlanner(hepPgmBldr.build(), context.getPlannerSettings(), true, null, RelOptCostImpl.FACTORY);
      JaninoRelMetadataProvider relMetadataProvider = Utilities.registerJaninoRelMetadataProvider();
      // Modify RelMetaProvider for every RelNode in the SQL operator Rel tree.
      input.accept(new MetaDataProviderModifier(relMetadataProvider));
      planner.setRoot(input);
      if (!input.getTraitSet().equals(targetTraits)) {
        planner.changeTraits(input, toTraits);
      }
      output = planner.findBestExp();
      break;
    }
    case VOLCANO:
    default: {
      // as weird as it seems, the cluster's only planner is the volcano planner.
      final RelOptPlanner planner = input.getCluster().getPlanner();
      final Program program = Programs.of(rules);
      Preconditions.checkArgument(planner instanceof VolcanoPlanner,
          "Cluster is expected to be constructed using VolcanoPlanner. Was actually of type %s.", planner.getClass().getName());
      output = program.run(planner, input, toTraits, ImmutableList.of(), ImmutableList.of());
      break;
    }
  }
  if (log) {
    log(plannerType, phase, output, logger, watch);
  }
  return output;
}
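transform() starts the watch before either planner runs and hands it to the log(...) helper only when log is true. The following hypothetical helper shows the same start-run-conditionally-report shape in isolation; timed, phaseName and the Supplier argument are illustrative names, not Drill APIs:

import java.util.concurrent.TimeUnit;
import java.util.function.Supplier;
import org.apache.drill.shaded.guava.com.google.common.base.Stopwatch;

public class TimedPhaseSketch {
  // Hypothetical helper: run one planning step and report how long it took,
  // mirroring how transform() only consults its Stopwatch when log == true.
  static <T> T timed(String phaseName, boolean log, Supplier<T> step) {
    Stopwatch watch = Stopwatch.createStarted();
    T result = step.get();
    if (log) {
      System.out.printf("%s took %dms%n", phaseName, watch.elapsed(TimeUnit.MILLISECONDS));
    }
    return result;
  }

  public static void main(String[] args) {
    String plan = timed("LOGICAL", true, () -> "transformed plan");
    System.out.println(plan);
  }
}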
Use of org.apache.drill.shaded.guava.com.google.common.base.Stopwatch in project Drill by Apache.
Class DbScanToIndexScanPrule, method doOnMatch:
protected void doOnMatch(IndexLogicalPlanCallContext indexContext) {
  Stopwatch indexPlanTimer = Stopwatch.createStarted();
  final PlannerSettings settings = PrelUtil.getPlannerSettings(indexContext.call.getPlanner());
  final IndexCollection indexCollection = getIndexCollection(settings, indexContext.scan);
  if (indexCollection == null) {
    return;
  }
  logger.debug("Index Rule {} starts", this.description);
  RexBuilder builder = indexContext.filter.getCluster().getRexBuilder();
  RexNode condition = null;
  if (indexContext.lowerProject == null) {
    condition = indexContext.filter.getCondition();
  } else {
    // get the filter as if it were below the projection.
    condition = RelOptUtil.pushFilterPastProject(indexContext.filter.getCondition(), indexContext.lowerProject);
  }
  // save this pushed down condition, in case it is needed later to build filter when joining back primary table
  indexContext.origPushedCondition = condition;
  RewriteAsBinaryOperators visitor = new RewriteAsBinaryOperators(true, builder);
  condition = condition.accept(visitor);
  if (indexCollection.supportsIndexSelection()) {
    try {
      processWithIndexSelection(indexContext, settings, condition, indexCollection, builder);
    } catch (Exception e) {
      logger.warn("Exception while doing index planning ", e);
    }
  } else {
    throw new UnsupportedOperationException("Index collection must support index selection");
  }
  indexPlanTimer.stop();
  logger.info("index_plan_info: Index Planning took {} ms", indexPlanTimer.elapsed(TimeUnit.MILLISECONDS));
}
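Unlike the previous examples, this rule calls stop() before reading the watch; on a stopped Stopwatch, elapsed() returns a frozen value, so the logged duration excludes anything that happens afterwards. A minimal sketch of that stop-then-read pattern, with Thread.sleep standing in for the planning work:

import java.util.concurrent.TimeUnit;
import org.apache.drill.shaded.guava.com.google.common.base.Stopwatch;

public class StopThenReadSketch {
  public static void main(String[] args) throws InterruptedException {
    Stopwatch indexPlanTimer = Stopwatch.createStarted();
    Thread.sleep(10); // stand-in for the index planning work
    indexPlanTimer.stop(); // freeze the reading; elapsed() stops growing here
    System.out.printf("index_plan_info: Index Planning took %d ms%n",
        indexPlanTimer.elapsed(TimeUnit.MILLISECONDS));
  }
}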
Use of org.apache.drill.shaded.guava.com.google.common.base.Stopwatch in project Drill by Apache.
Class PageReader, method next:
/**
 * Reads the next page in the parent column chunk.
 *
 * @return true if a page was found to read
 * @throws IOException
 */
public boolean next() throws IOException {
  this.pageValueCount = -1;
  this.valuesRead = this.valuesReadyToRead = 0;
  this.parentColumnReader.currDefLevel = -1;
  long totalValueCount = columnChunkMetaData.getValueCount();
  if (parentColumnReader.totalValuesRead >= totalValueCount) {
    return false;
  }
  clearDataBufferAndReaders();
  // Continue until we hit a non-empty data page.
  do {
    nextInternal();
    if (pageHeader == null) {
      throw new DrillRuntimeException(String.format("Failed to read another page having read %d of %d values from its column chunk.", parentColumnReader.totalValuesRead, totalValueCount));
    }
  } while (pageHeader.uncompressed_page_size == 0
      || (pageHeader.getType() != PageType.DATA_PAGE && pageHeader.getType() != PageType.DATA_PAGE_V2));
  if (pageData == null) {
    throw new DrillRuntimeException(String.format("Failed to read another page having read %d of %d values from its column chunk.", parentColumnReader.totalValuesRead, totalValueCount));
  }
  dataPageInfo = DataPageHeaderInfoProvider.builder(this.pageHeader);
  this.byteLength = this.pageHeader.uncompressed_page_size;
  this.pageValueCount = dataPageInfo.getNumValues();
  Stopwatch timer = Stopwatch.createStarted();
  // readPosInBytes is used for actually reading the values after we determine how many will fit in the vector.
  // readyToReadPosInBytes serves a similar purpose for the vector types where we must count up the values that will
  // fit one record at a time, such as for variable length data. Both operations must start in the same location after the
  // definition and repetition level data which is stored alongside the page data itself.
  this.readyToReadPosInBytes = this.readPosInBytes = decodeLevels();
  Encoding valueEncoding = METADATA_CONVERTER.getEncoding(dataPageInfo.getEncoding());
  parentColumnReader.usingDictionary = valueEncoding.usesDictionary();
  long timeDecode = timer.elapsed(TimeUnit.NANOSECONDS);
  stats.numDataPagesDecoded.incrementAndGet();
  stats.timeDataPageDecode.addAndGet(timeDecode);
  return true;
}
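Here the elapsed time is read in nanoseconds and folded into running statistics rather than logged directly. A small self-contained sketch of that accumulate-into-counters pattern, where the AtomicLong fields are stand-ins for the reader's stats object and are not actual Drill fields:

import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.drill.shaded.guava.com.google.common.base.Stopwatch;

public class DecodeStatsSketch {
  // Stand-ins for the reader's stats counters; not actual Drill fields.
  static final AtomicLong numDataPagesDecoded = new AtomicLong();
  static final AtomicLong timeDataPageDecode = new AtomicLong();

  static void decodeOnePage() throws InterruptedException {
    Stopwatch timer = Stopwatch.createStarted();
    Thread.sleep(5); // stand-in for decoding the definition/repetition levels
    long timeDecode = timer.elapsed(TimeUnit.NANOSECONDS);
    numDataPagesDecoded.incrementAndGet();
    timeDataPageDecode.addAndGet(timeDecode); // accumulate nanoseconds across pages
  }

  public static void main(String[] args) throws InterruptedException {
    for (int i = 0; i < 3; i++) {
      decodeOnePage();
    }
    System.out.printf("%d pages decoded in %d ns total%n",
        numDataPagesDecoded.get(), timeDataPageDecode.get());
  }
}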
Use of org.apache.drill.shaded.guava.com.google.common.base.Stopwatch in project Drill by Apache.
Class PageReader, method readUncompressedPage:
/**
* Reads an uncompressed Parquet page without copying the buffer returned by the backing input stream.
* @return uncompressed Parquet page data
* @throws IOException
*/
protected DrillBuf readUncompressedPage() throws IOException {
  int outputSize = pageHeader.getUncompressed_page_size();
  long start = dataReader.getPos();
  Stopwatch timer = Stopwatch.createStarted();
  DrillBuf outputPageData = dataReader.getNext(outputSize);
  long timeToRead = timer.elapsed(TimeUnit.NANOSECONDS);
  if (logger.isTraceEnabled()) {
    logger.trace("Col: {} readPos: {} Uncompressed_size: {} pageData: {}",
        columnChunkMetaData.toString(), dataReader.getPos(), outputSize, ByteBufUtil.hexDump(outputPageData));
  }
  this.updateStats(pageHeader, "Page Read", start, timeToRead, outputSize, outputSize);
  return outputPageData;
}
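The same nanosecond measurement wraps a single buffered read, and the result is passed to updateStats(...) together with the byte counts. A hypothetical timedRead helper sketching that timing-around-I/O pattern against a plain InputStream (not Drill's dataReader; readNBytes requires Java 11+):

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.concurrent.TimeUnit;
import org.apache.drill.shaded.guava.com.google.common.base.Stopwatch;

public class TimedReadSketch {
  // Hypothetical timedRead: measures a single read in nanoseconds, the way
  // readUncompressedPage() times dataReader.getNext(outputSize).
  static byte[] timedRead(InputStream in, int size) throws IOException {
    Stopwatch timer = Stopwatch.createStarted();
    byte[] page = in.readNBytes(size); // Java 11+
    long timeToRead = timer.elapsed(TimeUnit.NANOSECONDS);
    System.out.printf("read %d bytes in %d ns%n", page.length, timeToRead);
    return page;
  }

  public static void main(String[] args) throws IOException {
    byte[] data = timedRead(new ByteArrayInputStream(new byte[256]), 128);
    System.out.println("got " + data.length + " bytes");
  }
}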