Use of org.apache.drill.shaded.guava.com.google.common.base.Stopwatch in project drill by apache.
The class TopNBatch, method purgeAndResetPriorityQueue.
/**
 * Handle schema changes during execution.
 * 1. Purge existing batches
 * 2. Promote newly created container for new schema.
 * 3. Recreate priority queue and reset with coerced container.
 */
public void purgeAndResetPriorityQueue() {
  final Stopwatch watch = Stopwatch.createStarted();
  final VectorContainer c = priorityQueue.getHyperBatch();
  final VectorContainer newContainer = new VectorContainer(oContext);
  final SelectionVector4 selectionVector4 = priorityQueue.getSv4();
  final SimpleSV4RecordBatch batch = new SimpleSV4RecordBatch(c, selectionVector4, context);
  copier = GenericCopierFactory.createAndSetupCopier(batch, newContainer, null);
  SortRecordBatchBuilder builder = new SortRecordBatchBuilder(oContext.getAllocator());
  try {
    // Purge all the existing batches to a new batch which only holds the selected records
    copyToPurge(newContainer, builder);
    final VectorContainer oldSchemaContainer = new VectorContainer(oContext);
    builder.build(oldSchemaContainer);
    oldSchemaContainer.setRecordCount(builder.getSv4().getCount());
    final VectorContainer newSchemaContainer = SchemaUtil.coerceContainer(oldSchemaContainer, this.schema, oContext);
    newSchemaContainer.buildSchema(SelectionVectorMode.FOUR_BYTE);
    priorityQueue.cleanup();
    priorityQueue = createNewPriorityQueue(newSchemaContainer, config.getLimit());
    try {
      priorityQueue.resetQueue(newSchemaContainer, builder.getSv4().createNewWrapperCurrent());
    } catch (SchemaChangeException e) {
      throw schemaChangeException(e, logger);
    }
  } finally {
    builder.clear();
    builder.close();
  }
  logger.debug("Took {} us to purge and recreate queue for new schema", watch.elapsed(TimeUnit.MICROSECONDS));
}
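Every usage on this page follows the same shape: create a started Stopwatch immediately before the measured work, then read the elapsed time in the desired unit when logging. A minimal, self-contained sketch of that pattern; the TimedWorkExample class and doWork() placeholder are illustrative, only the Stopwatch and TimeUnit calls come from the snippets on this page.
import java.util.concurrent.TimeUnit;
import org.apache.drill.shaded.guava.com.google.common.base.Stopwatch;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class TimedWorkExample {
  private static final Logger logger = LoggerFactory.getLogger(TimedWorkExample.class);

  public void run() {
    // createStarted() returns a Stopwatch that is already running.
    Stopwatch watch = Stopwatch.createStarted();
    doWork();
    // elapsed(TimeUnit) converts the measured nanoseconds to the requested unit.
    logger.debug("Took {} us to do work", watch.elapsed(TimeUnit.MICROSECONDS));
  }

  private void doWork() { /* placeholder for the timed operation */ }
}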
Use of org.apache.drill.shaded.guava.com.google.common.base.Stopwatch in project drill by apache.
The class PriorityQueueTemplate, method generate.
@Override
public void generate() {
  Stopwatch watch = Stopwatch.createStarted();
  final DrillBuf drillBuf = allocator.buffer(4 * queueSize);
  finalSv4 = new SelectionVector4(drillBuf, queueSize, EST_MAX_QUEUE_SIZE);
  for (int i = queueSize - 1; i >= 0; i--) {
    finalSv4.set(i, pop());
  }
  logger.debug("Took {} us to generate output of {}", watch.elapsed(TimeUnit.MICROSECONDS), finalSv4.getTotalCount());
}
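The DrillBuf passed to the SelectionVector4 is sized at 4 * queueSize because each entry is a single four-byte value addressing one record inside the hyper-batch. A hedged sketch of that addressing convention as it is commonly used in Drill, with the batch index in the upper 16 bits and the record offset in the lower 16 bits; the class and helper names here are placeholders, not Drill APIs.
public class Sv4AddressSketch {
  // Pack a (batchIndex, recordIndex) pair into one four-byte SV4 entry.
  static int pack(int batchIndex, int recordIndex) {
    return (batchIndex << 16) | (recordIndex & 0xFFFF);
  }

  // Which batch of the hyper-batch the entry points into.
  static int batchOf(int sv4Entry) {
    return sv4Entry >>> 16;
  }

  // Which record within that batch the entry points at.
  static int recordOf(int sv4Entry) {
    return sv4Entry & 0xFFFF;
  }
}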
Use of org.apache.drill.shaded.guava.com.google.common.base.Stopwatch in project drill by apache.
The class ParquetRecordReaderTest, method testPerformance.
@Test
@Ignore
public void testPerformance() throws Exception {
  final DrillbitContext bitContext = mock(DrillbitContext.class);
  final UserClientConnection connection = mock(UserClientConnection.class);
  final DrillConfig c = DrillConfig.create();
  final FunctionImplementationRegistry registry = new FunctionImplementationRegistry(c);
  final FragmentContextImpl context = new FragmentContextImpl(bitContext, BitControl.PlanFragment.getDefaultInstance(), connection, registry);
  final Path fileName = new Path("/tmp/parquet_test_performance.parquet");
  final HashMap<String, FieldInfo> fields = new HashMap<>();
  final ParquetTestProperties props = new ParquetTestProperties(1, 20 * 1000 * 1000, DEFAULT_BYTES_PER_PAGE, fields);
  populateFieldInfoMap(props);
  final Configuration dfsConfig = new Configuration();
  final List<Footer> footers = ParquetFileReader.readFooters(dfsConfig, fileName);
  final Footer f = footers.iterator().next();
  final List<SchemaPath> columns = Lists.newArrayList();
  columns.add(new SchemaPath("_MAP.integer", ExpressionPosition.UNKNOWN));
  columns.add(new SchemaPath("_MAP.bigInt", ExpressionPosition.UNKNOWN));
  columns.add(new SchemaPath("_MAP.f", ExpressionPosition.UNKNOWN));
  columns.add(new SchemaPath("_MAP.d", ExpressionPosition.UNKNOWN));
  columns.add(new SchemaPath("_MAP.b", ExpressionPosition.UNKNOWN));
  columns.add(new SchemaPath("_MAP.bin", ExpressionPosition.UNKNOWN));
  columns.add(new SchemaPath("_MAP.bin2", ExpressionPosition.UNKNOWN));
  int totalRowCount = 0;
  final FileSystem fs = new CachedSingleFileSystem(fileName);
  final BufferAllocator allocator = RootAllocatorFactory.newRoot(c);
  for (int i = 0; i < 25; i++) {
    CompressionCodecFactory ccf = DrillCompressionCodecFactory.createDirectCodecFactory(dfsConfig, new ParquetDirectByteBufferAllocator(allocator), 0);
    final ParquetRecordReader rr = new ParquetRecordReader(context, fileName, 0, fs, ccf, f.getParquetMetadata(), columns, ParquetReaderUtility.DateCorruptionStatus.META_SHOWS_CORRUPTION);
    final TestOutputMutator mutator = new TestOutputMutator(allocator);
    rr.setup(null, mutator);
    final Stopwatch watch = Stopwatch.createStarted();
    int rowCount = 0;
    while ((rowCount = rr.next()) > 0) {
      totalRowCount += rowCount;
    }
    rr.close();
  }
  allocator.close();
}
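In this excerpt the Stopwatch is started for every reader pass but its value is never read, so only totalRowCount is accumulated. If per-pass timing were wanted, a hedged sketch of what could be added just before rr.close(); the print statement is an assumption, not code from the test, and the same idea applies to the testFull example below.
// Hypothetical addition inside the loop, just before rr.close():
long elapsedMs = watch.elapsed(TimeUnit.MILLISECONDS);
// Guard against a 0 ms reading before computing a rate.
long rowsPerSecond = elapsedMs == 0 ? totalRowCount : totalRowCount * 1000L / elapsedMs;
System.out.println(String.format("Pass %d: %,d rows so far in %d ms (%,d rows/s)", i, totalRowCount, elapsedMs, rowsPerSecond));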
Use of org.apache.drill.shaded.guava.com.google.common.base.Stopwatch in project drill by apache.
The class ParquetRecordReaderTest, method testFull.
private void testFull(QueryType type, String planText, String filename, int numberOfTimesRead /* specified in json plan */,
    int numberOfRowGroups, int recordsPerRowGroup, boolean testValues) throws Exception {
  // final RecordBatchLoader batchLoader = new RecordBatchLoader(getAllocator());
  final HashMap<String, FieldInfo> fields = new HashMap<>();
  final ParquetTestProperties props = new ParquetTestProperties(numberOfRowGroups, recordsPerRowGroup, DEFAULT_BYTES_PER_PAGE, fields);
  TestFileGenerator.populateFieldInfoMap(props);
  final ParquetResultListener resultListener = new ParquetResultListener(getAllocator(), props, numberOfTimesRead, testValues);
  final Stopwatch watch = Stopwatch.createStarted();
  testWithListener(type, planText, resultListener);
  resultListener.getResults();
}
Use of org.apache.drill.shaded.guava.com.google.common.base.Stopwatch in project drill by apache.
The class FileTest, method main.
public static void main(String[] args) throws IOException {
  Configuration conf = new Configuration();
  conf.set(FileSystem.FS_DEFAULT_NAME_KEY, "sync:///");
  logger.info(FileSystem.getDefaultUri(conf).toString());
  FileSystem fs = FileSystem.get(conf);
  Path path = new Path("/tmp/testFile");
  FSDataOutputStream out = fs.create(path);
  byte[] s = "hello world".getBytes();
  out.write(s);
  out.hflush();
  FSDataInputStream in = fs.open(path);
  byte[] bytes = new byte[s.length];
  in.read(bytes);
  logger.info(new String(bytes));
  File file = new File("/tmp/testFile");
  FileOutputStream fos = new FileOutputStream(file);
  FileInputStream fis = new FileInputStream(file);
  fos.write(s);
  fos.getFD().sync();
  fis.read(bytes);
  logger.info(new String(bytes));
  out = fs.create(new Path("/tmp/file"));
  for (int i = 0; i < 100; i++) {
    bytes = new byte[256 * 1024];
    Stopwatch watch = Stopwatch.createStarted();
    out.write(bytes);
    out.hflush();
    long t = watch.elapsed(TimeUnit.MILLISECONDS);
    logger.info(String.format("Elapsed: %d. Rate %d.\n", t, (long) ((long) bytes.length * 1000L / t)));
  }
}
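Since the loop above creates a fresh Stopwatch on each iteration, one alternative is Guava's reuse API: a single Stopwatch can be reset and restarted, avoiding a small per-write allocation. A hedged sketch of the same loop body using that pattern, reusing the out and bytes variables from the method above; the Math.max guard against a 0 ms reading (which would otherwise divide by zero) is also an addition.
Stopwatch watch = Stopwatch.createUnstarted();
for (int i = 0; i < 100; i++) {
  bytes = new byte[256 * 1024];
  watch.reset().start(); // reuse one Stopwatch instead of creating a new one each iteration
  out.write(bytes);
  out.hflush();
  long t = watch.elapsed(TimeUnit.MILLISECONDS);
  logger.info(String.format("Elapsed: %d. Rate %d.", t, (long) bytes.length * 1000L / Math.max(t, 1)));
}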