Use of io.trino.spi.Page in project trino by trinodb.
The class RcFilePageSource, method getNextPage.
@Override
public Page getNextPage() {
    try {
        // advance in the current batch
        pageId++;
        // if the batch has been consumed, read the next batch
        int currentPageSize = rcFileReader.advance();
        if (currentPageSize < 0) {
            close();
            return null;
        }
        Block[] blocks = new Block[hiveColumnIndexes.length];
        for (int fieldId = 0; fieldId < blocks.length; fieldId++) {
            if (constantBlocks[fieldId] != null) {
                blocks[fieldId] = new RunLengthEncodedBlock(constantBlocks[fieldId], currentPageSize);
            } else {
                blocks[fieldId] = createBlock(currentPageSize, fieldId);
            }
        }
        return new Page(currentPageSize, blocks);
    } catch (TrinoException e) {
        closeAllSuppress(e, this);
        throw e;
    } catch (RcFileCorruptionException e) {
        closeAllSuppress(e, this);
        throw new TrinoException(HIVE_BAD_DATA, format("Corrupted RC file: %s", rcFileReader.getId()), e);
    } catch (IOException | RuntimeException e) {
        closeAllSuppress(e, this);
        throw new TrinoException(HIVE_CURSOR_ERROR, format("Failed to read RC file: %s", rcFileReader.getId()), e);
    }
}
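In this reader, each Page is assembled directly from an array of Block objects, with a RunLengthEncodedBlock standing in for columns whose value is constant across the whole batch. Below is a minimal, self-contained sketch of that construction; the column types, values, and the buildPage name are illustrative and not part of the Trino source. Later SPI versions replace the public RunLengthEncodedBlock constructor with a static RunLengthEncodedBlock.create factory.

import io.trino.spi.Page;
import io.trino.spi.block.Block;
import io.trino.spi.block.BlockBuilder;
import io.trino.spi.block.RunLengthEncodedBlock;

import static io.trino.spi.type.BigintType.BIGINT;

public class ConstantColumnPageExample {
    public static Page buildPage(int positionCount) {
        // regular column: one bigint value per position
        BlockBuilder values = BIGINT.createBlockBuilder(null, positionCount);
        for (int i = 0; i < positionCount; i++) {
            BIGINT.writeLong(values, i);
        }

        // "constant" column: a single-position block expanded to positionCount via RLE,
        // the same trick RcFilePageSource uses for its prefilled constant columns
        BlockBuilder constant = BIGINT.createBlockBuilder(null, 1);
        BIGINT.writeLong(constant, 42);
        Block constantBlock = new RunLengthEncodedBlock(constant.build(), positionCount);

        return new Page(positionCount, values.build(), constantBlock);
    }
}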
Use of io.trino.spi.Page in project trino by trinodb.
The class AbstractTestHive, method doTestBucketSortedTables.
private void doTestBucketSortedTables(SchemaTableName table) throws IOException {
    int bucketCount = 3;
    int expectedRowCount = 0;
    try (Transaction transaction = newTransaction()) {
        ConnectorSession session = newSession();
        ConnectorMetadata metadata = transaction.getMetadata();
        // begin creating the table
        ConnectorTableMetadata tableMetadata = new ConnectorTableMetadata(
                table,
                ImmutableList.<ColumnMetadata>builder()
                        .add(new ColumnMetadata("id", VARCHAR))
                        .add(new ColumnMetadata("value_asc", VARCHAR))
                        .add(new ColumnMetadata("value_desc", BIGINT))
                        .add(new ColumnMetadata("ds", VARCHAR))
                        .build(),
                ImmutableMap.<String, Object>builder()
                        .put(STORAGE_FORMAT_PROPERTY, RCBINARY)
                        .put(PARTITIONED_BY_PROPERTY, ImmutableList.of("ds"))
                        .put(BUCKETED_BY_PROPERTY, ImmutableList.of("id"))
                        .put(BUCKET_COUNT_PROPERTY, bucketCount)
                        .put(SORTED_BY_PROPERTY, ImmutableList.builder()
                                .add(new SortingColumn("value_asc", ASCENDING))
                                .add(new SortingColumn("value_desc", DESCENDING))
                                .build())
                        .buildOrThrow());
        ConnectorOutputTableHandle outputHandle = metadata.beginCreateTable(session, tableMetadata, Optional.empty(), NO_RETRIES);
        // write the data
        ConnectorPageSink sink = pageSinkProvider.createPageSink(transaction.getTransactionHandle(), session, outputHandle);
        List<Type> types = tableMetadata.getColumns().stream().map(ColumnMetadata::getType).collect(toList());
        ThreadLocalRandom random = ThreadLocalRandom.current();
        for (int i = 0; i < 50; i++) {
            MaterializedResult.Builder builder = MaterializedResult.resultBuilder(session, types);
            for (int j = 0; j < 1000; j++) {
                builder.row(
                        sha256().hashLong(random.nextLong()).toString(),
                        "test" + random.nextInt(100),
                        random.nextLong(100_000),
                        "2018-04-01");
                expectedRowCount++;
            }
            sink.appendPage(builder.build().toPage());
        }
        HdfsContext context = new HdfsContext(session);
        // verify we have enough temporary files per bucket to require multiple passes
        Path stagingPathRoot;
        if (isTemporaryStagingDirectoryEnabled(session)) {
            stagingPathRoot = new Path(getTemporaryStagingDirectoryPath(session)
                    .replace("${USER}", context.getIdentity().getUser()));
        } else {
            stagingPathRoot = getStagingPathRoot(outputHandle);
        }
        assertThat(listAllDataFiles(context, stagingPathRoot))
                .filteredOn(file -> file.contains(".tmp-sort."))
                .size()
                .isGreaterThan(bucketCount * getHiveConfig().getMaxOpenSortFiles() * 2);
        // finish the write
        Collection<Slice> fragments = getFutureValue(sink.finish());
        // verify there are no temporary files
        for (String file : listAllDataFiles(context, stagingPathRoot)) {
            assertThat(file).doesNotContain(".tmp-sort.");
        }
        // finish creating table
        metadata.finishCreateTable(session, outputHandle, fragments, ImmutableList.of());
        transaction.commit();
    }
    // verify that bucket files are sorted
    try (Transaction transaction = newTransaction()) {
        ConnectorMetadata metadata = transaction.getMetadata();
        ConnectorSession session = newSession();
        metadata.beginQuery(session);
        ConnectorTableHandle tableHandle = getTableHandle(metadata, table);
        List<ColumnHandle> columnHandles = ImmutableList.copyOf(metadata.getColumnHandles(session, tableHandle).values());
        // verify local sorting property
        ConnectorTableProperties properties = metadata.getTableProperties(
                newSession(ImmutableMap.of(
                        "propagate_table_scan_sorting_properties", true,
                        "bucket_execution_enabled", false)),
                tableHandle);
        Map<String, Integer> columnIndex = indexColumns(columnHandles);
        assertEquals(properties.getLocalProperties(), ImmutableList.of(
                new SortingProperty<>(columnHandles.get(columnIndex.get("value_asc")), ASC_NULLS_FIRST),
                new SortingProperty<>(columnHandles.get(columnIndex.get("value_desc")), DESC_NULLS_LAST)));
        assertThat(metadata.getTableProperties(newSession(), tableHandle).getLocalProperties()).isEmpty();
        List<ConnectorSplit> splits = getAllSplits(tableHandle, transaction, session);
        assertThat(splits).hasSize(bucketCount);
        int actualRowCount = 0;
        for (ConnectorSplit split : splits) {
            try (ConnectorPageSource pageSource = pageSourceProvider.createPageSource(
                    transaction.getTransactionHandle(), session, split, tableHandle, columnHandles, DynamicFilter.EMPTY)) {
                String lastValueAsc = null;
                long lastValueDesc = -1;
                while (!pageSource.isFinished()) {
                    Page page = pageSource.getNextPage();
                    if (page == null) {
                        continue;
                    }
                    for (int i = 0; i < page.getPositionCount(); i++) {
                        Block blockAsc = page.getBlock(1);
                        Block blockDesc = page.getBlock(2);
                        assertFalse(blockAsc.isNull(i));
                        assertFalse(blockDesc.isNull(i));
                        String valueAsc = VARCHAR.getSlice(blockAsc, i).toStringUtf8();
                        if (lastValueAsc != null) {
                            assertGreaterThanOrEqual(valueAsc, lastValueAsc);
                            if (valueAsc.equals(lastValueAsc)) {
                                long valueDesc = BIGINT.getLong(blockDesc, i);
                                if (lastValueDesc != -1) {
                                    assertLessThanOrEqual(valueDesc, lastValueDesc);
                                }
                                lastValueDesc = valueDesc;
                            } else {
                                lastValueDesc = -1;
                            }
                        }
                        lastValueAsc = valueAsc;
                        actualRowCount++;
                    }
                }
            }
        }
        assertThat(actualRowCount).isEqualTo(expectedRowCount);
    }
}
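The verification loop above is the read side of the Page API: getPositionCount() gives the number of rows, getBlock(channel) returns a column, and typed accessors such as VARCHAR.getSlice and BIGINT.getLong decode individual positions. Here is a standalone sketch of that access pattern over a hand-built two-column page; the column layout, values, and class name are invented for illustration.

import io.airlift.slice.Slices;
import io.trino.spi.Page;
import io.trino.spi.block.Block;
import io.trino.spi.block.BlockBuilder;

import static io.trino.spi.type.BigintType.BIGINT;
import static io.trino.spi.type.VarcharType.VARCHAR;

public class ReadPageExample {
    public static void main(String[] args) {
        // build a two-column page: varchar key, bigint value
        BlockBuilder keys = VARCHAR.createBlockBuilder(null, 3);
        BlockBuilder values = BIGINT.createBlockBuilder(null, 3);
        for (int i = 0; i < 3; i++) {
            VARCHAR.writeSlice(keys, Slices.utf8Slice("row-" + i));
            BIGINT.writeLong(values, i * 10L);
        }
        Page page = new Page(keys.build(), values.build());

        // read it back position by position, channel by channel
        Block keyBlock = page.getBlock(0);
        Block valueBlock = page.getBlock(1);
        for (int position = 0; position < page.getPositionCount(); position++) {
            String key = VARCHAR.getSlice(keyBlock, position).toStringUtf8();
            long value = BIGINT.getLong(valueBlock, position);
            System.out.printf("%s -> %d%n", key, value);
        }
    }
}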
Use of io.trino.spi.Page in project trino by trinodb.
The class AbstractTestHiveFileFormats, method createTestFileTrino.
public static FileSplit createTestFileTrino(String filePath, HiveStorageFormat storageFormat, HiveCompressionCodec compressionCodec, List<TestColumn> testColumns, ConnectorSession session, int numRows, HiveFileWriterFactory fileWriterFactory) {
    // filter out partition keys, which are not written to the file
    testColumns = testColumns.stream().filter(column -> !column.isPartitionKey()).collect(toImmutableList());
    List<Type> types = testColumns.stream()
            .map(TestColumn::getType)
            .map(HiveType::valueOf)
            .map(type -> type.getType(TESTING_TYPE_MANAGER))
            .collect(toList());
    PageBuilder pageBuilder = new PageBuilder(types);
    for (int rowNumber = 0; rowNumber < numRows; rowNumber++) {
        pageBuilder.declarePosition();
        for (int columnNumber = 0; columnNumber < testColumns.size(); columnNumber++) {
            serializeObject(
                    types.get(columnNumber),
                    pageBuilder.getBlockBuilder(columnNumber),
                    testColumns.get(columnNumber).getWriteValue(),
                    testColumns.get(columnNumber).getObjectInspector(),
                    false);
        }
    }
    Page page = pageBuilder.build();
    JobConf jobConf = new JobConf();
    configureCompression(jobConf, compressionCodec);
    Properties tableProperties = new Properties();
    tableProperties.setProperty("columns", testColumns.stream().map(TestColumn::getName).collect(Collectors.joining(",")));
    tableProperties.setProperty("columns.types", testColumns.stream().map(TestColumn::getType).collect(Collectors.joining(",")));
    Optional<FileWriter> fileWriter = fileWriterFactory.createFileWriter(
            new Path(filePath),
            testColumns.stream().map(TestColumn::getName).collect(toList()),
            StorageFormat.fromHiveStorageFormat(storageFormat),
            tableProperties,
            jobConf,
            session,
            OptionalInt.empty(),
            NO_ACID_TRANSACTION,
            false,
            WriterKind.INSERT);
    FileWriter hiveFileWriter = fileWriter.orElseThrow(() -> new IllegalArgumentException("fileWriterFactory"));
    hiveFileWriter.appendRows(page);
    hiveFileWriter.commit();
    return new FileSplit(new Path(filePath), 0, new File(filePath).length(), new String[0]);
}
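Here the test rows are accumulated into a single Page through a PageBuilder: one declarePosition() per row, then one write into each channel's BlockBuilder. A minimal sketch of that assembly without the Hive serializeObject helper follows; the types, values, and the buildRows name are placeholders.

import io.airlift.slice.Slices;
import io.trino.spi.Page;
import io.trino.spi.PageBuilder;

import java.util.List;

import static io.trino.spi.type.BigintType.BIGINT;
import static io.trino.spi.type.VarcharType.VARCHAR;

public class BuildTestPageExample {
    public static Page buildRows(int numRows) {
        PageBuilder pageBuilder = new PageBuilder(List.of(VARCHAR, BIGINT));
        for (int row = 0; row < numRows; row++) {
            pageBuilder.declarePosition();
            // one write per channel, in the same order as the type list
            VARCHAR.writeSlice(pageBuilder.getBlockBuilder(0), Slices.utf8Slice("name-" + row));
            BIGINT.writeLong(pageBuilder.getBlockBuilder(1), row);
        }
        return pageBuilder.build();
    }
}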
Use of io.trino.spi.Page in project trino by trinodb.
The class MergingPageIterator, method computeNext.
@Override
protected Page computeNext() {
    while (!pageBuilder.isFull() && pagePositions.hasNext()) {
        pagePositions.next().appendTo(pageBuilder);
    }
    if (pageBuilder.isEmpty()) {
        return endOfData();
    }
    Page page = pageBuilder.build();
    pageBuilder.reset();
    return page;
}
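computeNext shows the standard streaming use of PageBuilder: append until isFull(), emit the Page with build(), then reset() and continue. A simplified sketch of the same pattern over a plain iterator of values, standing in for the merged page positions, is below; the toPages helper is an assumption, not part of MergingPageIterator.

import io.trino.spi.Page;
import io.trino.spi.PageBuilder;

import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;

import static io.trino.spi.type.BigintType.BIGINT;

public class PagingIteratorExample {
    // emits full pages while input remains, then one final partial page
    public static List<Page> toPages(Iterator<Long> values) {
        List<Page> pages = new ArrayList<>();
        PageBuilder pageBuilder = new PageBuilder(List.of(BIGINT));
        while (values.hasNext()) {
            // fill the builder until it reports itself full or the input runs out
            while (!pageBuilder.isFull() && values.hasNext()) {
                pageBuilder.declarePosition();
                BIGINT.writeLong(pageBuilder.getBlockBuilder(0), values.next());
            }
            if (!pageBuilder.isEmpty()) {
                pages.add(pageBuilder.build());
                pageBuilder.reset();
            }
        }
        return pages;
    }
}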
Use of io.trino.spi.Page in project trino by trinodb.
The class TestOrcPageSourceMemoryTracking, method testScanFilterAndProjectOperator.
@Test
public void testScanFilterAndProjectOperator() {
    // Numbers used in assertions in this test may change when the implementation is modified;
    // feel free to change them if they break in the future.
    DriverContext driverContext = testPreparer.newDriverContext();
    SourceOperator operator = testPreparer.newScanFilterAndProjectOperator(driverContext);
    assertEquals(driverContext.getMemoryUsage(), 0);
    int totalRows = 0;
    while (totalRows < NUM_ROWS) {
        assertFalse(operator.isFinished());
        Page page = operator.getOutput();
        assertNotNull(page);
        // memory usage varies depending on stripe alignment
        long memoryUsage = driverContext.getMemoryUsage();
        assertTrue(memoryUsage < 1000 || (memoryUsage > 150_000 && memoryUsage < 630_000), format("Memory usage (%s) outside of bounds", memoryUsage));
        totalRows += page.getPositionCount();
    }
    // done... in the current implementation, finish is not set until getOutput() returns a null page
    assertNull(operator.getOutput());
    assertTrue(operator.isFinished());
    assertBetweenInclusive(driverContext.getMemoryUsage(), 0L, 500L);
}
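The test drains the operator one Page at a time, counting getPositionCount() per page, and treats a null page as "no output ready yet" until the source is finished. The same drain loop is sketched below against a hypothetical PageProducer interface rather than the real SourceOperator or ConnectorPageSource, which follow the same shape.

import io.trino.spi.Page;

public class DrainPagesExample {
    // hypothetical minimal producer interface, not part of the Trino SPI
    public interface PageProducer {
        boolean isFinished();

        Page getNextPage(); // may return null when no output is ready yet
    }

    public static long countRows(PageProducer producer) {
        long rows = 0;
        while (!producer.isFinished()) {
            Page page = producer.getNextPage();
            if (page == null) {
                continue;
            }
            rows += page.getPositionCount();
        }
        return rows;
    }
}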