Use of com.facebook.presto.spi.type.BigintType.BIGINT in project presto by prestodb.
The class TestOrcReaderPositions, method testStripeSkipping:
@Test
public void testStripeSkipping() throws Exception {
    try (TempFile tempFile = new TempFile()) {
        createMultiStripeFile(tempFile.getFile());

        // test reading second and fourth stripes
        OrcPredicate predicate = (numberOfRows, statisticsByColumnIndex) -> {
            if (numberOfRows == 100) {
                return true;
            }
            IntegerStatistics stats = statisticsByColumnIndex.get(0).getIntegerStatistics();
            return ((stats.getMin() == 60) && (stats.getMax() == 117)) || ((stats.getMin() == 180) && (stats.getMax() == 237));
        };

        OrcRecordReader reader = createCustomOrcRecordReader(tempFile, new OrcMetadataReader(), predicate, BIGINT);
        assertEquals(reader.getFileRowCount(), 100);
        assertEquals(reader.getReaderRowCount(), 40);
        assertEquals(reader.getFilePosition(), 0);
        assertEquals(reader.getReaderPosition(), 0);

        // second stripe
        assertEquals(reader.nextBatch(), 20);
        assertEquals(reader.getReaderPosition(), 0);
        assertEquals(reader.getFilePosition(), 20);
        assertCurrentBatch(reader, 1);

        // fourth stripe
        assertEquals(reader.nextBatch(), 20);
        assertEquals(reader.getReaderPosition(), 20);
        assertEquals(reader.getFilePosition(), 60);
        assertCurrentBatch(reader, 3);

        // end of file: no further batches
        assertEquals(reader.nextBatch(), -1);
        assertEquals(reader.getReaderPosition(), 40);
        assertEquals(reader.getFilePosition(), 100);
        reader.close();
    }
}
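The magic numbers in the predicate come from the layout of the file written by createMultiStripeFile: assuming it writes 100 rows at 20 rows per stripe, with row i holding the value i * 3, the second stripe covers rows 20..39 (min 60, max 117) and the fourth covers rows 60..79 (min 180, max 237). A minimal sketch of that arithmetic, with the stripe size and value formula inferred from the asserted statistics rather than taken from the helper itself:

// Hypothetical sketch: per-stripe min/max for a file where row i stores
// the value i * 3 and every stripe holds exactly rowsPerStripe rows.
static void printStripeBounds(int rowCount, int rowsPerStripe) {
    for (int start = 0; start < rowCount; start += rowsPerStripe) {
        int end = start + rowsPerStripe - 1;
        // stripe min is the first row's value, max is the last row's value
        System.out.printf("rows %d-%d: min=%d max=%d%n", start, end, start * 3, end * 3);
    }
}

Running it with (100, 20) reproduces the (60, 117) and (180, 237) bounds the predicate matches, which is why exactly two stripes (40 rows) survive the skip.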
Use of com.facebook.presto.spi.type.BigintType.BIGINT in project presto by prestodb.
The class TestHashPartitionMaskOperator, method testHashPartitionMaskWithMask:
@Test(dataProvider = "hashEnabledValues")
public void testHashPartitionMaskWithMask(boolean hashEnabled) throws Exception {
    RowPagesBuilder rowPagesBuilder = rowPagesBuilder(hashEnabled, Ints.asList(0), BIGINT, BOOLEAN, BOOLEAN);
    List<Page> input = rowPagesBuilder.addSequencePage(ROW_COUNT, 0, 0, 1).build();
    OperatorFactory operatorFactory = new HashPartitionMaskOperatorFactory(
            0,
            new PlanNodeId("test"),
            PARTITION_COUNT,
            rowPagesBuilder.getTypes(),
            ImmutableList.of(1, 2),
            ImmutableList.of(0),
            rowPagesBuilder.getHashChannel());

    int[] rowPartition = new int[ROW_COUNT];
    Arrays.fill(rowPartition, -1);
    for (int partition = 0; partition < PARTITION_COUNT; partition++) {
        MaterializedResult.Builder expected = resultBuilder(TEST_SESSION, BIGINT, BOOLEAN, BOOLEAN, BOOLEAN);
        for (int i = 0; i < ROW_COUNT; i++) {
            long rawHash = BigintOperators.hashCode(i);
            // mix the bits so we don't use the same hash used to distribute between stages
            rawHash = XxHash64.hash(Long.reverse(rawHash));
            // clear the sign bit so the modulo below is non-negative
            rawHash &= Long.MAX_VALUE;
            boolean active = (rawHash % PARTITION_COUNT == partition);
            boolean maskValue = i % 2 == 0;
            expected.row((long) i, active && maskValue, active && !maskValue, active);
            if (active) {
                assertEquals(rowPartition[i], -1);
                rowPartition[i] = partition;
            }
        }
        OperatorAssertion.assertOperatorEqualsIgnoreOrder(operatorFactory, createDriverContext(), input, expected.build(), hashEnabled, Optional.of(3));
    }
    // every row must have been assigned to exactly one partition
    assertTrue(IntStream.of(rowPartition).noneMatch(partition -> partition == -1));
}
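The expected-value loop reimplements the operator's partition assignment: rehash the per-row hash with XxHash64 so the partition choice is independent of the hash already used to distribute rows between stages, mask off the sign bit, and reduce modulo the partition count. A standalone sketch of that step, illustrative rather than the operator's exact internals:

import io.airlift.slice.XxHash64;

// Mirrors the expected-value computation above: mix the bits of the
// distribution hash, force it non-negative, then bucket by modulo.
static int assignPartition(long rawHash, int partitionCount) {
    long mixed = XxHash64.hash(Long.reverse(rawHash));
    mixed &= Long.MAX_VALUE; // clear the sign bit so the modulo is non-negative
    return (int) (mixed % partitionCount);
}

With this helper, the loop's active flag is simply assignPartition(BigintOperators.hashCode(i), PARTITION_COUNT) == partition.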
Use of com.facebook.presto.spi.type.BigintType.BIGINT in project presto by prestodb.
The class TestHashPartitionMaskOperator, method testHashPartitionMask:
@Test(dataProvider = "hashEnabledValues")
public void testHashPartitionMask(boolean hashEnabled) throws Exception {
    RowPagesBuilder rowPagesBuilder = rowPagesBuilder(hashEnabled, Ints.asList(0), BIGINT);
    List<Page> input = rowPagesBuilder.addSequencePage(ROW_COUNT, 0).build();
    OperatorFactory operatorFactory = new HashPartitionMaskOperatorFactory(
            0,
            new PlanNodeId("test"),
            PARTITION_COUNT,
            rowPagesBuilder.getTypes(),
            ImmutableList.of(),
            ImmutableList.of(0),
            rowPagesBuilder.getHashChannel());

    int[] rowPartition = new int[ROW_COUNT];
    Arrays.fill(rowPartition, -1);
    for (int partition = 0; partition < PARTITION_COUNT; partition++) {
        MaterializedResult.Builder expected = resultBuilder(TEST_SESSION, BIGINT, BOOLEAN);
        for (int i = 0; i < ROW_COUNT; i++) {
            long rawHash = BigintOperators.hashCode(i);
            // mix the bits so we don't use the same hash used to distribute between stages
            rawHash = XxHash64.hash(Long.reverse(rawHash));
            rawHash &= Long.MAX_VALUE;
            boolean active = (rawHash % PARTITION_COUNT == partition);
            expected.row((long) i, active);
            if (active) {
                assertEquals(rowPartition[i], -1);
                rowPartition[i] = partition;
            }
        }
        OperatorAssertion.assertOperatorEqualsIgnoreOrder(operatorFactory, createDriverContext(), input, expected.build(), hashEnabled, Optional.of(1));
    }
    assertTrue(IntStream.of(rowPartition).noneMatch(partition -> partition == -1));
}
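Both tests run under the hashEnabledValues data provider, which toggles whether a precomputed hash channel is appended to the input pages. The provider itself is not shown on this page; a typical TestNG definition would look like this sketch:

@DataProvider(name = "hashEnabledValues")
public static Object[][] hashEnabledValuesProvider() {
    return new Object[][] {{true}, {false}};
}

The only other difference from testHashPartitionMaskWithMask is the empty mask-channel list (ImmutableList.of()), so each expected row carries just the sequence value and the activity flag.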
Use of com.facebook.presto.spi.type.BigintType.BIGINT in project presto by prestodb.
The class TestRaptorIntegrationSmokeTest, method testTablesSystemTable:
@Test
public void testTablesSystemTable() {
    assertUpdate("CREATE TABLE system_tables_test0 (c00 timestamp, c01 varchar, c02 double, c03 bigint, c04 bigint)");
    assertUpdate("CREATE TABLE system_tables_test1 (c10 timestamp, c11 varchar, c12 double, c13 bigint, c14 bigint) " +
            "WITH (temporal_column = 'c10')");
    assertUpdate("CREATE TABLE system_tables_test2 (c20 timestamp, c21 varchar, c22 double, c23 bigint, c24 bigint) " +
            "WITH (temporal_column = 'c20', ordering = ARRAY['c22', 'c21'])");
    assertUpdate("CREATE TABLE system_tables_test3 (c30 timestamp, c31 varchar, c32 double, c33 bigint, c34 bigint) " +
            "WITH (temporal_column = 'c30', bucket_count = 40, bucketed_on = ARRAY ['c34', 'c33'])");
    assertUpdate("CREATE TABLE system_tables_test4 (c40 timestamp, c41 varchar, c42 double, c43 bigint, c44 bigint) " +
            "WITH (temporal_column = 'c40', ordering = ARRAY['c41', 'c42'], distribution_name = 'test_distribution', bucket_count = 50, bucketed_on = ARRAY ['c43', 'c44'])");
    assertUpdate("CREATE TABLE system_tables_test5 (c50 timestamp, c51 varchar, c52 double, c53 bigint, c54 bigint) " +
            "WITH (ordering = ARRAY['c51', 'c52'], distribution_name = 'test_distribution', bucket_count = 50, bucketed_on = ARRAY ['c53', 'c54'], organized = true)");

    MaterializedResult actualResults = computeActual("SELECT * FROM system.tables");
    assertEquals(actualResults.getTypes(), ImmutableList.builder()
            .add(VARCHAR) // table_schema
            .add(VARCHAR) // table_name
            .add(VARCHAR) // temporal_column
            .add(new ArrayType(VARCHAR)) // ordering_columns
            .add(VARCHAR) // distribution_name
            .add(BIGINT) // bucket_count
            .add(new ArrayType(VARCHAR)) // bucket_columns
            .add(BOOLEAN) // organized
            .build());

    Map<String, MaterializedRow> map = actualResults.getMaterializedRows().stream()
            .filter(row -> ((String) row.getField(1)).startsWith("system_tables_test"))
            .collect(ImmutableCollectors.toImmutableMap(row -> (String) row.getField(1)));
    assertEquals(map.size(), 6);
    assertEquals(map.get("system_tables_test0").getFields(), asList("tpch", "system_tables_test0", null, null, null, null, null, Boolean.FALSE));
    assertEquals(map.get("system_tables_test1").getFields(), asList("tpch", "system_tables_test1", "c10", null, null, null, null, Boolean.FALSE));
    assertEquals(map.get("system_tables_test2").getFields(), asList("tpch", "system_tables_test2", "c20", ImmutableList.of("c22", "c21"), null, null, null, Boolean.FALSE));
    assertEquals(map.get("system_tables_test3").getFields(), asList("tpch", "system_tables_test3", "c30", null, null, 40L, ImmutableList.of("c34", "c33"), Boolean.FALSE));
    assertEquals(map.get("system_tables_test4").getFields(), asList("tpch", "system_tables_test4", "c40", ImmutableList.of("c41", "c42"), "test_distribution", 50L, ImmutableList.of("c43", "c44"), Boolean.FALSE));
    assertEquals(map.get("system_tables_test5").getFields(), asList("tpch", "system_tables_test5", null, ImmutableList.of("c51", "c52"), "test_distribution", 50L, ImmutableList.of("c53", "c54"), Boolean.TRUE));

    actualResults = computeActual("SELECT * FROM system.tables WHERE table_schema = 'tpch'");
    long actualRowCount = actualResults.getMaterializedRows().stream()
            .filter(row -> ((String) row.getField(1)).startsWith("system_tables_test"))
            .count();
    assertEquals(actualRowCount, 6);

    actualResults = computeActual("SELECT * FROM system.tables WHERE table_name = 'system_tables_test3'");
    assertEquals(actualResults.getMaterializedRows().size(), 1);

    actualResults = computeActual("SELECT * FROM system.tables WHERE table_schema = 'tpch' and table_name = 'system_tables_test3'");
    assertEquals(actualResults.getMaterializedRows().size(), 1);

    actualResults = computeActual("SELECT distribution_name, bucket_count, bucketing_columns, ordering_columns, temporal_column, organized " +
            "FROM system.tables " +
            "WHERE table_schema = 'tpch' and table_name = 'system_tables_test3'");
    assertEquals(actualResults.getTypes(), ImmutableList.of(VARCHAR, BIGINT, new ArrayType(VARCHAR), new ArrayType(VARCHAR), VARCHAR, BOOLEAN));
    assertEquals(actualResults.getMaterializedRows().size(), 1);

    assertUpdate("DROP TABLE system_tables_test0");
    assertUpdate("DROP TABLE system_tables_test1");
    assertUpdate("DROP TABLE system_tables_test2");
    assertUpdate("DROP TABLE system_tables_test3");
    assertUpdate("DROP TABLE system_tables_test4");
    assertUpdate("DROP TABLE system_tables_test5");
    assertEquals(computeActual("SELECT * FROM system.tables WHERE table_schema IN ('foo', 'bar')").getRowCount(), 0);
}
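Every assertion above addresses the system.tables row by position, and the single-table lookups all repeat the same filter. A hedged convenience for that pattern (the helper name is hypothetical; computeActual comes from the test's base class and getOnlyElement is Guava's Iterables.getOnlyElement):

// Hypothetical helper: fetch the single system.tables row for one table.
private MaterializedRow getTablesRow(String tableName) {
    MaterializedResult result = computeActual(
            "SELECT * FROM system.tables WHERE table_name = '" + tableName + "'");
    return getOnlyElement(result.getMaterializedRows());
}

getTablesRow("system_tables_test3").getFields() would then return the same list asserted above.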
Use of com.facebook.presto.spi.type.BigintType.BIGINT in project presto by prestodb.
The class AbstractTestHiveClientS3, method doCreateTable:
private void doCreateTable(SchemaTableName tableName, HiveStorageFormat storageFormat) throws Exception {
    List<ColumnMetadata> columns = ImmutableList.<ColumnMetadata>builder()
            .add(new ColumnMetadata("id", BIGINT))
            .build();
    MaterializedResult data = MaterializedResult.resultBuilder(newSession(), BIGINT)
            .row(1L)
            .row(3L)
            .row(2L)
            .build();

    try (Transaction transaction = newTransaction()) {
        ConnectorMetadata metadata = transaction.getMetadata();
        ConnectorSession session = newSession();

        // begin creating the table
        ConnectorTableMetadata tableMetadata = new ConnectorTableMetadata(tableName, columns, createTableProperties(storageFormat));
        ConnectorOutputTableHandle outputHandle = metadata.beginCreateTable(session, tableMetadata, Optional.empty());

        // write the records
        ConnectorPageSink sink = pageSinkProvider.createPageSink(transaction.getTransactionHandle(), session, outputHandle);
        sink.appendPage(data.toPage());
        Collection<Slice> fragments = getFutureValue(sink.finish());

        // commit the table
        metadata.finishCreateTable(session, outputHandle, fragments);
        transaction.commit();

        // Hack to work around the metastore not being configured for S3.
        // The metastore tries to validate the location when creating the
        // table, which fails without explicit configuration for S3.
        // We work around that by using a dummy location when creating the
        // table and update it here to the correct S3 location.
        metastoreClient.updateTableLocation(database, tableName.getTableName(), locationService.writePathRoot(((HiveOutputTableHandle) outputHandle).getLocationHandle()).get().toString());
    }

    try (Transaction transaction = newTransaction()) {
        ConnectorMetadata metadata = transaction.getMetadata();
        ConnectorSession session = newSession();

        // load the new table
        ConnectorTableHandle tableHandle = getTableHandle(metadata, tableName);
        List<ColumnHandle> columnHandles = filterNonHiddenColumnHandles(metadata.getColumnHandles(session, tableHandle).values());

        // verify the metadata
        ConnectorTableMetadata tableMetadata = metadata.getTableMetadata(session, getTableHandle(metadata, tableName));
        assertEquals(filterNonHiddenColumnMetadata(tableMetadata.getColumns()), columns);

        // verify the data
        List<ConnectorTableLayoutResult> tableLayoutResults = metadata.getTableLayouts(session, tableHandle, new Constraint<>(TupleDomain.all(), bindings -> true), Optional.empty());
        HiveTableLayoutHandle layoutHandle = (HiveTableLayoutHandle) getOnlyElement(tableLayoutResults).getTableLayout().getHandle();
        assertEquals(layoutHandle.getPartitions().get().size(), 1);
        ConnectorSplitSource splitSource = splitManager.getSplits(transaction.getTransactionHandle(), session, layoutHandle);
        ConnectorSplit split = getOnlyElement(getAllSplits(splitSource));
        try (ConnectorPageSource pageSource = pageSourceProvider.createPageSource(transaction.getTransactionHandle(), session, split, columnHandles)) {
            MaterializedResult result = materializeSourceDataStream(session, pageSource, getTypes(columnHandles));
            assertEqualsIgnoreOrder(result.getMaterializedRows(), data.getMaterializedRows());
        }
    }
}
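doCreateTable is private, so the surrounding test class drives it once per storage format. A hedged usage sketch; the test method name, the table name, and the dropTable cleanup are assumptions rather than code from AbstractTestHiveClientS3:

@Test
public void testTableCreation() throws Exception {
    for (HiveStorageFormat storageFormat : HiveStorageFormat.values()) {
        // hypothetical temporary table name; any unused name in the test schema works
        SchemaTableName table = new SchemaTableName(database, "tmp_presto_s3_test_create");
        try {
            doCreateTable(table, storageFormat);
        }
        finally {
            dropTable(table); // assumed cleanup helper
        }
    }
}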