use of com.facebook.presto.spi.ConnectorSession in project presto by prestodb.
the class AbstractTestHiveClient method assertEmptyFile.
private void assertEmptyFile(HiveStorageFormat format)
        throws Exception
{
    SchemaTableName tableName = temporaryTable("empty_file");
    try {
        List<Column> columns = ImmutableList.of(new Column("test", HIVE_STRING, Optional.empty(), Optional.empty()));
        createEmptyTable(tableName, format, columns, ImmutableList.of());

        try (Transaction transaction = newTransaction()) {
            ConnectorSession session = newSession();
            ConnectorMetadata metadata = transaction.getMetadata();

            ConnectorTableHandle tableHandle = getTableHandle(metadata, tableName);
            List<ColumnHandle> columnHandles = filterNonHiddenColumnHandles(metadata.getColumnHandles(session, tableHandle).values());

            MetastoreContext metastoreContext = new MetastoreContext(
                    session.getIdentity(),
                    session.getQueryId(),
                    session.getClientInfo(),
                    session.getSource(),
                    getMetastoreHeaders(session),
                    false,
                    DEFAULT_COLUMN_CONVERTER_PROVIDER);
            Table table = transaction.getMetastore()
                    .getTable(metastoreContext, tableName.getSchemaName(), tableName.getTableName())
                    .orElseThrow(AssertionError::new);

            // verify directory is empty
            HdfsContext context = new HdfsContext(session, tableName.getSchemaName(), tableName.getTableName(), table.getStorage().getLocation(), false);
            Path location = new Path(table.getStorage().getLocation());
            assertTrue(listDirectory(context, location).isEmpty());

            // read table with empty directory
            readTable(transaction, tableHandle, columnHandles, session, TupleDomain.all(), OptionalInt.of(0), Optional.of(ORC));

            // create empty file
            FileSystem fileSystem = hdfsEnvironment.getFileSystem(context, location);
            assertTrue(fileSystem.createNewFile(new Path(location, "empty-file")));
            assertEquals(listDirectory(context, location), ImmutableList.of("empty-file"));

            // read table with empty file
            MaterializedResult result = readTable(transaction, tableHandle, columnHandles, session, TupleDomain.all(), OptionalInt.of(1), Optional.empty());
            assertEquals(result.getRowCount(), 0);
        }
    }
    finally {
        dropTable(tableName);
    }
}
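listDirectory is a private helper of the test class that is not shown on this page. A minimal sketch of what it plausibly looks like, assuming it returns the plain file names under a path via the Hadoop FileSystem API (imports elided, as in the snippets above):

// Hypothetical sketch of the listDirectory helper used above, not the actual
// Presto implementation: list the names of the entries under a path.
private List<String> listDirectory(HdfsContext context, Path path)
        throws IOException
{
    FileSystem fileSystem = hdfsEnvironment.getFileSystem(context, path);
    return Arrays.stream(fileSystem.listStatus(path))
            .map(status -> status.getPath().getName())
            .collect(toList());
}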
use of com.facebook.presto.spi.ConnectorSession in project presto by prestodb.
the class AbstractTestHiveClient method testPartitionSchemaNonCanonical.
// TODO coercion of non-canonical values should be supported
@Test(enabled = false)
public void testPartitionSchemaNonCanonical()
        throws Exception
{
    try (Transaction transaction = newTransaction()) {
        ConnectorSession session = newSession();
        ConnectorMetadata metadata = transaction.getMetadata();

        ConnectorTableHandle table = getTableHandle(metadata, tablePartitionSchemaChangeNonCanonical);
        ColumnHandle column = metadata.getColumnHandles(session, table).get("t_boolean");
        assertNotNull(column);

        // constrain the scan to the single partition t_boolean=0
        ConnectorTableLayoutHandle layoutHandle = getTableLayout(
                session,
                metadata,
                table,
                new Constraint<>(withColumnDomains(ImmutableMap.of(column, Domain.singleValue(BOOLEAN, false)))),
                transaction).getHandle();
        assertEquals(getAllPartitions(layoutHandle).size(), 1);
        assertEquals(getPartitionId(getAllPartitions(layoutHandle).get(0)), "t_boolean=0");

        ConnectorSplitSource splitSource = splitManager.getSplits(transaction.getTransactionHandle(), session, layoutHandle, SPLIT_SCHEDULING_CONTEXT);
        ConnectorSplit split = getOnlyElement(getAllSplits(splitSource));

        ImmutableList<ColumnHandle> columnHandles = ImmutableList.of(column);
        try (ConnectorPageSource ignored = pageSourceProvider.createPageSource(transaction.getTransactionHandle(), session, split, layoutHandle, columnHandles, NON_CACHEABLE)) {
            fail("expected exception");
        }
        catch (PrestoException e) {
            // the non-canonical partition value cannot be coerced to a boolean
            assertEquals(e.getErrorCode(), HIVE_INVALID_PARTITION_VALUE.toErrorCode());
        }
    }
}
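For context, the Constraint passed to getTableLayout above wraps a TupleDomain built with withColumnDomains. A minimal sketch of that construction in isolation (the column handle parameter is a placeholder for the t_boolean partition key):

// Hedged sketch: build a constraint that pins one column to a single value.
private static Constraint<ColumnHandle> singlePartitionConstraint(ColumnHandle column)
{
    TupleDomain<ColumnHandle> predicate = TupleDomain.withColumnDomains(
            ImmutableMap.of(column, Domain.singleValue(BOOLEAN, false)));
    return new Constraint<>(predicate);
}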
use of com.facebook.presto.spi.ConnectorSession in project presto by prestodb.
the class AbstractTestHiveClient method testIllegalStorageFormatDuringTableScan.
/**
 * During a table scan, an illegal storage format on one table should not fail the whole scan.
 */
@Test
public void testIllegalStorageFormatDuringTableScan()
{
    SchemaTableName schemaTableName = temporaryTable("test_illegal_storage_format");
    try (Transaction transaction = newTransaction()) {
        ConnectorSession session = newSession();
        List<Column> columns = ImmutableList.of(new Column("pk", HIVE_STRING, Optional.empty(), Optional.empty()));
        String tableOwner = session.getUser();
        String schemaName = schemaTableName.getSchemaName();
        String tableName = schemaTableName.getTableName();
        LocationHandle locationHandle = getLocationService().forNewTable(transaction.getMetastore(), session, schemaName, tableName, false);
        Path targetPath = getLocationService().getQueryWriteInfo(locationHandle).getTargetPath();

        // create a table whose storage format is null
        Table.Builder tableBuilder = Table.builder()
                .setDatabaseName(schemaName)
                .setTableName(tableName)
                .setOwner(tableOwner)
                .setTableType(PrestoTableType.MANAGED_TABLE)
                .setParameters(ImmutableMap.of(
                        PRESTO_VERSION_NAME, TEST_SERVER_VERSION,
                        PRESTO_QUERY_ID_NAME, session.getQueryId()))
                .setDataColumns(columns)
                .withStorage(storage -> storage
                        .setLocation(targetPath.toString())
                        .setStorageFormat(StorageFormat.createNullable(null, null, null))
                        .setSerdeParameters(ImmutableMap.of()));
        PrincipalPrivileges principalPrivileges = testingPrincipalPrivilege(tableOwner, session.getUser());
        transaction.getMetastore().createTable(session, tableBuilder.build(), principalPrivileges, Optional.empty(), true, EMPTY_TABLE_STATISTICS);
        transaction.commit();
    }

    // verify the table can still be retrieved instead of throwing an exception
    try (Transaction transaction = newTransaction()) {
        ConnectorMetadata metadata = transaction.getMetadata();
        Map<SchemaTableName, List<ColumnMetadata>> allColumns = metadata.listTableColumns(newSession(), new SchemaTablePrefix(schemaTableName.getSchemaName(), schemaTableName.getTableName()));
        assertTrue(allColumns.containsKey(schemaTableName));
    }
    finally {
        dropTable(schemaTableName);
    }
}
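temporaryTable, used here and in the other snippets, is another helper not shown on this page. A plausible sketch, assuming it appends a random suffix so that concurrent test runs do not collide (the name prefix and the database field are assumptions):

// Hypothetical sketch of temporaryTable; the naming scheme is an assumption.
protected SchemaTableName temporaryTable(String tableName)
{
    String randomSuffix = UUID.randomUUID().toString().toLowerCase(ENGLISH).replace("-", "");
    return new SchemaTableName(database, "tmp_presto_test_" + tableName + "_" + randomSuffix);
}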
use of com.facebook.presto.spi.ConnectorSession in project presto by prestodb.
the class AbstractTestHiveClient method doCreateEmptyTable.
protected void doCreateEmptyTable(SchemaTableName tableName, HiveStorageFormat storageFormat, List<ColumnMetadata> createTableColumns)
        throws Exception
{
    List<String> partitionedBy = createTableColumns.stream()
            .filter(column -> column.getName().equals("ds"))
            .map(ColumnMetadata::getName)
            .collect(toList());

    String queryId;
    try (Transaction transaction = newTransaction()) {
        ConnectorSession session = newSession();
        ConnectorMetadata metadata = transaction.getMetadata();
        queryId = session.getQueryId();

        ConnectorTableMetadata tableMetadata = new ConnectorTableMetadata(tableName, createTableColumns, createTableProperties(storageFormat, partitionedBy));
        metadata.createTable(session, tableMetadata, false);
        transaction.commit();
    }

    try (Transaction transaction = newTransaction()) {
        ConnectorSession session = newSession();
        ConnectorMetadata metadata = transaction.getMetadata();
        MetastoreContext metastoreContext = new MetastoreContext(
                session.getIdentity(),
                session.getQueryId(),
                session.getClientInfo(),
                session.getSource(),
                getMetastoreHeaders(session),
                false,
                DEFAULT_COLUMN_CONVERTER_PROVIDER);

        // load the new table
        ConnectorTableHandle tableHandle = getTableHandle(metadata, tableName);

        // verify the metadata
        ConnectorTableMetadata tableMetadata = metadata.getTableMetadata(session, getTableHandle(metadata, tableName));
        List<ColumnMetadata> expectedColumns = createTableColumns.stream()
                .map(column -> new ColumnMetadata(
                        column.getName(),
                        column.getType(),
                        column.getComment(),
                        columnExtraInfo(partitionedBy.contains(column.getName())),
                        false))
                .collect(toList());
        assertEquals(filterNonHiddenColumnMetadata(tableMetadata.getColumns()), expectedColumns);

        // verify table format
        Table table = transaction.getMetastore().getTable(metastoreContext, tableName.getSchemaName(), tableName.getTableName()).get();
        assertEquals(table.getStorage().getStorageFormat().getInputFormat(), storageFormat.getInputFormat());

        // verify the node version and query ID
        assertEquals(table.getParameters().get(PRESTO_VERSION_NAME), TEST_SERVER_VERSION);
        assertEquals(table.getParameters().get(PRESTO_QUERY_ID_NAME), queryId);

        // verify the table is empty
        List<ColumnHandle> columnHandles = filterNonHiddenColumnHandles(metadata.getColumnHandles(session, tableHandle).values());
        MaterializedResult result = readTable(transaction, tableHandle, columnHandles, session, TupleDomain.all(), OptionalInt.empty(), Optional.of(storageFormat));
        assertEquals(result.getRowCount(), 0);

        // verify basic statistics
        if (partitionedBy.isEmpty()) {
            assertEmptyTableStatistics(metastoreContext, tableName, transaction);
        }
    }
}
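A hedged example of how a concrete test might drive this helper; the table name and column list below are illustrative placeholders, not fixtures from the real suite:

// Illustrative caller for doCreateEmptyTable; the columns are placeholders.
@Test
public void testCreateEmptyOrcTable()
        throws Exception
{
    SchemaTableName tableName = temporaryTable("empty_orc");
    List<ColumnMetadata> columns = ImmutableList.of(
            new ColumnMetadata("id", BIGINT),
            new ColumnMetadata("ds", VARCHAR)); // a "ds" column makes the table partitioned
    try {
        doCreateEmptyTable(tableName, ORC, columns);
    }
    finally {
        dropTable(tableName);
    }
}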
use of com.facebook.presto.spi.ConnectorSession in project presto by prestodb.
the class AbstractTestHiveClient method doTestBucketedTableEvolutionWithDifferentReadCount.
private void doTestBucketedTableEvolutionWithDifferentReadCount(HiveStorageFormat storageFormat, SchemaTableName tableName)
        throws Exception
{
    int rowCount = 100;
    int bucketCount = 16;

    // produce a table with a partition whose bucket count differs from, but is compatible with, the table bucket count
    createEmptyTable(
            tableName,
            storageFormat,
            ImmutableList.of(new Column("id", HIVE_LONG, Optional.empty(), Optional.empty()), new Column("name", HIVE_STRING, Optional.empty(), Optional.empty())),
            ImmutableList.of(new Column("pk", HIVE_STRING, Optional.empty(), Optional.empty())),
            Optional.of(new HiveBucketProperty(ImmutableList.of("id"), 4, ImmutableList.of(), HIVE_COMPATIBLE, Optional.empty())));

    // write a 4-bucket partition
    MaterializedResult.Builder bucket4Builder = MaterializedResult.resultBuilder(SESSION, BIGINT, VARCHAR, VARCHAR);
    IntStream.range(0, rowCount).forEach(i -> bucket4Builder.row((long) i, String.valueOf(i), "four"));
    insertData(tableName, bucket4Builder.build());

    // alter the bucket count to 16
    alterBucketProperty(tableName, Optional.of(new HiveBucketProperty(ImmutableList.of("id"), bucketCount, ImmutableList.of(), HIVE_COMPATIBLE, Optional.empty())));

    MaterializedResult result;
    try (Transaction transaction = newTransaction()) {
        ConnectorMetadata metadata = transaction.getMetadata();
        ConnectorSession session = newSession();
        ConnectorTableHandle hiveTableHandle = getTableHandle(metadata, tableName);

        // read the entire table
        List<ColumnHandle> columnHandles = ImmutableList.<ColumnHandle>builder()
                .addAll(metadata.getColumnHandles(session, hiveTableHandle).values())
                .build();

        HiveTableLayoutHandle layoutHandle = (HiveTableLayoutHandle) getTableLayout(session, transaction.getMetadata(), hiveTableHandle, Constraint.alwaysTrue(), transaction).getHandle();
        HiveBucketHandle bucketHandle = layoutHandle.getBucketHandle().get();
        HiveTableLayoutHandle modifiedReadBucketCountLayoutHandle = new HiveTableLayoutHandle(
                layoutHandle.getSchemaTableName(),
                layoutHandle.getTablePath(),
                layoutHandle.getPartitionColumns(),
                layoutHandle.getDataColumns(),
                layoutHandle.getTableParameters(),
                layoutHandle.getPartitions().get(),
                layoutHandle.getDomainPredicate(),
                layoutHandle.getRemainingPredicate(),
                layoutHandle.getPredicateColumns(),
                layoutHandle.getPartitionColumnPredicate(),
                Optional.of(new HiveBucketHandle(bucketHandle.getColumns(), bucketHandle.getTableBucketCount(), 2)),
                layoutHandle.getBucketFilter(),
                false,
                "layout",
                Optional.empty(),
                false);

        List<ConnectorSplit> splits = getAllSplits(session, transaction, modifiedReadBucketCountLayoutHandle);
        assertEquals(splits.size(), 16);

        TableHandle tableHandle = toTableHandle(transaction, hiveTableHandle, modifiedReadBucketCountLayoutHandle);
        ImmutableList.Builder<MaterializedRow> allRows = ImmutableList.builder();
        for (ConnectorSplit split : splits) {
            try (ConnectorPageSource pageSource = pageSourceProvider.createPageSource(transaction.getTransactionHandle(), session, split, tableHandle.getLayout().get(), columnHandles, NON_CACHEABLE)) {
                MaterializedResult intermediateResult = materializeSourceDataStream(session, pageSource, getTypes(columnHandles));
                allRows.addAll(intermediateResult.getMaterializedRows());
            }
        }
        result = new MaterializedResult(allRows.build(), getTypes(columnHandles));
        assertEquals(result.getRowCount(), rowCount);

        // every row must land in the bucket implied by its "id" value
        Map<String, Integer> columnIndex = indexColumns(columnHandles);
        int nameColumnIndex = columnIndex.get("name");
        int bucketColumnIndex = columnIndex.get(BUCKET_COLUMN_NAME);
        for (MaterializedRow row : result.getMaterializedRows()) {
            String name = (String) row.getField(nameColumnIndex);
            int bucket = (int) row.getField(bucketColumnIndex);
            assertEquals(bucket, Integer.parseInt(name) % bucketCount);
        }
    }
}
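indexColumns maps each handle's column name to its position in the handle list, which is how the name and bucket columns are located above. A minimal sketch, assuming every handle is a HiveColumnHandle:

// Hedged sketch of indexColumns; assumes all handles are HiveColumnHandle instances.
protected static Map<String, Integer> indexColumns(List<ColumnHandle> columnHandles)
{
    ImmutableMap.Builder<String, Integer> index = ImmutableMap.builder();
    int position = 0;
    for (ColumnHandle columnHandle : columnHandles) {
        HiveColumnHandle hiveColumnHandle = (HiveColumnHandle) columnHandle;
        index.put(hiveColumnHandle.getName(), position);
        position++;
    }
    return index.build();
}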