Use of com.facebook.presto.spi.ConnectorTableHandle in project presto by prestodb.
The class AbstractTestHiveClient, method insertData:
/**
 * @return query id
 */
private String insertData(SchemaTableName tableName, MaterializedResult data)
        throws Exception
{
    Path writePath;
    Path targetPath;
    String queryId;
    try (Transaction transaction = newTransaction()) {
        ConnectorMetadata metadata = transaction.getMetadata();
        ConnectorSession session = newSession();
        ConnectorTableHandle tableHandle = getTableHandle(metadata, tableName);
        HiveInsertTableHandle insertTableHandle = (HiveInsertTableHandle) metadata.beginInsert(session, tableHandle);
        queryId = session.getQueryId();
        writePath = getStagingPathRoot(insertTableHandle);
        targetPath = getTargetPathRoot(insertTableHandle);
        ConnectorPageSink sink = pageSinkProvider.createPageSink(transaction.getTransactionHandle(), session, insertTableHandle, TEST_HIVE_PAGE_SINK_CONTEXT);
        // write data
        sink.appendPage(data.toPage());
        Collection<Slice> fragments = getFutureValue(sink.finish());
        // commit the insert
        metadata.finishInsert(session, insertTableHandle, fragments, ImmutableList.of());
        transaction.commit();
    }
    // check that temporary files are removed
    if (!writePath.equals(targetPath)) {
        HdfsContext context = new HdfsContext(newSession(), tableName.getSchemaName(), tableName.getTableName(), targetPath.toString(), false);
        FileSystem fileSystem = hdfsEnvironment.getFileSystem(context, writePath);
        assertFalse(fileSystem.exists(writePath));
    }
    return queryId;
}
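For context, a minimal caller sketch; the schema/table name, column types, and row values are assumptions for illustration, while the resultBuilder pattern mirrors doTestBucketedTableEvolution below:

// Hypothetical usage: build a two-row result set, insert it, and capture the query id.
// The schema/table name and column types are illustrative assumptions.
MaterializedResult.Builder builder = MaterializedResult.resultBuilder(SESSION, BIGINT, VARCHAR);
builder.row(1L, "alpha");
builder.row(2L, "beta");
String queryId = insertData(new SchemaTableName("test_schema", "test_table"), builder.build());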
Use of com.facebook.presto.spi.ConnectorTableHandle in project presto by prestodb.
The class AbstractTestHiveClient, method doTestBucketedTableEvolution:
private void doTestBucketedTableEvolution(HiveStorageFormat storageFormat, SchemaTableName tableName)
        throws Exception
{
    int rowCount = 100;
    //
    // Produce a table that ends up declared with 8 buckets.
    // The table has 3 partitions with 3 different bucket counts (4, 8, 16).
    createEmptyTable(
            tableName,
            storageFormat,
            ImmutableList.of(new Column("id", HIVE_LONG, Optional.empty(), Optional.empty()), new Column("name", HIVE_STRING, Optional.empty(), Optional.empty())),
            ImmutableList.of(new Column("pk", HIVE_STRING, Optional.empty(), Optional.empty())),
            Optional.of(new HiveBucketProperty(ImmutableList.of("id"), 4, ImmutableList.of(), HIVE_COMPATIBLE, Optional.empty())));
    // write a 4-bucket partition
    MaterializedResult.Builder bucket4Builder = MaterializedResult.resultBuilder(SESSION, BIGINT, VARCHAR, VARCHAR);
    IntStream.range(0, rowCount).forEach(i -> bucket4Builder.row((long) i, String.valueOf(i), "four"));
    insertData(tableName, bucket4Builder.build());
    // write a 16-bucket partition
    alterBucketProperty(tableName, Optional.of(new HiveBucketProperty(ImmutableList.of("id"), 16, ImmutableList.of(), HIVE_COMPATIBLE, Optional.empty())));
    MaterializedResult.Builder bucket16Builder = MaterializedResult.resultBuilder(SESSION, BIGINT, VARCHAR, VARCHAR);
    IntStream.range(0, rowCount).forEach(i -> bucket16Builder.row((long) i, String.valueOf(i), "sixteen"));
    insertData(tableName, bucket16Builder.build());
    // write an 8-bucket partition
    alterBucketProperty(tableName, Optional.of(new HiveBucketProperty(ImmutableList.of("id"), 8, ImmutableList.of(), HIVE_COMPATIBLE, Optional.empty())));
    MaterializedResult.Builder bucket8Builder = MaterializedResult.resultBuilder(SESSION, BIGINT, VARCHAR, VARCHAR);
    IntStream.range(0, rowCount).forEach(i -> bucket8Builder.row((long) i, String.valueOf(i), "eight"));
    insertData(tableName, bucket8Builder.build());
    try (Transaction transaction = newTransaction()) {
        ConnectorMetadata metadata = transaction.getMetadata();
        ConnectorSession session = newSession();
        ConnectorTableHandle tableHandle = getTableHandle(metadata, tableName);
        // read entire table
        List<ColumnHandle> columnHandles = ImmutableList.copyOf(metadata.getColumnHandles(session, tableHandle).values());
        MaterializedResult result = readTable(transaction, tableHandle, columnHandles, session, TupleDomain.all(), OptionalInt.empty(), Optional.empty());
        assertBucketTableEvolutionResult(result, columnHandles, ImmutableSet.of(0, 1, 2, 3, 4, 5, 6, 7), rowCount);
        // read single bucket (table/logical bucket)
        NullableValue singleBucket = NullableValue.of(INTEGER, 6L);
        ConnectorTableLayoutHandle layoutHandle;
        if (HiveSessionProperties.isPushdownFilterEnabled(session)) {
            TupleDomain<VariableReferenceExpression> bucketDomain = TupleDomain.fromFixedValues(ImmutableMap.of(new VariableReferenceExpression(Optional.empty(), BUCKET_COLUMN_NAME, BIGINT), singleBucket));
            RowExpression predicate = ROW_EXPRESSION_SERVICE.getDomainTranslator().toPredicate(bucketDomain);
            layoutHandle = pushdownFilter(session, metadata, transaction.getMetastore(), ROW_EXPRESSION_SERVICE, FUNCTION_RESOLUTION, hivePartitionManager, METADATA.getFunctionAndTypeManager(), tableHandle, predicate, Optional.empty())
                    .getLayout()
                    .getHandle();
        }
        else {
            layoutHandle = getOnlyElement(metadata.getTableLayouts(session, tableHandle, new Constraint<>(TupleDomain.fromFixedValues(ImmutableMap.of(bucketColumnHandle(), singleBucket))), Optional.empty()))
                    .getTableLayout()
                    .getHandle();
        }
        result = readTable(transaction, tableHandle, layoutHandle, columnHandles, session, OptionalInt.empty(), Optional.empty());
        assertBucketTableEvolutionResult(result, columnHandles, ImmutableSet.of(6), rowCount);
        // read single bucket, without selecting the bucketing column (i.e. the id column)
        columnHandles = metadata.getColumnHandles(session, tableHandle).values().stream()
                .filter(columnHandle -> !"id".equals(((HiveColumnHandle) columnHandle).getName()))
                .collect(toImmutableList());
        result = readTable(transaction, tableHandle, layoutHandle, columnHandles, session, OptionalInt.empty(), Optional.empty());
        assertBucketTableEvolutionResult(result, columnHandles, ImmutableSet.of(6), rowCount);
    }
}
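The single-bucket reads above rely on Hive-compatible bucketing keeping power-of-two bucket counts mutually compatible: a row in logical bucket b of a table currently declared with B buckets sits in physical bucket b % n of a partition written with n < B buckets, and in one of the buckets b, b + B, b + 2B, ... when n > B. A minimal sketch of that mapping, assuming power-of-two counts as in this test; the helper name is hypothetical:

// Hypothetical helper: which physical buckets of an n-bucket partition can hold
// rows from logical bucket b of a table currently declared with B buckets?
// Assumes Hive-compatible hashing and power-of-two bucket counts (4, 8, 16), as in this test.
static List<Integer> physicalBucketsFor(int logicalBucket, int tableBucketCount, int partitionBucketCount)
{
    ImmutableList.Builder<Integer> buckets = ImmutableList.builder();
    if (partitionBucketCount <= tableBucketCount) {
        // fewer physical buckets: several logical buckets collapse into one file
        buckets.add(logicalBucket % partitionBucketCount);
    }
    else {
        // more physical buckets: one logical bucket fans out across several files
        for (int bucket = logicalBucket; bucket < partitionBucketCount; bucket += tableBucketCount) {
            buckets.add(bucket);
        }
    }
    return buckets.build();
}

For logical bucket 6 and a table bucket count of 8, this yields physical bucket 2 in the 4-bucket partition, bucket 6 in the 8-bucket partition, and buckets 6 and 14 in the 16-bucket partition.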
Use of com.facebook.presto.spi.ConnectorTableHandle in project presto by prestodb.
The class AbstractTestHiveClient, method readTable:
private MaterializedResult readTable(
        Transaction transaction,
        ConnectorTableHandle hiveTableHandle,
        ConnectorTableLayoutHandle hiveTableLayoutHandle,
        List<ColumnHandle> columnHandles,
        ConnectorSession session,
        OptionalInt expectedSplitCount,
        Optional<HiveStorageFormat> expectedStorageFormat)
        throws Exception
{
    List<ConnectorSplit> splits = getAllSplits(session, transaction, hiveTableLayoutHandle);
    if (expectedSplitCount.isPresent()) {
        assertEquals(splits.size(), expectedSplitCount.getAsInt());
    }
    TableHandle tableHandle = toTableHandle(transaction, hiveTableHandle, hiveTableLayoutHandle);
    ImmutableList.Builder<MaterializedRow> allRows = ImmutableList.builder();
    for (ConnectorSplit split : splits) {
        try (ConnectorPageSource pageSource = pageSourceProvider.createPageSource(transaction.getTransactionHandle(), session, split, tableHandle.getLayout().get(), columnHandles, NON_CACHEABLE)) {
            expectedStorageFormat.ifPresent(format -> assertPageSourceType(pageSource, format));
            MaterializedResult result = materializeSourceDataStream(session, pageSource, getTypes(columnHandles));
            allRows.addAll(result.getMaterializedRows());
        }
    }
    return new MaterializedResult(allRows.build(), getTypes(columnHandles));
}
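A hedged usage sketch; the split count, storage format, and row-count expectation are assumptions for illustration, mirroring the calls in doTestBucketedTableEvolution above:

// Hypothetical usage: read every split of a layout, require exactly 4 splits,
// and assert that each page source is ORC-backed.
MaterializedResult rows = readTable(transaction, tableHandle, layoutHandle, columnHandles, session,
        OptionalInt.of(4), Optional.of(HiveStorageFormat.ORC));
assertEquals(rows.getRowCount(), 100);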
Use of com.facebook.presto.spi.ConnectorTableHandle in project presto by prestodb.
The class AbstractTestHiveClient, method dropTable:
protected void dropTable(SchemaTableName table)
{
    try (Transaction transaction = newTransaction()) {
        ConnectorMetadata metadata = transaction.getMetadata();
        ConnectorSession session = newSession();
        ConnectorTableHandle handle = metadata.getTableHandle(session, table);
        if (handle == null) {
            return;
        }
        metadata.dropTable(session, handle);
        try {
            // todo I have no idea why this is needed... maybe there is a propagation delay in the metastore?
            metadata.dropTable(session, handle);
            fail("expected NotFoundException");
        }
        catch (TableNotFoundException expected) {
        }
        transaction.commit();
    }
    catch (Exception e) {
        Logger.get(getClass()).warn(e, "failed to drop table");
    }
}
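The inner try/catch asserts that a second dropTable on the same handle fails once the table is gone. With TestNG's expectThrows (whether the project's TestNG version provides it is an assumption), the same check could be written more compactly:

// Sketch of the same check using expectThrows instead of fail()/catch.
metadata.dropTable(session, handle);
expectThrows(TableNotFoundException.class, () -> metadata.dropTable(session, handle));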
Use of com.facebook.presto.spi.ConnectorTableHandle in project presto by prestodb.
The class AbstractTestHiveFileSystem, method getTableHandle:
private ConnectorTableHandle getTableHandle(ConnectorMetadata metadata, SchemaTableName tableName)
{
    ConnectorTableHandle handle = metadata.getTableHandle(newSession(), tableName);
    checkArgument(handle != null, "table not found: %s", tableName);
    return handle;
}
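A brief usage sketch; the table name is an assumption for illustration:

// Hypothetical usage: resolve a handle inside a transaction, failing fast if the table is missing.
try (Transaction transaction = newTransaction()) {
    ConnectorTableHandle tableHandle = getTableHandle(transaction.getMetadata(), new SchemaTableName("tpch", "lineitem"));
    // ... read via tableHandle ...
}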