Use of com.facebook.presto.spi.ColumnHandle in project presto by prestodb.
From the class TestCassandraClusteringPredicatesExtractor, method testBuildClusteringPredicate:
@Test
public void testBuildClusteringPredicate()
{
    TupleDomain<ColumnHandle> tupleDomain = TupleDomain.withColumnDomains(ImmutableMap.of(
            col1, Domain.singleValue(BIGINT, 23L),
            col2, Domain.singleValue(BIGINT, 34L),
            col4, Domain.singleValue(BIGINT, 26L)));
    CassandraClusteringPredicatesExtractor predicatesExtractor = new CassandraClusteringPredicatesExtractor(cassandraTable.getClusteringKeyColumns(), tupleDomain, cassandraVersion);
    String predicate = predicatesExtractor.getClusteringKeyPredicates();
    assertEquals(predicate, "\"clusteringKey1\" = 34");
}
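The test's input is a TupleDomain, which maps each ColumnHandle to the set of values that column may take; the extractor keeps only constraints that form a usable clustering-key prefix, which is presumably why only "clusteringKey1" = 34 survives above. The sketch below is illustrative and not taken from the Presto sources: it rebuilds the same kind of constraint with plain String keys so it runs standalone, and it assumes the com.facebook.presto.common package locations used by recent prestodb releases (older releases kept these classes under com.facebook.presto.spi.predicate and com.facebook.presto.spi.type).

import com.facebook.presto.common.predicate.Domain;
import com.facebook.presto.common.predicate.TupleDomain;
import com.google.common.collect.ImmutableMap;

import static com.facebook.presto.common.type.BigintType.BIGINT;

public class TupleDomainSketch
{
    public static void main(String[] args)
    {
        // String keys stand in for the ColumnHandle fixtures used by the test
        TupleDomain<String> constraint = TupleDomain.withColumnDomains(ImmutableMap.of(
                "clusteringKey1", Domain.singleValue(BIGINT, 34L),
                "clusteringKey3", Domain.singleValue(BIGINT, 26L)));

        // getDomains() is present for every tuple domain except "none"
        constraint.getDomains().ifPresent(domains ->
                domains.forEach((column, domain) ->
                        System.out.println(column + " -> " + domain)));
    }
}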
Use of com.facebook.presto.spi.ColumnHandle in project presto by prestodb.
From the class AbstractTestHiveClient, method doTestTransactionDeleteInsert:
private void doTestTransactionDeleteInsert(
        HiveStorageFormat storageFormat,
        SchemaTableName tableName,
        Domain domainToDrop,
        MaterializedResult insertData,
        MaterializedResult expectedData,
        TransactionDeleteInsertTestTag tag,
        boolean expectQuerySucceed,
        Optional<ConflictTrigger> conflictTrigger)
        throws Exception
{
    Path writePath = null;
    Path targetPath = null;
    try (Transaction transaction = newTransaction()) {
        try {
            ConnectorMetadata metadata = transaction.getMetadata();
            ConnectorTableHandle tableHandle = getTableHandle(metadata, tableName);
            ConnectorSession session;
            rollbackIfEquals(tag, ROLLBACK_RIGHT_AWAY);

            // Query 1: delete
            session = newSession();
            HiveColumnHandle dsColumnHandle = (HiveColumnHandle) metadata.getColumnHandles(session, tableHandle).get("pk2");
            TupleDomain<ColumnHandle> tupleDomain = withColumnDomains(ImmutableMap.of(dsColumnHandle, domainToDrop));
            Constraint<ColumnHandle> constraint = new Constraint<>(tupleDomain, convertToPredicate(tupleDomain));
            ConnectorTableLayoutHandle tableLayoutHandle = getTableLayout(session, metadata, tableHandle, constraint, transaction).getHandle();
            metadata.metadataDelete(session, tableHandle, tableLayoutHandle);
            rollbackIfEquals(tag, ROLLBACK_AFTER_DELETE);

            // Query 2: insert
            session = newSession();
            ConnectorInsertTableHandle insertTableHandle = metadata.beginInsert(session, tableHandle);
            rollbackIfEquals(tag, ROLLBACK_AFTER_BEGIN_INSERT);
            writePath = getStagingPathRoot(insertTableHandle);
            targetPath = getTargetPathRoot(insertTableHandle);
            ConnectorPageSink sink = pageSinkProvider.createPageSink(transaction.getTransactionHandle(), session, insertTableHandle, TEST_HIVE_PAGE_SINK_CONTEXT);
            sink.appendPage(insertData.toPage());
            rollbackIfEquals(tag, ROLLBACK_AFTER_APPEND_PAGE);
            Collection<Slice> fragments = getFutureValue(sink.finish());
            rollbackIfEquals(tag, ROLLBACK_AFTER_SINK_FINISH);
            metadata.finishInsert(session, insertTableHandle, fragments, ImmutableList.of());
            rollbackIfEquals(tag, ROLLBACK_AFTER_FINISH_INSERT);
            assertEquals(tag, COMMIT);

            if (conflictTrigger.isPresent()) {
                List<PartitionUpdate> partitionUpdates = fragments.stream()
                        .map(Slice::getBytes)
                        .map(HiveTestUtils.PARTITION_UPDATE_CODEC::fromJson)
                        .collect(toList());
                conflictTrigger.get().triggerConflict(session, tableName, insertTableHandle, partitionUpdates);
            }
            transaction.commit();
            if (conflictTrigger.isPresent()) {
                assertTrue(expectQuerySucceed);
                conflictTrigger.get().verifyAndCleanup(tableName);
            }
        }
        catch (TestingRollbackException e) {
            transaction.rollback();
        }
        catch (PrestoException e) {
            assertFalse(expectQuerySucceed);
            if (conflictTrigger.isPresent()) {
                conflictTrigger.get().verifyAndCleanup(tableName);
            }
        }
    }

    // check that temporary files are removed
    if (writePath != null && !writePath.equals(targetPath)) {
        HdfsContext context = new HdfsContext(newSession(), tableName.getSchemaName(), tableName.getTableName(), writePath.toString(), false);
        FileSystem fileSystem = hdfsEnvironment.getFileSystem(context, writePath);
        assertFalse(fileSystem.exists(writePath));
    }

    try (Transaction transaction = newTransaction()) {
        ConnectorSession session = newSession();
        MetastoreContext metastoreContext = new MetastoreContext(session.getIdentity(), session.getQueryId(), session.getClientInfo(), session.getSource(), getMetastoreHeaders(session), false, DEFAULT_COLUMN_CONVERTER_PROVIDER);

        // verify partitions
        List<String> partitionNames = transaction.getMetastore()
                .getPartitionNames(metastoreContext, tableName.getSchemaName(), tableName.getTableName())
                .orElseThrow(() -> new AssertionError("Table does not exist: " + tableName));
        assertEqualsIgnoreOrder(
                partitionNames,
                expectedData.getMaterializedRows().stream()
                        .map(row -> format("pk1=%s/pk2=%s", row.getField(1), row.getField(2)))
                        .distinct()
                        .collect(toList()));

        // load the new table
        ConnectorMetadata metadata = transaction.getMetadata();
        ConnectorTableHandle tableHandle = getTableHandle(metadata, tableName);
        List<ColumnHandle> columnHandles = filterNonHiddenColumnHandles(metadata.getColumnHandles(session, tableHandle).values());

        // verify the data
        MaterializedResult result = readTable(transaction, tableHandle, columnHandles, session, TupleDomain.all(), OptionalInt.empty(), Optional.of(storageFormat));
        assertEqualsIgnoreOrder(result.getMaterializedRows(), expectedData.getMaterializedRows());
    }
}
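Each rollbackIfEquals call above is an abort point: when the requested tag matches, the helper throws, and the catch block rolls the transaction back, letting one test body exercise every stage of the delete-then-insert sequence. The helper itself is not shown in the excerpt; a minimal sketch of its likely shape, assuming TestingRollbackException is a private RuntimeException declared in the same test class:

private static class TestingRollbackException
        extends RuntimeException
{
}

private void rollbackIfEquals(TransactionDeleteInsertTestTag tag, TransactionDeleteInsertTestTag expectedTag)
{
    if (tag == expectedTag) {
        // caught above, where the transaction is rolled back instead of committed
        throw new TestingRollbackException();
    }
}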
Use of com.facebook.presto.spi.ColumnHandle in project presto by prestodb.
From the class AbstractTestHiveClient, method assertGetRecords:
protected void assertGetRecords(String tableName, HiveStorageFormat hiveStorageFormat)
        throws Exception
{
    try (Transaction transaction = newTransaction()) {
        ConnectorSession session = newSession();
        ConnectorMetadata metadata = transaction.getMetadata();
        ConnectorTableHandle tableHandle = getTableHandle(metadata, new SchemaTableName(database, tableName));
        ConnectorTableMetadata tableMetadata = metadata.getTableMetadata(session, tableHandle);
        ConnectorTableLayoutHandle layoutHandle = getLayout(session, transaction, tableHandle, TupleDomain.all());
        List<ConnectorSplit> splits = getAllSplits(session, transaction, layoutHandle);
        assertEquals(splits.size(), 1);
        HiveSplit hiveSplit = (HiveSplit) getOnlyElement(splits);
        List<ColumnHandle> columnHandles = ImmutableList.copyOf(metadata.getColumnHandles(session, tableHandle).values());
        ConnectorPageSource pageSource = pageSourceProvider.createPageSource(transaction.getTransactionHandle(), session, hiveSplit, layoutHandle, columnHandles, NON_CACHEABLE);
        assertGetRecords(hiveStorageFormat, tableMetadata, hiveSplit, pageSource, columnHandles);
    }
}
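The overloaded assertGetRecords called on the last line consumes the page source. Draining a ConnectorPageSource by hand follows the standard poll-until-finished pattern; the sketch below is illustrative rather than part of the test, drainPages is a hypothetical helper name, and the Page location assumes recent prestodb releases (older ones kept it under com.facebook.presto.spi).

import com.facebook.presto.common.Page;
import com.facebook.presto.spi.ConnectorPageSource;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

static List<Page> drainPages(ConnectorPageSource pageSource)
        throws IOException
{
    List<Page> pages = new ArrayList<>();
    while (!pageSource.isFinished()) {
        // getNextPage() may return null while the source is still working
        Page page = pageSource.getNextPage();
        if (page != null) {
            // force lazy blocks to load before the source is closed
            pages.add(page.getLoadedPage());
        }
    }
    pageSource.close();
    return pages;
}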
Use of com.facebook.presto.spi.ColumnHandle in project presto by prestodb.
From the class AbstractTestHiveClient, method assertEmptyFile:
private void assertEmptyFile(HiveStorageFormat format)
        throws Exception
{
    SchemaTableName tableName = temporaryTable("empty_file");
    try {
        List<Column> columns = ImmutableList.of(new Column("test", HIVE_STRING, Optional.empty(), Optional.empty()));
        createEmptyTable(tableName, format, columns, ImmutableList.of());
        try (Transaction transaction = newTransaction()) {
            ConnectorSession session = newSession();
            ConnectorMetadata metadata = transaction.getMetadata();
            ConnectorTableHandle tableHandle = getTableHandle(metadata, tableName);
            List<ColumnHandle> columnHandles = filterNonHiddenColumnHandles(metadata.getColumnHandles(session, tableHandle).values());
            MetastoreContext metastoreContext = new MetastoreContext(session.getIdentity(), session.getQueryId(), session.getClientInfo(), session.getSource(), getMetastoreHeaders(session), false, DEFAULT_COLUMN_CONVERTER_PROVIDER);
            Table table = transaction.getMetastore()
                    .getTable(metastoreContext, tableName.getSchemaName(), tableName.getTableName())
                    .orElseThrow(AssertionError::new);

            // verify directory is empty
            HdfsContext context = new HdfsContext(session, tableName.getSchemaName(), tableName.getTableName(), table.getStorage().getLocation(), false);
            Path location = new Path(table.getStorage().getLocation());
            assertTrue(listDirectory(context, location).isEmpty());

            // read table with empty directory
            readTable(transaction, tableHandle, columnHandles, session, TupleDomain.all(), OptionalInt.of(0), Optional.of(ORC));

            // create empty file
            FileSystem fileSystem = hdfsEnvironment.getFileSystem(context, location);
            assertTrue(fileSystem.createNewFile(new Path(location, "empty-file")));
            assertEquals(listDirectory(context, location), ImmutableList.of("empty-file"));

            // read table with empty file
            MaterializedResult result = readTable(transaction, tableHandle, columnHandles, session, TupleDomain.all(), OptionalInt.of(1), Optional.empty());
            assertEquals(result.getRowCount(), 0);
        }
    }
    finally {
        dropTable(tableName);
    }
}
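listDirectory is a test helper the excerpt does not define. A plausible sketch over the Hadoop FileSystem API, returning the names of a path's direct children (the real helper may additionally filter hidden files; FileStatus and FileSystem come from org.apache.hadoop.fs):

private List<String> listDirectory(HdfsContext context, Path path)
        throws IOException
{
    FileSystem fileSystem = hdfsEnvironment.getFileSystem(context, path);
    ImmutableList.Builder<String> names = ImmutableList.builder();
    for (FileStatus status : fileSystem.listStatus(path)) {
        names.add(status.getPath().getName());
    }
    return names.build();
}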
Use of com.facebook.presto.spi.ColumnHandle in project presto by prestodb.
From the class AbstractTestHiveClient, method testCreateTemporaryTable:
private void testCreateTemporaryTable(List<ColumnMetadata> columns, int bucketCount, List<String> bucketingColumns, MaterializedResult inputRows, ConnectorSession session, boolean commit)
        throws Exception
{
    List<Path> insertLocations = new ArrayList<>();
    HiveTableHandle tableHandle;
    try (Transaction transaction = newTransaction()) {
        ConnectorMetadata metadata = transaction.getMetadata();

        // prepare temporary table schema
        List<Type> types = columns.stream().map(ColumnMetadata::getType).collect(toImmutableList());
        ConnectorPartitioningMetadata partitioning = new ConnectorPartitioningMetadata(metadata.getPartitioningHandleForExchange(session, bucketCount, types), bucketingColumns);

        // create temporary table
        tableHandle = (HiveTableHandle) metadata.createTemporaryTable(session, columns, Optional.of(partitioning));

        // begin insert into temporary table
        HiveInsertTableHandle firstInsert = (HiveInsertTableHandle) metadata.beginInsert(session, tableHandle);
        insertLocations.add(firstInsert.getLocationHandle().getTargetPath());
        insertLocations.add(firstInsert.getLocationHandle().getWritePath());

        // insert into temporary table
        ConnectorPageSink firstSink = pageSinkProvider.createPageSink(transaction.getTransactionHandle(), session, firstInsert, TEST_HIVE_PAGE_SINK_CONTEXT);
        firstSink.appendPage(inputRows.toPage());
        Collection<Slice> firstFragments = getFutureValue(firstSink.finish());
        if (inputRows.getRowCount() == 0) {
            assertThat(firstFragments).isEmpty();
        }

        // begin second insert into temporary table
        HiveInsertTableHandle secondInsert = (HiveInsertTableHandle) metadata.beginInsert(session, tableHandle);
        insertLocations.add(secondInsert.getLocationHandle().getTargetPath());
        insertLocations.add(secondInsert.getLocationHandle().getWritePath());

        // insert into temporary table
        ConnectorPageSink secondSink = pageSinkProvider.createPageSink(transaction.getTransactionHandle(), session, secondInsert, TEST_HIVE_PAGE_SINK_CONTEXT);
        secondSink.appendPage(inputRows.toPage());
        Collection<Slice> secondFragments = getFutureValue(secondSink.finish());
        if (inputRows.getRowCount() == 0) {
            assertThat(secondFragments).isEmpty();
        }

        // finish only second insert
        metadata.finishInsert(session, secondInsert, secondFragments, ImmutableList.of());

        // no splits for empty buckets if zero row file is not created
        assertLessThanOrEqual(getAllSplits(transaction, tableHandle, TupleDomain.all()).size(), bucketCount);

        // verify written data
        Map<String, ColumnHandle> allColumnHandles = metadata.getColumnHandles(session, tableHandle);
        List<ColumnHandle> dataColumnHandles = columns.stream()
                .map(ColumnMetadata::getName)
                .map(allColumnHandles::get)
                .collect(toImmutableList());

        // check that all columns are regular columns (not partition columns)
        dataColumnHandles.stream()
                .map(HiveColumnHandle.class::cast)
                .forEach(handle -> {
                    if (handle.isPartitionKey()) {
                        fail("partitioning column found: " + handle.getName());
                    }
                });

        MaterializedResult outputRows = readTable(transaction, tableHandle, dataColumnHandles, session, TupleDomain.all(), OptionalInt.empty(), Optional.empty());
        assertEqualsIgnoreOrder(inputRows.getMaterializedRows(), outputRows.getMaterializedRows());

        if (commit) {
            transaction.commit();
        }
        else {
            transaction.rollback();
        }
    }

    try (Transaction transaction = newTransaction()) {
        ConnectorMetadata metadata = transaction.getMetadata();
        assertThatThrownBy(() -> metadata.getColumnHandles(session, tableHandle)).isInstanceOf(TableNotFoundException.class);
    }

    HdfsContext context = new HdfsContext(session, tableHandle.getSchemaName(), tableHandle.getTableName(), "test_path", false);
    for (Path location : insertLocations) {
        FileSystem fileSystem = hdfsEnvironment.getFileSystem(context, location);
        assertFalse(fileSystem.exists(location));
    }
}
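A hedged example of invoking this helper; the column names, types, and rows below are illustrative, not taken from the Presto test suite:

// round-trip two rows through a temporary table bucketed four ways on "id",
// committing the transaction at the end
List<ColumnMetadata> columns = ImmutableList.of(
        new ColumnMetadata("id", BIGINT),
        new ColumnMetadata("value", VARCHAR));
MaterializedResult inputRows = MaterializedResult.resultBuilder(session, BIGINT, VARCHAR)
        .row(1L, "a")
        .row(2L, "b")
        .build();
testCreateTemporaryTable(columns, 4, ImmutableList.of("id"), inputRows, session, true);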