Use of io.trino.spi.connector.ConnectorTableHandle in project trino by trinodb.
From the class AbstractTestHive, the dropTable method:
protected void dropTable(SchemaTableName table)
{
    try (Transaction transaction = newTransaction()) {
        ConnectorMetadata metadata = transaction.getMetadata();
        ConnectorSession session = newSession();

        ConnectorTableHandle handle = metadata.getTableHandle(session, table);
        if (handle == null) {
            return;
        }

        metadata.dropTable(session, handle);
        try {
            // todo I have no idea why this is needed... maybe there is a propagation delay in the metastore?
            metadata.dropTable(session, handle);
            fail("expected NotFoundException");
        }
        catch (TableNotFoundException expected) {
        }

        transaction.commit();
    }
}
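Callers typically pair dropTable with temporaryTable in a try/finally block so the table is cleaned up even when the test body throws. A minimal sketch of that pattern (the test name and table suffix are illustrative; the same structure appears in assertEmptyFile below):

@Test
public void testWithTemporaryTable() throws Exception
{
    SchemaTableName tableName = temporaryTable("drop_table_example"); // illustrative suffix
    try {
        createEmptyTable(tableName, ORC, ImmutableList.of(new Column("test", HIVE_STRING, Optional.empty())), ImmutableList.of());
        // ... exercise the table ...
    }
    finally {
        // safe even if creation failed: dropTable returns early when the handle is null
        dropTable(tableName);
    }
}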
Use of io.trino.spi.connector.ConnectorTableHandle in project trino by trinodb.
From the class AbstractTestHive, the assertEmptyFile method:
private void assertEmptyFile(HiveStorageFormat format) throws Exception
{
    SchemaTableName tableName = temporaryTable("empty_file");
    try {
        List<Column> columns = ImmutableList.of(new Column("test", HIVE_STRING, Optional.empty()));
        createEmptyTable(tableName, format, columns, ImmutableList.of());

        try (Transaction transaction = newTransaction()) {
            ConnectorSession session = newSession();
            ConnectorMetadata metadata = transaction.getMetadata();
            metadata.beginQuery(session);

            ConnectorTableHandle tableHandle = getTableHandle(metadata, tableName);
            List<ColumnHandle> columnHandles = filterNonHiddenColumnHandles(metadata.getColumnHandles(session, tableHandle).values());

            Table table = transaction.getMetastore()
                    .getTable(tableName.getSchemaName(), tableName.getTableName())
                    .orElseThrow(AssertionError::new);

            // verify directory is empty
            HdfsContext context = new HdfsContext(session);
            Path location = new Path(table.getStorage().getLocation());
            assertTrue(listDirectory(context, location).isEmpty());

            // read table with empty directory
            readTable(transaction, tableHandle, columnHandles, session, TupleDomain.all(), OptionalInt.of(0), Optional.of(ORC));

            // create empty file
            FileSystem fileSystem = hdfsEnvironment.getFileSystem(context, location);
            assertTrue(fileSystem.createNewFile(new Path(location, "empty-file")));
            assertEquals(listDirectory(context, location), ImmutableList.of("empty-file"));

            // read table with empty file
            MaterializedResult result = readTable(transaction, tableHandle, columnHandles, session, TupleDomain.all(), OptionalInt.of(0), Optional.empty());
            assertEquals(result.getRowCount(), 0);
        }
    }
    finally {
        dropTable(tableName);
    }
}
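The listDirectory helper is not shown in this excerpt. A plausible sketch against the Hadoop FileSystem API, assuming dot-prefixed hidden files (such as staging artifacts) are filtered out; the filter predicate is an assumption:

private List<String> listDirectory(HdfsContext context, Path path) throws IOException
{
    FileSystem fileSystem = hdfsEnvironment.getFileSystem(context, path);
    // listStatus returns the directory's direct children as FileStatus entries
    return Arrays.stream(fileSystem.listStatus(path))
            .map(FileStatus::getPath)
            .map(Path::getName)
            .filter(name -> !name.startsWith(".")) // assumption: ignore hidden/staging files
            .collect(toImmutableList());
}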
Use of io.trino.spi.connector.ConnectorTableHandle in project trino by trinodb.
From the class AbstractTestHive, the testGetRecords method:
@Test
public void testGetRecords() throws Exception
{
    try (Transaction transaction = newTransaction()) {
        ConnectorMetadata metadata = transaction.getMetadata();
        ConnectorSession session = newSession();
        metadata.beginQuery(session);

        ConnectorTableHandle tableHandle = getTableHandle(metadata, tablePartitionFormat);
        ConnectorTableMetadata tableMetadata = metadata.getTableMetadata(session, tableHandle);
        List<ColumnHandle> columnHandles = ImmutableList.copyOf(metadata.getColumnHandles(session, tableHandle).values());
        Map<String, Integer> columnIndex = indexColumns(columnHandles);

        List<ConnectorSplit> splits = getAllSplits(tableHandle, transaction, session);
        assertEquals(splits.size(), tablePartitionFormatPartitions.size());

        for (ConnectorSplit split : splits) {
            HiveSplit hiveSplit = (HiveSplit) split;

            List<HivePartitionKey> partitionKeys = hiveSplit.getPartitionKeys();
            String ds = partitionKeys.get(0).getValue();
            String fileFormat = partitionKeys.get(1).getValue();
            HiveStorageFormat fileType = HiveStorageFormat.valueOf(fileFormat.toUpperCase(ENGLISH));
            int dummyPartition = Integer.parseInt(partitionKeys.get(2).getValue());

            long rowNumber = 0;
            long completedBytes = 0;
            try (ConnectorPageSource pageSource = pageSourceProvider.createPageSource(transaction.getTransactionHandle(), session, hiveSplit, tableHandle, columnHandles, DynamicFilter.EMPTY)) {
                MaterializedResult result = materializeSourceDataStream(session, pageSource, getTypes(columnHandles));
                assertPageSourceType(pageSource, fileType);

                for (MaterializedRow row : result) {
                    try {
                        assertValueTypes(row, tableMetadata.getColumns());
                    }
                    catch (RuntimeException e) {
                        throw new RuntimeException("row " + rowNumber, e);
                    }

                    rowNumber++;
                    Object value;

                    value = row.getField(columnIndex.get("t_string"));
                    if (rowNumber % 19 == 0) {
                        assertNull(value);
                    }
                    else if (rowNumber % 19 == 1) {
                        assertEquals(value, "");
                    }
                    else {
                        assertEquals(value, "test");
                    }

                    assertEquals(row.getField(columnIndex.get("t_tinyint")), (byte) (1 + rowNumber));
                    assertEquals(row.getField(columnIndex.get("t_smallint")), (short) (2 + rowNumber));
                    assertEquals(row.getField(columnIndex.get("t_int")), 3 + (int) rowNumber);

                    if (rowNumber % 13 == 0) {
                        assertNull(row.getField(columnIndex.get("t_bigint")));
                    }
                    else {
                        assertEquals(row.getField(columnIndex.get("t_bigint")), 4 + rowNumber);
                    }

                    assertEquals((Float) row.getField(columnIndex.get("t_float")), 5.1f + rowNumber, 0.001);
                    assertEquals(row.getField(columnIndex.get("t_double")), 6.2 + rowNumber);

                    if (rowNumber % 3 == 2) {
                        assertNull(row.getField(columnIndex.get("t_boolean")));
                    }
                    else {
                        assertEquals(row.getField(columnIndex.get("t_boolean")), rowNumber % 3 != 0);
                    }

                    assertEquals(row.getField(columnIndex.get("ds")), ds);
                    assertEquals(row.getField(columnIndex.get("file_format")), fileFormat);
                    assertEquals(row.getField(columnIndex.get("dummy")), dummyPartition);

                    long newCompletedBytes = pageSource.getCompletedBytes();
                    assertTrue(newCompletedBytes >= completedBytes);
                    assertTrue(newCompletedBytes <= hiveSplit.getLength());
                    completedBytes = newCompletedBytes;
                }

                assertTrue(completedBytes <= hiveSplit.getLength());
                assertEquals(rowNumber, 100);
            }
        }
    }
}
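getAllSplits is another helper of the suite. A minimal sketch of the draining loop, assuming the overload used above first obtains a ConnectorSplitSource from the connector's split manager; the batch size and the use of Airlift's getFutureValue are assumptions:

private static List<ConnectorSplit> getAllSplits(ConnectorSplitSource splitSource)
{
    ImmutableList.Builder<ConnectorSplit> splits = ImmutableList.builder();
    while (!splitSource.isFinished()) {
        // getNextBatch returns a CompletableFuture<ConnectorSplitBatch>; blocking is fine in a test
        splits.addAll(getFutureValue(splitSource.getNextBatch(1000)).getSplits());
    }
    return splits.build();
}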
Use of io.trino.spi.connector.ConnectorTableHandle in project trino by trinodb.
From the class AbstractTestHive, the doInsertUnsupportedWriteType method:
private void doInsertUnsupportedWriteType(HiveStorageFormat storageFormat, SchemaTableName tableName) throws Exception
{
    List<Column> columns = ImmutableList.of(new Column("dummy", HiveType.valueOf("uniontype<smallint,tinyint>"), Optional.empty()));
    List<Column> partitionColumns = ImmutableList.of(new Column("name", HIVE_STRING, Optional.empty()));
    createEmptyTable(tableName, storageFormat, columns, partitionColumns);

    try (Transaction transaction = newTransaction()) {
        ConnectorMetadata metadata = transaction.getMetadata();
        ConnectorSession session = newSession();

        ConnectorTableHandle tableHandle = getTableHandle(metadata, tableName);
        metadata.beginInsert(session, tableHandle, ImmutableList.of(), NO_RETRIES);
        fail("expected failure");
    }
    catch (TrinoException e) {
        assertThat(e).hasMessageMatching("Inserting into Hive table .* with column type uniontype<smallint,tinyint> not supported");
    }
}
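A test would invoke this with a temporary table and clean up afterwards. A sketch with an illustrative table suffix (ORC stands in for whichever formats the suite iterates over):

@Test
public void testInsertUnsupportedWriteType() throws Exception
{
    SchemaTableName tableName = temporaryTable("insert_unsupported_type"); // illustrative suffix
    try {
        doInsertUnsupportedWriteType(ORC, tableName);
    }
    finally {
        dropTable(tableName);
    }
}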
Use of io.trino.spi.connector.ConnectorTableHandle in project trino by trinodb.
From the class AbstractTestHive, the doTestBucketedTableValidation method:
private void doTestBucketedTableValidation(HiveStorageFormat storageFormat, SchemaTableName tableName) throws Exception
{
    prepareInvalidBuckets(storageFormat, tableName);

    // read succeeds when validation is disabled
    try (Transaction transaction = newTransaction()) {
        ConnectorMetadata metadata = transaction.getMetadata();
        ConnectorSession session = newSession(ImmutableMap.of("validate_bucketing", false));
        metadata.beginQuery(session);

        ConnectorTableHandle tableHandle = getTableHandle(metadata, tableName);
        List<ColumnHandle> columnHandles = filterNonHiddenColumnHandles(metadata.getColumnHandles(session, tableHandle).values());
        MaterializedResult result = readTable(transaction, tableHandle, columnHandles, session, TupleDomain.all(), OptionalInt.empty(), Optional.of(storageFormat));
        // fewer rows due to deleted file
        assertEquals(result.getRowCount(), 87);
    }

    // read fails due to validation failure
    assertReadFailsWithMessageMatching(storageFormat, tableName, "Hive table is corrupt\\. File '.*/000002_0_.*' is for bucket 2, but contains a row for bucket 5.");
}
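assertReadFailsWithMessageMatching is not shown either. A sketch built from the same readTable helper used above, with AssertJ's assertThatThrownBy; the real suite may additionally assert a specific Trino error code:

protected void assertReadFailsWithMessageMatching(HiveStorageFormat storageFormat, SchemaTableName tableName, String regex)
        throws Exception
{
    try (Transaction transaction = newTransaction()) {
        ConnectorMetadata metadata = transaction.getMetadata();
        ConnectorSession session = newSession(); // bucket validation enabled by default
        metadata.beginQuery(session);
        ConnectorTableHandle tableHandle = getTableHandle(metadata, tableName);
        List<ColumnHandle> columnHandles = filterNonHiddenColumnHandles(metadata.getColumnHandles(session, tableHandle).values());
        assertThatThrownBy(() -> readTable(transaction, tableHandle, columnHandles, session, TupleDomain.all(), OptionalInt.empty(), Optional.of(storageFormat)))
                .isInstanceOf(TrinoException.class)
                .hasMessageMatching(regex);
    }
}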