Use of io.trino.spi.connector.ConnectorMetadata in project trino by trinodb: class AbstractTestHive, method doInsertUnsupportedWriteType.
private void doInsertUnsupportedWriteType(HiveStorageFormat storageFormat, SchemaTableName tableName)
        throws Exception
{
    List<Column> columns = ImmutableList.of(new Column("dummy", HiveType.valueOf("uniontype<smallint,tinyint>"), Optional.empty()));
    List<Column> partitionColumns = ImmutableList.of(new Column("name", HIVE_STRING, Optional.empty()));
    createEmptyTable(tableName, storageFormat, columns, partitionColumns);

    try (Transaction transaction = newTransaction()) {
        ConnectorMetadata metadata = transaction.getMetadata();
        ConnectorSession session = newSession();
        ConnectorTableHandle tableHandle = getTableHandle(metadata, tableName);
        metadata.beginInsert(session, tableHandle, ImmutableList.of(), NO_RETRIES);
        fail("expected failure");
    }
    catch (TrinoException e) {
        assertThat(e).hasMessageMatching("Inserting into Hive table .* with column type uniontype<smallint,tinyint> not supported");
    }
}
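The fail()-then-catch idiom above can also be written with AssertJ's assertThatThrownBy (statically imported from org.assertj.core.api.Assertions). A minimal equivalent sketch reusing the same test helpers; this is an alternative formulation, not the form used in the Trino source:

// Equivalent negative test expressed with assertThatThrownBy;
// assumes the same helpers (newTransaction, newSession, getTableHandle) are in scope.
try (Transaction transaction = newTransaction()) {
    ConnectorMetadata metadata = transaction.getMetadata();
    ConnectorSession session = newSession();
    ConnectorTableHandle tableHandle = getTableHandle(metadata, tableName);
    assertThatThrownBy(() -> metadata.beginInsert(session, tableHandle, ImmutableList.of(), NO_RETRIES))
            .isInstanceOf(TrinoException.class)
            .hasMessageMatching("Inserting into Hive table .* with column type uniontype<smallint,tinyint> not supported");
}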
Use of io.trino.spi.connector.ConnectorMetadata in project trino by trinodb: class AbstractTestHive, method doTestBucketedTableValidation.
private void doTestBucketedTableValidation(HiveStorageFormat storageFormat, SchemaTableName tableName)
        throws Exception
{
    prepareInvalidBuckets(storageFormat, tableName);

    // read succeeds when validation is disabled
    try (Transaction transaction = newTransaction()) {
        ConnectorMetadata metadata = transaction.getMetadata();
        ConnectorSession session = newSession(ImmutableMap.of("validate_bucketing", false));
        metadata.beginQuery(session);
        ConnectorTableHandle tableHandle = getTableHandle(metadata, tableName);
        List<ColumnHandle> columnHandles = filterNonHiddenColumnHandles(metadata.getColumnHandles(session, tableHandle).values());
        MaterializedResult result = readTable(transaction, tableHandle, columnHandles, session, TupleDomain.all(), OptionalInt.empty(), Optional.of(storageFormat));
        // fewer rows due to deleted file
        assertEquals(result.getRowCount(), 87);
    }

    // read fails due to validation failure
    assertReadFailsWithMessageMatching(storageFormat, tableName, "Hive table is corrupt\\. File '.*/000002_0_.*' is for bucket 2, but contains a row for bucket 5.");
}
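assertReadFailsWithMessageMatching is another AbstractTestHive helper. A hypothetical sketch of its likely shape: it re-reads the table with a default session (so bucket validation stays on) and asserts on the thrown TrinoException. The body below is an assumption about the helper, not its actual implementation:

// Hypothetical sketch of the assertReadFailsWithMessageMatching helper used above.
private void assertReadFailsWithMessageMatching(HiveStorageFormat storageFormat, SchemaTableName tableName, String regex)
{
    try (Transaction transaction = newTransaction()) {
        ConnectorMetadata metadata = transaction.getMetadata();
        ConnectorSession session = newSession(); // validate_bucketing defaults to true
        metadata.beginQuery(session);
        ConnectorTableHandle tableHandle = getTableHandle(metadata, tableName);
        List<ColumnHandle> columnHandles = filterNonHiddenColumnHandles(metadata.getColumnHandles(session, tableHandle).values());
        assertThatThrownBy(() -> readTable(transaction, tableHandle, columnHandles, session, TupleDomain.all(), OptionalInt.empty(), Optional.of(storageFormat)))
                .isInstanceOf(TrinoException.class)
                .hasMessageMatching(regex);
    }
}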
Use of io.trino.spi.connector.ConnectorMetadata in project trino by trinodb: class AbstractTestHive, method testNewDirectoryPermissions.
@Test
public void testNewDirectoryPermissions()
        throws Exception
{
    SchemaTableName tableName = temporaryTable("empty_file");
    List<Column> columns = ImmutableList.of(new Column("test", HIVE_STRING, Optional.empty()));
    createEmptyTable(tableName, ORC, columns, ImmutableList.of(), Optional.empty());
    try {
        Transaction transaction = newTransaction();
        ConnectorSession session = newSession();
        ConnectorMetadata metadata = transaction.getMetadata();
        metadata.beginQuery(session);

        Table table = transaction.getMetastore()
                .getTable(tableName.getSchemaName(), tableName.getTableName())
                .orElseThrow();

        // create new directory and set directory permission after creation
        HdfsContext context = new HdfsContext(session);
        Path location = new Path(table.getStorage().getLocation());
        Path defaultPath = new Path(location + "/defaultperms");
        createDirectory(context, hdfsEnvironment, defaultPath);
        FileStatus defaultFsStatus = hdfsEnvironment.getFileSystem(context, defaultPath).getFileStatus(defaultPath);
        assertEquals(defaultFsStatus.getPermission().toOctal(), 777);

        // use hdfs config that skips setting directory permissions after creation
        HdfsConfig configWithSkip = new HdfsConfig();
        configWithSkip.setNewDirectoryPermissions(HdfsConfig.SKIP_DIR_PERMISSIONS);
        HdfsEnvironment hdfsEnvironmentWithSkip = new HdfsEnvironment(createTestHdfsConfiguration(), configWithSkip, new NoHdfsAuthentication());

        Path skipPath = new Path(location + "/skipperms");
        createDirectory(context, hdfsEnvironmentWithSkip, skipPath);
        FileStatus skipFsStatus = hdfsEnvironmentWithSkip.getFileSystem(context, skipPath).getFileStatus(skipPath);
        assertEquals(skipFsStatus.getPermission().toOctal(), 755);
    }
    finally {
        dropTable(tableName);
    }
}
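The literals 777 and 755 compare against FsPermission.toOctal(), which renders the permission bits as a short whose decimal digits read like the octal mode (rwxrwxrwx becomes 777). A hypothetical extension of the same try block, assuming setNewDirectoryPermissions also accepts an explicit octal string, as the "0777" default of hive.fs.new-directory-permissions suggests; configWithCustomPerms, customEnvironment, and customPath are illustrative names:

// Sketch: configure an explicit permission instead of skipping.
// Assumption: the setter accepts an octal string such as "0700".
HdfsConfig configWithCustomPerms = new HdfsConfig();
configWithCustomPerms.setNewDirectoryPermissions("0700");
HdfsEnvironment customEnvironment = new HdfsEnvironment(createTestHdfsConfiguration(), configWithCustomPerms, new NoHdfsAuthentication());
Path customPath = new Path(location + "/customperms");
createDirectory(context, customEnvironment, customPath);
// toOctal() renders rwx------ as the short 700
assertEquals(customEnvironment.getFileSystem(context, customPath).getFileStatus(customPath).getPermission().toOctal(), 700);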
Use of io.trino.spi.connector.ConnectorMetadata in project trino by trinodb: class AbstractTestHive, method testGetTableSchemaOfflinePartition.
@Test
public void testGetTableSchemaOfflinePartition()
{
    try (Transaction transaction = newTransaction()) {
        ConnectorMetadata metadata = transaction.getMetadata();
        ConnectorTableHandle tableHandle = getTableHandle(metadata, tableOfflinePartition);
        ConnectorTableMetadata tableMetadata = metadata.getTableMetadata(newSession(), tableHandle);
        Map<String, ColumnMetadata> map = uniqueIndex(tableMetadata.getColumns(), ColumnMetadata::getName);
        assertPrimitiveField(map, "t_string", createUnboundedVarcharType(), false);
    }
}
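uniqueIndex here is Guava's Maps.uniqueIndex, statically imported: it builds an ImmutableMap keyed by the given function and throws IllegalArgumentException if two elements map to the same key. A standalone illustration of the idiom (not Trino code; the Column record is invented for the example):

import com.google.common.collect.Maps;

import java.util.List;
import java.util.Map;

public class UniqueIndexExample
{
    record Column(String name, String type) {}

    public static void main(String[] args)
    {
        List<Column> columns = List.of(new Column("t_string", "varchar"), new Column("ds", "varchar"));
        // keys the map by column name; duplicate names would throw
        Map<String, Column> byName = Maps.uniqueIndex(columns, Column::name);
        System.out.println(byName.get("ds").type()); // prints "varchar"
    }
}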
Use of io.trino.spi.connector.ConnectorMetadata in project trino by trinodb: class AbstractTestHive, method testGetTableSchemaPartitionFormat.
@Test
public void testGetTableSchemaPartitionFormat()
{
    try (Transaction transaction = newTransaction()) {
        ConnectorMetadata metadata = transaction.getMetadata();
        ConnectorTableMetadata tableMetadata = metadata.getTableMetadata(newSession(), getTableHandle(metadata, tablePartitionFormat));
        Map<String, ColumnMetadata> map = uniqueIndex(tableMetadata.getColumns(), ColumnMetadata::getName);

        assertPrimitiveField(map, "t_string", createUnboundedVarcharType(), false);
        assertPrimitiveField(map, "t_tinyint", TINYINT, false);
        assertPrimitiveField(map, "t_smallint", SMALLINT, false);
        assertPrimitiveField(map, "t_int", INTEGER, false);
        assertPrimitiveField(map, "t_bigint", BIGINT, false);
        assertPrimitiveField(map, "t_float", REAL, false);
        assertPrimitiveField(map, "t_double", DOUBLE, false);
        assertPrimitiveField(map, "t_boolean", BOOLEAN, false);
        assertPrimitiveField(map, "ds", createUnboundedVarcharType(), true);
        assertPrimitiveField(map, "file_format", createUnboundedVarcharType(), true);
        assertPrimitiveField(map, "dummy", INTEGER, true);
    }
}
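assertPrimitiveField is an AbstractTestHive helper; the boolean argument marks partition columns (here ds, file_format, and dummy). A hypothetical sketch of what it plausibly verifies: presence, type, and the partition-key flag. The extra-info check is an assumption about how the Hive connector marks partition keys, not the actual implementation:

// Hypothetical sketch of the assertPrimitiveField helper used above.
private static void assertPrimitiveField(Map<String, ColumnMetadata> map, String name, Type type, boolean partitionKey)
{
    assertTrue(map.containsKey(name), name);
    ColumnMetadata column = map.get(name);
    assertEquals(column.getType(), type, name);
    // assumption: partition keys are surfaced through the column's extra info
    assertEquals(column.getExtraInfo() != null, partitionKey, name);
}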