Use of io.trino.plugin.hive.metastore.Column in project trino by trinodb — class AbstractTestHive, method assertEmptyFile.
/**
 * Verifies that a table backed first by an empty directory and then by a
 * zero-length file can be read and yields no rows.
 */
private void assertEmptyFile(HiveStorageFormat format)
        throws Exception
{
    SchemaTableName temporaryName = temporaryTable("empty_file");
    try {
        List<Column> dataColumns = ImmutableList.of(new Column("test", HIVE_STRING, Optional.empty()));
        createEmptyTable(temporaryName, format, dataColumns, ImmutableList.of());

        try (Transaction transaction = newTransaction()) {
            ConnectorSession session = newSession();
            ConnectorMetadata metadata = transaction.getMetadata();
            metadata.beginQuery(session);

            ConnectorTableHandle handle = getTableHandle(metadata, temporaryName);
            List<ColumnHandle> handles = filterNonHiddenColumnHandles(metadata.getColumnHandles(session, handle).values());

            Table table = transaction.getMetastore()
                    .getTable(temporaryName.getSchemaName(), temporaryName.getTableName())
                    .orElseThrow(AssertionError::new);

            // the freshly created table must start with an empty storage directory
            HdfsContext context = new HdfsContext(session);
            Path location = new Path(table.getStorage().getLocation());
            assertTrue(listDirectory(context, location).isEmpty());

            // read the table while its directory is empty
            readTable(transaction, handle, handles, session, TupleDomain.all(), OptionalInt.of(0), Optional.of(ORC));

            // drop a zero-length file into the directory
            FileSystem fileSystem = hdfsEnvironment.getFileSystem(context, location);
            assertTrue(fileSystem.createNewFile(new Path(location, "empty-file")));
            assertEquals(listDirectory(context, location), ImmutableList.of("empty-file"));

            // reading the table again must still produce zero rows
            MaterializedResult result = readTable(transaction, handle, handles, session, TupleDomain.all(), OptionalInt.of(0), Optional.empty());
            assertEquals(result.getRowCount(), 0);
        }
    }
    finally {
        dropTable(temporaryName);
    }
}
Use of io.trino.plugin.hive.metastore.Column in project trino by trinodb — class AbstractTestHive, method doInsertUnsupportedWriteType.
/**
 * Attempts to begin an insert into a table whose data column uses an
 * unwritable Hive union type, and asserts that the expected TrinoException
 * with a descriptive message is raised.
 */
private void doInsertUnsupportedWriteType(HiveStorageFormat storageFormat, SchemaTableName tableName)
        throws Exception
{
    List<Column> dataColumns = ImmutableList.of(
            new Column("dummy", HiveType.valueOf("uniontype<smallint,tinyint>"), Optional.empty()));
    List<Column> partitioning = ImmutableList.of(
            new Column("name", HIVE_STRING, Optional.empty()));
    createEmptyTable(tableName, storageFormat, dataColumns, partitioning);

    try (Transaction transaction = newTransaction()) {
        ConnectorMetadata metadata = transaction.getMetadata();
        ConnectorSession session = newSession();
        ConnectorTableHandle tableHandle = getTableHandle(metadata, tableName);
        // beginInsert is expected to reject the union-typed column
        metadata.beginInsert(session, tableHandle, ImmutableList.of(), NO_RETRIES);
        fail("expected failure");
    }
    catch (TrinoException e) {
        assertThat(e).hasMessageMatching("Inserting into Hive table .* with column type uniontype<smallint,tinyint> not supported");
    }
}
Use of io.trino.plugin.hive.metastore.Column in project trino by trinodb — class AbstractTestHive, method testNewDirectoryPermissions.
/**
 * Verifies new-directory permission handling: by default a created directory
 * is chmod'ed to 777, while an HdfsConfig configured with
 * SKIP_DIR_PERMISSIONS leaves the filesystem default (755) in place.
 */
@Test
public void testNewDirectoryPermissions()
        throws Exception
{
    SchemaTableName tableName = temporaryTable("empty_file");
    List<Column> columns = ImmutableList.of(new Column("test", HIVE_STRING, Optional.empty()));
    createEmptyTable(tableName, ORC, columns, ImmutableList.of(), Optional.empty());
    try {
        // BUG FIX: the transaction was previously never closed; use
        // try-with-resources like the sibling tests so it is released
        // even when an assertion fails.
        try (Transaction transaction = newTransaction()) {
            ConnectorSession session = newSession();
            ConnectorMetadata metadata = transaction.getMetadata();
            metadata.beginQuery(session);

            Table table = transaction.getMetastore()
                    .getTable(tableName.getSchemaName(), tableName.getTableName())
                    .orElseThrow();

            // create a new directory; the default environment sets permissions after creation
            HdfsContext context = new HdfsContext(session);
            Path location = new Path(table.getStorage().getLocation());

            Path defaultPath = new Path(location + "/defaultperms");
            createDirectory(context, hdfsEnvironment, defaultPath);
            FileStatus defaultFsStatus = hdfsEnvironment.getFileSystem(context, defaultPath).getFileStatus(defaultPath);
            assertEquals(defaultFsStatus.getPermission().toOctal(), 777);

            // an hdfs config that skips setting directory permissions leaves the default mask
            HdfsConfig configWithSkip = new HdfsConfig();
            configWithSkip.setNewDirectoryPermissions(HdfsConfig.SKIP_DIR_PERMISSIONS);
            HdfsEnvironment hdfsEnvironmentWithSkip = new HdfsEnvironment(createTestHdfsConfiguration(), configWithSkip, new NoHdfsAuthentication());

            Path skipPath = new Path(location + "/skipperms");
            createDirectory(context, hdfsEnvironmentWithSkip, skipPath);
            FileStatus skipFsStatus = hdfsEnvironmentWithSkip.getFileSystem(context, skipPath).getFileStatus(skipPath);
            assertEquals(skipFsStatus.getPermission().toOctal(), 755);
        }
    }
    finally {
        dropTable(tableName);
    }
}
Use of io.trino.plugin.hive.metastore.Column in project trino by trinodb — class TestBackgroundHiveSplitLoader, method table.
/**
 * Builds a managed test table named "test_table" in database "test_dbname"
 * with a single string data column and the supplied location, partitioning,
 * bucketing, parameters, and storage format.
 */
private static Table table(
        String location,
        List<Column> partitionColumns,
        Optional<HiveBucketProperty> bucketProperty,
        Map<String, String> tableParameters,
        StorageFormat storageFormat)
{
    Table.Builder builder = Table.builder();
    builder.getStorageBuilder()
            .setStorageFormat(storageFormat)
            .setLocation(location)
            .setSkewed(false)
            .setBucketProperty(bucketProperty);
    return builder
            .setDatabaseName("test_dbname")
            .setOwner(Optional.of("testOwner"))
            .setTableName("test_table")
            .setTableType(TableType.MANAGED_TABLE.toString())
            .setDataColumns(ImmutableList.of(new Column("col1", HIVE_STRING, Optional.empty())))
            .setParameters(tableParameters)
            .setPartitionColumns(partitionColumns)
            .build();
}
Use of io.trino.plugin.hive.metastore.Column in project trino by trinodb — class TestProtoUtils, method testTable.
/**
 * Round-trips the testing Alluxio TableInfo proto through ProtoUtils.fromProto
 * and checks every converted field: identity, column mapping (the single test
 * column lands in the partition columns), parameters, view text, and storage.
 */
@Test
public void testTable()
{
    alluxio.grpc.table.TableInfo.Builder tableInfo = TestingAlluxioMetastoreObjects.getTestingTableInfo();
    alluxio.grpc.table.FieldSchema expectedColumn = TestingAlluxioMetastoreObjects.getTestingFieldSchema().build();

    Table converted = ProtoUtils.fromProto(tableInfo.build());

    // identity fields carry over directly from the proto
    assertEquals(tableInfo.getDbName(), converted.getDatabaseName());
    assertEquals(tableInfo.getTableName(), converted.getTableName());
    assertEquals(tableInfo.getOwner(), converted.getOwner().orElse(null));
    assertEquals(tableInfo.getType().toString(), converted.getTableType());

    // the single testing column is mapped as a partition column, not a data column
    assertEquals(0, converted.getDataColumns().size());
    assertEquals(1, converted.getPartitionColumns().size());
    assertEquals(tableInfo.getParametersMap(), converted.getParameters());
    assertEquals(Optional.empty(), converted.getViewOriginalText());
    assertEquals(Optional.empty(), converted.getViewExpandedText());

    // the converted column mirrors the proto field schema
    Column column = converted.getColumn(TestingAlluxioMetastoreObjects.COLUMN_NAME).get();
    assertEquals(expectedColumn.getName(), column.getName());
    assertEquals(expectedColumn.getComment(), column.getComment().get());
    assertEquals(expectedColumn.getType(), column.getType().toString());

    // storage fields mirror the testing partition info
    Storage actualStorage = converted.getStorage();
    alluxio.grpc.table.layout.hive.Storage expectedStorage = TestingAlluxioMetastoreObjects.getTestingPartitionInfo().getStorage();
    assertEquals(expectedStorage.getSkewed(), actualStorage.isSkewed());
    assertEquals(ProtoUtils.fromProto(expectedStorage.getStorageFormat()), actualStorage.getStorageFormat());
    assertEquals(expectedStorage.getLocation(), actualStorage.getLocation());
    assertEquals(ProtoUtils.fromProto(tableInfo.getParametersMap(), expectedStorage.getBucketProperty()), actualStorage.getBucketProperty());
    assertEquals(expectedStorage.getStorageFormat().getSerdelibParametersMap(), actualStorage.getSerdeParameters());
}
Aggregations