Usage of io.trino.tempto.fulfillment.table.hive.HiveDataSource in the trino project by trinodb.
Example: the testConflictingMixedCasePartitionNames method of the TestSyncPartitionMetadata class.
@Test(groups = { HIVE_PARTITIONING, SMOKE })
@Flaky(issue = ERROR_COMMITTING_WRITE_TO_HIVE_ISSUE, match = ERROR_COMMITTING_WRITE_TO_HIVE_MATCH)
public void testConflictingMixedCasePartitionNames() {
    String tableName = "test_sync_partition_mixed_case";
    prepare(hdfsClient, hdfsDataSourceWriter, tableName);
    HiveDataSource orcData = createResourceDataSource(tableName, "io/trino/tests/product/hive/data/single_int_column/data.orc");
    // Write a directory whose mixed-case name conflicts with a partition that already exists in the metastore.
    String conflictingPath = tableLocation(tableName) + "/COL_X=a/cOl_y=1";
    hdfsDataSourceWriter.ensureDataOnHdfs(conflictingPath, orcData);
    // sync_partition_metadata in ADD mode must refuse to register the conflicting partition...
    assertThatThrownBy(() -> onTrino().executeQuery("CALL system.sync_partition_metadata('default', '" + tableName + "', 'ADD', false)"))
            .hasMessageContaining(format("One or more partitions already exist for table 'default.%s'", tableName));
    // ...and the original partitions must remain untouched.
    assertPartitions(tableName, row("a", "1"), row("b", "2"));
}
Usage of io.trino.tempto.fulfillment.table.hive.HiveDataSource in the trino project by trinodb.
Example: the createDanglingLocationWithData method of the TestHivePartitionProcedures class.
/**
 * Creates the given HDFS directory and fills it with a single-int-column text file,
 * producing a data location that is not registered with the metastore.
 */
private void createDanglingLocationWithData(String path, String tableName) {
    hdfsClient.createDirectory(path);
    HiveDataSource textData = createResourceDataSource(tableName, "io/trino/tests/product/hive/data/single_int_column/data.textfile");
    hdfsDataSourceWriter.ensureDataOnHdfs(path, textData);
}
Usage of io.trino.tempto.fulfillment.table.hive.HiveDataSource in the trino project by trinodb.
Example: the partitionedTableDefinition method of the TestHivePartitionsTable class.
/**
 * Builds the definition of an external ORC table with two partitions:
 * part_col = 1 holds deliberately invalid data, part_col = 2 holds a valid ORC file.
 */
private static TableDefinition partitionedTableDefinition() {
    String createTableDdl = "CREATE EXTERNAL TABLE %NAME%(col INT) " + "PARTITIONED BY (part_col INT) " + "STORED AS ORC";
    HiveDataSource validOrcData = createResourceDataSource(PARTITIONED_TABLE, "io/trino/tests/product/hive/data/single_int_column/data.orc");
    HiveDataSource invalidData = createStringDataSource(PARTITIONED_TABLE, "INVALID DATA");
    return HiveTableDefinition.builder(PARTITIONED_TABLE)
            .setCreateTableDDLTemplate(createTableDdl)
            .addPartition("part_col = 1", invalidData)
            .addPartition("part_col = 2", validOrcData)
            .build();
}
Usage of io.trino.tempto.fulfillment.table.hive.HiveDataSource in the trino project by trinodb.
Example: the prepare method of the TestSyncPartitionMetadata class.
/**
 * Recreates the test table with partitions (a, 1) and (b, 2), then skews the HDFS layout:
 * drops one registered partition's directory, adds unregistered partition directories
 * (one lower-case, one mixed-case), and creates several paths that are not valid
 * partition locations at all. Leaves the metastore itself unchanged.
 */
private void prepare(HdfsClient hdfsClient, HdfsDataSourceWriter hdfsDataSourceWriter, String tableName) {
    onTrino().executeQuery("DROP TABLE IF EXISTS " + tableName);
    onTrino().executeQuery("CREATE TABLE " + tableName + " (payload bigint, col_x varchar, col_y varchar) WITH (format = 'ORC', partitioned_by = ARRAY[ 'col_x', 'col_y' ])");
    onTrino().executeQuery("INSERT INTO " + tableName + " VALUES (1, 'a', '1'), (2, 'b', '2')");
    String location = tableLocation(tableName);
    // Registered partition col_x=b/col_y=2 loses its backing directory.
    hdfsClient.delete(location + "/col_x=b/col_y=2");
    // Unregistered partition col_x=f/col_y=9 gains a directory with real ORC data.
    hdfsClient.createDirectory(location + "/col_x=f/col_y=9");
    HiveDataSource orcData = createResourceDataSource(tableName, "io/trino/tests/product/hive/data/single_int_column/data.orc");
    hdfsDataSourceWriter.ensureDataOnHdfs(location + "/col_x=f/col_y=9", orcData);
    // Mixed-case directory: should only be picked up when not in case sensitive mode.
    hdfsClient.createDirectory(location + "/COL_X=g/col_y=10");
    hdfsDataSourceWriter.ensureDataOnHdfs(location + "/COL_X=g/col_y=10", orcData);
    // Paths that do not form valid partitions for this table and must be ignored.
    hdfsClient.createDirectory(location + "/col_x=d");
    hdfsClient.createDirectory(location + "/col_y=3/col_x=h");
    hdfsClient.createDirectory(location + "/col_y=3");
    hdfsClient.createDirectory(location + "/xyz");
    // The metastore still only knows the two originally inserted partitions.
    assertPartitions(tableName, row("a", "1"), row("b", "2"));
}
Usage of io.trino.tempto.fulfillment.table.hive.HiveDataSource in the trino project by trinodb.
Example: the testMixedCasePartitionNames method of the TestSyncPartitionMetadata class.
@Test(groups = { HIVE_PARTITIONING, SMOKE })
@Flaky(issue = ERROR_COMMITTING_WRITE_TO_HIVE_ISSUE, match = ERROR_COMMITTING_WRITE_TO_HIVE_MATCH)
public void testMixedCasePartitionNames() {
    String tableName = "test_sync_partition_mixed_case";
    prepare(hdfsClient, hdfsDataSourceWriter, tableName);
    String location = tableLocation(tableName);
    HiveDataSource orcData = createResourceDataSource(tableName, "io/trino/tests/product/hive/data/single_int_column/data.orc");
    // One partially mixed-case directory, and one fully upper-case directory.
    hdfsDataSourceWriter.ensureDataOnHdfs(location + "/col_x=h/col_Y=11", orcData);
    hdfsClient.createDirectory(location + "/COL_X=UPPER/COL_Y=12");
    hdfsDataSourceWriter.ensureDataOnHdfs(location + "/COL_X=UPPER/COL_Y=12", orcData);
    // FULL mode with case_sensitive=false should pick up every mixed-case directory.
    onTrino().executeQuery("CALL system.sync_partition_metadata('default', '" + tableName + "', 'FULL', false)");
    assertPartitions(tableName, row("UPPER", "12"), row("a", "1"), row("f", "9"), row("g", "10"), row("h", "11"));
    assertData(tableName, row(1, "a", "1"), row(42, "UPPER", "12"), row(42, "f", "9"), row(42, "g", "10"), row(42, "h", "11"));
}
Aggregations