Use of org.apache.iceberg.catalog.TableIdentifier in project hive by apache.
From class HiveTableTest, method testListTables.
@Test
public void testListTables() throws TException, IOException {
  List<TableIdentifier> tableIdents = catalog.listTables(TABLE_IDENTIFIER.namespace());
  List<TableIdentifier> expectedIdents = tableIdents.stream()
      .filter(t -> t.namespace().level(0).equals(DB_NAME) && t.name().equals(TABLE_NAME))
      .collect(Collectors.toList());
  Assert.assertEquals(1, expectedIdents.size());
  Assert.assertTrue(catalog.tableExists(TABLE_IDENTIFIER));
  // create a plain (non-Iceberg) Hive table; listTables() should still report only the Iceberg table
  String hiveTableName = "test_hive_table";
  org.apache.hadoop.hive.metastore.api.Table hiveTable = createHiveTable(hiveTableName);
  metastoreClient.createTable(hiveTable);
  List<TableIdentifier> tableIdents1 = catalog.listTables(TABLE_IDENTIFIER.namespace());
  Assert.assertEquals("Should be exactly 1 Iceberg table", 1, tableIdents1.size());
  Assert.assertTrue(catalog.tableExists(TABLE_IDENTIFIER));
  metastoreClient.dropTable(DB_NAME, hiveTableName);
}
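The createHiveTable helper is not shown on this page. A minimal sketch of what such a helper might look like, built directly against the Hive metastore Thrift API, is below; the column list, serde, formats, and location are illustrative assumptions, not the project's actual helper.

// Hypothetical helper: registers a plain (non-Iceberg) Hive table with the metastore so that
// listTables() can prove non-Iceberg tables are filtered out.
// Uses org.apache.hadoop.hive.metastore.api.{FieldSchema, SerDeInfo, StorageDescriptor}
// and org.apache.hadoop.hive.metastore.TableType.
private org.apache.hadoop.hive.metastore.api.Table createHiveTable(String hiveTableName) {
  SerDeInfo serDeInfo = new SerDeInfo();
  serDeInfo.setSerializationLib("org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe");

  StorageDescriptor sd = new StorageDescriptor();
  sd.setCols(Collections.singletonList(new FieldSchema("id", "int", "an id column")));
  sd.setLocation("/tmp/" + hiveTableName); // illustrative location
  sd.setInputFormat("org.apache.hadoop.mapred.TextInputFormat");
  sd.setOutputFormat("org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat");
  sd.setSerdeInfo(serDeInfo);

  org.apache.hadoop.hive.metastore.api.Table hiveTable = new org.apache.hadoop.hive.metastore.api.Table();
  hiveTable.setDbName(DB_NAME);
  hiveTable.setTableName(hiveTableName);
  hiveTable.setTableType(TableType.EXTERNAL_TABLE.name());
  hiveTable.setSd(sd);
  return hiveTable;
}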
Use of org.apache.iceberg.catalog.TableIdentifier in project hive by apache.
From class HiveTableTest, method testNonDefaultDatabaseLocation.
@Test
public void testNonDefaultDatabaseLocation() throws IOException, TException {
  Namespace namespace = Namespace.of(NON_DEFAULT_DATABASE);
  // Create a new location and a non-default database / namespace for it
  File nonDefaultLocation = createTempDirectory(NON_DEFAULT_DATABASE,
      asFileAttribute(fromString("rwxrwxrwx"))).toFile();
  catalog.createNamespace(namespace, Collections.singletonMap("location", nonDefaultLocation.getPath()));
  Map<String, String> namespaceMeta = catalog.loadNamespaceMetadata(namespace);
  // Make sure that we are testing a namespace with a non-default location :)
  Assert.assertEquals("file:" + nonDefaultLocation.getPath(), namespaceMeta.get("location"));
  TableIdentifier tableIdentifier = TableIdentifier.of(namespace, TABLE_NAME);
  catalog.createTable(tableIdentifier, schema);
  // Let's check the location loaded through the catalog
  Table table = catalog.loadTable(tableIdentifier);
  Assert.assertEquals(namespaceMeta.get("location") + "/" + TABLE_NAME, table.location());
  // Drop the database and purge the files
  metastoreClient.dropDatabase(NON_DEFAULT_DATABASE, true, true, true);
}
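The unqualified createTempDirectory, asFileAttribute, and fromString calls above rely on static imports. They most likely resolve to the JDK methods listed below (stated here as an assumption about the test class's imports; the POSIX permission helpers only apply on POSIX file systems).

// Likely static imports behind the temp-directory setup in the test above
import static java.nio.file.Files.createTempDirectory;
import static java.nio.file.attribute.PosixFilePermissions.asFileAttribute;
import static java.nio.file.attribute.PosixFilePermissions.fromString;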
Use of org.apache.iceberg.catalog.TableIdentifier in project hive by apache.
From class TestHiveCatalog, method testDropNamespace.
@Test
public void testDropNamespace() throws TException {
  Namespace namespace = Namespace.of("dbname_drop");
  TableIdentifier identifier = TableIdentifier.of(namespace, "table");
  Schema schema = new Schema(Types.StructType.of(required(1, "id", Types.LongType.get())).fields());
  catalog.createNamespace(namespace, meta);
  catalog.createTable(identifier, schema);
  Map<String, String> namespaceMeta = catalog.loadNamespaceMetadata(namespace);
  Assert.assertEquals("apache", namespaceMeta.get("owner"));
  Assert.assertEquals("iceberg", namespaceMeta.get("group"));
  AssertHelpers.assertThrows("Should fail to drop non-empty namespace " + namespace,
      NamespaceNotEmptyException.class,
      "Namespace dbname_drop is not empty. One or more tables exist.", () -> {
        catalog.dropNamespace(namespace);
      });
  Assert.assertTrue(catalog.dropTable(identifier, true));
  Assert.assertTrue("Should drop the namespace once it is empty", catalog.dropNamespace(namespace));
  Assert.assertFalse("Should return false when the namespace doesn't exist", catalog.dropNamespace(Namespace.of("db.ns1")));
  AssertHelpers.assertThrows("Should fail to load metadata for dropped namespace " + namespace,
      NoSuchNamespaceException.class,
      "Namespace does not exist: ", () -> {
        catalog.loadNamespaceMetadata(namespace);
      });
}
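The meta map passed to createNamespace is defined elsewhere in TestHiveCatalog. Given the owner/group assertions above, it presumably looks something like the sketch below (the comment entry and the use of Guava's ImmutableMap are assumptions, not the project's exact definition).

// Presumed shape of the namespace properties map used by the test above
private static final Map<String, String> meta = ImmutableMap.of(
    "owner", "apache",
    "group", "iceberg",
    "comment", "iceberg hiveCatalog test");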
Use of org.apache.iceberg.catalog.TableIdentifier in project hive by apache.
From class TestHiveCatalog, method testCreateTableDefaultSortOrder.
@Test
public void testCreateTableDefaultSortOrder() {
  Schema schema = new Schema(
      required(1, "id", Types.IntegerType.get(), "unique ID"),
      required(2, "data", Types.StringType.get()));
  PartitionSpec spec = PartitionSpec.builderFor(schema).bucket("data", 16).build();
  TableIdentifier tableIdent = TableIdentifier.of(DB_NAME, "tbl");
  try {
    Table table = catalog.createTable(tableIdent, schema, spec);
    Assert.assertEquals("Order ID must match", 0, table.sortOrder().orderId());
    Assert.assertTrue("Order must be unsorted", table.sortOrder().isUnsorted());
  } finally {
    catalog.dropTable(tableIdent);
  }
}
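For comparison with the next example, the same table could presumably also be created through the buildTable fluent API without specifying a sort order, which should yield the same unsorted default (a sketch reusing the variables above, not part of the original test).

// Equivalent creation via the fluent builder; with no withSortOrder() call the
// resulting table should still report an unsorted order (order ID 0)
Table table = catalog.buildTable(tableIdent, schema)
    .withPartitionSpec(spec)
    .create();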
Use of org.apache.iceberg.catalog.TableIdentifier in project hive by apache.
From class TestHiveCatalog, method testCreateTableCustomSortOrder.
@Test
public void testCreateTableCustomSortOrder() {
  Schema schema = new Schema(
      required(1, "id", Types.IntegerType.get(), "unique ID"),
      required(2, "data", Types.StringType.get()));
  PartitionSpec spec = PartitionSpec.builderFor(schema).bucket("data", 16).build();
  SortOrder order = SortOrder.builderFor(schema).asc("id", NULLS_FIRST).build();
  TableIdentifier tableIdent = TableIdentifier.of(DB_NAME, "tbl");
  try {
    Table table = catalog.buildTable(tableIdent, schema)
        .withPartitionSpec(spec)
        .withSortOrder(order)
        .create();
    SortOrder sortOrder = table.sortOrder();
    Assert.assertEquals("Order ID must match", 1, sortOrder.orderId());
    Assert.assertEquals("Order must have 1 field", 1, sortOrder.fields().size());
    Assert.assertEquals("Direction must match", ASC, sortOrder.fields().get(0).direction());
    Assert.assertEquals("Null order must match", NULLS_FIRST, sortOrder.fields().get(0).nullOrder());
    Transform<?, ?> transform = Transforms.identity(Types.IntegerType.get());
    Assert.assertEquals("Transform must match", transform, sortOrder.fields().get(0).transform());
  } finally {
    catalog.dropTable(tableIdent);
  }
}
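These two sort-order tests also lean on static imports from the Iceberg API: the unqualified required, NULLS_FIRST, and ASC most likely resolve to the declarations below (listed as an assumption about the test class's imports).

// Likely static imports behind required(...), NULLS_FIRST, and ASC in the tests above
import static org.apache.iceberg.NullOrder.NULLS_FIRST;
import static org.apache.iceberg.SortDirection.ASC;
import static org.apache.iceberg.types.Types.NestedField.required;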