Use of org.apache.iceberg.TableOperations in project presto by prestodb.
The class IcebergUtil, method getHiveIcebergTable:
public static Table getHiveIcebergTable(ExtendedHiveMetastore metastore, HdfsEnvironment hdfsEnvironment, ConnectorSession session, SchemaTableName table) {
    HdfsContext hdfsContext = new HdfsContext(session, table.getSchemaName(), table.getTableName());
    MetastoreContext metastoreContext = new MetastoreContext(session.getIdentity(), session.getQueryId(), session.getClientInfo(), session.getSource(), Optional.empty(), false, HiveColumnConverterProvider.DEFAULT_COLUMN_CONVERTER_PROVIDER);
    TableOperations operations = new HiveTableOperations(metastore, metastoreContext, hdfsEnvironment, hdfsContext, table.getSchemaName(), table.getTableName());
    return new BaseTable(operations, quotedTableName(table));
}
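The helper wires HiveTableOperations, which loads and commits table metadata through the Hive metastore, into Iceberg's BaseTable, so callers get a working org.apache.iceberg.Table without going through a Catalog. A minimal sketch of how a caller might use the result; the metastore, hdfsEnvironment, and session variables are assumed to be supplied by the connector, and the table name is made up:

// Illustrative only: `metastore`, `hdfsEnvironment`, and `session` come from the connector.
SchemaTableName name = new SchemaTableName("analytics", "events");
Table icebergTable = IcebergUtil.getHiveIcebergTable(metastore, hdfsEnvironment, session, name);

Schema schema = icebergTable.schema();              // current table schema
Snapshot current = icebergTable.currentSnapshot();  // null until the first commit
if (current != null) {
    long snapshotId = current.snapshotId();         // e.g. for time travel or scan planning
}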
Use of org.apache.iceberg.TableOperations in project hive by apache.
The class TestInputFormatReaderDeletes, method createTable:
@Override
protected Table createTable(String name, Schema schema, PartitionSpec spec) throws IOException {
    // Create a unique folder, then delete it so the table can create it cleanly.
    File location = temp.newFolder(inputFormat, fileFormat.name());
    Assert.assertTrue(location.delete());
    helper = new TestHelper(conf, tables, location.toString(), schema, spec, fileFormat, temp);
    Table table = helper.createTable();
    // Row-level deletes require format v2, so upgrade the freshly created table.
    TableOperations ops = ((BaseTable) table).operations();
    TableMetadata meta = ops.current();
    ops.commit(meta, meta.upgradeToFormatVersion(2));
    return table;
}
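The last three lines are the interesting part: reading delete files requires Iceberg format v2, so the test reaches down to TableOperations and swaps in upgraded metadata. A minimal sketch of the same upgrade in isolation, assuming table is an existing BaseTable:

TableOperations ops = ((BaseTable) table).operations();
TableMetadata base = ops.current();
if (base.formatVersion() < 2) {
    // commit(base, updated) is an optimistic atomic swap; it fails with
    // CommitFailedException if another writer committed after `base` was read.
    ops.commit(base, base.upgradeToFormatVersion(2));
}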
Use of org.apache.iceberg.TableOperations in project hive by apache.
The class HiveCatalog, method dropTable:
@Override
public boolean dropTable(TableIdentifier identifier, boolean purge) {
    if (!isValidIdentifier(identifier)) {
        return false;
    }

    String database = identifier.namespace().level(0);
    TableOperations ops = newTableOps(identifier);
    // Capture the current metadata before the metastore entry disappears,
    // so a purge can still locate the files to delete.
    TableMetadata lastMetadata;
    if (purge && ops.current() != null) {
        lastMetadata = ops.current();
    } else {
        lastMetadata = null;
    }

    try {
        clients.run(client -> {
            client.dropTable(database, identifier.name(),
                false /* do not delete data */,
                false /* do not ignore a missing table */);
            return null;
        });

        if (purge && lastMetadata != null) {
            CatalogUtil.dropTableData(ops.io(), lastMetadata);
        }

        LOG.info("Dropped table: {}", identifier);
        return true;
    } catch (NoSuchTableException | NoSuchObjectException e) {
        LOG.info("Skipping drop, table does not exist: {}", identifier, e);
        return false;
    } catch (TException e) {
        throw new RuntimeException("Failed to drop " + identifier, e);
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        throw new RuntimeException("Interrupted in call to dropTable", e);
    }
}
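Note the ordering: the current metadata is captured before the metastore entry is dropped, and the metastore-side deleteData flag is false, so Iceberg itself decides through CatalogUtil.dropTableData whether files are removed. A minimal caller-side sketch, assuming catalog is a configured org.apache.iceberg.hive.HiveCatalog and the identifier is made up:

TableIdentifier id = TableIdentifier.of("db", "events");

// purge = true: removes the metastore entry, then deletes every data file,
// manifest, and metadata file referenced by the table's metadata.
boolean dropped = catalog.dropTable(id, true);

// purge = false: removes only the metastore entry; files stay on storage.
catalog.dropTable(id, false);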
Use of org.apache.iceberg.TableOperations in project presto by prestodb.
The class IcebergHiveMetadata, method beginCreateTable:
@Override
public ConnectorOutputTableHandle beginCreateTable(ConnectorSession session, ConnectorTableMetadata tableMetadata, Optional<ConnectorNewTableLayout> layout) {
    SchemaTableName schemaTableName = tableMetadata.getTable();
    String schemaName = schemaTableName.getSchemaName();
    String tableName = schemaTableName.getTableName();

    Schema schema = toIcebergSchema(tableMetadata.getColumns());
    PartitionSpec partitionSpec = parsePartitionFields(schema, getPartitioning(tableMetadata.getProperties()));

    MetastoreContext metastoreContext = new MetastoreContext(session.getIdentity(), session.getQueryId(), session.getClientInfo(), session.getSource(), Optional.empty(), false, HiveColumnConverterProvider.DEFAULT_COLUMN_CONVERTER_PROVIDER);
    Database database = metastore.getDatabase(metastoreContext, schemaName).orElseThrow(() -> new SchemaNotFoundException(schemaName));

    // Fall back to <database location>/<table name> when no explicit location is set.
    HdfsContext hdfsContext = new HdfsContext(session, schemaName, tableName);
    String targetPath = getTableLocation(tableMetadata.getProperties());
    if (targetPath == null) {
        Optional<String> location = database.getLocation();
        if (!location.isPresent() || location.get().isEmpty()) {
            throw new PrestoException(NOT_SUPPORTED, "Database " + schemaName + " location is not set");
        }
        targetPath = new Path(new Path(location.get()), tableName).toString();
    }

    TableOperations operations = new HiveTableOperations(metastore, metastoreContext, hdfsEnvironment, hdfsContext, schemaName, tableName, session.getUser(), targetPath);
    // A non-null current() means metadata already exists under this name.
    if (operations.current() != null) {
        throw new TableAlreadyExistsException(schemaTableName);
    }

    ImmutableMap.Builder<String, String> propertiesBuilder = ImmutableMap.builderWithExpectedSize(2);
    FileFormat fileFormat = getFileFormat(tableMetadata.getProperties());
    propertiesBuilder.put(DEFAULT_FILE_FORMAT, fileFormat.toString());
    if (tableMetadata.getComment().isPresent()) {
        propertiesBuilder.put(TABLE_COMMENT, tableMetadata.getComment().get());
    }

    TableMetadata metadata = newTableMetadata(schema, partitionSpec, targetPath, propertiesBuilder.build());
    transaction = createTableTransaction(tableName, operations, metadata);

    return new IcebergWritableTableHandle(schemaName, tableName, SchemaParser.toJson(metadata.schema()), PartitionSpecParser.toJson(metadata.spec()), getColumns(metadata.schema(), typeManager), targetPath, fileFormat, metadata.properties());
}
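beginCreateTable only stages the table: it resolves a target path, checks via operations.current() that no metadata exists yet, builds the initial TableMetadata, and opens an Iceberg transaction; nothing is visible in the metastore until that transaction commits. A sketch of the commit side, which in Presto happens later in finishCreateTable; the append calls below are illustrative, not the exact Presto code:

AppendFiles append = transaction.newAppend();  // stage the data files written for the table
// append.appendFile(dataFile);                // one call per committed DataFile
append.commit();                               // fold the append into the transaction

transaction.commitTransaction();               // single atomic commit through HiveTableOperations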