Use of io.prestosql.plugin.hive.metastore.Database in the boostkit-bigdata project by kunpengcompute.
From class HiveMetadata, method createSchema:
@Override
public void createSchema(ConnectorSession session, String schemaName, Map<String, Object> properties)
{
    Optional<String> location = HiveSchemaProperties.getLocation(properties).map(locationUri -> {
        try {
            hdfsEnvironment.getFileSystem(new HdfsContext(session, schemaName), new Path(locationUri));
        }
        catch (IOException e) {
            throw new PrestoException(INVALID_SCHEMA_PROPERTY, "Invalid location URI: " + locationUri, e);
        }
        return locationUri;
    });
    Database database = Database.builder()
            .setDatabaseName(schemaName)
            .setLocation(location)
            .setOwnerType(USER)
            .setOwnerName(session.getUser())
            .build();
    metastore.createDatabase(new HiveIdentity(session), database);
}
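A minimal caller-side sketch of how this method might be invoked. It assumes the schema location is exposed under a "location" property key (per HiveSchemaProperties.getLocation), and the metadata and session variables are illustrative, not taken from the projects above:

// Hypothetical caller: create a schema whose warehouse directory is set explicitly.
Map<String, Object> properties = ImmutableMap.of("location", "hdfs://namenode:8020/warehouse/sales.db");
metadata.createSchema(session, "sales", properties);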
Use of io.prestosql.plugin.hive.metastore.Database in the hetu-core project by openlookeng.
From class CarbondataMetadata, method beginCreateTable:
@Override
public CarbondataOutputTableHandle beginCreateTable(ConnectorSession session, ConnectorTableMetadata tableMetadata, Optional<ConnectorNewTableLayout> layout)
{
    // get the root directory for the database
    SchemaTableName finalSchemaTableName = tableMetadata.getTable();
    String finalSchemaName = finalSchemaTableName.getSchemaName();
    String finalTableName = finalSchemaTableName.getTableName();
    this.user = session.getUser();
    this.schemaName = finalSchemaName;
    currentState = State.CREATE_TABLE_AS;
    List<String> partitionedBy = new ArrayList<>();
    List<SortingColumn> sortBy = new ArrayList<>();
    List<HiveColumnHandle> columnHandles = new ArrayList<>();
    Map<String, String> tableProperties = new HashMap<>();
    getParametersForCreateTable(session, tableMetadata, partitionedBy, sortBy, columnHandles, tableProperties);
    metastore.getDatabase(finalSchemaName).orElseThrow(() -> new SchemaNotFoundException(finalSchemaName));
    // Workaround: there is no shared constant, so look the format up by name to avoid a
    // type mismatch between HiveStorageFormat and the Carbondata storage format.
    HiveStorageFormat tableStorageFormat = HiveStorageFormat.valueOf("CARBON");
    HiveStorageFormat partitionStorageFormat = tableStorageFormat;
    Map<String, HiveColumnHandle> columnHandlesByName = Maps.uniqueIndex(columnHandles, HiveColumnHandle::getName);
    List<Column> partitionColumns = partitionedBy.stream()
            .map(columnHandlesByName::get)
            .map(column -> new Column(column.getName(), column.getHiveType(), column.getComment()))
            .collect(toList());
    checkPartitionTypesSupported(partitionColumns);
    // resolve the final path under which the carbon table will be created
    LocationHandle locationHandle = getCarbonDataTableCreationPath(session, tableMetadata, HiveWriteUtils.OpertionType.CREATE_TABLE_AS);
    Path targetPath = locationService.getTableWriteInfo(locationHandle, false).getTargetPath();
    AbsoluteTableIdentifier finalAbsoluteTableIdentifier = AbsoluteTableIdentifier.from(targetPath.toString(),
            new CarbonTableIdentifier(finalSchemaName, finalTableName, UUID.randomUUID().toString()));
    hdfsEnvironment.doAs(session.getUser(), () -> {
        initialConfiguration = ConfigurationUtils.toJobConf(this.hdfsEnvironment.getConfiguration(
                new HdfsEnvironment.HdfsContext(session, finalSchemaName, finalTableName),
                new Path(locationHandle.getJsonSerializableTargetPath())));
        // Create the Carbondata metadata folder and schema file
        CarbondataMetadataUtils.createMetaDataFolderSchemaFile(hdfsEnvironment, session, columnHandles, finalAbsoluteTableIdentifier, partitionedBy,
                sortBy.stream().map(s -> s.getColumnName().toLowerCase(Locale.ENGLISH)).collect(toList()), targetPath.toString(), initialConfiguration);
        this.tableStorageLocation = Optional.of(targetPath.toString());
        Path outputPath = new Path(locationHandle.getJsonSerializableTargetPath());
        Properties schema = readSchemaForCarbon(finalSchemaName, finalTableName, targetPath, columnHandles, partitionColumns);
        // Create the committer object
        setupCommitWriter(schema, outputPath, initialConfiguration, false);
    });
    try {
        CarbondataOutputTableHandle result = new CarbondataOutputTableHandle(finalSchemaName, finalTableName, columnHandles,
                metastore.generatePageSinkMetadata(new HiveIdentity(session), finalSchemaTableName), locationHandle,
                tableStorageFormat, partitionStorageFormat, partitionedBy, Optional.empty(), session.getUser(), tableProperties,
                ImmutableMap.<String, String>of(EncodedLoadModel, jobContext.getConfiguration().get(LOAD_MODEL)));
        LocationService.WriteInfo writeInfo = locationService.getQueryWriteInfo(locationHandle);
        metastore.declareIntentionToWrite(session, writeInfo.getWriteMode(), writeInfo.getWritePath(), finalSchemaTableName);
        return result;
    }
    catch (RuntimeException ex) {
        throw new PrestoException(GENERIC_INTERNAL_ERROR, format("Error creating table: %s", ex.getMessage()), ex);
    }
}
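For context, a simplified sketch of how the engine might drive this handle through the standard ConnectorMetadata create-table-as lifecycle. The writePages helper and all variable names are hypothetical stand-ins; only beginCreateTable and finishCreateTable are actual SPI methods:

// Illustrative CTAS flow (writePages stands in for the page-sink write phase).
CarbondataOutputTableHandle handle = carbondataMetadata.beginCreateTable(session, tableMetadata, Optional.empty());
Collection<Slice> fragments = writePages(handle);
carbondataMetadata.finishCreateTable(session, handle, fragments, ImmutableList.of());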
Use of io.prestosql.plugin.hive.metastore.Database in the hetu-core project by openlookeng.
From class FileHiveMetastore, method getAllTables:
@Override
public synchronized Optional<List<String>> getAllTables(String databaseName)
{
    requireNonNull(databaseName, "databaseName is null");
    Optional<Database> database = getDatabase(databaseName);
    if (!database.isPresent()) {
        return Optional.empty();
    }
    Path databaseMetadataDirectory = getDatabaseMetadataDirectory(databaseName);
    List<String> tables = getChildSchemaDirectories(databaseMetadataDirectory).stream()
            .map(Path::getName)
            .collect(toList());
    return Optional.of(ImmutableList.copyOf(tables));
}
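A caller-side sketch of the Optional contract above: an empty Optional means the database itself does not exist, while a present but empty list means the database exists and simply contains no tables. The metastore variable and schema name are illustrative:

// Distinguish "database missing" from "database has no tables" (illustrative only).
Optional<List<String>> tables = metastore.getAllTables("sales");
if (!tables.isPresent()) {
    throw new SchemaNotFoundException("sales");
}
tables.get().forEach(System.out::println);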
Use of io.prestosql.plugin.hive.metastore.Database in the hetu-core project by openlookeng.
From class HiveMetadata, method beginCreateTable:
@Override
public HiveOutputTableHandle beginCreateTable(ConnectorSession session, ConnectorTableMetadata tableMetadata, Optional<ConnectorNewTableLayout> layout)
{
    if (getExternalLocation(tableMetadata.getProperties()) != null || isExternalTable(tableMetadata.getProperties())) {
        throw new PrestoException(NOT_SUPPORTED, "External tables cannot be created using CREATE TABLE AS");
    }
    if (HiveTableProperties.getAvroSchemaUrl(tableMetadata.getProperties()) != null) {
        throw new PrestoException(NOT_SUPPORTED, "CREATE TABLE AS not supported when Avro schema url is set");
    }
    HiveStorageFormat tableStorageFormat = HiveTableProperties.getHiveStorageFormat(tableMetadata.getProperties());
    List<String> partitionedBy = getPartitionedBy(tableMetadata.getProperties());
    Optional<HiveBucketProperty> bucketProperty = HiveTableProperties.getBucketProperty(tableMetadata.getProperties());
    // get the root directory for the database
    SchemaTableName schemaTableName = tableMetadata.getTable();
    String schemaName = schemaTableName.getSchemaName();
    String tableName = schemaTableName.getTableName();
    Map<String, String> tableProperties = getEmptyTableProperties(tableMetadata, bucketProperty, new HdfsContext(session, schemaName, tableName));
    List<HiveColumnHandle> columnHandles = getColumnHandles(tableMetadata, ImmutableSet.copyOf(partitionedBy), typeTranslator);
    HiveStorageFormat partitionStorageFormat = HiveSessionProperties.isRespectTableFormat(session) ? tableStorageFormat : HiveSessionProperties.getHiveStorageFormat(session);
    // unpartitioned tables ignore the partition storage format
    HiveStorageFormat actualStorageFormat = partitionedBy.isEmpty() ? tableStorageFormat : partitionStorageFormat;
    actualStorageFormat.validateColumns(columnHandles);
    Map<String, HiveColumnHandle> columnHandlesByName = Maps.uniqueIndex(columnHandles, HiveColumnHandle::getName);
    List<Column> partitionColumns = partitionedBy.stream()
            .map(columnHandlesByName::get)
            .map(column -> new Column(column.getName(), column.getHiveType(), column.getComment()))
            .collect(toList());
    checkPartitionTypesSupported(partitionColumns);
    Optional<String> location = getLocation(tableMetadata.getProperties());
    if (location.isPresent() && !tableCreatesWithLocationAllowed) {
        throw new PrestoException(NOT_SUPPORTED, format("Setting %s property is not allowed", LOCATION_PROPERTY));
    }
    Optional<WriteIdInfo> writeIdInfo = Optional.empty();
    if (AcidUtils.isTransactionalTable(tableProperties)) {
        // Create a HiveTableHandle just to obtain the write IDs.
        List<HiveColumnHandle> partitionColumnHandles = partitionedBy.stream().map(columnHandlesByName::get).collect(toList());
        HiveTableHandle tableHandle = new HiveTableHandle(schemaName, tableName, tableProperties, partitionColumnHandles, Optional.empty());
        Optional<Long> writeId = metastore.getTableWriteId(session, tableHandle, HiveACIDWriteType.INSERT);
        if (!writeId.isPresent()) {
            throw new IllegalStateException("No validWriteIds present");
        }
        writeIdInfo = Optional.of(new WriteIdInfo(writeId.get(), writeId.get(), 0));
    }
    LocationHandle locationHandle;
    if (location.isPresent()) {
        Path path = getPath(new HdfsContext(session, schemaName, tableName), location.get(), false);
        locationHandle = locationService.forNewTable(metastore, session, schemaName, tableName, writeIdInfo, Optional.of(path), HiveWriteUtils.OpertionType.CREATE_TABLE_AS);
    }
    else {
        locationHandle = locationService.forNewTable(metastore, session, schemaName, tableName, writeIdInfo, Optional.empty(), HiveWriteUtils.OpertionType.CREATE_TABLE_AS);
    }
    HiveOutputTableHandle result = new HiveOutputTableHandle(schemaName, tableName, columnHandles,
            metastore.generatePageSinkMetadata(new HiveIdentity(session), schemaTableName), locationHandle,
            tableStorageFormat, partitionStorageFormat, partitionedBy, bucketProperty, session.getUser(), tableProperties);
    LocationService.WriteInfo writeInfo = locationService.getQueryWriteInfo(locationHandle);
    metastore.declareIntentionToWrite(session, writeInfo.getWriteMode(), writeInfo.getWritePath(), schemaTableName);
    return result;
}
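A hedged sketch of how the tableMetadata argument above might be assembled by a caller. The decoded property values (a HiveStorageFormat constant under "format", a list of column names under "partitioned_by") follow HiveTableProperties but are assumptions here, as are the hiveMetadata and session variables:

// Illustrative input for beginCreateTable: an ORC table partitioned by "ds".
ConnectorTableMetadata ctasMetadata = new ConnectorTableMetadata(
        new SchemaTableName("sales", "orders"),
        ImmutableList.of(
                new ColumnMetadata("order_id", BigintType.BIGINT),
                new ColumnMetadata("ds", VarcharType.VARCHAR)),
        ImmutableMap.of("format", HiveStorageFormat.ORC, "partitioned_by", ImmutableList.of("ds")));
HiveOutputTableHandle handle = hiveMetadata.beginCreateTable(session, ctasMetadata, Optional.empty());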
Use of io.prestosql.plugin.hive.metastore.Database in the hetu-core project by openlookeng.
From class HiveMetadata, method createSchema:
@Override
public void createSchema(ConnectorSession session, String schemaName, Map<String, Object> properties)
{
    Optional<String> location = HiveSchemaProperties.getLocation(properties).map(locationUri -> {
        try {
            hdfsEnvironment.getFileSystem(new HdfsContext(session, schemaName), new Path(locationUri));
        }
        catch (IOException e) {
            throw new PrestoException(INVALID_SCHEMA_PROPERTY, "Invalid location URI: " + locationUri, e);
        }
        return locationUri;
    });
    Database database = Database.builder()
            .setDatabaseName(schemaName)
            .setLocation(location)
            .setOwnerType(USER)
            .setOwnerName(session.getUser())
            .build();
    metastore.createDatabase(new HiveIdentity(session), database);
}