Use of io.prestosql.plugin.hive.LocationHandle in project hetu-core by openlookeng.
In class CarbondataMetadata, method getCarbonDataTableCreationPath:
private LocationHandle getCarbonDataTableCreationPath(ConnectorSession session, ConnectorTableMetadata tableMetadata, HiveWriteUtils.OpertionType opertionType) throws PrestoException {
    Path targetPath = null;
    SchemaTableName finalSchemaTableName = tableMetadata.getTable();
    String finalSchemaName = finalSchemaTableName.getSchemaName();
    String tableName = finalSchemaTableName.getTableName();
    Optional<String> location = getCarbondataLocation(tableMetadata.getProperties());
    LocationHandle locationHandle;
    FileSystem fileSystem;
    String targetLocation = null;
    try {
        // The user specified the location property explicitly
        if (location.isPresent()) {
            if (!tableCreatesWithLocationAllowed) {
                throw new PrestoException(NOT_SUPPORTED, format("Setting %s property is not allowed", LOCATION_PROPERTY));
            }
            /* If the path has no filesystem-scheme prefix, the filesystem type is resolved from core-site.xml by the calls below */
            fileSystem = hdfsEnvironment.getFileSystem(new HdfsEnvironment.HdfsContext(session, finalSchemaName), new Path(location.get()));
            targetLocation = fileSystem.getFileStatus(new Path(location.get())).getPath().toString();
            targetPath = getPath(new HdfsEnvironment.HdfsContext(session, finalSchemaName, tableName), targetLocation, false);
        }
        else {
            updateEmptyCarbondataTableStorePath(session, finalSchemaName);
            targetLocation = carbondataTableStore;
            targetLocation = targetLocation + File.separator + finalSchemaName + File.separator + tableName;
            targetPath = new Path(targetLocation);
        }
    }
    catch (IllegalArgumentException | IOException e) {
        throw new PrestoException(NOT_SUPPORTED, format("Error %s store path %s ", e.getMessage(), targetLocation));
    }
    locationHandle = locationService.forNewTable(metastore, session, finalSchemaName, tableName, Optional.empty(), Optional.of(targetPath), opertionType);
    return locationHandle;
}
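The fallback order above (an explicit location property if allowed, otherwise <table-store>/<schema>/<table>) can be illustrated with a self-contained sketch. TableStorePathResolver and its fields are hypothetical stand-ins for illustration, not part of hetu-core:

import java.io.File;
import java.util.Optional;

// Hypothetical stand-in mirroring the path-resolution order used above.
final class TableStorePathResolver {
    private final String tableStore;               // analogous to carbondataTableStore
    private final boolean locationPropertyAllowed; // analogous to tableCreatesWithLocationAllowed

    TableStorePathResolver(String tableStore, boolean locationPropertyAllowed) {
        this.tableStore = tableStore;
        this.locationPropertyAllowed = locationPropertyAllowed;
    }

    String resolve(Optional<String> location, String schemaName, String tableName) {
        if (location.isPresent()) {
            if (!locationPropertyAllowed) {
                throw new IllegalArgumentException("Setting the location property is not allowed");
            }
            return location.get();
        }
        // No explicit location: fall back to <table-store>/<schema>/<table>
        return tableStore + File.separator + schemaName + File.separator + tableName;
    }

    public static void main(String[] args) {
        TableStorePathResolver resolver = new TableStorePathResolver("/user/hive/carbon.store", false);
        System.out.println(resolver.resolve(Optional.empty(), "sales", "orders"));
        // prints /user/hive/carbon.store/sales/orders
    }
}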
Use of io.prestosql.plugin.hive.LocationHandle in project hetu-core by openlookeng.
In class CarbondataMetadata, method createTable:
@Override
public void createTable(ConnectorSession session, ConnectorTableMetadata tableMetadata, boolean ignoreExisting) {
    SchemaTableName localSchemaTableName = tableMetadata.getTable();
    String localSchemaName = localSchemaTableName.getSchemaName();
    String tableName = localSchemaTableName.getTableName();
    this.user = session.getUser();
    this.schemaName = localSchemaName;
    currentState = State.CREATE_TABLE;
    List<String> partitionedBy = new ArrayList<String>();
    List<SortingColumn> sortBy = new ArrayList<SortingColumn>();
    List<HiveColumnHandle> columnHandles = new ArrayList<HiveColumnHandle>();
    Map<String, String> tableProperties = new HashMap<String, String>();
    getParametersForCreateTable(session, tableMetadata, partitionedBy, sortBy, columnHandles, tableProperties);
    metastore.getDatabase(localSchemaName).orElseThrow(() -> new SchemaNotFoundException(localSchemaName));
    BaseStorageFormat hiveStorageFormat = CarbondataTableProperties.getCarbondataStorageFormat(tableMetadata.getProperties());
    // Resolve the final path at which the carbon table will be created
    LocationHandle locationHandle = getCarbonDataTableCreationPath(session, tableMetadata, HiveWriteUtils.OpertionType.CREATE_TABLE);
    Path targetPath = locationService.getQueryWriteInfo(locationHandle).getTargetPath();
    AbsoluteTableIdentifier finalAbsoluteTableIdentifier = AbsoluteTableIdentifier.from(targetPath.toString(), new CarbonTableIdentifier(localSchemaName, tableName, UUID.randomUUID().toString()));
    hdfsEnvironment.doAs(session.getUser(), () -> {
        initialConfiguration = ConfigurationUtils.toJobConf(this.hdfsEnvironment.getConfiguration(new HdfsEnvironment.HdfsContext(session, localSchemaName, tableName), new Path(locationHandle.getJsonSerializableTargetPath())));
        CarbondataMetadataUtils.createMetaDataFolderSchemaFile(hdfsEnvironment, session, columnHandles, finalAbsoluteTableIdentifier, partitionedBy, sortBy.stream().map(s -> s.getColumnName().toLowerCase(Locale.ENGLISH)).collect(toList()), targetPath.toString(), initialConfiguration);
        this.tableStorageLocation = Optional.of(targetPath.toString());
        try {
            Map<String, String> serdeParameters = initSerDeProperties(tableName);
            Table localTable = buildTableObject(session.getQueryId(), localSchemaName, tableName, session.getUser(), columnHandles, hiveStorageFormat, partitionedBy, Optional.empty(), tableProperties, targetPath,
                    // the carbon table is registered as an external table
                    true, prestoVersion, serdeParameters);
            PrincipalPrivileges principalPrivileges = MetastoreUtil.buildInitialPrivilegeSet(localTable.getOwner());
            HiveBasicStatistics basicStatistics = localTable.getPartitionColumns().isEmpty() ? HiveBasicStatistics.createZeroStatistics() : HiveBasicStatistics.createEmptyStatistics();
            metastore.createTable(session, localTable, principalPrivileges, Optional.empty(), ignoreExisting, new PartitionStatistics(basicStatistics, ImmutableMap.of()));
        }
        catch (RuntimeException ex) {
            throw new PrestoException(GENERIC_INTERNAL_ERROR, format("Error: creating table: %s ", ex.getMessage()), ex);
        }
    });
}
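The hdfsEnvironment.doAs(...) wrapper runs the schema-file creation and metastore registration under the session user's identity. A minimal sketch of that pattern using Hadoop's UserGroupInformation (assuming hadoop-common on the classpath; the action body is illustrative, not hetu-core's actual work):

import java.security.PrivilegedAction;
import org.apache.hadoop.security.UserGroupInformation;

final class DoAsExample {
    // Run an action as the given user, analogous to hdfsEnvironment.doAs above.
    static <T> T runAs(String user, PrivilegedAction<T> action) {
        UserGroupInformation ugi = UserGroupInformation.createRemoteUser(user);
        return ugi.doAs(action);
    }

    public static void main(String[] args) {
        String result = runAs("alice", () -> "schema file written as session user");
        System.out.println(result);
    }
}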
Use of io.prestosql.plugin.hive.LocationHandle in project carbondata by apache.
In class CarbonDataLocationService, method forExistingTable:
@Override
public LocationHandle forExistingTable(SemiTransactionalHiveMetastore metastore, ConnectorSession session, Table table) {
    // TODO: test in cloud scenarios on S3/OBS and make this compatible with them
    super.forExistingTable(metastore, session, table);
    Path targetPath = new Path(table.getStorage().getLocation());
    return new LocationHandle(targetPath, targetPath, true, LocationHandle.WriteMode.DIRECT_TO_TARGET_EXISTING_DIRECTORY);
}
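The handle returned here writes directly into the table's existing directory: target path and write path are the same, and the write mode skips any staging directory. A self-contained sketch of that shape (SimpleLocationHandle is an illustrative stand-in, not the Presto class, and it omits the boolean existing-directory flag the real constructor takes):

import java.nio.file.Path;
import java.nio.file.Paths;

// Illustrative stand-in for io.prestosql.plugin.hive.LocationHandle.
final class SimpleLocationHandle {
    enum WriteMode { DIRECT_TO_TARGET_EXISTING_DIRECTORY, DIRECT_TO_TARGET_NEW_DIRECTORY, STAGE_AND_MOVE_TO_TARGET_DIRECTORY }

    final Path targetPath; // final table location
    final Path writePath;  // where writers produce files; same as target in the direct modes
    final WriteMode writeMode;

    SimpleLocationHandle(Path targetPath, Path writePath, WriteMode writeMode) {
        this.targetPath = targetPath;
        this.writePath = writePath;
        this.writeMode = writeMode;
    }

    public static void main(String[] args) {
        Path existing = Paths.get("/user/hive/warehouse/sales/orders");
        // Carbondata writes straight into the existing table directory, with no staging step.
        SimpleLocationHandle handle = new SimpleLocationHandle(existing, existing,
                WriteMode.DIRECT_TO_TARGET_EXISTING_DIRECTORY);
        System.out.println(handle.writeMode + " -> " + handle.writePath);
    }
}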
Use of io.prestosql.plugin.hive.LocationHandle in project carbondata by apache.
In class CarbonDataLocationService, method forNewTable:
@Override
public LocationHandle forNewTable(SemiTransactionalHiveMetastore metastore, ConnectorSession session, String schemaName, String tableName) {
    // TODO: test in cloud scenarios on S3/OBS and make this compatible with them
    super.forNewTable(metastore, session, schemaName, tableName);
    HdfsEnvironment.HdfsContext context = new HdfsEnvironment.HdfsContext(session, schemaName, tableName);
    Path targetPath = HiveWriteUtils.getTableDefaultLocation(context, metastore, this.hdfsEnvironment, schemaName, tableName);
    return new LocationHandle(targetPath, targetPath, false, LocationHandle.WriteMode.DIRECT_TO_TARGET_NEW_DIRECTORY);
}
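For a new table with no explicit location, HiveWriteUtils.getTableDefaultLocation derives the path from the database location plus the table name. A minimal sketch of that <database-location>/<table-name> convention (defaultTableLocation is a hypothetical helper, not the Hive utility itself):

import java.net.URI;

final class DefaultLocation {
    // Hypothetical helper mirroring the <database-location>/<table-name> convention.
    static URI defaultTableLocation(URI databaseLocation, String tableName) {
        String base = databaseLocation.toString();
        if (!base.endsWith("/")) {
            base = base + "/";
        }
        return URI.create(base + tableName);
    }

    public static void main(String[] args) {
        URI db = URI.create("hdfs://nameservice/user/hive/warehouse/sales.db");
        System.out.println(defaultTableLocation(db, "orders"));
        // hdfs://nameservice/user/hive/warehouse/sales.db/orders
    }
}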
Use of io.prestosql.plugin.hive.LocationHandle in project hetu-core by openlookeng.
In class CarbondataMetadata, method beginCreateTable:
@Override
public CarbondataOutputTableHandle beginCreateTable(ConnectorSession session, ConnectorTableMetadata tableMetadata, Optional<ConnectorNewTableLayout> layout) {
    // get the root directory for the database
    SchemaTableName finalSchemaTableName = tableMetadata.getTable();
    String finalSchemaName = finalSchemaTableName.getSchemaName();
    String finalTableName = finalSchemaTableName.getTableName();
    this.user = session.getUser();
    this.schemaName = finalSchemaName;
    currentState = State.CREATE_TABLE_AS;
    List<String> partitionedBy = new ArrayList<String>();
    List<SortingColumn> sortBy = new ArrayList<SortingColumn>();
    List<HiveColumnHandle> columnHandles = new ArrayList<HiveColumnHandle>();
    Map<String, String> tableProperties = new HashMap<String, String>();
    getParametersForCreateTable(session, tableMetadata, partitionedBy, sortBy, columnHandles, tableProperties);
    metastore.getDatabase(finalSchemaName).orElseThrow(() -> new SchemaNotFoundException(finalSchemaName));
    // Hack: there is no clean option to avoid the type mismatch between HiveStorageFormat and the Carbondata storage format
    HiveStorageFormat tableStorageFormat = HiveStorageFormat.valueOf("CARBON");
    HiveStorageFormat partitionStorageFormat = tableStorageFormat;
    Map<String, HiveColumnHandle> columnHandlesByName = Maps.uniqueIndex(columnHandles, HiveColumnHandle::getName);
    List<Column> partitionColumns = partitionedBy.stream().map(columnHandlesByName::get).map(column -> new Column(column.getName(), column.getHiveType(), column.getComment())).collect(toList());
    checkPartitionTypesSupported(partitionColumns);
    // Resolve the final path at which the carbon table will be created
    LocationHandle locationHandle = getCarbonDataTableCreationPath(session, tableMetadata, HiveWriteUtils.OpertionType.CREATE_TABLE_AS);
    Path targetPath = locationService.getTableWriteInfo(locationHandle, false).getTargetPath();
    AbsoluteTableIdentifier finalAbsoluteTableIdentifier = AbsoluteTableIdentifier.from(targetPath.toString(), new CarbonTableIdentifier(finalSchemaName, finalTableName, UUID.randomUUID().toString()));
    hdfsEnvironment.doAs(session.getUser(), () -> {
        initialConfiguration = ConfigurationUtils.toJobConf(this.hdfsEnvironment.getConfiguration(new HdfsEnvironment.HdfsContext(session, finalSchemaName, finalTableName), new Path(locationHandle.getJsonSerializableTargetPath())));
        // Create the Carbondata metadata folder and schema file
        CarbondataMetadataUtils.createMetaDataFolderSchemaFile(hdfsEnvironment, session, columnHandles, finalAbsoluteTableIdentifier, partitionedBy, sortBy.stream().map(s -> s.getColumnName().toLowerCase(Locale.ENGLISH)).collect(toList()), targetPath.toString(), initialConfiguration);
        this.tableStorageLocation = Optional.of(targetPath.toString());
        Path outputPath = new Path(locationHandle.getJsonSerializableTargetPath());
        Properties schema = readSchemaForCarbon(finalSchemaName, finalTableName, targetPath, columnHandles, partitionColumns);
        // Create the committer object
        setupCommitWriter(schema, outputPath, initialConfiguration, false);
    });
    try {
        CarbondataOutputTableHandle result = new CarbondataOutputTableHandle(finalSchemaName, finalTableName, columnHandles, metastore.generatePageSinkMetadata(new HiveIdentity(session), finalSchemaTableName), locationHandle, tableStorageFormat, partitionStorageFormat, partitionedBy, Optional.empty(), session.getUser(), tableProperties, ImmutableMap.<String, String>of(EncodedLoadModel, jobContext.getConfiguration().get(LOAD_MODEL)));
        LocationService.WriteInfo writeInfo = locationService.getQueryWriteInfo(locationHandle);
        metastore.declareIntentionToWrite(session, writeInfo.getWriteMode(), writeInfo.getWritePath(), finalSchemaTableName);
        return result;
    }
    catch (RuntimeException ex) {
        throw new PrestoException(GENERIC_INTERNAL_ERROR, format("Error: creating table: %s ", ex.getMessage()), ex);
    }
}
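beginCreateTable is the first half of the engine's two-phase CREATE TABLE AS protocol: the connector returns an output handle and declares the intended write, data is then written by page sinks, and the engine later commits via a finish call with the collected fragments. A hedged outline of that sequence with simplified stand-in interfaces (only the begin/finish pairing mirrors the SPI; everything else here is illustrative):

import java.util.List;

// Simplified two-phase CTAS flow, mirroring the protocol around the method above.
interface OutputHandle { }

interface Metadata {
    OutputHandle beginCreateTable(String schema, String table);          // declare the write, set up writers
    void finishCreateTable(OutputHandle handle, List<String> fragments); // commit metadata once data is written
}

final class CtasFlow {
    static void run(Metadata metadata) {
        OutputHandle handle = metadata.beginCreateTable("sales", "orders_copy");
        List<String> fragments = List.of("part-0", "part-1"); // stand-ins for writer commit fragments
        metadata.finishCreateTable(handle, fragments);
    }

    public static void main(String[] args) {
        run(new Metadata() {
            public OutputHandle beginCreateTable(String schema, String table) {
                System.out.println("begin CTAS for " + schema + "." + table);
                return new OutputHandle() { };
            }

            public void finishCreateTable(OutputHandle handle, List<String> fragments) {
                System.out.println("commit with " + fragments.size() + " fragments");
            }
        });
    }
}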