Use of io.prestosql.plugin.hive.PartitionStatistics in project hetu-core by openlookeng.
Example from class CarbondataMetadata, method createTable.
// Creates a Carbondata table: validates that the target schema exists, resolves the
// table's storage location, writes the carbon metadata folder/schema file on HDFS,
// and finally registers the table (as an external table) in the Hive metastore.
// All HDFS- and metastore-facing work runs as the session user via hdfsEnvironment.doAs.
@Override
public void createTable(ConnectorSession session, ConnectorTableMetadata tableMetadata, boolean ignoreExisting) {
SchemaTableName localSchemaTableName = tableMetadata.getTable();
String localSchemaName = localSchemaTableName.getSchemaName();
String tableName = localSchemaTableName.getTableName();
// Remember the requesting user/schema and mark the transaction state so later
// commit/rollback handling knows a CREATE_TABLE is in flight.
this.user = session.getUser();
this.schemaName = localSchemaName;
currentState = State.CREATE_TABLE;
// Output parameters populated from the table metadata/properties below.
List<String> partitionedBy = new ArrayList<String>();
List<SortingColumn> sortBy = new ArrayList<SortingColumn>();
List<HiveColumnHandle> columnHandles = new ArrayList<HiveColumnHandle>();
Map<String, String> tableProperties = new HashMap<String, String>();
getParametersForCreateTable(session, tableMetadata, partitionedBy, sortBy, columnHandles, tableProperties);
// Fail fast if the schema does not exist in the metastore.
metastore.getDatabase(localSchemaName).orElseThrow(() -> new SchemaNotFoundException(localSchemaName));
BaseStorageFormat hiveStorageFormat = CarbondataTableProperties.getCarbondataStorageFormat(tableMetadata.getProperties());
// it will get final path to create carbon table
LocationHandle locationHandle = getCarbonDataTableCreationPath(session, tableMetadata, HiveWriteUtils.OpertionType.CREATE_TABLE);
Path targetPath = locationService.getQueryWriteInfo(locationHandle).getTargetPath();
// Identifier for the carbon table at its final location; the random UUID is the table id.
AbsoluteTableIdentifier finalAbsoluteTableIdentifier = AbsoluteTableIdentifier.from(targetPath.toString(), new CarbonTableIdentifier(localSchemaName, tableName, UUID.randomUUID().toString()));
hdfsEnvironment.doAs(session.getUser(), () -> {
initialConfiguration = ConfigurationUtils.toJobConf(this.hdfsEnvironment.getConfiguration(new HdfsEnvironment.HdfsContext(session, localSchemaName, tableName), new Path(locationHandle.getJsonSerializableTargetPath())));
// Write the carbon Metadata folder and schema file before registering the table;
// sort columns are lower-cased with a fixed locale for consistent schema output.
CarbondataMetadataUtils.createMetaDataFolderSchemaFile(hdfsEnvironment, session, columnHandles, finalAbsoluteTableIdentifier, partitionedBy, sortBy.stream().map(s -> s.getColumnName().toLowerCase(Locale.ENGLISH)).collect(toList()), targetPath.toString(), initialConfiguration);
this.tableStorageLocation = Optional.of(targetPath.toString());
try {
Map<String, String> serdeParameters = initSerDeProperties(tableName);
Table localTable = buildTableObject(session.getQueryId(), localSchemaName, tableName, session.getUser(), columnHandles, hiveStorageFormat, partitionedBy, Optional.empty(), tableProperties, targetPath, // carbon table is set as external table
true, prestoVersion, serdeParameters);
PrincipalPrivileges principalPrivileges = MetastoreUtil.buildInitialPrivilegeSet(localTable.getOwner());
// Unpartitioned tables start with explicit zero stats; partitioned tables start
// empty because their stats live on the partitions.
HiveBasicStatistics basicStatistics = localTable.getPartitionColumns().isEmpty() ? HiveBasicStatistics.createZeroStatistics() : HiveBasicStatistics.createEmptyStatistics();
metastore.createTable(session, localTable, principalPrivileges, Optional.empty(), ignoreExisting, new PartitionStatistics(basicStatistics, ImmutableMap.of()));
} catch (RuntimeException ex) {
throw new PrestoException(GENERIC_INTERNAL_ERROR, format("Error: creating table: %s ", ex.getMessage()), ex);
}
});
}
Use of io.prestosql.plugin.hive.PartitionStatistics in project hetu-core by openlookeng.
Example from class InMemoryThriftMetastore, method getTableStatistics.
/**
 * Returns the recorded statistics for the given table, or an empty
 * {@code PartitionStatistics} when nothing has been recorded for it.
 */
private PartitionStatistics getTableStatistics(HiveIdentity identity, String databaseName, String tableName) {
    PartitionStatistics recorded = columnStatistics.get(new SchemaTableName(databaseName, tableName));
    return (recorded != null)
            ? recorded
            : new PartitionStatistics(createEmptyStatistics(), ImmutableMap.of());
}
Use of io.prestosql.plugin.hive.PartitionStatistics in project hetu-core by openlookeng.
Example from class TestSemiTransactionalHiveMetastore, method updatePartitionsStatistics.
/**
 * Pushes canned statistics to the mock metastore: each test partition is
 * mapped to an update function that replaces its current statistics with
 * STATISTICS_1, then the batch update is issued in a single call.
 */
private void updatePartitionsStatistics() {
    List<PartitionStatistics> cannedStatistics = ImmutableList.of(STATISTICS_1, STATISTICS_1);
    Map<String, Function<PartitionStatistics, PartitionStatistics>> updatesByPartitionName = new HashMap<>();
    for (int i = 0; i < partitions.size(); i++) {
        PartitionStatistics replacement = cannedStatistics.get(i);
        updatesByPartitionName.put(partitions.get(i), ignored -> replacement);
    }
    thriftHiveMetastore.updatePartitionsStatistics(IDENTITY, MockThriftMetastoreClient.TEST_DATABASE, MockThriftMetastoreClient.TEST_TABLE_UP_NAME, updatesByPartitionName);
}
Use of io.prestosql.plugin.hive.PartitionStatistics in project hetu-core by openlookeng.
Example from class ThriftHiveMetastore, method storePartitionColumnStatistics.
/**
 * Persists the column-level statistics carried by {@code partitionWithStatistics}
 * for the named partition. A no-op when the partition has no column statistics.
 */
private void storePartitionColumnStatistics(HiveIdentity identity, String databaseName, String tableName, String partitionName, PartitionWithStatistics partitionWithStatistics) {
    PartitionStatistics partitionStatistics = partitionWithStatistics.getStatistics();
    Map<String, HiveColumnStatistics> columnLevelStatistics = partitionStatistics.getColumnStatistics();
    if (!columnLevelStatistics.isEmpty()) {
        // The metastore call needs each column's HiveType alongside its statistics.
        Map<String, HiveType> typesByColumnName = partitionWithStatistics.getPartition().getColumns().stream()
                .collect(toImmutableMap(Column::getName, Column::getType));
        setPartitionColumnStatistics(
                identity,
                databaseName,
                tableName,
                partitionName,
                typesByColumnName,
                columnLevelStatistics,
                partitionStatistics.getBasicStatistics().getRowCount());
    }
}
Use of io.prestosql.plugin.hive.PartitionStatistics in project hetu-core by openlookeng.
Example from class ThriftHiveMetastore, method getTableStatistics.
/**
 * Builds table-level statistics by combining the basic statistics stored in
 * the table's parameters with per-column statistics fetched from the metastore.
 */
@Override
public PartitionStatistics getTableStatistics(HiveIdentity identity, Table table) {
    HiveBasicStatistics basics = ThriftMetastoreUtil.getHiveBasicStatistics(table.getParameters());
    List<String> columnNames = table.getSd().getCols().stream()
            .map(FieldSchema::getName)
            .collect(toImmutableList());
    // Row count from the basic statistics guides per-column statistic retrieval.
    Map<String, HiveColumnStatistics> columnLevelStatistics =
            getTableColumnStatistics(identity, table.getDbName(), table.getTableName(), columnNames, basics.getRowCount());
    return new PartitionStatistics(basics, columnLevelStatistics);
}
Aggregations