Use of io.prestosql.spi.StandardErrorCode.GENERIC_USER_ERROR in project boostkit-bigdata by kunpengcompute.
In class HiveMetadata, method beginInsertUpdateInternal:
private HiveInsertTableHandle beginInsertUpdateInternal(ConnectorSession session, ConnectorTableHandle tableHandle,
        Optional<String> partition, HiveACIDWriteType writeType)
{
    HiveIdentity identity = new HiveIdentity(session);
    SchemaTableName tableName = ((HiveTableHandle) tableHandle).getSchemaTableName();
    Table table = metastore.getTable(identity, tableName.getSchemaName(), tableName.getTableName())
            .orElseThrow(() -> new TableNotFoundException(tableName));
    verifyStorageFormatForCatalog(table.getStorage().getStorageFormat());
    HiveWriteUtils.checkTableIsWritable(table, writesToNonManagedTablesEnabled, writeType);

    // Every data column must have a type the Hive writer can produce.
    for (Column column : table.getDataColumns()) {
        if (!HiveWriteUtils.isWritableType(column.getType())) {
            throw new PrestoException(NOT_SUPPORTED,
                    String.format("Inserting into Hive table %s with column type %s not supported", tableName, column.getType()));
        }
    }

    List<HiveColumnHandle> handles = hiveColumnHandles(table).stream()
            .filter(columnHandle -> !columnHandle.isHidden())
            .collect(toList());

    // A partition clause against an unpartitioned table is the user's mistake, not a server fault.
    if (partition.isPresent() && table.getPartitionColumns().isEmpty()) {
        throw new PrestoException(GENERIC_USER_ERROR, String.format("Table %s not partitioned", tableName));
    }

    // Header/footer skipping is a read-time feature; writing through it would corrupt the file layout.
    HiveStorageFormat tableStorageFormat = extractHiveStorageFormat(table);
    if (tableStorageFormat == HiveStorageFormat.TEXTFILE) {
        if (table.getParameters().containsKey(TEXT_SKIP_HEADER_COUNT_KEY)) {
            throw new PrestoException(NOT_SUPPORTED, format("Inserting into Hive table with %s property not supported", TEXT_SKIP_HEADER_COUNT_KEY));
        }
        if (table.getParameters().containsKey(TEXT_SKIP_FOOTER_COUNT_KEY)) {
            throw new PrestoException(NOT_SUPPORTED, format("Inserting into Hive table with %s property not supported", TEXT_SKIP_FOOTER_COUNT_KEY));
        }
    }

    // Transactional (ACID) tables need a write id allocated by the metastore for this transaction.
    Optional<WriteIdInfo> writeIdInfo = Optional.empty();
    if (AcidUtils.isTransactionalTable(((HiveTableHandle) tableHandle).getTableParameters()
            .orElseThrow(() -> new IllegalStateException("tableParameters missing")))) {
        Optional<Long> writeId = metastore.getTableWriteId(session, (HiveTableHandle) tableHandle, writeType);
        if (!writeId.isPresent()) {
            throw new IllegalStateException("No validWriteIds present");
        }
        writeIdInfo = Optional.of(new WriteIdInfo(writeId.get(), writeId.get(), 0));
    }

    // INSERT OVERWRITE is selected either explicitly via the write type or through the
    // insert-existing-partitions session property. (OpertionType is the enum's spelling in HiveWriteUtils.)
    HiveWriteUtils.OpertionType operationType = HiveWriteUtils.OpertionType.INSERT;
    boolean isInsertExistingPartitionsOverwrite = HiveSessionProperties.getInsertExistingPartitionsBehavior(session)
            == HiveSessionProperties.InsertExistingPartitionsBehavior.OVERWRITE;
    if (isInsertExistingPartitionsOverwrite || writeType == HiveACIDWriteType.INSERT_OVERWRITE) {
        operationType = HiveWriteUtils.OpertionType.INSERT_OVERWRITE;
    }

    LocationHandle locationHandle = locationService.forExistingTable(metastore, session, table, writeIdInfo, operationType);
    HiveInsertTableHandle result = new HiveInsertTableHandle(
            tableName.getSchemaName(),
            tableName.getTableName(),
            handles,
            metastore.generatePageSinkMetadata(identity, tableName),
            locationHandle,
            table.getStorage().getBucketProperty(),
            tableStorageFormat,
            HiveSessionProperties.isRespectTableFormat(session) ? tableStorageFormat : HiveSessionProperties.getHiveStorageFormat(session),
            writeType == HiveACIDWriteType.INSERT_OVERWRITE);

    // Register the pending write with the transactional metastore before handing back the handle.
    LocationService.WriteInfo writeInfo = locationService.getQueryWriteInfo(locationHandle);
    metastore.declareIntentionToWrite(session, writeInfo.getWriteMode(), writeInfo.getWritePath(), tableName);
    return result;
}
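The GENERIC_USER_ERROR throw above fires when an INSERT names a partition on a table that has no partition columns. A minimal, self-contained sketch of how such an exception looks to a caller; the PartitionChecks class and its arguments are invented for illustration and are not part of either project:

import io.prestosql.spi.PrestoException;
import static io.prestosql.spi.StandardErrorCode.GENERIC_USER_ERROR;

public final class PartitionChecks
{
    private PartitionChecks() {}

    // Mirrors the guard in beginInsertUpdateInternal: reject a partition
    // clause against a table that has no partition columns.
    static void checkPartitioned(boolean hasPartitionColumns, String tableName)
    {
        if (!hasPartitionColumns) {
            throw new PrestoException(GENERIC_USER_ERROR,
                    String.format("Table %s not partitioned", tableName));
        }
    }

    public static void main(String[] args)
    {
        try {
            checkPartitioned(false, "schema.orders");
        }
        catch (PrestoException e) {
            // Prints "USER_ERROR: Table schema.orders not partitioned":
            // GENERIC_USER_ERROR is classified as USER_ERROR, so the engine
            // blames the query rather than the server.
            System.out.println(e.getErrorCode().getType() + ": " + e.getMessage());
        }
    }
}

StandardErrorCode implements ErrorCodeSupplier, which is why the enum constant can be passed straight to the PrestoException constructor.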
Use of io.prestosql.spi.StandardErrorCode.GENERIC_USER_ERROR in project hetu-core by openlookeng.
In class HiveMetadata, method beginInsertUpdateInternal: the body is identical to the boostkit-bigdata snippet above.
Use of io.prestosql.spi.StandardErrorCode.GENERIC_USER_ERROR in project hetu-core by openlookeng.
In class MemoryMetadata, method getTableMetadata:
@Override
public ConnectorTableMetadata getTableMetadata(ConnectorSession session, ConnectorTableHandle tableHandle)
{
    MemoryTableHandle handle = (MemoryTableHandle) tableHandle;
    TableInfo info = getTableInfo(handle.getId());
    SchemaTableName schema = new SchemaTableName(info.getSchemaName(), info.getTableName());
    List<ColumnMetadata> columns = info.getColumns().stream()
            .map(columnInfo -> columnInfo.getMetadata(typeManager))
            .collect(Collectors.toList());

    ImmutableMap.Builder<String, Object> properties = ImmutableMap.builder();

    // The handle used when the table was created is kept, Base64-encoded, in the
    // table parameters; without it the table's write properties cannot be recovered.
    String creationHandleStr = getTableEntity(info.getSchemaTableName(), false).getParameters().get(TABLE_OUTPUT_HANDLE);
    if (creationHandleStr == null) {
        throw new PrestoException(GENERIC_USER_ERROR, "Table is in an invalid state and should be dropped and recreated.");
    }
    MemoryWriteTableHandle tableWriteHandle = OUTPUT_TABLE_HANDLE_JSON_CODEC.fromJson(Base64.getDecoder().decode(creationHandleStr));

    if (tableWriteHandle.getSortedBy() != null && !tableWriteHandle.getSortedBy().isEmpty()) {
        properties.put(MemoryTableProperties.SORTED_BY_PROPERTY, tableWriteHandle.getSortedBy());
    }
    if (tableWriteHandle.getIndexColumns() != null && !tableWriteHandle.getIndexColumns().isEmpty()) {
        properties.put(MemoryTableProperties.INDEX_COLUMNS_PROPERTY, tableWriteHandle.getIndexColumns());
    }
    properties.put(MemoryTableProperties.SPILL_COMPRESSION_PROPERTY, tableWriteHandle.isCompressionEnabled());
    return new ConnectorTableMetadata(schema, columns, properties.build());
}
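Here the table's output handle is round-tripped through JSON and Base64: presumably written into the table parameters when the table is created, and decoded back in getTableMetadata. A minimal sketch of that cycle, using a plain Jackson ObjectMapper in place of the project's Airlift OUTPUT_TABLE_HANDLE_JSON_CODEC, and a made-up TinyHandle POJO standing in for MemoryWriteTableHandle:

import com.fasterxml.jackson.databind.ObjectMapper;
import java.util.Base64;
import java.util.List;

public class HandleRoundTrip
{
    // Hypothetical stand-in for MemoryWriteTableHandle: just enough
    // fields to show the encode/decode cycle.
    public static class TinyHandle
    {
        public List<String> sortedBy;
        public boolean compressionEnabled;
    }

    public static void main(String[] args) throws Exception
    {
        ObjectMapper mapper = new ObjectMapper();

        TinyHandle handle = new TinyHandle();
        handle.sortedBy = List.of("orderkey");
        handle.compressionEnabled = true;

        // Store: serialize to JSON bytes, then Base64-encode into a string
        // that fits in a Map<String, String> of table parameters.
        String stored = Base64.getEncoder().encodeToString(mapper.writeValueAsBytes(handle));

        // Load: Base64-decode and deserialize, mirroring
        // OUTPUT_TABLE_HANDLE_JSON_CODEC.fromJson(Base64.getDecoder().decode(...)).
        TinyHandle decoded = mapper.readValue(Base64.getDecoder().decode(stored), TinyHandle.class);
        System.out.println(decoded.sortedBy + " compression=" + decoded.compressionEnabled);
    }
}

The Base64 step makes the serialized handle an opaque ASCII token, so the JSON's quotes and braces never need escaping inside the string-valued table parameters.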