Use of io.prestosql.spi.StandardErrorCode.GENERIC_INTERNAL_ERROR in project hetu-core by openlookeng.
The class CarbondataMetadata, method updateSchemaInfoRenameColumn.
private SchemaEvolutionEntry updateSchemaInfoRenameColumn(ColumnHandle source, String target)
{
    HiveColumnHandle oldColumnHandle = (HiveColumnHandle) source;
    String oldColumnName = oldColumnHandle.getColumnName();
    String newColumnName = target;
    if (!carbonTable.canAllow(carbonTable, TableOperation.ALTER_COLUMN_RENAME, oldColumnHandle.getName())) {
        throw new PrestoException(GENERIC_INTERNAL_ERROR, format("Alter table rename column is not supported for index indexschema"));
    }
    TableSchema tableSchema = tableInfo.getFactTable();
    List<ColumnSchema> tableColumns = tableSchema.getListOfColumns();
    if (!tableColumns.stream().map(cols -> cols.getColumnName()).collect(toList()).contains(oldColumnHandle.getName())) {
        throw new PrestoException(GENERIC_INTERNAL_ERROR, format("Column " + oldColumnHandle.getName() + " does not exist in " + carbonTable.getDatabaseName() + "." + carbonTable.getTableName()));
    }
    List<ColumnSchema> carbonColumns = carbonTable.getCreateOrderColumn().stream().filter(cols -> !cols.isInvisible()).map(cols -> cols.getColumnSchema()).collect(toList());
    ColumnSchema oldCarbonColumn = carbonColumns.stream().filter(cols -> cols.getColumnName().equalsIgnoreCase(oldColumnName)).findFirst().get();
    validateColumnsForRenaming(oldCarbonColumn);
    TableSchemaBuilder schemaBuilder = new TableSchemaBuilder();
    ColumnSchema deletedColumn = schemaBuilder.addColumn(new StructField(oldColumnHandle.getName(), CarbondataHetuFilterUtil.spi2CarbondataTypeMapper(oldColumnHandle)), null, false, false);
    SchemaEvolutionEntry schemaEvolutionEntry = new SchemaEvolutionEntry();
    // Rename the column in place and record the change as a schema evolution entry.
    tableColumns.forEach(cols -> {
        if (cols.getColumnName().equalsIgnoreCase(oldColumnName)) {
            cols.setColumnName(newColumnName);
            schemaEvolutionEntry.setTimeStamp(timeStamp);
            schemaEvolutionEntry.setAdded(Arrays.asList(cols));
            schemaEvolutionEntry.setRemoved(Arrays.asList(deletedColumn));
        }
    });
    // Keep table properties that are keyed by the old column name in sync with the rename.
    Map<String, String> tableProperties = tableInfo.getFactTable().getTableProperties();
    tableProperties.forEach((tablePropertyKey, tablePropertyValue) -> {
        if (tablePropertyKey.equalsIgnoreCase(oldColumnName)) {
            tableProperties.put(tablePropertyKey, newColumnName);
        }
    });
    tableInfo.setLastUpdatedTime(System.currentTimeMillis());
    tableInfo.setFactTable(tableSchema);
    return schemaEvolutionEntry;
}
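The pattern here is the one this page is about: validate a precondition against the Carbon schema and report a violation as a PrestoException carrying GENERIC_INTERNAL_ERROR. A minimal, hedged sketch of that check in isolation (the ensureColumnExists helper and its parameters are illustrative, not part of hetu-core):

import static io.prestosql.spi.StandardErrorCode.GENERIC_INTERNAL_ERROR;
import static java.lang.String.format;

import io.prestosql.spi.PrestoException;
import java.util.List;

final class ColumnChecks
{
    private ColumnChecks() {}

    // Hypothetical helper: fail with GENERIC_INTERNAL_ERROR when the column is not in the schema.
    static void ensureColumnExists(List<String> existingColumns, String column, String qualifiedTable)
    {
        if (!existingColumns.contains(column)) {
            throw new PrestoException(GENERIC_INTERNAL_ERROR,
                    format("Column %s does not exist in %s", column, qualifiedTable));
        }
    }
}

With such a helper, the existence check above would reduce to a single call passing the collected column names, the old column name, and the database-qualified table name.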
Use of io.prestosql.spi.StandardErrorCode.GENERIC_INTERNAL_ERROR in project hetu-core by openlookeng.
The class CarbondataFileWriter, method appendRow.
public void appendRow(Page dataPage, int position)
{
    FileSinkOperator.RecordWriter finalRecordWriter = null;
    if (HiveACIDWriteType.isUpdateOrDelete(acidWriteType)) {
        try {
            DeleteDeltaBlockDetails deleteDeltaBlockDetails = null;
            SegmentUpdateDetails segmentUpdateDetails = null;
            // Decode the tuple id of the affected row into segment, block, blocklet, page and offset parts.
            String tupleId = getUpdateTupleIdFromRec(dataPage, position);
            String blockId = CarbonUpdateUtil.getRequiredFieldFromTID(tupleId, TupleIdEnum.BLOCK_ID);
            String blockletId = CarbonUpdateUtil.getRequiredFieldFromTID(tupleId, TupleIdEnum.BLOCKLET_ID);
            String pageId = CarbonUpdateUtil.getRequiredFieldFromTID(tupleId, TupleIdEnum.PAGE_ID);
            String rowId = CarbonUpdateUtil.getRequiredFieldFromTID(tupleId, TupleIdEnum.OFFSET);
            String segmentId = CarbonUpdateUtil.getRequiredFieldFromTID(tupleId, TupleIdEnum.SEGMENT_ID);
            String segmentBlockId = CarbonUpdateUtil.getSegmentWithBlockFromTID(tupleId, false);
            String blockName = CarbonUpdateUtil.getBlockName(CarbonTablePath.addDataPartPrefix(blockId));
            String completeBlockName = CarbonTablePath.addDataPartPrefix(blockId + CarbonCommonConstants.FACT_FILE_EXT);
            String blockPath = CarbonUpdateUtil.getTableBlockPath(tupleId, tablePath, true);
            String deltaPath = CarbonUpdateUtil.getDeleteDeltaFilePath(blockPath, blockName, txnTimeStamp);
            deleteDeltaBlockDetails = deleteDeltaDetailsMap.computeIfAbsent(deltaPath, v -> new DeleteDeltaBlockDetails(blockName));
            deltaPathSegmentMap.put(deltaPath, segmentId);
            segmentUpdateDetails = segmentUpdateDetailMap.computeIfAbsent(segmentBlockId, v -> new SegmentUpdateDetails() {
                {
                    setSegmentName(segmentId);
                    setBlockName(blockName);
                    setActualBlockName(completeBlockName);
                    setDeleteDeltaEndTimestamp(txnTimeStamp);
                    setDeleteDeltaStartTimestamp(txnTimeStamp);
                    setDeletedRowsInBlock(segmentUpdateStatusManager.getDetailsForABlock(segmentBlockId) != null ? segmentUpdateStatusManager.getDetailsForABlock(segmentBlockId).getDeletedRowsInBlock() : "0");
                }
            });
            Long deletedRows = Long.parseLong(segmentUpdateDetails.getDeletedRowsInBlock()) + 1;
            segmentUpdateDetails.setDeletedRowsInBlock(Long.toString(deletedRows));
            if (!deleteDeltaBlockDetails.addBlocklet(blockletId, rowId, Integer.parseInt(pageId))) {
                LOG.error("Multiple input rows matched for same row!");
                throw new MultipleMatchingException("Multiple input rows matched for same row!");
            }
            // For DELETE, recording the delete-delta entry is enough; no row is written.
            if (HiveACIDWriteType.DELETE == acidWriteType) {
                return;
            }
            // For UPDATE, lazily create (and cache) one record writer per segment.
            finalRecordWriter = segmentRecordWriterMap.computeIfAbsent(segmentId, v -> {
                try {
                    return getHiveWriter(segmentId, CarbonUpdateUtil.getLatestTaskIdForSegment(new Segment(segmentId), tablePath) + 1);
                }
                catch (Exception e) {
                    LOG.error("error while getting Carbon :: hiveRecordWriter", e);
                    throw new RuntimeException("error while getting Carbon :: hiveRecordWriter");
                }
            });
        }
        catch (Exception e) {
            LOG.error("error while initializing writer", e);
            throw new PrestoException(GENERIC_INTERNAL_ERROR, "writer class not found", e);
        }
    }
    else {
        finalRecordWriter = this.recordWriter;
    }
    for (int field = 0; field < fieldCount; field++) {
        Block block = dataPage.getBlock(field);
        if (block.isNull(position)) {
            tableInspector.setStructFieldData(row, structFields.get(field), null);
        }
        else {
            setters[field].setField(block, position);
        }
    }
    try {
        if (finalRecordWriter != null) {
            finalRecordWriter.write(serDe.serialize(row, tableInspector));
        }
    }
    catch (SerDeException | IOException e) {
        throw new PrestoException(HIVE_WRITER_DATA_ERROR, e);
    }
}
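When writer initialization fails here, the checked exception is wrapped in a PrestoException with GENERIC_INTERNAL_ERROR and the original exception is passed as the cause, so the stack trace survives. A hedged sketch of that wrapping pattern in isolation (openWriterOrFail and its Callable-based opener are illustrative, not hetu-core APIs):

import static io.prestosql.spi.StandardErrorCode.GENERIC_INTERNAL_ERROR;

import io.prestosql.spi.PrestoException;
import java.util.concurrent.Callable;

final class WriterSetup
{
    private WriterSetup() {}

    // Hypothetical wrapper: run any writer-opening action and convert failures
    // into an engine-facing PrestoException that keeps the original cause.
    static <T> T openWriterOrFail(Callable<T> opener, String description)
    {
        try {
            return opener.call();
        }
        catch (Exception e) {
            throw new PrestoException(GENERIC_INTERNAL_ERROR, "failed to initialize " + description, e);
        }
    }
}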
Use of io.prestosql.spi.StandardErrorCode.GENERIC_INTERNAL_ERROR in project hetu-core by openlookeng.
The class CarbondataPageSourceProvider, method createPageSource.
@Override
public ConnectorPageSource createPageSource(ConnectorTransactionHandle transactionHandle, ConnectorSession session, ConnectorSplit split, ConnectorTableHandle table, List<ColumnHandle> columns)
{
    HiveSplit carbonSplit = Types.checkType(((HiveSplitWrapper) (split)).getSplits().get(0), HiveSplit.class, "split is not class HiveSplit");
    this.queryId = carbonSplit.getSchema().getProperty("queryId");
    if (this.queryId == null) {
        // Fall back to hive pagesource.
        return super.createPageSource(transactionHandle, session, split, table, columns);
    }
    try {
        hdfsEnvironment.getFileSystem(new HdfsEnvironment.HdfsContext(session, carbonSplit.getDatabase()), new Path(carbonSplit.getSchema().getProperty("tablePath")));
    }
    catch (IOException e) {
        throw new PrestoException(GENERIC_INTERNAL_ERROR, "Failed to get file system: " + e.getMessage());
    }
    return hdfsEnvironment.doAs(session.getUser(), () -> {
        Configuration configuration = this.hdfsEnvironment.getConfiguration(new HdfsEnvironment.HdfsContext(session, carbonSplit.getDatabase(), carbonSplit.getTable()), new Path(carbonSplit.getSchema().getProperty("tablePath")));
        CarbonTable carbonTable = getCarbonTable(carbonSplit, configuration);
        /* So that CarbonTLS can access it */
        ThreadLocalSessionInfo.setConfigurationToCurrentThread(configuration);
        boolean isFullACID = isFullAcidTable(Maps.fromProperties(carbonSplit.getSchema()));
        boolean isDirectVectorFill = (carbonTableReader.config.getPushRowFilter() == null) || carbonTableReader.config.getPushRowFilter().equalsIgnoreCase("false") || columns.stream().anyMatch(c -> c.getColumnName().equalsIgnoreCase(CarbonCommonConstants.CARBON_IMPLICIT_COLUMN_TUPLEID));
        return new CarbondataPageSource(carbonTable, queryId, carbonSplit, columns, table, configuration, isDirectVectorFill, isFullACID, session.getUser(), hdfsEnvironment);
    });
}
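For reference, GENERIC_INTERNAL_ERROR itself is just one constant of the io.prestosql.spi.StandardErrorCode enum; it implements ErrorCodeSupplier, and toErrorCode() exposes its numeric code, name, and category (an internal error, as opposed to a user or external error). A small standalone check, assuming only the presto-spi classes on the classpath:

import io.prestosql.spi.ErrorCode;
import io.prestosql.spi.StandardErrorCode;

public final class ShowErrorCode
{
    public static void main(String[] args)
    {
        // Prints the numeric code, the name, and the error type of GENERIC_INTERNAL_ERROR.
        ErrorCode code = StandardErrorCode.GENERIC_INTERNAL_ERROR.toErrorCode();
        System.out.println(code.getCode() + " " + code.getName() + " " + code.getType());
    }
}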
Use of io.prestosql.spi.StandardErrorCode.GENERIC_INTERNAL_ERROR in project hetu-core by openlookeng.
The class HiveSplitManager, method getSplits.
@Override
public ConnectorSplitSource getSplits(ConnectorTransactionHandle transaction, ConnectorSession session, ConnectorTableHandle tableHandle, SplitSchedulingStrategy splitSchedulingStrategy, Supplier<List<Set<DynamicFilter>>> dynamicFilterSupplier, Optional<QueryType> queryType, Map<String, Object> queryInfo, Set<TupleDomain<ColumnMetadata>> userDefinedCachePredicates, boolean partOfReuse)
{
    HiveTableHandle hiveTable = (HiveTableHandle) tableHandle;
    SchemaTableName tableName = hiveTable.getSchemaTableName();
    // get table metadata
    SemiTransactionalHiveMetastore metastore = metastoreProvider.apply((HiveTransactionHandle) transaction);
    Table table = metastore.getTable(new HiveIdentity(session), tableName.getSchemaName(), tableName.getTableName()).orElseThrow(() -> new TableNotFoundException(tableName));
    if (table.getStorage().getStorageFormat().getInputFormat().contains("carbon")) {
        throw new PrestoException(NOT_SUPPORTED, "Hive connector can't read carbondata tables");
    }
    // verify table is not marked as non-readable
    String tableNotReadable = table.getParameters().get(OBJECT_NOT_READABLE);
    if (!isNullOrEmpty(tableNotReadable)) {
        throw new HiveNotReadableException(tableName, Optional.empty(), tableNotReadable);
    }
    // get partitions
    List<HivePartition> partitions = partitionManager.getOrLoadPartitions(session, metastore, new HiveIdentity(session), hiveTable);
    // short circuit if we don't have any partitions
    if (partitions.isEmpty()) {
        return new FixedSplitSource(ImmutableList.of());
    }
    // get buckets from first partition (arbitrary)
    Optional<HiveBucketing.HiveBucketFilter> bucketFilter = hiveTable.getBucketFilter();
    // validate bucketed execution
    Optional<HiveBucketHandle> bucketHandle = hiveTable.getBucketHandle();
    if ((splitSchedulingStrategy == GROUPED_SCHEDULING) && !bucketHandle.isPresent()) {
        throw new PrestoException(GENERIC_INTERNAL_ERROR, "SchedulingPolicy is bucketed, but BucketHandle is not present");
    }
    // sort partitions
    partitions = Ordering.natural().onResultOf(HivePartition::getPartitionId).reverse().sortedCopy(partitions);
    Iterable<HivePartitionMetadata> hivePartitions = getPartitionMetadata(session, metastore, table, tableName, partitions, bucketHandle.map(HiveBucketHandle::toTableBucketProperty));
    HiveSplitLoader hiveSplitLoader = new BackgroundHiveSplitLoader(table, hivePartitions, hiveTable.getCompactEffectivePredicate(), BackgroundHiveSplitLoader.BucketSplitInfo.createBucketSplitInfo(bucketHandle, bucketFilter), session, hdfsEnvironment, namenodeStats, directoryLister, executor, splitLoaderConcurrency, recursiveDfsWalkerEnabled, metastore.getValidWriteIds(session, hiveTable, queryType.map(t -> t == QueryType.VACUUM).orElse(false)).map(validTxnWriteIdList -> validTxnWriteIdList.getTableValidWriteIdList(table.getDatabaseName() + "." + table.getTableName())), dynamicFilterSupplier, queryType, queryInfo, typeManager);
    HiveSplitSource splitSource;
    HiveStorageFormat hiveStorageFormat = HiveMetadata.extractHiveStorageFormat(table);
    switch (splitSchedulingStrategy) {
        case UNGROUPED_SCHEDULING:
            splitSource = HiveSplitSource.allAtOnce(session, table.getDatabaseName(), table.getTableName(),
                    // For reuse, we should make sure to have same split size all time for a table.
                    partOfReuse ? 0 : maxInitialSplits, maxOutstandingSplits, maxOutstandingSplitsSize, maxSplitsPerSecond, hiveSplitLoader, executor, new CounterStat(), dynamicFilterSupplier, userDefinedCachePredicates, typeManager, hiveConfig, hiveStorageFormat);
            break;
        case GROUPED_SCHEDULING:
            splitSource = HiveSplitSource.bucketed(session, table.getDatabaseName(), table.getTableName(),
                    // For reuse, we should make sure to have same split size all time for a table.
                    partOfReuse ? 0 : maxInitialSplits, maxOutstandingSplits, maxOutstandingSplitsSize, maxSplitsPerSecond, hiveSplitLoader, executor, new CounterStat(), dynamicFilterSupplier, userDefinedCachePredicates, typeManager, hiveConfig, hiveStorageFormat);
            break;
        default:
            throw new IllegalArgumentException("Unknown splitSchedulingStrategy: " + splitSchedulingStrategy);
    }
    hiveSplitLoader.start(splitSource);
    if (queryType.isPresent() && queryType.get() == QueryType.VACUUM) {
        HdfsContext hdfsContext = new HdfsContext(session, table.getDatabaseName(), table.getTableName());
        return new HiveVacuumSplitSource(splitSource, (HiveVacuumTableHandle) queryInfo.get("vacuumHandle"), hdfsEnvironment, hdfsContext, session);
    }
    return splitSource;
}
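Here GENERIC_INTERNAL_ERROR marks an engine-side invariant violation rather than a user mistake: grouped scheduling should never be requested without a bucket handle. A hedged sketch of the same guard factored into a helper (checkInternalState is an illustrative name, not a hetu-core utility):

import static io.prestosql.spi.StandardErrorCode.GENERIC_INTERNAL_ERROR;
import static java.lang.String.format;

import io.prestosql.spi.PrestoException;

final class InternalChecks
{
    private InternalChecks() {}

    // Hypothetical guard: throw GENERIC_INTERNAL_ERROR when an internal invariant is violated.
    static void checkInternalState(boolean condition, String message, Object... args)
    {
        if (!condition) {
            throw new PrestoException(GENERIC_INTERNAL_ERROR, format(message, args));
        }
    }
}

The bucketing check above would then read checkInternalState(splitSchedulingStrategy != GROUPED_SCHEDULING || bucketHandle.isPresent(), "SchedulingPolicy is bucketed, but BucketHandle is not present").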
Use of io.prestosql.spi.StandardErrorCode.GENERIC_INTERNAL_ERROR in project hetu-core by openlookeng.
The class CreateTableTask, method internalExecute.
@VisibleForTesting
public ListenableFuture<?> internalExecute(CreateTable statement, Metadata metadata, AccessControl accessControl, Session session, List<Expression> parameters)
{
    checkArgument(!statement.getElements().isEmpty(), "no columns for table");
    QualifiedObjectName tableName = createQualifiedObjectName(session, statement, statement.getName());
    Optional<TableHandle> tableHandle = metadata.getTableHandle(session, tableName);
    if (tableHandle.isPresent()) {
        if (!statement.isNotExists()) {
            throw new SemanticException(TABLE_ALREADY_EXISTS, statement, "Table '%s' already exists", tableName);
        }
        return immediateFuture(null);
    }
    CatalogName catalogName = metadata.getCatalogHandle(session, tableName.getCatalogName()).orElseThrow(() -> new PrestoException(NOT_FOUND, "Catalog does not exist: " + tableName.getCatalogName()));
    LinkedHashMap<String, ColumnMetadata> columns = new LinkedHashMap<>();
    Map<String, Object> inheritedProperties = ImmutableMap.of();
    boolean includingProperties = false;
    for (TableElement element : statement.getElements()) {
        if (element instanceof ColumnDefinition) {
            ColumnDefinition column = (ColumnDefinition) element;
            String name = column.getName().getValue().toLowerCase(Locale.ENGLISH);
            Type type;
            try {
                type = metadata.getType(parseTypeSignature(column.getType()));
            }
            catch (TypeNotFoundException e) {
                throw new SemanticException(TYPE_MISMATCH, element, "Unknown type '%s' for column '%s'", column.getType(), column.getName());
            }
            if (type.equals(UNKNOWN)) {
                throw new SemanticException(TYPE_MISMATCH, element, "Unknown type '%s' for column '%s'", column.getType(), column.getName());
            }
            if (columns.containsKey(name)) {
                throw new SemanticException(DUPLICATE_COLUMN_NAME, column, "Column name '%s' specified more than once", column.getName());
            }
            if (!column.isNullable() && !metadata.getConnectorCapabilities(session, catalogName).contains(NOT_NULL_COLUMN_CONSTRAINT)) {
                throw new SemanticException(NOT_SUPPORTED, column, "Catalog '%s' does not support non-null column for column name '%s'", catalogName.getCatalogName(), column.getName());
            }
            Map<String, Expression> sqlProperties = mapFromProperties(column.getProperties());
            Map<String, Object> columnProperties = metadata.getColumnPropertyManager().getProperties(catalogName, tableName.getCatalogName(), sqlProperties, session, metadata, parameters);
            columns.put(name, new ColumnMetadata(name, type, column.isNullable(), column.getComment().orElse(null), null, false, columnProperties));
        }
        else if (element instanceof LikeClause) {
            LikeClause likeClause = (LikeClause) element;
            QualifiedObjectName likeTableName = createQualifiedObjectName(session, statement, likeClause.getTableName());
            if (!metadata.getCatalogHandle(session, likeTableName.getCatalogName()).isPresent()) {
                throw new SemanticException(MISSING_CATALOG, statement, "LIKE table catalog '%s' does not exist", likeTableName.getCatalogName());
            }
            if (!tableName.getCatalogName().equals(likeTableName.getCatalogName())) {
                throw new SemanticException(NOT_SUPPORTED, statement, "LIKE table across catalogs is not supported");
            }
            TableHandle likeTable = metadata.getTableHandle(session, likeTableName).orElseThrow(() -> new SemanticException(MISSING_TABLE, statement, "LIKE table '%s' does not exist", likeTableName));
            TableMetadata likeTableMetadata = metadata.getTableMetadata(session, likeTable);
            Optional<LikeClause.PropertiesOption> propertiesOption = likeClause.getPropertiesOption();
            if (propertiesOption.isPresent() && propertiesOption.get().equals(LikeClause.PropertiesOption.INCLUDING)) {
                if (includingProperties) {
                    throw new SemanticException(NOT_SUPPORTED, statement, "Only one LIKE clause can specify INCLUDING PROPERTIES");
                }
                includingProperties = true;
                // Don't inherit location property for sql statement "create table like"
                inheritedProperties = likeTableMetadata.getMetadata().getInheritableProperties();
            }
            likeTableMetadata.getColumns().stream().filter(column -> !column.isHidden()).forEach(column -> {
                if (columns.containsKey(column.getName().toLowerCase(Locale.ENGLISH))) {
                    throw new SemanticException(DUPLICATE_COLUMN_NAME, element, "Column name '%s' specified more than once", column.getName());
                }
                columns.put(column.getName().toLowerCase(Locale.ENGLISH), column);
            });
        }
        else {
            throw new PrestoException(GENERIC_INTERNAL_ERROR, "Invalid TableElement: " + element.getClass().getName());
        }
    }
    accessControl.checkCanCreateTable(session.getRequiredTransactionId(), session.getIdentity(), tableName);
    Map<String, Expression> sqlProperties = mapFromProperties(statement.getProperties());
    Map<String, Object> properties = metadata.getTablePropertyManager().getProperties(catalogName, tableName.getCatalogName(), sqlProperties, session, metadata, parameters);
    Map<String, Object> finalProperties = combineProperties(sqlProperties.keySet(), properties, inheritedProperties);
    ConnectorTableMetadata tableMetadata = new ConnectorTableMetadata(toSchemaTableName(tableName), ImmutableList.copyOf(columns.values()), finalProperties, statement.getComment());
    try {
        metadata.createTable(session, tableName.getCatalogName(), tableMetadata, statement.isNotExists());
    }
    catch (PrestoException e) {
        // connectors are not required to handle the ignoreExisting flag
        if (!e.getErrorCode().equals(ALREADY_EXISTS.toErrorCode()) || !statement.isNotExists()) {
            throw e;
        }
    }
    return immediateFuture(null);
}
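The catch block at the end is the counterpart of the earlier usages: instead of throwing GENERIC_INTERNAL_ERROR, it inspects a thrown PrestoException's error code and swallows only ALREADY_EXISTS when IF NOT EXISTS was requested; every other code, including GENERIC_INTERNAL_ERROR, is rethrown. A minimal sketch of that filtering (createIfAbsent is a placeholder name, not engine code):

import static io.prestosql.spi.StandardErrorCode.ALREADY_EXISTS;

import io.prestosql.spi.PrestoException;

final class IgnoreExisting
{
    private IgnoreExisting() {}

    // Hypothetical helper: run a create action, tolerating "already exists" only when asked to.
    static void createIfAbsent(Runnable createAction, boolean ignoreExisting)
    {
        try {
            createAction.run();
        }
        catch (PrestoException e) {
            if (!e.getErrorCode().equals(ALREADY_EXISTS.toErrorCode()) || !ignoreExisting) {
                throw e;
            }
        }
    }
}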