Use of org.apache.flink.table.catalog.ObjectIdentifier in project flink by splunk.
The class HiveDynamicTableFactoryTest, method getTableSink.
private DynamicTableSink getTableSink(String tableName) throws Exception {
    TableEnvironmentInternal tableEnvInternal = (TableEnvironmentInternal) tableEnv;
    // Fully qualified identifier: catalog name, database, table name.
    ObjectIdentifier tableIdentifier =
            ObjectIdentifier.of(hiveCatalog.getName(), "default", tableName);
    CatalogTable catalogTable =
            (CatalogTable) hiveCatalog.getTable(tableIdentifier.toObjectPath());
    // Let the catalog's own factory create the sink for the resolved table.
    return FactoryUtil.createDynamicTableSink(
            (DynamicTableSinkFactory)
                    hiveCatalog.getFactory().orElseThrow(IllegalStateException::new),
            tableIdentifier,
            tableEnvInternal.getCatalogManager().resolveCatalogTable(catalogTable),
            tableEnv.getConfig().getConfiguration(),
            Thread.currentThread().getContextClassLoader(),
            false);
}
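As a standalone illustration of the identifier handling above, the following sketch exercises the ObjectIdentifier/ObjectPath round trip outside the test harness (the names "myhive", "default", and "orders" are invented for the example):

import org.apache.flink.table.catalog.ObjectIdentifier;
import org.apache.flink.table.catalog.ObjectPath;

public class ObjectIdentifierDemo {
    public static void main(String[] args) {
        // Three-part identifier: catalog, database, object name.
        ObjectIdentifier id = ObjectIdentifier.of("myhive", "default", "orders");
        // ObjectPath drops the catalog part; it is what Catalog#getTable expects.
        ObjectPath path = id.toObjectPath();
        System.out.println(id.asSummaryString()); // myhive.default.orders
        System.out.println(path.getFullName());   // default.orders
    }
}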
Use of org.apache.flink.table.catalog.ObjectIdentifier in project flink by splunk.
The class HiveParserDMLHelper, method createInsertOperation.
public Operation createInsertOperation(HiveParserCalcitePlanner analyzer, RelNode queryRelNode)
        throws SemanticException {
    HiveParserQB topQB = analyzer.getQB();
    QBMetaData qbMetaData = topQB.getMetaData();
    // Decide the destination table.
    Map<String, Table> nameToDestTable = qbMetaData.getNameToDestTable();
    Map<String, Partition> nameToDestPart = qbMetaData.getNameToDestPartition();
    // For now, only inserting into a single table is supported.
    Preconditions.checkState(
            nameToDestTable.size() <= 1 && nameToDestPart.size() <= 1,
            "Only support inserting to 1 table");
    Table destTable;
    String insClauseName;
    if (!nameToDestTable.isEmpty()) {
        insClauseName = nameToDestTable.keySet().iterator().next();
        destTable = nameToDestTable.values().iterator().next();
    } else if (!nameToDestPart.isEmpty()) {
        insClauseName = nameToDestPart.keySet().iterator().next();
        destTable = nameToDestPart.values().iterator().next().getTable();
    } else {
        // Happens for INSERT DIRECTORY.
        throw new SemanticException("INSERT DIRECTORY is not supported");
    }
    // Decide the static partition spec.
    Map<String, String> staticPartSpec = new LinkedHashMap<>();
    if (destTable.isPartitioned()) {
        List<String> partCols =
                HiveCatalog.getFieldNames(destTable.getTTable().getPartitionKeys());
        if (!nameToDestPart.isEmpty()) {
            // Static partition: every partition column has a literal value.
            Partition destPart = nameToDestPart.values().iterator().next();
            Preconditions.checkState(
                    partCols.size() == destPart.getValues().size(),
                    "Part cols and static spec doesn't match");
            for (int i = 0; i < partCols.size(); i++) {
                staticPartSpec.put(partCols.get(i), destPart.getValues().get(i));
            }
        } else {
            // Dynamic partition: only the columns with values in the spec are static.
            Map<String, String> spec = qbMetaData.getPartSpecForAlias(insClauseName);
            if (spec != null) {
                for (String partCol : partCols) {
                    String val = spec.get(partCol);
                    if (val != null) {
                        staticPartSpec.put(partCol, val);
                    }
                }
            }
        }
    }
    // Decide whether this is INSERT OVERWRITE.
    boolean overwrite =
            topQB.getParseInfo().getInsertOverwriteTables().keySet().stream()
                    .map(String::toLowerCase)
                    .collect(Collectors.toSet())
                    .contains(destTable.getDbName() + "." + destTable.getTableName());
    Tuple4<ObjectIdentifier, QueryOperation, Map<String, String>, Boolean> insertOperationInfo =
            createInsertOperationInfo(
                    queryRelNode,
                    destTable,
                    staticPartSpec,
                    analyzer.getDestSchemaForClause(insClauseName),
                    overwrite);
    return new SinkModifyOperation(
            catalogManager.getTableOrError(insertOperationInfo.f0),
            insertOperationInfo.f1,
            insertOperationInfo.f2,
            insertOperationInfo.f3,
            Collections.emptyMap());
}
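To make the static-versus-dynamic split concrete, here is a standalone sketch of the same filtering loop for a spec like PARTITION (dt='2021-01-01', region), where dt is static and region is dynamic (the column names and values are invented for illustration):

import java.util.Arrays;
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

public class PartSpecDemo {
    public static void main(String[] args) {
        // Partition columns of the destination table, in order.
        List<String> partCols = Arrays.asList("dt", "region");
        // PARTITION (dt='2021-01-01', region) -- region has no value, so it is dynamic.
        Map<String, String> spec = new HashMap<>();
        spec.put("dt", "2021-01-01");
        spec.put("region", null);
        // Same loop as in createInsertOperation: keep only the columns with values.
        Map<String, String> staticPartSpec = new LinkedHashMap<>();
        for (String partCol : partCols) {
            String val = spec.get(partCol);
            if (val != null) {
                staticPartSpec.put(partCol, val);
            }
        }
        System.out.println(staticPartSpec); // {dt=2021-01-01}
    }
}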
Use of org.apache.flink.table.catalog.ObjectIdentifier in project flink by splunk.
The class LogicalTypeJsonDeserializer, method deserializeStructuredType.
private static LogicalType deserializeStructuredType(
        JsonNode logicalTypeNode, SerdeContext serdeContext) {
    // Inline structured types have no object identifier.
    if (!logicalTypeNode.has(FIELD_NAME_OBJECT_IDENTIFIER)) {
        return deserializeStructuredTypeFromPlan(logicalTypeNode, serdeContext);
    }
    // For catalog structured types.
    final ObjectIdentifier identifier =
            ObjectIdentifierJsonDeserializer.deserialize(
                    logicalTypeNode.get(FIELD_NAME_OBJECT_IDENTIFIER).asText(), serdeContext);
    final CatalogPlanRestore restoreStrategy =
            serdeContext.getConfiguration().get(TableConfigOptions.PLAN_RESTORE_CATALOG_OBJECTS);
    switch (restoreStrategy) {
        case ALL:
            if (logicalTypeNode.has(FIELD_NAME_ATTRIBUTES)) {
                return deserializeStructuredTypeFromPlan(logicalTypeNode, serdeContext);
            }
            return deserializeUserDefinedTypeFromCatalog(identifier, serdeContext);
        case ALL_ENFORCED:
            return deserializeStructuredTypeFromPlan(logicalTypeNode, serdeContext);
        case IDENTIFIER:
            return deserializeUserDefinedTypeFromCatalog(identifier, serdeContext);
        default:
            throw new TableException("Unsupported catalog restore strategy.");
    }
}
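The restore strategy the switch dispatches on is read from the table configuration. Assuming the standard TableConfigOptions API, a minimal sketch of selecting a strategy looks like this (the environment setup is illustrative):

import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.TableEnvironment;
import org.apache.flink.table.api.config.TableConfigOptions;
import org.apache.flink.table.api.config.TableConfigOptions.CatalogPlanRestore;

public class RestoreStrategyDemo {
    public static void main(String[] args) {
        TableEnvironment env =
                TableEnvironment.create(EnvironmentSettings.inStreamingMode());
        // ALL (default): prefer what is serialized in the plan, fall back to the catalog.
        // ALL_ENFORCED: use only what is serialized in the plan.
        // IDENTIFIER: always resolve the type from the catalog by its identifier.
        env.getConfig()
                .set(TableConfigOptions.PLAN_RESTORE_CATALOG_OBJECTS,
                        CatalogPlanRestore.IDENTIFIER);
    }
}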
Use of org.apache.flink.table.catalog.ObjectIdentifier in project flink by splunk.
The class LogicalTypeJsonDeserializer, method deserializeStructuredTypeFromPlan.
private static LogicalType deserializeStructuredTypeFromPlan(
        JsonNode logicalTypeNode, SerdeContext serdeContext) {
    final ObjectIdentifier identifier;
    if (logicalTypeNode.has(FIELD_NAME_OBJECT_IDENTIFIER)) {
        identifier =
                ObjectIdentifierJsonDeserializer.deserialize(
                        logicalTypeNode.get(FIELD_NAME_OBJECT_IDENTIFIER).asText(),
                        serdeContext);
    } else {
        identifier = null;
    }
    final Class<?> implementationClass;
    if (logicalTypeNode.has(FIELD_NAME_IMPLEMENTATION_CLASS)) {
        implementationClass =
                loadClass(
                        logicalTypeNode.get(FIELD_NAME_IMPLEMENTATION_CLASS).asText(),
                        serdeContext,
                        "structured type");
    } else {
        implementationClass = null;
    }
    // A structured type is identified by an identifier, an implementation class, or both.
    final StructuredType.Builder builder;
    if (identifier != null && implementationClass != null) {
        builder = StructuredType.newBuilder(identifier, implementationClass);
    } else if (identifier != null) {
        builder = StructuredType.newBuilder(identifier);
    } else {
        builder = StructuredType.newBuilder(implementationClass);
    }
    if (logicalTypeNode.has(FIELD_NAME_DESCRIPTION)) {
        builder.description(logicalTypeNode.get(FIELD_NAME_DESCRIPTION).asText());
    }
    final ArrayNode attributeNodes = (ArrayNode) logicalTypeNode.get(FIELD_NAME_ATTRIBUTES);
    final List<StructuredAttribute> attributes = new ArrayList<>();
    for (JsonNode attributeNode : attributeNodes) {
        final String attributeName = attributeNode.get(FIELD_NAME_ATTRIBUTE_NAME).asText();
        final LogicalType attributeType =
                deserialize(attributeNode.get(FIELD_NAME_ATTRIBUTE_TYPE), serdeContext);
        final String attributeDescription;
        if (attributeNode.has(FIELD_NAME_ATTRIBUTE_DESCRIPTION)) {
            attributeDescription =
                    attributeNode.get(FIELD_NAME_ATTRIBUTE_DESCRIPTION).asText();
        } else {
            attributeDescription = null;
        }
        attributes.add(
                new StructuredAttribute(attributeName, attributeType, attributeDescription));
    }
    builder.attributes(attributes);
    if (logicalTypeNode.has(FIELD_NAME_FINAL)) {
        builder.setFinal(logicalTypeNode.get(FIELD_NAME_FINAL).asBoolean());
    }
    if (logicalTypeNode.has(FIELD_NAME_INSTANTIABLE)) {
        builder.setInstantiable(logicalTypeNode.get(FIELD_NAME_INSTANTIABLE).asBoolean());
    }
    if (logicalTypeNode.has(FIELD_NAME_COMPARISON)) {
        builder.comparison(
                StructuredComparison.valueOf(
                        logicalTypeNode.get(FIELD_NAME_COMPARISON).asText()));
    }
    if (logicalTypeNode.has(FIELD_NAME_SUPER_TYPE)) {
        final StructuredType superType =
                (StructuredType)
                        deserialize(logicalTypeNode.get(FIELD_NAME_SUPER_TYPE), serdeContext);
        builder.superType(superType);
    }
    return builder.build();
}
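The builder calls above mirror the fields of the serialized JSON one-to-one. For reference, a minimal sketch that constructs a comparable StructuredType programmatically (the identifier, attributes, and description are invented):

import java.util.Arrays;

import org.apache.flink.table.catalog.ObjectIdentifier;
import org.apache.flink.table.types.logical.IntType;
import org.apache.flink.table.types.logical.StructuredType;
import org.apache.flink.table.types.logical.StructuredType.StructuredAttribute;
import org.apache.flink.table.types.logical.VarCharType;

public class StructuredTypeDemo {
    public static void main(String[] args) {
        ObjectIdentifier id = ObjectIdentifier.of("cat", "db", "User");
        StructuredType userType =
                StructuredType.newBuilder(id)
                        .description("A registered user")
                        .attributes(
                                Arrays.asList(
                                        new StructuredAttribute("name", new VarCharType(100)),
                                        new StructuredAttribute("age", new IntType())))
                        .setFinal(true)
                        .setInstantiable(true)
                        .build();
        System.out.println(userType.asSummaryString());
    }
}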
Use of org.apache.flink.table.catalog.ObjectIdentifier in project flink by splunk.
The class LogicalTypeJsonDeserializer, method deserializeDistinctType.
private static LogicalType deserializeDistinctType(
        JsonNode logicalTypeNode, SerdeContext serdeContext) {
    // Unlike structured types, a distinct type always carries an object identifier.
    final ObjectIdentifier identifier =
            ObjectIdentifierJsonDeserializer.deserialize(
                    logicalTypeNode.get(FIELD_NAME_OBJECT_IDENTIFIER).asText(), serdeContext);
    final CatalogPlanRestore restoreStrategy =
            serdeContext.getConfiguration().get(TableConfigOptions.PLAN_RESTORE_CATALOG_OBJECTS);
    switch (restoreStrategy) {
        case ALL:
            if (logicalTypeNode.has(FIELD_NAME_SOURCE_TYPE)) {
                return deserializeDistinctTypeFromPlan(identifier, logicalTypeNode, serdeContext);
            }
            return deserializeUserDefinedTypeFromCatalog(identifier, serdeContext);
        case ALL_ENFORCED:
            return deserializeDistinctTypeFromPlan(identifier, logicalTypeNode, serdeContext);
        case IDENTIFIER:
            return deserializeUserDefinedTypeFromCatalog(identifier, serdeContext);
        default:
            throw new TableException("Unsupported catalog restore strategy.");
    }
}
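For contrast with the structured case, a distinct type wraps exactly one source type under a mandatory catalog identifier. A minimal sketch, with invented names:

import org.apache.flink.table.catalog.ObjectIdentifier;
import org.apache.flink.table.types.logical.DistinctType;
import org.apache.flink.table.types.logical.IntType;

public class DistinctTypeDemo {
    public static void main(String[] args) {
        // A distinct type must be registered, so the identifier is mandatory.
        DistinctType amount =
                DistinctType.newBuilder(
                                ObjectIdentifier.of("cat", "db", "Amount"), new IntType())
                        .description("Monetary amount in cents")
                        .build();
        System.out.println(amount.asSummaryString());
    }
}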