use of org.apache.flink.table.catalog.Column in project flink by apache.
The class ColumnJsonDeserializer, method deserialize:
@Override
public Column deserialize(JsonParser jsonParser, DeserializationContext ctx) throws IOException {
    ObjectNode jsonNode = jsonParser.readValueAsTree();
    String columnName = jsonNode.required(NAME).asText();
    String columnKind =
            Optional.ofNullable(jsonNode.get(KIND)).map(JsonNode::asText).orElse(KIND_PHYSICAL);
    Column column;
    switch (columnKind) {
        case KIND_PHYSICAL:
            column = deserializePhysicalColumn(columnName, jsonNode, jsonParser.getCodec(), ctx);
            break;
        case KIND_COMPUTED:
            column = deserializeComputedColumn(columnName, jsonNode, jsonParser.getCodec(), ctx);
            break;
        case KIND_METADATA:
            column = deserializeMetadataColumn(columnName, jsonNode, jsonParser.getCodec(), ctx);
            break;
        default:
            throw new ValidationException(
                    String.format(
                            "Cannot recognize column type '%s'. Allowed types: %s.",
                            columnKind, SUPPORTED_KINDS));
    }
    return column.withComment(
            deserializeOptionalField(jsonNode, COMMENT, String.class, jsonParser.getCodec(), ctx)
                    .orElse(null));
}
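To illustrate the kind-dispatch idea this deserializer implements, here is a minimal, self-contained sketch. The JSON literal and the parseDataType helper are illustrative assumptions, not Flink's actual serialized plan format, which delegates data type parsing to its own DataType JSON codec.

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.catalog.Column;
import org.apache.flink.table.types.DataType;

public class ColumnJsonSketch {
    public static void main(String[] args) throws Exception {
        // Illustrative payload for a physical column; "kind" falls back to PHYSICAL when
        // absent, matching the Optional.ofNullable(...).orElse(KIND_PHYSICAL) logic above.
        String json = "{\"name\":\"user_id\",\"dataType\":\"BIGINT\",\"comment\":\"primary key\"}";
        JsonNode node = new ObjectMapper().readTree(json);

        String name = node.required("name").asText();
        String kind = node.has("kind") ? node.get("kind").asText() : "PHYSICAL";

        Column column;
        switch (kind) {
            case "PHYSICAL":
                column = Column.physical(name, parseDataType(node.required("dataType").asText()));
                break;
            default:
                throw new IllegalArgumentException("Cannot recognize column kind '" + kind + "'.");
        }
        // The comment is optional, mirroring deserializeOptionalField(...).orElse(null).
        column = column.withComment(node.has("comment") ? node.get("comment").asText() : null);
        System.out.println(column);
    }

    // Hypothetical helper: the real deserializer delegates to Flink's DataType JSON codec.
    private static DataType parseDataType(String type) {
        return "BIGINT".equals(type) ? DataTypes.BIGINT() : DataTypes.STRING();
    }
}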
use of org.apache.flink.table.catalog.Column in project flink by apache.
The class ContextResolvedTableJsonDeserializer, method areResolvedSchemasEqual:
private boolean areResolvedSchemasEqual(
        ResolvedSchema schemaFromPlan, ResolvedSchema schemaFromCatalog) {
    // For schema equality we check:
    //  * Columns size and order
    //  * For each column: name, kind (class) and type
    //  * Primary key equality (note: the code below compares primary keys, not partition keys)
    List<Column> columnsFromPlan = schemaFromPlan.getColumns();
    List<Column> columnsFromCatalog = schemaFromCatalog.getColumns();
    if (columnsFromPlan.size() != columnsFromCatalog.size()) {
        return false;
    }
    for (int i = 0; i < columnsFromPlan.size(); i++) {
        Column columnFromPlan = columnsFromPlan.get(i);
        Column columnFromCatalog = columnsFromCatalog.get(i);
        if (!Objects.equals(columnFromPlan.getName(), columnFromCatalog.getName())
                || !Objects.equals(columnFromPlan.getClass(), columnFromCatalog.getClass())
                || !Objects.equals(
                        columnFromPlan.getDataType(), columnFromCatalog.getDataType())) {
            return false;
        }
    }
    return Objects.equals(schemaFromPlan.getPrimaryKey(), schemaFromCatalog.getPrimaryKey());
}
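A minimal usage sketch of these equality rules. Since the real method is private, areEqual below is a hypothetical local copy of its logic, and the column names and types are made up for illustration.

import java.util.Objects;
import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.catalog.Column;
import org.apache.flink.table.catalog.ResolvedSchema;

public class SchemaEqualitySketch {
    public static void main(String[] args) {
        ResolvedSchema fromPlan =
                ResolvedSchema.of(Column.physical("id", DataTypes.BIGINT()));
        // Same name and kind (class), but a different type: fails the per-column check.
        ResolvedSchema fromCatalog =
                ResolvedSchema.of(Column.physical("id", DataTypes.INT()));
        System.out.println(areEqual(fromPlan, fromPlan));    // true
        System.out.println(areEqual(fromPlan, fromCatalog)); // false
    }

    // Hypothetical stand-in replicating the private method's checks.
    static boolean areEqual(ResolvedSchema a, ResolvedSchema b) {
        if (a.getColumns().size() != b.getColumns().size()) {
            return false;
        }
        for (int i = 0; i < a.getColumns().size(); i++) {
            Column ca = a.getColumns().get(i);
            Column cb = b.getColumns().get(i);
            if (!Objects.equals(ca.getName(), cb.getName())
                    || !Objects.equals(ca.getClass(), cb.getClass())
                    || !Objects.equals(ca.getDataType(), cb.getDataType())) {
                return false;
            }
        }
        return Objects.equals(a.getPrimaryKey(), b.getPrimaryKey());
    }
}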
use of org.apache.flink.table.catalog.Column in project flink by apache.
The class TableSchema, method fromResolvedSchema:
/**
 * Helps to migrate from the new {@link ResolvedSchema} to the old API methods.
 */
public static TableSchema fromResolvedSchema(ResolvedSchema resolvedSchema) {
    final TableSchema.Builder builder = TableSchema.builder();
    resolvedSchema.getColumns().stream()
            .map(
                    column -> {
                        if (column instanceof Column.PhysicalColumn) {
                            final Column.PhysicalColumn c = (Column.PhysicalColumn) column;
                            return TableColumn.physical(c.getName(), c.getDataType());
                        } else if (column instanceof Column.MetadataColumn) {
                            final Column.MetadataColumn c = (Column.MetadataColumn) column;
                            return TableColumn.metadata(
                                    c.getName(),
                                    c.getDataType(),
                                    c.getMetadataKey().orElse(null),
                                    c.isVirtual());
                        } else if (column instanceof Column.ComputedColumn) {
                            final Column.ComputedColumn c = (Column.ComputedColumn) column;
                            return TableColumn.computed(
                                    c.getName(),
                                    c.getDataType(),
                                    c.getExpression().asSerializableString());
                        }
                        throw new IllegalArgumentException("Unsupported column type: " + column);
                    })
            .forEach(builder::add);
    resolvedSchema
            .getWatermarkSpecs()
            .forEach(
                    spec ->
                            builder.watermark(
                                    spec.getRowtimeAttribute(),
                                    spec.getWatermarkExpression().asSerializableString(),
                                    spec.getWatermarkExpression().getOutputDataType()));
    resolvedSchema
            .getPrimaryKey()
            .ifPresent(
                    pk -> builder.primaryKey(pk.getName(), pk.getColumns().toArray(new String[0])));
    return builder.build();
}
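A short usage sketch of this conversion. The column names and types are made up for illustration; computed columns are omitted because building a ResolvedExpression by hand is not practical, and TableSchema itself is the deprecated legacy API.

import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.api.TableSchema;
import org.apache.flink.table.catalog.Column;
import org.apache.flink.table.catalog.ResolvedSchema;

public class FromResolvedSchemaSketch {
    public static void main(String[] args) {
        ResolvedSchema resolved =
                ResolvedSchema.of(
                        Column.physical("id", DataTypes.BIGINT().notNull()),
                        Column.metadata("ts", DataTypes.TIMESTAMP(3), "timestamp", false));
        // Each Column subtype is mapped onto its legacy TableColumn counterpart.
        TableSchema legacy = TableSchema.fromResolvedSchema(resolved);
        System.out.println(legacy);
    }
}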
use of org.apache.flink.table.catalog.Column in project flink by apache.
The class TableEnvironmentImpl, method executeInternal:
private TableResultInternal executeInternal(
        List<Transformation<?>> transformations, List<String> sinkIdentifierNames) {
    final String defaultJobName = "insert-into_" + String.join(",", sinkIdentifierNames);
    Pipeline pipeline =
            execEnv.createPipeline(transformations, tableConfig.getConfiguration(), defaultJobName);
    try {
        JobClient jobClient = execEnv.executeAsync(pipeline);
        final List<Column> columns = new ArrayList<>();
        Long[] affectedRowCounts = new Long[transformations.size()];
        for (int i = 0; i < transformations.size(); ++i) {
            // use the sink identifier name as the field name
            columns.add(Column.physical(sinkIdentifierNames.get(i), DataTypes.BIGINT()));
            affectedRowCounts[i] = -1L;
        }
        return TableResultImpl.builder()
                .jobClient(jobClient)
                .resultKind(ResultKind.SUCCESS_WITH_CONTENT)
                .schema(ResolvedSchema.of(columns))
                .resultProvider(new InsertResultProvider(affectedRowCounts).setJobClient(jobClient))
                .build();
    } catch (Exception e) {
        throw new TableException("Failed to execute sql", e);
    }
}
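The result schema built here is just one BIGINT column per sink, with every affected-row count initialized to -1 (unknown until the job completes). A minimal sketch of that part in isolation, with hypothetical sink identifiers:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.catalog.Column;
import org.apache.flink.table.catalog.ResolvedSchema;

public class InsertResultSchemaSketch {
    public static void main(String[] args) {
        // Hypothetical sink identifiers, e.g. from two INSERT INTO statements.
        List<String> sinkIdentifierNames =
                Arrays.asList(
                        "default_catalog.default_database.sink_a",
                        "default_catalog.default_database.sink_b");
        List<Column> columns = new ArrayList<>();
        long[] affectedRowCounts = new long[sinkIdentifierNames.size()];
        for (int i = 0; i < sinkIdentifierNames.size(); i++) {
            // One BIGINT column per sink, named after the sink identifier.
            columns.add(Column.physical(sinkIdentifierNames.get(i), DataTypes.BIGINT()));
            affectedRowCounts[i] = -1L; // unknown until the job finishes
        }
        System.out.println(ResolvedSchema.of(columns));
    }
}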
use of org.apache.flink.table.catalog.Column in project flink by apache.
The class TableauStyle, method columnWidthsByType:
// Package private and private static methods to deal with complexity of string writing and
// formatting
/**
 * Tries to derive column widths based on the column types. If the result set is too large to
 * be stored in Java heap memory, column widths cannot be determined from the column values.
 */
static int[] columnWidthsByType(
        List<Column> columns, int maxColumnWidth, boolean printNullAsEmpty, boolean printRowKind) {
    // fill widths with the field names first
    final int[] colWidths = columns.stream().mapToInt(col -> col.getName().length()).toArray();
    // determine the proper column width based on types
    for (int i = 0; i < columns.size(); ++i) {
        LogicalType type = columns.get(i).getDataType().getLogicalType();
        int len;
        switch (type.getTypeRoot()) {
            case TINYINT:
                // extra for negative value
                len = TinyIntType.PRECISION + 1;
                break;
            case SMALLINT:
                // extra for negative value
                len = SmallIntType.PRECISION + 1;
                break;
            case INTEGER:
                // extra for negative value
                len = IntType.PRECISION + 1;
                break;
            case BIGINT:
                // extra for negative value
                len = BigIntType.PRECISION + 1;
                break;
            case DECIMAL:
                // extra for negative value and decimal point
                len = ((DecimalType) type).getPrecision() + 2;
                break;
            case BOOLEAN:
                // "true" or "false"
                len = 5;
                break;
            case DATE:
                // e.g. 9999-12-31
                len = 10;
                break;
            case TIME_WITHOUT_TIME_ZONE:
                int precision = ((TimeType) type).getPrecision();
                // 23:59:59[.999999999]
                len = precision == 0 ? 8 : precision + 9;
                break;
            case TIMESTAMP_WITHOUT_TIME_ZONE:
                precision = ((TimestampType) type).getPrecision();
                len = timestampTypeColumnWidth(precision);
                break;
            case TIMESTAMP_WITH_LOCAL_TIME_ZONE:
                precision = ((LocalZonedTimestampType) type).getPrecision();
                len = timestampTypeColumnWidth(precision);
                break;
            default:
                len = maxColumnWidth;
        }
        // adjust the column width for potential null values
        len = printNullAsEmpty ? len : Math.max(len, PrintStyle.NULL_VALUE.length());
        colWidths[i] = Math.max(colWidths[i], len);
    }
    // add an extra column for the row kind if necessary
    if (printRowKind) {
        final int[] ret = new int[columns.size() + 1];
        ret[0] = ROW_KIND_COLUMN.length();
        System.arraycopy(colWidths, 0, ret, 1, columns.size());
        return ret;
    } else {
        return colWidths;
    }
}
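A simplified sketch of the width derivation for two column types. The literals 11 and 5 mirror the INTEGER and BOOLEAN cases above, and 30 is a stand-in for the maxColumnWidth parameter; the column names are made up for illustration.

import java.util.Arrays;
import java.util.List;
import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.catalog.Column;
import org.apache.flink.table.types.logical.LogicalType;

public class ColumnWidthSketch {
    public static void main(String[] args) {
        List<Column> columns =
                Arrays.asList(
                        Column.physical("id", DataTypes.INT()),
                        Column.physical("is_active", DataTypes.BOOLEAN()));
        for (Column col : columns) {
            LogicalType type = col.getDataType().getLogicalType();
            int len;
            switch (type.getTypeRoot()) {
                case INTEGER:
                    len = 11; // IntType.PRECISION (10 digits) + 1 for the sign
                    break;
                case BOOLEAN:
                    len = 5; // "false"
                    break;
                default:
                    len = 30; // stand-in for maxColumnWidth
            }
            // The header may be wider than any value, as with "is_active" (9 > 5).
            System.out.println(col.getName() + " -> " + Math.max(col.getName().length(), len));
        }
    }
}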