Use of org.apache.flink.table.catalog.ResolvedSchema in project flink by apache.
The class JoinQueryOperation, method calculateResultingSchema:
private ResolvedSchema calculateResultingSchema(QueryOperation left, QueryOperation right) {
    final ResolvedSchema leftSchema = left.getResolvedSchema();
    final ResolvedSchema rightSchema = right.getResolvedSchema();
    return ResolvedSchema.physical(
            Stream.concat(leftSchema.getColumnNames().stream(), rightSchema.getColumnNames().stream())
                    .collect(Collectors.toList()),
            Stream.concat(leftSchema.getColumnDataTypes().stream(), rightSchema.getColumnDataTypes().stream())
                    .collect(Collectors.toList()));
}
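A self-contained sketch of the same ResolvedSchema.physical pattern; the column names and data types below are made up for illustration:

import java.util.Arrays;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.catalog.ResolvedSchema;

public class JoinSchemaSketch {
    public static void main(String[] args) {
        // Hypothetical schemas standing in for the two join inputs.
        ResolvedSchema left = ResolvedSchema.physical(
                Arrays.asList("id", "name"),
                Arrays.asList(DataTypes.BIGINT(), DataTypes.STRING()));
        ResolvedSchema right = ResolvedSchema.physical(
                Arrays.asList("order_id", "amount"),
                Arrays.asList(DataTypes.BIGINT(), DataTypes.DECIMAL(10, 2)));
        // Concatenating names and types mirrors calculateResultingSchema above.
        ResolvedSchema joined = ResolvedSchema.physical(
                Stream.concat(left.getColumnNames().stream(), right.getColumnNames().stream())
                        .collect(Collectors.toList()),
                Stream.concat(left.getColumnDataTypes().stream(), right.getColumnDataTypes().stream())
                        .collect(Collectors.toList()));
        System.out.println(joined); // four physical columns: id, name, order_id, amount
    }
}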
Use of org.apache.flink.table.catalog.ResolvedSchema in project flink by apache.
The class DataStreamJavaITCase, method testFromAndToDataStreamEventTime:
@Test
public void testFromAndToDataStreamEventTime() throws Exception {
    final StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env);
    final DataStream<Tuple3<Long, Integer, String>> dataStream = getWatermarkedDataStream();
    final Table table = tableEnv.fromDataStream(
            dataStream,
            Schema.newBuilder()
                    .columnByMetadata("rowtime", "TIMESTAMP_LTZ(3)")
                    .watermark("rowtime", "SOURCE_WATERMARK()")
                    .build());
    testSchema(
            table,
            new ResolvedSchema(
                    Arrays.asList(
                            Column.physical("f0", BIGINT().notNull()),
                            Column.physical("f1", INT().notNull()),
                            Column.physical("f2", STRING()),
                            Column.metadata("rowtime", TIMESTAMP_LTZ(3), null, false)),
                    Collections.singletonList(WatermarkSpec.of(
                            "rowtime", ResolvedExpressionMock.of(TIMESTAMP_LTZ(3), "`SOURCE_WATERMARK`()"))),
                    null));
    tableEnv.createTemporaryView("t", table);
    final TableResult result = tableEnv.executeSql(
            "SELECT f2, SUM(f1) FROM t GROUP BY f2, TUMBLE(rowtime, INTERVAL '0.005' SECOND)");
    testResult(result, Row.of("a", 47), Row.of("c", 1000), Row.of("c", 1000));
    testResult(
            tableEnv.toDataStream(table)
                    .keyBy(k -> k.getField("f2"))
                    .window(TumblingEventTimeWindows.of(Time.milliseconds(5)))
                    .<Row>apply((key, window, input, out) -> {
                        int sum = 0;
                        for (Row row : input) {
                            sum += row.<Integer>getFieldAs("f1");
                        }
                        out.collect(Row.of(key, sum));
                    })
                    .returns(Types.ROW(Types.STRING, Types.INT)),
            Row.of("a", 47), Row.of("c", 1000), Row.of("c", 1000));
}
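The testSchema assertion compares against a hand-built ResolvedSchema. A minimal sketch of how those same parts can be read back from a Table; describe is a hypothetical helper, not part of the test:

import org.apache.flink.table.api.Table;
import org.apache.flink.table.catalog.Column;
import org.apache.flink.table.catalog.ResolvedSchema;
import org.apache.flink.table.catalog.WatermarkSpec;

public class SchemaInspectionSketch {
    // Hypothetical helper that prints the pieces testSchema asserts on.
    static void describe(Table table) {
        ResolvedSchema schema = table.getResolvedSchema();
        for (Column column : schema.getColumns()) {
            // isPhysical() is false for metadata columns such as "rowtime" above.
            System.out.printf("%s %s (physical=%b)%n",
                    column.getName(), column.getDataType(), column.isPhysical());
        }
        for (WatermarkSpec spec : schema.getWatermarkSpecs()) {
            System.out.printf("WATERMARK FOR %s AS %s%n",
                    spec.getRowtimeAttribute(), spec.getWatermarkExpression().asSummaryString());
        }
    }
}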
Use of org.apache.flink.table.catalog.ResolvedSchema in project flink by apache.
The class DataStreamJavaITCase, method testToDataStreamCustomEventTime:
@Test
public void testToDataStreamCustomEventTime() throws Exception {
    final StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env);
    final TableConfig tableConfig = tableEnv.getConfig();
    // The session time zone should not have an impact on the conversion.
    final ZoneId originalZone = tableConfig.getLocalTimeZone();
    tableConfig.setLocalTimeZone(ZoneId.of("Europe/Berlin"));
    final LocalDateTime localDateTime1 = LocalDateTime.parse("1970-01-01T00:00:00.000");
    final LocalDateTime localDateTime2 = LocalDateTime.parse("1970-01-01T01:00:00.000");
    final DataStream<Tuple2<LocalDateTime, String>> dataStream =
            env.fromElements(new Tuple2<>(localDateTime1, "alice"), new Tuple2<>(localDateTime2, "bob"));
    final Table table = tableEnv.fromDataStream(
            dataStream,
            Schema.newBuilder()
                    .column("f0", "TIMESTAMP(3)")
                    .column("f1", "STRING")
                    .watermark("f0", "SOURCE_WATERMARK()")
                    .build());
    testSchema(
            table,
            new ResolvedSchema(
                    Arrays.asList(Column.physical("f0", TIMESTAMP(3)), Column.physical("f1", STRING())),
                    Collections.singletonList(WatermarkSpec.of(
                            "f0", ResolvedExpressionMock.of(TIMESTAMP(3), "`SOURCE_WATERMARK`()"))),
                    null));
    final DataStream<Long> rowtimeStream =
            tableEnv.toDataStream(table)
                    .process(new ProcessFunction<Row, Long>() {
                        @Override
                        public void processElement(Row value, Context ctx, Collector<Long> out) {
                            out.collect(ctx.timestamp());
                        }
                    });
    testResult(
            rowtimeStream,
            localDateTime1.atOffset(ZoneOffset.UTC).toInstant().toEpochMilli(),
            localDateTime2.atOffset(ZoneOffset.UTC).toInstant().toEpochMilli());
    tableConfig.setLocalTimeZone(originalZone);
}
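The expected values in the final testResult come from interpreting the TIMESTAMP(3) values as UTC wall-clock time, independent of the configured session time zone. A standalone illustration of that conversion:

import java.time.LocalDateTime;
import java.time.ZoneOffset;

public class RowtimeConversionSketch {
    public static void main(String[] args) {
        // The rowtime attached to each StreamRecord treats the TIMESTAMP(3)
        // value as UTC, so 01:00 on the epoch day maps to one hour in millis.
        LocalDateTime localDateTime2 = LocalDateTime.parse("1970-01-01T01:00:00.000");
        long epochMillis = localDateTime2.atOffset(ZoneOffset.UTC).toInstant().toEpochMilli();
        System.out.println(epochMillis); // 3600000
    }
}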
Use of org.apache.flink.table.catalog.ResolvedSchema in project flink by apache.
The class ContextResolvedTableJsonDeserializer, method deserialize:
@Override
public ContextResolvedTable deserialize(JsonParser jsonParser, DeserializationContext ctx) throws IOException {
    final CatalogPlanRestore planRestoreOption =
            SerdeContext.get(ctx).getConfiguration().get(PLAN_RESTORE_CATALOG_OBJECTS);
    final CatalogManager catalogManager = SerdeContext.get(ctx).getFlinkContext().getCatalogManager();
    final ObjectNode objectNode = jsonParser.readValueAsTree();
    // Deserialize the two fields, if available.
    final ObjectIdentifier identifier =
            JsonSerdeUtil.deserializeOptionalField(
                            objectNode, FIELD_NAME_IDENTIFIER, ObjectIdentifier.class, jsonParser.getCodec(), ctx)
                    .orElse(null);
    ResolvedCatalogTable resolvedCatalogTable =
            JsonSerdeUtil.deserializeOptionalField(
                            objectNode, FIELD_NAME_CATALOG_TABLE, ResolvedCatalogTable.class, jsonParser.getCodec(), ctx)
                    .orElse(null);
    if (identifier == null && resolvedCatalogTable == null) {
        throw new ValidationException(
                String.format(
                        "The input JSON is invalid because it doesn't contain '%s', nor the '%s'.",
                        FIELD_NAME_IDENTIFIER, FIELD_NAME_CATALOG_TABLE));
    }
    if (identifier == null) {
        if (isLookupForced(planRestoreOption)) {
            throw missingIdentifier();
        }
        return ContextResolvedTable.anonymous(resolvedCatalogTable);
    }
    Optional<ContextResolvedTable> contextResolvedTableFromCatalog =
            isLookupEnabled(planRestoreOption) ? catalogManager.getTable(identifier) : Optional.empty();
    // If we have a schema from the plan and from the catalog, we need to check that they match.
    if (contextResolvedTableFromCatalog.isPresent() && resolvedCatalogTable != null) {
        ResolvedSchema schemaFromPlan = resolvedCatalogTable.getResolvedSchema();
        ResolvedSchema schemaFromCatalog = contextResolvedTableFromCatalog.get().getResolvedSchema();
        if (!areResolvedSchemasEqual(schemaFromPlan, schemaFromCatalog)) {
            throw schemaNotMatching(identifier, schemaFromPlan, schemaFromCatalog);
        }
    }
    if (resolvedCatalogTable == null || isLookupForced(planRestoreOption)) {
        if (!isLookupEnabled(planRestoreOption)) {
            throw lookupDisabled(identifier);
        }
        // We use what is stored inside the catalog.
        return contextResolvedTableFromCatalog.orElseThrow(
                () -> missingTableFromCatalog(identifier, isLookupForced(planRestoreOption)));
    }
    if (contextResolvedTableFromCatalog.isPresent()) {
        // If no options were serialized, the table was written to the plan with
        // SCHEMA only, so we just need to return the catalog query result.
        if (objectNode.at("/" + FIELD_NAME_CATALOG_TABLE + "/" + OPTIONS).isMissingNode()) {
            return contextResolvedTableFromCatalog.get();
        }
        return contextResolvedTableFromCatalog
                .flatMap(ContextResolvedTable::getCatalog)
                .map(c -> ContextResolvedTable.permanent(identifier, c, resolvedCatalogTable))
                .orElseGet(() -> ContextResolvedTable.temporary(identifier, resolvedCatalogTable));
    }
    return ContextResolvedTable.temporary(identifier, resolvedCatalogTable);
}
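areResolvedSchemasEqual is a private helper of the deserializer. As a rough, hypothetical approximation of what such a plan-vs-catalog schema check involves (not the actual implementation):

import java.util.Objects;
import org.apache.flink.table.catalog.ResolvedSchema;

public class SchemaCompatibilitySketch {
    // Treat two schemas as matching when their columns and watermark specs are
    // equal; Column and WatermarkSpec implement equals(), so this is a deep check.
    static boolean schemasMatch(ResolvedSchema fromPlan, ResolvedSchema fromCatalog) {
        return Objects.equals(fromPlan.getColumns(), fromCatalog.getColumns())
                && Objects.equals(fromPlan.getWatermarkSpecs(), fromCatalog.getWatermarkSpecs());
    }
}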
Use of org.apache.flink.table.catalog.ResolvedSchema in project zeppelin by apache.
The class Flink114Shims, method rowToString:
@Override
public String[] rowToString(Object row, Object table, Object tableConfig) {
    final String zone = ((TableConfig) tableConfig).getConfiguration().get(TableConfigOptions.LOCAL_TIME_ZONE);
    ZoneId zoneId = TableConfigOptions.LOCAL_TIME_ZONE.defaultValue().equals(zone)
            ? ZoneId.systemDefault()
            : ZoneId.of(zone);
    ResolvedSchema resolvedSchema = ((Table) table).getResolvedSchema();
    return PrintUtils.rowToString((Row) row, resolvedSchema, zoneId);
}
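The zone check exists because table.local-time-zone defaults to a marker string rather than a real zone ID. A standalone sketch of that fallback logic, assuming "default" is the marker value (as in recent Flink versions):

import java.time.ZoneId;

public class ZoneResolutionSketch {
    public static void main(String[] args) {
        // When the configured value is still the marker, fall back to the JVM's
        // system time zone instead of passing the marker to ZoneId.of().
        String configuredZone = "default"; // hypothetical configured value
        ZoneId zoneId = "default".equals(configuredZone)
                ? ZoneId.systemDefault()
                : ZoneId.of(configuredZone);
        System.out.println(zoneId);
    }
}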