Example 86 with TableException

use of org.apache.flink.table.api.TableException in project flink by apache.

The class RexNodeJsonDeserializer, method deserializeCatalogFunction.

private static SqlOperator deserializeCatalogFunction(JsonNode jsonNode, SqlSyntax syntax, SerdeContext serdeContext) {
    final CatalogPlanRestore restoreStrategy = serdeContext.getConfiguration().get(PLAN_RESTORE_CATALOG_OBJECTS);
    final FunctionIdentifier identifier = FunctionIdentifier.of(ObjectIdentifierJsonDeserializer.deserialize(jsonNode.required(FIELD_NAME_CATALOG_NAME).asText(), serdeContext));
    switch (restoreStrategy) {
        case ALL: {
            final Optional<SqlOperator> lookupOperator =
                    lookupOptionalSqlOperator(identifier, syntax, serdeContext, false);
            if (lookupOperator.isPresent()) {
                return lookupOperator.get();
            } else if (jsonNode.has(FIELD_NAME_CLASS)) {
                return deserializeFunctionClass(jsonNode, serdeContext);
            }
            throw missingFunctionFromCatalog(identifier, false);
        }
        case ALL_ENFORCED: {
            if (jsonNode.has(FIELD_NAME_CLASS)) {
                return deserializeFunctionClass(jsonNode, serdeContext);
            }
            final Optional<SqlOperator> lookupOperator =
                    lookupOptionalSqlOperator(identifier, syntax, serdeContext, false);
            if (lookupOperator.map(RexNodeJsonDeserializer::isTemporary).orElse(false)) {
                return lookupOperator.get();
            }
            throw lookupDisabled(identifier);
        }
        case IDENTIFIER:
            final Optional<SqlOperator> lookupOperator =
                    lookupOptionalSqlOperator(identifier, syntax, serdeContext, true);
            if (lookupOperator.isPresent()) {
                return lookupOperator.get();
            } else {
                throw missingFunctionFromCatalog(identifier, true);
            }
        default:
            throw new TableException("Unsupported restore strategy: " + restoreStrategy);
    }
}
Also used: FunctionIdentifier(org.apache.flink.table.functions.FunctionIdentifier) TableException(org.apache.flink.table.api.TableException) Optional(java.util.Optional) BuiltInSqlOperator(org.apache.flink.table.planner.functions.sql.BuiltInSqlOperator) SqlOperator(org.apache.calcite.sql.SqlOperator) CatalogPlanRestore(org.apache.flink.table.api.config.TableConfigOptions.CatalogPlanRestore)
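
The branch taken above is selected entirely by the plan-restore option read in the first line of the method. A minimal sketch of setting it (assuming Flink 1.15+, where TableConfig accepts ConfigOptions directly via set; the environment setup is illustrative, not part of the snippet above):

import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.TableEnvironment;
import org.apache.flink.table.api.config.TableConfigOptions;

public class RestoreStrategySketch {
    public static void main(String[] args) {
        TableEnvironment tEnv = TableEnvironment.create(EnvironmentSettings.inStreamingMode());
        // ALL: try the catalog lookup first, fall back to the serialized class, else fail.
        // ALL_ENFORCED: use the serialized class; the catalog is consulted only for temporary functions.
        // IDENTIFIER: resolve strictly by identifier and fail if the catalog cannot provide it.
        tEnv.getConfig().set(
                TableConfigOptions.PLAN_RESTORE_CATALOG_OBJECTS,
                TableConfigOptions.CatalogPlanRestore.IDENTIFIER);
    }
}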

Example 87 with TableException

use of org.apache.flink.table.api.TableException in project flink by apache.

The class SortMergeJoinOperator, method open.

@Override
public void open() throws Exception {
    super.open();
    Configuration conf = getContainingTask().getJobConfiguration();
    isFinished = new boolean[] { false, false };
    collector = new StreamRecordCollector<>(output);
    ClassLoader cl = getUserCodeClassloader();
    AbstractRowDataSerializer inputSerializer1 = (AbstractRowDataSerializer) getOperatorConfig().getTypeSerializerIn1(cl);
    this.serializer1 = new BinaryRowDataSerializer(inputSerializer1.getArity());
    AbstractRowDataSerializer inputSerializer2 = (AbstractRowDataSerializer) getOperatorConfig().getTypeSerializerIn2(cl);
    this.serializer2 = new BinaryRowDataSerializer(inputSerializer2.getArity());
    this.memManager = this.getContainingTask().getEnvironment().getMemoryManager();
    this.ioManager = this.getContainingTask().getEnvironment().getIOManager();
    long totalMemory = computeMemorySize();
    externalBufferMemory = (long) (totalMemory * externalBufferMemRatio);
    externalBufferMemory = Math.max(externalBufferMemory, ResettableExternalBuffer.MIN_NUM_MEMORY);
    long totalSortMem = totalMemory - (type.equals(FlinkJoinType.FULL) ? externalBufferMemory * 2 : externalBufferMemory);
    if (totalSortMem < 0) {
        throw new TableException("Memory size is too small: " + totalMemory + ", please increase manage memory of task manager.");
    }
    // sorter1
    this.sorter1 =
            new BinaryExternalSorter(
                    this.getContainingTask(), memManager, totalSortMem / 2, ioManager,
                    inputSerializer1, serializer1, computer1.newInstance(cl),
                    comparator1.newInstance(cl), conf);
    this.sorter1.startThreads();
    // sorter2
    this.sorter2 =
            new BinaryExternalSorter(
                    this.getContainingTask(), memManager, totalSortMem / 2, ioManager,
                    inputSerializer2, serializer2, computer2.newInstance(cl),
                    comparator2.newInstance(cl), conf);
    this.sorter2.startThreads();
    keyComparator = genKeyComparator.newInstance(cl);
    this.condFunc = condFuncCode.newInstance(cl);
    condFunc.setRuntimeContext(getRuntimeContext());
    condFunc.open(new Configuration());
    projection1 = projectionCode1.newInstance(cl);
    projection2 = projectionCode2.newInstance(cl);
    this.leftNullRow = new GenericRowData(serializer1.getArity());
    this.rightNullRow = new GenericRowData(serializer2.getArity());
    this.joinedRow = new JoinedRowData();
    condFuncCode = null;
    computer1 = null;
    comparator1 = null;
    computer2 = null;
    comparator2 = null;
    projectionCode1 = null;
    projectionCode2 = null;
    genKeyComparator = null;
    getMetricGroup().gauge("memoryUsedSizeInBytes", (Gauge<Long>) () -> sorter1.getUsedMemoryInBytes() + sorter2.getUsedMemoryInBytes());
    getMetricGroup().gauge("numSpillFiles", (Gauge<Long>) () -> sorter1.getNumSpillFiles() + sorter2.getNumSpillFiles());
    getMetricGroup().gauge("spillInBytes", (Gauge<Long>) () -> sorter1.getSpillInBytes() + sorter2.getSpillInBytes());
}
Also used: AbstractRowDataSerializer(org.apache.flink.table.runtime.typeutils.AbstractRowDataSerializer) TableException(org.apache.flink.table.api.TableException) Configuration(org.apache.flink.configuration.Configuration) JoinedRowData(org.apache.flink.table.data.utils.JoinedRowData) BinaryExternalSorter(org.apache.flink.table.runtime.operators.sort.BinaryExternalSorter) GenericRowData(org.apache.flink.table.data.GenericRowData) BinaryRowDataSerializer(org.apache.flink.table.runtime.typeutils.BinaryRowDataSerializer)
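
The memory accounting at the top of open() is what can raise the TableException here. A standalone sketch of the same arithmetic (the constant and sizes below are illustrative assumptions, not Flink's actual values):

public class SortMergeJoinMemoryBudgetSketch {
    // Stand-in for ResettableExternalBuffer.MIN_NUM_MEMORY (illustrative value).
    private static final long MIN_BUFFER_MEMORY = 32 * 1024;

    public static void main(String[] args) {
        long totalMemory = 64L * 1024 * 1024; // assumed managed memory given to the operator
        double externalBufferMemRatio = 0.1;  // assumed configured ratio
        boolean isFullOuterJoin = true;

        long externalBufferMemory =
                Math.max((long) (totalMemory * externalBufferMemRatio), MIN_BUFFER_MEMORY);
        // A FULL outer join buffers both inputs, so the external buffer is reserved twice.
        long totalSortMem =
                totalMemory - (isFullOuterJoin ? externalBufferMemory * 2 : externalBufferMemory);
        if (totalSortMem < 0) {
            throw new IllegalStateException("Memory size is too small: " + totalMemory);
        }
        // Whatever remains is split evenly between the two external sorters.
        System.out.println("per-sorter memory = " + totalSortMem / 2);
    }
}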

Example 88 with TableException

use of org.apache.flink.table.api.TableException in project zeppelin by apache.

The class TableEnvFactory, method createJavaFlinkStreamTableEnvironment.

public TableEnvironment createJavaFlinkStreamTableEnvironment(EnvironmentSettings settings, ClassLoader classLoader) {
    try {
        ImmutablePair<Object, Object> pair = flinkShims.createPlannerAndExecutor(classLoader, settings, senv.getJavaEnv(), oldPlannerBatchTableConfig, functionCatalog, catalogManager);
        Planner planner = (Planner) pair.left;
        Executor executor = (Executor) pair.right;
        Class<?> clazz = Class.forName("org.apache.flink.table.api.bridge.java.internal.StreamTableEnvironmentImpl");
        try {
            Constructor<?> constructor = clazz.getConstructor(CatalogManager.class, ModuleManager.class, FunctionCatalog.class, TableConfig.class, org.apache.flink.streaming.api.environment.StreamExecutionEnvironment.class, Planner.class, Executor.class, boolean.class);
            return (TableEnvironment) constructor.newInstance(oldPlannerCatalogManager, moduleManager, oldPlannerFunctionCatalog, oldPlannerStreamTableConfig, senv.getJavaEnv(), planner, executor, settings.isStreamingMode());
        } catch (NoSuchMethodException e) {
            // Flink 1.11.1 changed the constructor signature, see FLINK-18419.
            Constructor<?> constructor = clazz.getConstructor(CatalogManager.class, ModuleManager.class, FunctionCatalog.class, TableConfig.class, org.apache.flink.streaming.api.environment.StreamExecutionEnvironment.class, Planner.class, Executor.class, boolean.class, ClassLoader.class);
            return (TableEnvironment) constructor.newInstance(oldPlannerCatalogManager, moduleManager, oldPlannerFunctionCatalog, oldPlannerStreamTableConfig, senv.getJavaEnv(), planner, executor, settings.isStreamingMode(), classLoader);
        }
    } catch (Exception e) {
        throw new TableException("Fail to createJavaFlinkStreamTableEnvironment", e);
    }
}
Also used: FunctionCatalog(org.apache.flink.table.catalog.FunctionCatalog) TableException(org.apache.flink.table.api.TableException) Constructor(java.lang.reflect.Constructor) TableEnvironment(org.apache.flink.table.api.TableEnvironment) ModuleManager(org.apache.flink.table.module.ModuleManager) CatalogManager(org.apache.flink.table.catalog.CatalogManager) Executor(org.apache.flink.table.delegation.Executor) TableConfig(org.apache.flink.table.api.TableConfig) Planner(org.apache.flink.table.delegation.Planner) StreamExecutionEnvironment(org.apache.flink.streaming.api.environment.StreamExecutionEnvironment)
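
The nested try/catch around getConstructor is a version probe: the pre-Flink-1.11.1 signature is tried first, and a NoSuchMethodException triggers a retry with the trailing ClassLoader parameter added by FLINK-18419. A generic sketch of that fallback pattern (the helper and its simplified types are mine, not Zeppelin's):

import java.lang.reflect.Constructor;

public final class VersionedConstructorSketch {
    // Hypothetical helper: try paramTypes as-is, then retry with a trailing ClassLoader.
    static Object newInstance(Class<?> clazz, Object[] args, ClassLoader cl, Class<?>... paramTypes)
            throws Exception {
        try {
            return clazz.getConstructor(paramTypes).newInstance(args);
        } catch (NoSuchMethodException e) {
            // The newer signature appends a ClassLoader parameter (FLINK-18419).
            Class<?>[] types = new Class<?>[paramTypes.length + 1];
            System.arraycopy(paramTypes, 0, types, 0, paramTypes.length);
            types[paramTypes.length] = ClassLoader.class;
            Object[] extended = new Object[args.length + 1];
            System.arraycopy(args, 0, extended, 0, args.length);
            extended[args.length] = cl;
            return clazz.getConstructor(types).newInstance(extended);
        }
    }
}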

Example 89 with TableException

use of org.apache.flink.table.api.TableException in project zeppelin by apache.

The class TableEnvFactory, method createScalaBlinkStreamTableEnvironment.

public TableEnvironment createScalaBlinkStreamTableEnvironment(EnvironmentSettings settings, ClassLoader classLoader) {
    try {
        ImmutablePair<Object, Object> pair = flinkShims.createPlannerAndExecutor(classLoader, settings, senv.getJavaEnv(), streamTableConfig, functionCatalog, catalogManager);
        Planner planner = (Planner) pair.left;
        Executor executor = (Executor) pair.right;
        Class<?> clazz = Class.forName("org.apache.flink.table.api.bridge.scala.internal.StreamTableEnvironmentImpl");
        try {
            Constructor<?> constructor = clazz.getConstructor(CatalogManager.class, ModuleManager.class, FunctionCatalog.class, TableConfig.class, org.apache.flink.streaming.api.scala.StreamExecutionEnvironment.class, Planner.class, Executor.class, boolean.class);
            return (TableEnvironment) constructor.newInstance(catalogManager, moduleManager, functionCatalog, streamTableConfig, senv, planner, executor, settings.isStreamingMode());
        } catch (NoSuchMethodException e) {
            // Flink 1.11.1 changed the constructor signature, see FLINK-18419.
            Constructor<?> constructor = clazz.getConstructor(CatalogManager.class, ModuleManager.class, FunctionCatalog.class, TableConfig.class, org.apache.flink.streaming.api.scala.StreamExecutionEnvironment.class, Planner.class, Executor.class, boolean.class, ClassLoader.class);
            return (TableEnvironment) constructor.newInstance(catalogManager, moduleManager, functionCatalog, streamTableConfig, senv, planner, executor, settings.isStreamingMode(), classLoader);
        }
    } catch (Exception e) {
        throw new TableException("Fail to createScalaBlinkStreamTableEnvironment", e);
    }
}
Also used: FunctionCatalog(org.apache.flink.table.catalog.FunctionCatalog) TableException(org.apache.flink.table.api.TableException) Constructor(java.lang.reflect.Constructor) TableEnvironment(org.apache.flink.table.api.TableEnvironment) ModuleManager(org.apache.flink.table.module.ModuleManager) CatalogManager(org.apache.flink.table.catalog.CatalogManager) Executor(org.apache.flink.table.delegation.Executor) TableConfig(org.apache.flink.table.api.TableConfig) Planner(org.apache.flink.table.delegation.Planner) StreamExecutionEnvironment(org.apache.flink.streaming.api.environment.StreamExecutionEnvironment)
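
This method mirrors Example 88 almost line for line: the differences are the bridge class (bridge.scala rather than bridge.java), the Scala StreamExecutionEnvironment in the constructor signature, and the objects passed through (the Blink catalogManager, functionCatalog, and streamTableConfig plus senv itself, instead of the old-planner counterparts and senv.getJavaEnv()). The version-probing fallback sketched after Example 88 applies here unchanged.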

Example 90 with TableException

use of org.apache.flink.table.api.TableException in project flink by apache.

The class HiveDialectITCase, method testShowPartitions.

@Test
public void testShowPartitions() throws Exception {
    tableEnv.executeSql("create table tbl (x int,y binary) partitioned by (dt date, country string)");
    tableEnv.executeSql("alter table tbl add partition (dt='2020-04-30',country='china') partition (dt='2020-04-30',country='us')");
    ObjectPath tablePath = new ObjectPath("default", "tbl");
    assertEquals(2, hiveCatalog.listPartitions(tablePath).size());
    List<Row> partitions = CollectionUtil.iteratorToList(tableEnv.executeSql("show partitions tbl").collect());
    assertEquals(2, partitions.size());
    assertTrue(partitions.toString().contains("dt=2020-04-30/country=china"));
    assertTrue(partitions.toString().contains("dt=2020-04-30/country=us"));
    partitions = CollectionUtil.iteratorToList(tableEnv.executeSql("show partitions tbl partition (dt='2020-04-30')").collect());
    assertEquals(2, partitions.size());
    assertTrue(partitions.toString().contains("dt=2020-04-30/country=china"));
    assertTrue(partitions.toString().contains("dt=2020-04-30/country=us"));
    partitions = CollectionUtil.iteratorToList(tableEnv.executeSql("show partitions tbl partition (country='china')").collect());
    assertEquals(1, partitions.size());
    assertTrue(partitions.toString().contains("dt=2020-04-30/country=china"));
    partitions = CollectionUtil.iteratorToList(tableEnv.executeSql("show partitions tbl partition (dt='2020-04-30',country='china')").collect());
    assertEquals(1, partitions.size());
    assertTrue(partitions.toString().contains("dt=2020-04-30/country=china"));
    partitions = CollectionUtil.iteratorToList(tableEnv.executeSql("show partitions tbl partition (dt='2020-05-01',country='japan')").collect());
    assertEquals(0, partitions.size());
    try {
        CollectionUtil.iteratorToList(tableEnv.executeSql("show partitions tbl partition (de='2020-04-30',city='china')").collect());
    } catch (TableException e) {
        assertEquals(String.format("Could not execute SHOW PARTITIONS %s.%s PARTITION (de=2020-04-30, city=china)", hiveCatalog.getName(), tablePath), e.getMessage());
    }
    tableEnv.executeSql("alter table tbl drop partition (dt='2020-04-30',country='china'),partition (dt='2020-04-30',country='us')");
    assertEquals(0, hiveCatalog.listPartitions(tablePath).size());
    tableEnv.executeSql("drop table tbl");
    tableEnv.executeSql("create table tbl (x int,y binary) partitioned by (dt timestamp, country string)");
    tableEnv.executeSql("alter table tbl add partition (dt='2020-04-30 01:02:03',country='china') partition (dt='2020-04-30 04:05:06',country='us')");
    partitions = CollectionUtil.iteratorToList(tableEnv.executeSql("show partitions tbl").collect());
    assertEquals(2, partitions.size());
    assertTrue(partitions.toString().contains("dt=2020-04-30 01:02:03/country=china"));
    assertTrue(partitions.toString().contains("dt=2020-04-30 04:05:06/country=us"));
    partitions = CollectionUtil.iteratorToList(tableEnv.executeSql("show partitions tbl partition (dt='2020-04-30 01:02:03')").collect());
    assertEquals(1, partitions.size());
    assertTrue(partitions.toString().contains("dt=2020-04-30 01:02:03/country=china"));
    partitions = CollectionUtil.iteratorToList(tableEnv.executeSql("show partitions tbl partition (dt='2020-04-30 04:05:06')").collect());
    assertEquals(1, partitions.size());
    assertTrue(partitions.toString().contains("dt=2020-04-30 04:05:06/country=us"));
    partitions = CollectionUtil.iteratorToList(tableEnv.executeSql("show partitions tbl partition (dt='2020-04-30 01:02:03',country='china')").collect());
    assertEquals(1, partitions.size());
    assertTrue(partitions.toString().contains("dt=2020-04-30 01:02:03/country=china"));
}
Also used: ObjectPath(org.apache.flink.table.catalog.ObjectPath) TableException(org.apache.flink.table.api.TableException) Row(org.apache.flink.types.Row) Test(org.junit.Test)
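
One caveat in the negative test above: if executeSql threw nothing, the try/catch would pass silently, since there is no fail() after the call. A tighter variant using assertThrows (available from JUnit 4.13; tableEnv, hiveCatalog, and tablePath are assumed to be initialized as in the test):

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertThrows;

// Inside the test method, replacing the try/catch block:
TableException e = assertThrows(
        TableException.class,
        () -> CollectionUtil.iteratorToList(
                tableEnv.executeSql("show partitions tbl partition (de='2020-04-30',city='china')")
                        .collect()));
assertEquals(
        String.format(
                "Could not execute SHOW PARTITIONS %s.%s PARTITION (de=2020-04-30, city=china)",
                hiveCatalog.getName(), tablePath),
        e.getMessage());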

Aggregations

TableException (org.apache.flink.table.api.TableException): 163
RowData (org.apache.flink.table.data.RowData): 35
RowType (org.apache.flink.table.types.logical.RowType): 35
Transformation (org.apache.flink.api.dag.Transformation): 28
ArrayList (java.util.ArrayList): 27
ExecEdge (org.apache.flink.table.planner.plan.nodes.exec.ExecEdge): 24
LogicalType (org.apache.flink.table.types.logical.LogicalType): 24
List (java.util.List): 22
DataType (org.apache.flink.table.types.DataType): 19
OneInputTransformation (org.apache.flink.streaming.api.transformations.OneInputTransformation): 18
ValidationException (org.apache.flink.table.api.ValidationException): 17
IOException (java.io.IOException): 13
AggregateCall (org.apache.calcite.rel.core.AggregateCall): 13
ValueLiteralExpression (org.apache.flink.table.expressions.ValueLiteralExpression): 13
RowDataKeySelector (org.apache.flink.table.runtime.keyselector.RowDataKeySelector): 13
Optional (java.util.Optional): 11
Configuration (org.apache.flink.configuration.Configuration): 11
StreamExecutionEnvironment (org.apache.flink.streaming.api.environment.StreamExecutionEnvironment): 11
Constructor (java.lang.reflect.Constructor): 10
Arrays (java.util.Arrays): 9