Use of org.apache.flink.table.api.TableException in project flink by apache.
From the class RexNodeJsonDeserializer, method deserializeCatalogFunction.
private static SqlOperator deserializeCatalogFunction(
        JsonNode jsonNode, SqlSyntax syntax, SerdeContext serdeContext) {
    final CatalogPlanRestore restoreStrategy =
            serdeContext.getConfiguration().get(PLAN_RESTORE_CATALOG_OBJECTS);
    final FunctionIdentifier identifier =
            FunctionIdentifier.of(
                    ObjectIdentifierJsonDeserializer.deserialize(
                            jsonNode.required(FIELD_NAME_CATALOG_NAME).asText(), serdeContext));
    switch (restoreStrategy) {
        case ALL:
            {
                final Optional<SqlOperator> lookupOperator =
                        lookupOptionalSqlOperator(identifier, syntax, serdeContext, false);
                if (lookupOperator.isPresent()) {
                    return lookupOperator.get();
                } else if (jsonNode.has(FIELD_NAME_CLASS)) {
                    return deserializeFunctionClass(jsonNode, serdeContext);
                }
                throw missingFunctionFromCatalog(identifier, false);
            }
        case ALL_ENFORCED:
            {
                if (jsonNode.has(FIELD_NAME_CLASS)) {
                    return deserializeFunctionClass(jsonNode, serdeContext);
                }
                final Optional<SqlOperator> lookupOperator =
                        lookupOptionalSqlOperator(identifier, syntax, serdeContext, false);
                if (lookupOperator.map(RexNodeJsonDeserializer::isTemporary).orElse(false)) {
                    return lookupOperator.get();
                }
                throw lookupDisabled(identifier);
            }
        case IDENTIFIER:
            final Optional<SqlOperator> lookupOperator =
                    lookupOptionalSqlOperator(identifier, syntax, serdeContext, true);
            if (lookupOperator.isPresent()) {
                return lookupOperator.get();
            } else {
                throw missingFunctionFromCatalog(identifier, true);
            }
        default:
            throw new TableException("Unsupported restore strategy: " + restoreStrategy);
    }
}
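The switch above covers the three CatalogPlanRestore strategies used when a compiled plan is restored. A minimal sketch of choosing one, assuming the option behind PLAN_RESTORE_CATALOG_OBJECTS is exposed under the key table.plan.restore.catalog-objects (key name is an assumption; check TableConfigOptions in your Flink version):

import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.TableEnvironment;

public class RestoreStrategyExample {
    public static void main(String[] args) {
        TableEnvironment tEnv = TableEnvironment.create(EnvironmentSettings.inStreamingMode());
        // ALL: prefer the catalog lookup, fall back to the class serialized in the plan.
        // ALL_ENFORCED: use only what is serialized in the plan (temporary functions excepted).
        // IDENTIFIER: resolve strictly by identifier and fail when the lookup misses.
        tEnv.getConfig().getConfiguration()
                .setString("table.plan.restore.catalog-objects", "ALL_ENFORCED");
    }
}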
Use of org.apache.flink.table.api.TableException in project flink by apache.
From the class SortMergeJoinOperator, method open.
@Override
public void open() throws Exception {
    super.open();
    Configuration conf = getContainingTask().getJobConfiguration();
    isFinished = new boolean[] {false, false};
    collector = new StreamRecordCollector<>(output);

    ClassLoader cl = getUserCodeClassloader();
    AbstractRowDataSerializer inputSerializer1 =
            (AbstractRowDataSerializer) getOperatorConfig().getTypeSerializerIn1(cl);
    this.serializer1 = new BinaryRowDataSerializer(inputSerializer1.getArity());
    AbstractRowDataSerializer inputSerializer2 =
            (AbstractRowDataSerializer) getOperatorConfig().getTypeSerializerIn2(cl);
    this.serializer2 = new BinaryRowDataSerializer(inputSerializer2.getArity());

    this.memManager = this.getContainingTask().getEnvironment().getMemoryManager();
    this.ioManager = this.getContainingTask().getEnvironment().getIOManager();

    long totalMemory = computeMemorySize();
    externalBufferMemory = (long) (totalMemory * externalBufferMemRatio);
    externalBufferMemory = Math.max(externalBufferMemory, ResettableExternalBuffer.MIN_NUM_MEMORY);
    // A FULL outer join buffers both sides, so it reserves the external buffer memory twice.
    long totalSortMem =
            totalMemory
                    - (type.equals(FlinkJoinType.FULL)
                            ? externalBufferMemory * 2
                            : externalBufferMemory);
    if (totalSortMem < 0) {
        throw new TableException(
                "Memory size is too small: " + totalMemory
                        + ", please increase the managed memory of the task manager.");
    }

    // sorter1
    this.sorter1 = new BinaryExternalSorter(this.getContainingTask(), memManager, totalSortMem / 2,
            ioManager, inputSerializer1, serializer1, computer1.newInstance(cl),
            comparator1.newInstance(cl), conf);
    this.sorter1.startThreads();

    // sorter2
    this.sorter2 = new BinaryExternalSorter(this.getContainingTask(), memManager, totalSortMem / 2,
            ioManager, inputSerializer2, serializer2, computer2.newInstance(cl),
            comparator2.newInstance(cl), conf);
    this.sorter2.startThreads();

    keyComparator = genKeyComparator.newInstance(cl);
    this.condFunc = condFuncCode.newInstance(cl);
    condFunc.setRuntimeContext(getRuntimeContext());
    condFunc.open(new Configuration());

    projection1 = projectionCode1.newInstance(cl);
    projection2 = projectionCode2.newInstance(cl);

    this.leftNullRow = new GenericRowData(serializer1.getArity());
    this.rightNullRow = new GenericRowData(serializer2.getArity());
    this.joinedRow = new JoinedRowData();

    // Release the generated-code holders; they are no longer needed after instantiation.
    condFuncCode = null;
    computer1 = null;
    comparator1 = null;
    computer2 = null;
    comparator2 = null;
    projectionCode1 = null;
    projectionCode2 = null;
    genKeyComparator = null;

    getMetricGroup().gauge("memoryUsedSizeInBytes",
            (Gauge<Long>) () -> sorter1.getUsedMemoryInBytes() + sorter2.getUsedMemoryInBytes());
    getMetricGroup().gauge("numSpillFiles",
            (Gauge<Long>) () -> sorter1.getNumSpillFiles() + sorter2.getNumSpillFiles());
    getMetricGroup().gauge("spillInBytes",
            (Gauge<Long>) () -> sorter1.getSpillInBytes() + sorter2.getSpillInBytes());
}
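The "Memory size is too small" TableException fires when the operator's managed-memory share cannot cover both sorters plus the external buffers. A minimal sketch of raising that budget for a local run, using the standard TaskManagerOptions.MANAGED_MEMORY_SIZE option (the size value here is arbitrary):

import org.apache.flink.configuration.Configuration;
import org.apache.flink.configuration.MemorySize;
import org.apache.flink.configuration.TaskManagerOptions;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class ManagedMemoryExample {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        // More managed memory per task manager leaves room for the two sorters
        // (and, for FULL joins, both external buffers).
        conf.set(TaskManagerOptions.MANAGED_MEMORY_SIZE, MemorySize.parse("1g"));
        StreamExecutionEnvironment env =
                StreamExecutionEnvironment.createLocalEnvironment(conf);
    }
}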
Use of org.apache.flink.table.api.TableException in project zeppelin by apache.
From the class TableEnvFactory, method createJavaFlinkStreamTableEnvironment.
public TableEnvironment createJavaFlinkStreamTableEnvironment(
        EnvironmentSettings settings, ClassLoader classLoader) {
    try {
        ImmutablePair<Object, Object> pair =
                flinkShims.createPlannerAndExecutor(classLoader, settings, senv.getJavaEnv(),
                        oldPlannerBatchTableConfig, functionCatalog, catalogManager);
        Planner planner = (Planner) pair.left;
        Executor executor = (Executor) pair.right;
        Class<?> clazz = Class.forName(
                "org.apache.flink.table.api.bridge.java.internal.StreamTableEnvironmentImpl");
        try {
            Constructor<?> constructor = clazz.getConstructor(
                    CatalogManager.class, ModuleManager.class, FunctionCatalog.class,
                    TableConfig.class,
                    org.apache.flink.streaming.api.environment.StreamExecutionEnvironment.class,
                    Planner.class, Executor.class, boolean.class);
            return (TableEnvironment) constructor.newInstance(
                    oldPlannerCatalogManager, moduleManager, oldPlannerFunctionCatalog,
                    oldPlannerStreamTableConfig, senv.getJavaEnv(), planner, executor,
                    settings.isStreamingMode());
        } catch (NoSuchMethodException e) {
            // Flink 1.11.1 changed the constructor signature to take a ClassLoader, see FLINK-18419.
            Constructor<?> constructor = clazz.getConstructor(
                    CatalogManager.class, ModuleManager.class, FunctionCatalog.class,
                    TableConfig.class,
                    org.apache.flink.streaming.api.environment.StreamExecutionEnvironment.class,
                    Planner.class, Executor.class, boolean.class, ClassLoader.class);
            return (TableEnvironment) constructor.newInstance(
                    oldPlannerCatalogManager, moduleManager, oldPlannerFunctionCatalog,
                    oldPlannerStreamTableConfig, senv.getJavaEnv(), planner, executor,
                    settings.isStreamingMode(), classLoader);
        }
    } catch (Exception e) {
        throw new TableException("Failed to create JavaFlinkStreamTableEnvironment", e);
    }
}
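The try/catch on NoSuchMethodException is a general pattern for staying compatible with two versions of a constructor. A stripped-down sketch of the same idea, with both signatures invented for illustration:

import java.lang.reflect.Constructor;

public class ConstructorFallback {
    // Hypothetical helper: try the older signature first, then fall back to the
    // ClassLoader-taking variant added in a later release.
    static Object create(Class<?> clazz, String name, ClassLoader cl) throws Exception {
        try {
            Constructor<?> c = clazz.getConstructor(String.class);
            return c.newInstance(name);
        } catch (NoSuchMethodException e) {
            Constructor<?> c = clazz.getConstructor(String.class, ClassLoader.class);
            return c.newInstance(name, cl);
        }
    }
}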
Use of org.apache.flink.table.api.TableException in project zeppelin by apache.
From the class TableEnvFactory, method createScalaBlinkStreamTableEnvironment.
public TableEnvironment createScalaBlinkStreamTableEnvironment(
        EnvironmentSettings settings, ClassLoader classLoader) {
    try {
        ImmutablePair<Object, Object> pair =
                flinkShims.createPlannerAndExecutor(classLoader, settings, senv.getJavaEnv(),
                        streamTableConfig, functionCatalog, catalogManager);
        Planner planner = (Planner) pair.left;
        Executor executor = (Executor) pair.right;
        Class<?> clazz = Class.forName(
                "org.apache.flink.table.api.bridge.scala.internal.StreamTableEnvironmentImpl");
        try {
            Constructor<?> constructor = clazz.getConstructor(
                    CatalogManager.class, ModuleManager.class, FunctionCatalog.class,
                    TableConfig.class,
                    org.apache.flink.streaming.api.scala.StreamExecutionEnvironment.class,
                    Planner.class, Executor.class, boolean.class);
            return (TableEnvironment) constructor.newInstance(catalogManager, moduleManager,
                    functionCatalog, streamTableConfig, senv, planner, executor,
                    settings.isStreamingMode());
        } catch (NoSuchMethodException e) {
            // Flink 1.11.1 changed the constructor signature to take a ClassLoader, see FLINK-18419.
            Constructor<?> constructor = clazz.getConstructor(
                    CatalogManager.class, ModuleManager.class, FunctionCatalog.class,
                    TableConfig.class,
                    org.apache.flink.streaming.api.scala.StreamExecutionEnvironment.class,
                    Planner.class, Executor.class, boolean.class, ClassLoader.class);
            return (TableEnvironment) constructor.newInstance(catalogManager, moduleManager,
                    functionCatalog, streamTableConfig, senv, planner, executor,
                    settings.isStreamingMode(), classLoader);
        }
    } catch (Exception e) {
        throw new TableException("Failed to create ScalaBlinkStreamTableEnvironment", e);
    }
}
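Any reflective failure surfaces to the caller as the wrapped TableException, with the root cause preserved on the cause chain. A sketch of a caller, assuming the factory class is the Zeppelin TableEnvFactory shown above (package and wrapper names are assumptions):

import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.TableEnvironment;
import org.apache.flink.table.api.TableException;
import org.apache.zeppelin.flink.TableEnvFactory; // package assumed from the Zeppelin source tree

public class FactoryUsage {
    static TableEnvironment create(TableEnvFactory factory,
                                   EnvironmentSettings settings,
                                   ClassLoader classLoader) {
        try {
            return factory.createScalaBlinkStreamTableEnvironment(settings, classLoader);
        } catch (TableException e) {
            // The reflective root cause (ClassNotFoundException,
            // InvocationTargetException, ...) is preserved as e.getCause().
            throw new IllegalStateException("Planner bootstrap failed", e);
        }
    }
}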
Use of org.apache.flink.table.api.TableException in project flink by apache.
From the class HiveDialectITCase, method testShowPartitions.
@Test
public void testShowPartitions() throws Exception {
    tableEnv.executeSql(
            "create table tbl (x int,y binary) partitioned by (dt date, country string)");
    tableEnv.executeSql(
            "alter table tbl add partition (dt='2020-04-30',country='china') "
                    + "partition (dt='2020-04-30',country='us')");
    ObjectPath tablePath = new ObjectPath("default", "tbl");
    assertEquals(2, hiveCatalog.listPartitions(tablePath).size());
    List<Row> partitions =
            CollectionUtil.iteratorToList(tableEnv.executeSql("show partitions tbl").collect());
    assertEquals(2, partitions.size());
    assertTrue(partitions.toString().contains("dt=2020-04-30/country=china"));
    assertTrue(partitions.toString().contains("dt=2020-04-30/country=us"));
    partitions = CollectionUtil.iteratorToList(
            tableEnv.executeSql("show partitions tbl partition (dt='2020-04-30')").collect());
    assertEquals(2, partitions.size());
    assertTrue(partitions.toString().contains("dt=2020-04-30/country=china"));
    assertTrue(partitions.toString().contains("dt=2020-04-30/country=us"));
    partitions = CollectionUtil.iteratorToList(
            tableEnv.executeSql("show partitions tbl partition (country='china')").collect());
    assertEquals(1, partitions.size());
    assertTrue(partitions.toString().contains("dt=2020-04-30/country=china"));
    partitions = CollectionUtil.iteratorToList(
            tableEnv.executeSql(
                    "show partitions tbl partition (dt='2020-04-30',country='china')").collect());
    assertEquals(1, partitions.size());
    assertTrue(partitions.toString().contains("dt=2020-04-30/country=china"));
    partitions = CollectionUtil.iteratorToList(
            tableEnv.executeSql(
                    "show partitions tbl partition (dt='2020-05-01',country='japan')").collect());
    assertEquals(0, partitions.size());
    try {
        CollectionUtil.iteratorToList(
                tableEnv.executeSql(
                        "show partitions tbl partition (de='2020-04-30',city='china')").collect());
        // Without this, the test would pass silently if no exception were thrown.
        fail("Expected TableException for unknown partition columns");
    } catch (TableException e) {
        assertEquals(
                String.format(
                        "Could not execute SHOW PARTITIONS %s.%s PARTITION (de=2020-04-30, city=china)",
                        hiveCatalog.getName(), tablePath),
                e.getMessage());
    }
    tableEnv.executeSql(
            "alter table tbl drop partition (dt='2020-04-30',country='china'),"
                    + "partition (dt='2020-04-30',country='us')");
    assertEquals(0, hiveCatalog.listPartitions(tablePath).size());
    tableEnv.executeSql("drop table tbl");
    tableEnv.executeSql(
            "create table tbl (x int,y binary) partitioned by (dt timestamp, country string)");
    tableEnv.executeSql(
            "alter table tbl add partition (dt='2020-04-30 01:02:03',country='china') "
                    + "partition (dt='2020-04-30 04:05:06',country='us')");
    partitions =
            CollectionUtil.iteratorToList(tableEnv.executeSql("show partitions tbl").collect());
    assertEquals(2, partitions.size());
    assertTrue(partitions.toString().contains("dt=2020-04-30 01:02:03/country=china"));
    assertTrue(partitions.toString().contains("dt=2020-04-30 04:05:06/country=us"));
    partitions = CollectionUtil.iteratorToList(
            tableEnv.executeSql(
                    "show partitions tbl partition (dt='2020-04-30 01:02:03')").collect());
    assertEquals(1, partitions.size());
    assertTrue(partitions.toString().contains("dt=2020-04-30 01:02:03/country=china"));
    partitions = CollectionUtil.iteratorToList(
            tableEnv.executeSql(
                    "show partitions tbl partition (dt='2020-04-30 04:05:06')").collect());
    assertEquals(1, partitions.size());
    assertTrue(partitions.toString().contains("dt=2020-04-30 04:05:06/country=us"));
    partitions = CollectionUtil.iteratorToList(
            tableEnv.executeSql(
                    "show partitions tbl partition (dt='2020-04-30 01:02:03',country='china')")
                    .collect());
    assertEquals(1, partitions.size());
    assertTrue(partitions.toString().contains("dt=2020-04-30 01:02:03/country=china"));
}
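On JUnit 4.13+ the try/catch-and-fail idiom can be tightened with org.junit.Assert.assertThrows. A sketch of the same assertion, reusing the tableEnv, hiveCatalog, and tablePath fixtures from the test above:

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertThrows;

TableException e = assertThrows(TableException.class, () ->
        CollectionUtil.iteratorToList(
                tableEnv.executeSql(
                        "show partitions tbl partition (de='2020-04-30',city='china')").collect()));
assertEquals(
        String.format(
                "Could not execute SHOW PARTITIONS %s.%s PARTITION (de=2020-04-30, city=china)",
                hiveCatalog.getName(), tablePath),
        e.getMessage());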