Example 1 with OutputFileConfig

Use of org.apache.flink.streaming.api.functions.sink.filesystem.OutputFileConfig in project flink by apache.

The class FileSystemTableSink, method createStreamingSink.

private DataStreamSink<?> createStreamingSink(ProviderContext providerContext, DataStream<RowData> dataStream, Context sinkContext, final int parallelism) {
    FileSystemFactory fsFactory = FileSystem::get;
    RowDataPartitionComputer computer = partitionComputer();
    boolean autoCompaction = tableOptions.getBoolean(AUTO_COMPACTION);
    Object writer = createWriter(sinkContext);
    boolean isEncoder = writer instanceof Encoder;
    TableBucketAssigner assigner = new TableBucketAssigner(computer);
    TableRollingPolicy rollingPolicy = new TableRollingPolicy(!isEncoder || autoCompaction, tableOptions.get(SINK_ROLLING_POLICY_FILE_SIZE).getBytes(), tableOptions.get(SINK_ROLLING_POLICY_ROLLOVER_INTERVAL).toMillis(), tableOptions.get(SINK_ROLLING_POLICY_INACTIVITY_INTERVAL).toMillis());
    String randomPrefix = "part-" + UUID.randomUUID().toString();
    OutputFileConfig.OutputFileConfigBuilder fileNamingBuilder = OutputFileConfig.builder();
    fileNamingBuilder = autoCompaction ? fileNamingBuilder.withPartPrefix(convertToUncompacted(randomPrefix)) : fileNamingBuilder.withPartPrefix(randomPrefix);
    OutputFileConfig fileNamingConfig = fileNamingBuilder.build();
    BucketsBuilder<RowData, String, ? extends BucketsBuilder<RowData, ?, ?>> bucketsBuilder;
    if (isEncoder) {
        // noinspection unchecked
        bucketsBuilder = StreamingFileSink.forRowFormat(path, new ProjectionEncoder((Encoder<RowData>) writer, computer)).withBucketAssigner(assigner).withOutputFileConfig(fileNamingConfig).withRollingPolicy(rollingPolicy);
    } else {
        // noinspection unchecked
        bucketsBuilder = StreamingFileSink.forBulkFormat(path, new ProjectionBulkFactory((BulkWriter.Factory<RowData>) writer, computer)).withBucketAssigner(assigner).withOutputFileConfig(fileNamingConfig).withRollingPolicy(rollingPolicy);
    }
    long bucketCheckInterval = tableOptions.get(SINK_ROLLING_POLICY_CHECK_INTERVAL).toMillis();
    DataStream<PartitionCommitInfo> writerStream;
    if (autoCompaction) {
        long compactionSize = tableOptions.getOptional(COMPACTION_FILE_SIZE).orElse(tableOptions.get(SINK_ROLLING_POLICY_FILE_SIZE)).getBytes();
        CompactReader.Factory<RowData> reader = createCompactReaderFactory(sinkContext).orElseThrow(() -> new TableException("Please implement available reader for compaction:" + " BulkFormat, FileInputFormat."));
        writerStream = StreamingSink.compactionWriter(providerContext, dataStream, bucketCheckInterval, bucketsBuilder, fsFactory, path, reader, compactionSize, parallelism);
    } else {
        writerStream = StreamingSink.writer(providerContext, dataStream, bucketCheckInterval, bucketsBuilder, parallelism, partitionKeys, tableOptions);
    }
    return StreamingSink.sink(providerContext, writerStream, path, tableIdentifier, partitionKeys, new EmptyMetaStoreFactory(path), fsFactory, tableOptions);
}
Also used : TableException(org.apache.flink.table.api.TableException) PartitionCommitInfo(org.apache.flink.connector.file.table.stream.PartitionCommitInfo) OutputFileConfig(org.apache.flink.streaming.api.functions.sink.filesystem.OutputFileConfig) RowData(org.apache.flink.table.data.RowData) CompactReader(org.apache.flink.connector.file.table.stream.compact.CompactReader) Encoder(org.apache.flink.api.common.serialization.Encoder)
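
The naming-related part of the method above can be isolated into a much smaller sketch. The snippet below is not taken from the Flink sources quoted here; it only shows an OutputFileConfig wired into a row-format StreamingFileSink, and the output path, the ".txt" suffix and the SimpleStringEncoder are illustrative choices, not something Example 1 prescribes.

import java.util.UUID;
import org.apache.flink.api.common.serialization.SimpleStringEncoder;
import org.apache.flink.core.fs.Path;
import org.apache.flink.streaming.api.functions.sink.filesystem.OutputFileConfig;
import org.apache.flink.streaming.api.functions.sink.filesystem.StreamingFileSink;

public class OutputFileConfigSketch {
    public static StreamingFileSink<String> buildSink() {
        // Random prefix per sink instance, mirroring the "part-" + UUID pattern of Example 1.
        OutputFileConfig fileNaming = OutputFileConfig.builder()
                .withPartPrefix("part-" + UUID.randomUUID())
                .withPartSuffix(".txt")
                .build();
        return StreamingFileSink
                .forRowFormat(new Path("/tmp/example-output"), new SimpleStringEncoder<String>())
                .withOutputFileConfig(fileNaming)
                .build();
    }
}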

Example 2 with OutputFileConfig

Use of org.apache.flink.streaming.api.functions.sink.filesystem.OutputFileConfig in project flink by apache.

The class HiveTableSink, method createStreamSink.

private DataStreamSink<?> createStreamSink(ProviderContext providerContext, DataStream<RowData> dataStream, StorageDescriptor sd, Properties tableProps, HiveWriterFactory recordWriterFactory, OutputFileConfig.OutputFileConfigBuilder fileNamingBuilder, final int parallelism) {
    org.apache.flink.configuration.Configuration conf = new org.apache.flink.configuration.Configuration();
    catalogTable.getOptions().forEach(conf::setString);
    String commitPolicies = conf.getString(FileSystemConnectorOptions.SINK_PARTITION_COMMIT_POLICY_KIND);
    if (!getPartitionKeys().isEmpty() && StringUtils.isNullOrWhitespaceOnly(commitPolicies)) {
        throw new FlinkHiveException(String.format("Streaming write to partitioned hive table %s without providing a commit policy. " + "Make sure to set a proper value for %s", identifier, FileSystemConnectorOptions.SINK_PARTITION_COMMIT_POLICY_KIND.key()));
    }
    HiveRowDataPartitionComputer partComputer = new HiveRowDataPartitionComputer(hiveShim, JobConfUtils.getDefaultPartitionName(jobConf), tableSchema.getFieldNames(), tableSchema.getFieldDataTypes(), getPartitionKeyArray());
    TableBucketAssigner assigner = new TableBucketAssigner(partComputer);
    HiveRollingPolicy rollingPolicy = new HiveRollingPolicy(conf.get(SINK_ROLLING_POLICY_FILE_SIZE).getBytes(), conf.get(SINK_ROLLING_POLICY_ROLLOVER_INTERVAL).toMillis(), conf.get(SINK_ROLLING_POLICY_INACTIVITY_INTERVAL).toMillis());
    boolean autoCompaction = conf.getBoolean(FileSystemConnectorOptions.AUTO_COMPACTION);
    if (autoCompaction) {
        fileNamingBuilder.withPartPrefix(convertToUncompacted(fileNamingBuilder.build().getPartPrefix()));
    }
    OutputFileConfig outputFileConfig = fileNamingBuilder.build();
    org.apache.flink.core.fs.Path path = new org.apache.flink.core.fs.Path(sd.getLocation());
    BucketsBuilder<RowData, String, ? extends BucketsBuilder<RowData, ?, ?>> builder;
    if (flinkConf.get(HiveOptions.TABLE_EXEC_HIVE_FALLBACK_MAPRED_WRITER)) {
        builder = bucketsBuilderForMRWriter(recordWriterFactory, sd, assigner, rollingPolicy, outputFileConfig);
        LOG.info("Hive streaming sink: Use MapReduce RecordWriter writer.");
    } else {
        Optional<BulkWriter.Factory<RowData>> bulkFactory = createBulkWriterFactory(getPartitionKeyArray(), sd);
        if (bulkFactory.isPresent()) {
            builder = StreamingFileSink.forBulkFormat(path, new FileSystemTableSink.ProjectionBulkFactory(bulkFactory.get(), partComputer)).withBucketAssigner(assigner).withRollingPolicy(rollingPolicy).withOutputFileConfig(outputFileConfig);
            LOG.info("Hive streaming sink: Use native parquet&orc writer.");
        } else {
            builder = bucketsBuilderForMRWriter(recordWriterFactory, sd, assigner, rollingPolicy, outputFileConfig);
            LOG.info("Hive streaming sink: Use MapReduce RecordWriter writer because BulkWriter Factory not available.");
        }
    }
    long bucketCheckInterval = conf.get(SINK_ROLLING_POLICY_CHECK_INTERVAL).toMillis();
    DataStream<PartitionCommitInfo> writerStream;
    if (autoCompaction) {
        long compactionSize = conf.getOptional(FileSystemConnectorOptions.COMPACTION_FILE_SIZE).orElse(conf.get(SINK_ROLLING_POLICY_FILE_SIZE)).getBytes();
        writerStream = StreamingSink.compactionWriter(providerContext, dataStream, bucketCheckInterval, builder, fsFactory(), path, createCompactReaderFactory(sd, tableProps), compactionSize, parallelism);
    } else {
        writerStream = StreamingSink.writer(providerContext, dataStream, bucketCheckInterval, builder, parallelism, getPartitionKeys(), conf);
    }
    return StreamingSink.sink(providerContext, writerStream, path, identifier, getPartitionKeys(), msFactory(), fsFactory(), conf);
}
Also used : TableBucketAssigner(org.apache.flink.connector.file.table.FileSystemTableSink.TableBucketAssigner) Configuration(org.apache.hadoop.conf.Configuration) ThreadLocalClassLoaderConfiguration(org.apache.flink.orc.writer.ThreadLocalClassLoaderConfiguration) HiveMetastoreClientFactory(org.apache.flink.table.catalog.hive.client.HiveMetastoreClientFactory) HiveBulkWriterFactory(org.apache.flink.connectors.hive.write.HiveBulkWriterFactory) LoggerFactory(org.slf4j.LoggerFactory) HiveWriterFactory(org.apache.flink.connectors.hive.write.HiveWriterFactory) HiveCompactReaderFactory(org.apache.flink.connectors.hive.read.HiveCompactReaderFactory) HiveOutputFormatFactory(org.apache.flink.connectors.hive.write.HiveOutputFormatFactory) OutputFileConfig(org.apache.flink.streaming.api.functions.sink.filesystem.OutputFileConfig) RowData(org.apache.flink.table.data.RowData) Path(org.apache.hadoop.fs.Path) PartitionCommitInfo(org.apache.flink.connector.file.table.stream.PartitionCommitInfo) FileSystemTableSink(org.apache.flink.connector.file.table.FileSystemTableSink)
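
Both streaming sinks above rewrite the part prefix when auto-compaction is enabled, so the compact operator can later tell freshly written files from compacted ones. A minimal sketch of just that rewrite, detached from the rest of the sink setup (the "part-" + UUID prefix mirrors the examples; everything else about the sink is omitted):

import java.util.UUID;
import org.apache.flink.streaming.api.functions.sink.filesystem.OutputFileConfig;
import static org.apache.flink.connector.file.table.stream.compact.CompactOperator.convertToUncompacted;

public class CompactionNamingSketch {
    public static OutputFileConfig partFileNaming(boolean autoCompaction) {
        String prefix = "part-" + UUID.randomUUID();
        OutputFileConfig.OutputFileConfigBuilder builder = OutputFileConfig.builder();
        // Apply the "uncompacted" marker only when a compaction stage will run downstream.
        builder = autoCompaction
                ? builder.withPartPrefix(convertToUncompacted(prefix))
                : builder.withPartPrefix(prefix);
        return builder.build();
    }
}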

Example 3 with OutputFileConfig

Use of org.apache.flink.streaming.api.functions.sink.filesystem.OutputFileConfig in project flink by apache.

The class HiveTableSink, method consume.

private DataStreamSink<?> consume(ProviderContext providerContext, DataStream<RowData> dataStream, boolean isBounded, DataStructureConverter converter) {
    checkAcidTable(catalogTable.getOptions(), identifier.toObjectPath());
    try (HiveMetastoreClientWrapper client = HiveMetastoreClientFactory.create(HiveConfUtils.create(jobConf), hiveVersion)) {
        Table table = client.getTable(identifier.getDatabaseName(), identifier.getObjectName());
        StorageDescriptor sd = table.getSd();
        Class hiveOutputFormatClz = hiveShim.getHiveOutputFormatClass(Class.forName(sd.getOutputFormat()));
        boolean isCompressed = jobConf.getBoolean(HiveConf.ConfVars.COMPRESSRESULT.varname, false);
        HiveWriterFactory writerFactory = new HiveWriterFactory(jobConf, hiveOutputFormatClz, sd.getSerdeInfo(), tableSchema, getPartitionKeyArray(), HiveReflectionUtils.getTableMetadata(hiveShim, table), hiveShim, isCompressed);
        String extension = Utilities.getFileExtension(jobConf, isCompressed, (HiveOutputFormat<?, ?>) hiveOutputFormatClz.newInstance());
        OutputFileConfig.OutputFileConfigBuilder fileNamingBuilder = OutputFileConfig.builder().withPartPrefix("part-" + UUID.randomUUID().toString()).withPartSuffix(extension == null ? "" : extension);
        final int parallelism = Optional.ofNullable(configuredParallelism).orElse(dataStream.getParallelism());
        if (isBounded) {
            OutputFileConfig fileNaming = fileNamingBuilder.build();
            return createBatchSink(dataStream, converter, sd, writerFactory, fileNaming, parallelism);
        } else {
            if (overwrite) {
                throw new IllegalStateException("Streaming mode not support overwrite.");
            }
            Properties tableProps = HiveReflectionUtils.getTableMetadata(hiveShim, table);
            return createStreamSink(providerContext, dataStream, sd, tableProps, writerFactory, fileNamingBuilder, parallelism);
        }
    } catch (TException e) {
        throw new CatalogException("Failed to query Hive metaStore", e);
    } catch (IOException e) {
        throw new FlinkRuntimeException("Failed to create staging dir", e);
    } catch (ClassNotFoundException e) {
        throw new FlinkHiveException("Failed to get output format class", e);
    } catch (IllegalAccessException | InstantiationException e) {
        throw new FlinkHiveException("Failed to instantiate output format instance", e);
    }
}
Also used : TException(org.apache.thrift.TException) CatalogTable(org.apache.flink.table.catalog.CatalogTable) Table(org.apache.hadoop.hive.metastore.api.Table) HiveTableUtil.checkAcidTable(org.apache.flink.table.catalog.hive.util.HiveTableUtil.checkAcidTable) StorageDescriptor(org.apache.hadoop.hive.metastore.api.StorageDescriptor) CatalogException(org.apache.flink.table.catalog.exceptions.CatalogException) UncheckedIOException(java.io.UncheckedIOException) IOException(java.io.IOException) Properties(java.util.Properties) OutputFileConfig(org.apache.flink.streaming.api.functions.sink.filesystem.OutputFileConfig) HiveMetastoreClientWrapper(org.apache.flink.table.catalog.hive.client.HiveMetastoreClientWrapper) FlinkRuntimeException(org.apache.flink.util.FlinkRuntimeException) HiveWriterFactory(org.apache.flink.connectors.hive.write.HiveWriterFactory)
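
The detail worth noting in this example is the part suffix: it is derived from the Hive output format's file extension and guarded against null. A minimal sketch of just that piece, where the extension argument stands in for whatever Utilities.getFileExtension(...) returned:

import java.util.UUID;
import org.apache.flink.streaming.api.functions.sink.filesystem.OutputFileConfig;

public class SuffixFromExtensionSketch {
    // extension may be null when the output format defines no file extension.
    public static OutputFileConfig naming(String extension) {
        return OutputFileConfig.builder()
                .withPartPrefix("part-" + UUID.randomUUID())
                .withPartSuffix(extension == null ? "" : extension)
                .build();
    }
}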

Example 4 with OutputFileConfig

Use of org.apache.flink.streaming.api.functions.sink.filesystem.OutputFileConfig in project flink by apache.

The class HiveTableSink, method createBatchSink.

private DataStreamSink<Row> createBatchSink(DataStream<RowData> dataStream, DataStructureConverter converter, StorageDescriptor sd, HiveWriterFactory recordWriterFactory, OutputFileConfig fileNaming, final int parallelism) throws IOException {
    FileSystemOutputFormat.Builder<Row> builder = new FileSystemOutputFormat.Builder<>();
    builder.setPartitionComputer(new HiveRowPartitionComputer(hiveShim, JobConfUtils.getDefaultPartitionName(jobConf), tableSchema.getFieldNames(), tableSchema.getFieldDataTypes(), getPartitionKeyArray()));
    builder.setDynamicGrouped(dynamicGrouping);
    builder.setPartitionColumns(getPartitionKeyArray());
    builder.setFileSystemFactory(fsFactory());
    builder.setFormatFactory(new HiveOutputFormatFactory(recordWriterFactory));
    builder.setMetaStoreFactory(msFactory());
    builder.setOverwrite(overwrite);
    builder.setStaticPartitions(staticPartitionSpec);
    builder.setTempPath(new org.apache.flink.core.fs.Path(toStagingDir(sd.getLocation(), jobConf)));
    builder.setOutputFileConfig(fileNaming);
    return dataStream.map((MapFunction<RowData, Row>) value -> (Row) converter.toExternal(value)).writeUsingOutputFormat(builder.build()).setParallelism(parallelism);
}
Also used : FileSystem(org.apache.hadoop.fs.FileSystem) HiveMetastoreClientFactory(org.apache.flink.table.catalog.hive.client.HiveMetastoreClientFactory) HiveBulkWriterFactory(org.apache.flink.connectors.hive.write.HiveBulkWriterFactory) CatalogTable(org.apache.flink.table.catalog.CatalogTable) LoggerFactory(org.slf4j.LoggerFactory) JobConfUtils(org.apache.flink.connectors.hive.util.JobConfUtils) MapFunction(org.apache.flink.api.common.functions.MapFunction) OrcSplitReaderUtil(org.apache.flink.orc.OrcSplitReaderUtil) PartitionCommitInfo(org.apache.flink.connector.file.table.stream.PartitionCommitInfo) SupportsPartitioning(org.apache.flink.table.connector.sink.abilities.SupportsPartitioning) Configuration(org.apache.hadoop.conf.Configuration) Map(java.util.Map) SINK_ROLLING_POLICY_CHECK_INTERVAL(org.apache.flink.connector.file.table.FileSystemConnectorOptions.SINK_ROLLING_POLICY_CHECK_INTERVAL) StreamingFileSink(org.apache.flink.streaming.api.functions.sink.filesystem.StreamingFileSink) Path(org.apache.hadoop.fs.Path) HiveWriterFactory(org.apache.flink.connectors.hive.write.HiveWriterFactory) PartFileInfo(org.apache.flink.streaming.api.functions.sink.filesystem.PartFileInfo) CheckpointRollingPolicy(org.apache.flink.streaming.api.functions.sink.filesystem.rollingpolicies.CheckpointRollingPolicy) StorageDescriptor(org.apache.hadoop.hive.metastore.api.StorageDescriptor) HiveShimLoader(org.apache.flink.table.catalog.hive.client.HiveShimLoader) HiveCatalogFactoryOptions(org.apache.flink.table.catalog.hive.factories.HiveCatalogFactoryOptions) DynamicTableSink(org.apache.flink.table.connector.sink.DynamicTableSink) SINK_ROLLING_POLICY_ROLLOVER_INTERVAL(org.apache.flink.connector.file.table.FileSystemConnectorOptions.SINK_ROLLING_POLICY_ROLLOVER_INTERVAL) TableSchema(org.apache.flink.table.api.TableSchema) CompactOperator.convertToUncompacted(org.apache.flink.connector.file.table.stream.compact.CompactOperator.convertToUncompacted) UUID(java.util.UUID) HiveOutputFormat(org.apache.hadoop.hive.ql.io.HiveOutputFormat) Preconditions(org.apache.flink.util.Preconditions) StringUtils(org.apache.flink.util.StringUtils) UncheckedIOException(java.io.UncheckedIOException) List(java.util.List) HiveReflectionUtils(org.apache.flink.table.catalog.hive.util.HiveReflectionUtils) LogicalType(org.apache.flink.table.types.logical.LogicalType) DataStreamSinkProvider(org.apache.flink.table.connector.sink.DataStreamSinkProvider) Optional(java.util.Optional) Row(org.apache.flink.types.Row) ObjectIdentifier(org.apache.flink.table.catalog.ObjectIdentifier) ChangelogMode(org.apache.flink.table.connector.ChangelogMode) RowType(org.apache.flink.table.types.logical.RowType) HiveShim(org.apache.flink.table.catalog.hive.client.HiveShim) ParquetRowDataBuilder(org.apache.flink.formats.parquet.row.ParquetRowDataBuilder) BucketsBuilder(org.apache.flink.streaming.api.functions.sink.filesystem.StreamingFileSink.BucketsBuilder) Utilities(org.apache.hadoop.hive.ql.exec.Utilities) LinkedHashMap(java.util.LinkedHashMap) ReadableConfig(org.apache.flink.configuration.ReadableConfig) ThreadLocalClassLoaderConfiguration(org.apache.flink.orc.writer.ThreadLocalClassLoaderConfiguration) FileSystemConnectorOptions(org.apache.flink.connector.file.table.FileSystemConnectorOptions) SINK_ROLLING_POLICY_INACTIVITY_INTERVAL(org.apache.flink.connector.file.table.FileSystemConnectorOptions.SINK_ROLLING_POLICY_INACTIVITY_INTERVAL) SupportsOverwrite(org.apache.flink.table.connector.sink.abilities.SupportsOverwrite) HiveMetastoreClientWrapper(org.apache.flink.table.catalog.hive.client.HiveMetastoreClientWrapper) Nullable(javax.annotation.Nullable) StreamingSink(org.apache.flink.connector.file.table.stream.StreamingSink) DataStreamSink(org.apache.flink.streaming.api.datastream.DataStreamSink) HiveConfUtils(org.apache.flink.connectors.hive.util.HiveConfUtils) HiveCompactReaderFactory(org.apache.flink.connectors.hive.read.HiveCompactReaderFactory) RowData(org.apache.flink.table.data.RowData) Logger(org.slf4j.Logger) Properties(java.util.Properties) ProviderContext(org.apache.flink.table.connector.ProviderContext) FlinkRuntimeException(org.apache.flink.util.FlinkRuntimeException) BulkWriter(org.apache.flink.api.common.serialization.BulkWriter) HiveConf(org.apache.hadoop.hive.conf.HiveConf) HiveOutputFormatFactory(org.apache.flink.connectors.hive.write.HiveOutputFormatFactory) TypeDescription(org.apache.orc.TypeDescription) TException(org.apache.thrift.TException) IOException(java.io.IOException) HadoopPathBasedBulkFormatBuilder(org.apache.flink.streaming.api.functions.sink.filesystem.HadoopPathBasedBulkFormatBuilder) Table(org.apache.hadoop.hive.metastore.api.Table) VisibleForTesting(org.apache.flink.annotation.VisibleForTesting) DataStream(org.apache.flink.streaming.api.datastream.DataStream) JobConf(org.apache.hadoop.mapred.JobConf) TableBucketAssigner(org.apache.flink.connector.file.table.FileSystemTableSink.TableBucketAssigner) CompactReader(org.apache.flink.connector.file.table.stream.compact.CompactReader) OutputFileConfig(org.apache.flink.streaming.api.functions.sink.filesystem.OutputFileConfig) FileSystemTableSink(org.apache.flink.connector.file.table.FileSystemTableSink) TableSchemaUtils(org.apache.flink.table.utils.TableSchemaUtils) FileSystemOutputFormat(org.apache.flink.connector.file.table.FileSystemOutputFormat) CatalogException(org.apache.flink.table.catalog.exceptions.CatalogException) SINK_ROLLING_POLICY_FILE_SIZE(org.apache.flink.connector.file.table.FileSystemConnectorOptions.SINK_ROLLING_POLICY_FILE_SIZE) HiveTableUtil.checkAcidTable(org.apache.flink.table.catalog.hive.util.HiveTableUtil.checkAcidTable)
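
For the batch path, the OutputFileConfig handed to FileSystemOutputFormat.Builder via setOutputFileConfig is a plain value object; the write side reads the prefix and suffix back through its getters. A short sketch with illustrative values:

import org.apache.flink.streaming.api.functions.sink.filesystem.OutputFileConfig;

public class OutputFileConfigGettersSketch {
    public static void main(String[] args) {
        OutputFileConfig fileNaming = OutputFileConfig.builder()
                .withPartPrefix("part-abc")
                .withPartSuffix(".orc")
                .build();
        System.out.println(fileNaming.getPartPrefix()); // part-abc
        System.out.println(fileNaming.getPartSuffix()); // .orc
    }
}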

Example 5 with OutputFileConfig

Use of org.apache.flink.streaming.api.functions.sink.filesystem.OutputFileConfig in project flink by apache.

The class FileWriterTest, method testCorrectTimestampPassingInContext.

private void testCorrectTimestampPassingInContext(Long timestamp, long watermark, long processingTime) throws Exception {
    final File outDir = TEMP_FOLDER.newFolder();
    final Path path = new Path(outDir.toURI());
    // Create the processing time service and advance it to the given processing time.
    ManuallyTriggeredProcessingTimeService processingTimeService = new ManuallyTriggeredProcessingTimeService();
    processingTimeService.advanceTo(processingTime);
    FileWriter<String> fileWriter = createWriter(path, new VerifyingBucketAssigner(timestamp, watermark, processingTime), DefaultRollingPolicy.builder().withRolloverInterval(Duration.ofMillis(10)).build(), new OutputFileConfig("part-", ""), processingTimeService, 5);
    fileWriter.initializeState(Collections.emptyList());
    fileWriter.write("test", new ContextImpl(watermark, timestamp));
}
Also used : Path(org.apache.flink.core.fs.Path) OutputFileConfig(org.apache.flink.streaming.api.functions.sink.filesystem.OutputFileConfig) File(java.io.File)
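
Unlike the sink examples, the test constructs the config directly instead of going through the builder; both forms appear across these examples and yield equivalent configurations. A tiny sketch of the two side by side:

import org.apache.flink.streaming.api.functions.sink.filesystem.OutputFileConfig;

public class ConstructorVsBuilderSketch {
    public static void main(String[] args) {
        // Direct constructor, as in the FileWriterTest example.
        OutputFileConfig viaConstructor = new OutputFileConfig("part-", "");
        // Builder, as in the table and Hive sink examples.
        OutputFileConfig viaBuilder = OutputFileConfig.builder()
                .withPartPrefix("part-")
                .withPartSuffix("")
                .build();
        System.out.println(viaConstructor.getPartPrefix().equals(viaBuilder.getPartPrefix())); // true
    }
}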

Aggregations

OutputFileConfig (org.apache.flink.streaming.api.functions.sink.filesystem.OutputFileConfig): 11 usages
File (java.io.File): 7 usages
Path (org.apache.flink.core.fs.Path): 7 usages
Test (org.junit.Test): 6 usages
PartitionCommitInfo (org.apache.flink.connector.file.table.stream.PartitionCommitInfo): 3 usages
HiveWriterFactory (org.apache.flink.connectors.hive.write.HiveWriterFactory): 3 usages
RowData (org.apache.flink.table.data.RowData): 3 usages
IOException (java.io.IOException): 2 usages
UncheckedIOException (java.io.UncheckedIOException): 2 usages
Properties (java.util.Properties): 2 usages
FileSystemTableSink (org.apache.flink.connector.file.table.FileSystemTableSink): 2 usages
TableBucketAssigner (org.apache.flink.connector.file.table.FileSystemTableSink.TableBucketAssigner): 2 usages
CompactReader (org.apache.flink.connector.file.table.stream.compact.CompactReader): 2 usages
HiveCompactReaderFactory (org.apache.flink.connectors.hive.read.HiveCompactReaderFactory): 2 usages
HiveBulkWriterFactory (org.apache.flink.connectors.hive.write.HiveBulkWriterFactory): 2 usages
HiveOutputFormatFactory (org.apache.flink.connectors.hive.write.HiveOutputFormatFactory): 2 usages
ThreadLocalClassLoaderConfiguration (org.apache.flink.orc.writer.ThreadLocalClassLoaderConfiguration): 2 usages
CatalogTable (org.apache.flink.table.catalog.CatalogTable): 2 usages
CatalogException (org.apache.flink.table.catalog.exceptions.CatalogException): 2 usages
HiveMetastoreClientFactory (org.apache.flink.table.catalog.hive.client.HiveMetastoreClientFactory): 2 usages