Use of org.apache.beam.vendor.guava.v26_0_jre.com.google.common.net.HostAndPort in project beam by apache.
The class ServerFactoryTest, method testCreatingEpollServer.
@Test
public void testCreatingEpollServer() throws Exception {
  assumeTrue(Epoll.isAvailable());
  // tcnative only supports the IPv4 address family
  assumeTrue(InetAddress.getLoopbackAddress() instanceof Inet4Address);
  Endpoints.ApiServiceDescriptor apiServiceDescriptor =
      runTestUsing(ServerFactory.createEpollSocket(), ManagedChannelFactory.createEpoll());
  HostAndPort hostAndPort = HostAndPort.fromString(apiServiceDescriptor.getUrl());
  assertThat(
      hostAndPort.getHost(),
      anyOf(
          equalTo(InetAddress.getLoopbackAddress().getHostName()),
          equalTo(InetAddress.getLoopbackAddress().getHostAddress())));
  assertThat(hostAndPort.getPort(), allOf(greaterThan(0), lessThan(65536)));
}
Use of org.apache.beam.vendor.guava.v26_0_jre.com.google.common.net.HostAndPort in project beam by apache.
The class ServerFactoryTest, method defaultServerWorks.
@Test
public void defaultServerWorks() throws Exception {
  Endpoints.ApiServiceDescriptor apiServiceDescriptor =
      runTestUsing(ServerFactory.createDefault(), ManagedChannelFactory.createDefault());
  HostAndPort hostAndPort = HostAndPort.fromString(apiServiceDescriptor.getUrl());
  assertThat(
      hostAndPort.getHost(),
      anyOf(
          equalTo(InetAddress.getLoopbackAddress().getHostName()),
          equalTo(InetAddress.getLoopbackAddress().getHostAddress())));
  assertThat(hostAndPort.getPort(), allOf(greaterThan(0), lessThan(65536)));
}
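Both tests above hinge on the same HostAndPort behavior: fromString splits a "host:port" string, getHost() returns the host part, and getPort() returns the parsed port (throwing if none is present and no default was supplied). A minimal standalone sketch of that behavior; the input values here are illustrative, not taken from the tests:

import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.net.HostAndPort;

public class HostAndPortDemo {
  public static void main(String[] args) {
    // "host:port" parses into both components.
    HostAndPort withPort = HostAndPort.fromString("127.0.0.1:8099");
    System.out.println(withPort.getHost()); // prints 127.0.0.1
    System.out.println(withPort.getPort()); // prints 8099

    // A bare host parses without a port; withDefaultPort supplies one,
    // but never overrides a port that was given explicitly.
    HostAndPort bare = HostAndPort.fromString("localhost").withDefaultPort(8081);
    System.out.println(bare.getPort()); // prints 8081
  }
}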
Use of org.apache.beam.vendor.guava.v26_0_jre.com.google.common.net.HostAndPort in project beam by apache.
The class FlinkExecutionEnvironments, method createStreamExecutionEnvironment.
@VisibleForTesting
static StreamExecutionEnvironment createStreamExecutionEnvironment(
    FlinkPipelineOptions options, List<String> filesToStage, @Nullable String confDir) {
  LOG.info("Creating a Streaming Environment.");
  // Although Flink uses REST, it expects the address not to contain an HTTP scheme.
  String masterUrl = stripHttpSchema(options.getFlinkMaster());
  Configuration flinkConfiguration = getFlinkConfiguration(confDir);
  StreamExecutionEnvironment flinkStreamEnv;
  // Depending on the master, create the right environment.
  if ("[local]".equals(masterUrl)) {
    setManagedMemoryByFraction(flinkConfiguration);
    disableClassLoaderLeakCheck(flinkConfiguration);
    flinkStreamEnv =
        StreamExecutionEnvironment.createLocalEnvironment(
            getDefaultLocalParallelism(), flinkConfiguration);
  } else if ("[auto]".equals(masterUrl)) {
    flinkStreamEnv = StreamExecutionEnvironment.getExecutionEnvironment();
    if (flinkStreamEnv instanceof LocalStreamEnvironment) {
      disableClassLoaderLeakCheck(flinkConfiguration);
      flinkStreamEnv =
          StreamExecutionEnvironment.createLocalEnvironment(
              getDefaultLocalParallelism(), flinkConfiguration);
    }
  } else {
    int defaultPort = flinkConfiguration.getInteger(RestOptions.PORT);
    HostAndPort hostAndPort = HostAndPort.fromString(masterUrl).withDefaultPort(defaultPort);
    flinkConfiguration.setInteger(RestOptions.PORT, hostAndPort.getPort());
    final SavepointRestoreSettings savepointRestoreSettings;
    if (options.getSavepointPath() != null) {
      savepointRestoreSettings =
          SavepointRestoreSettings.forPath(
              options.getSavepointPath(), options.getAllowNonRestoredState());
    } else {
      savepointRestoreSettings = SavepointRestoreSettings.none();
    }
    flinkStreamEnv =
        new RemoteStreamEnvironment(
            hostAndPort.getHost(),
            hostAndPort.getPort(),
            flinkConfiguration,
            filesToStage.toArray(new String[filesToStage.size()]),
            null,
            savepointRestoreSettings);
    LOG.info("Using Flink Master URL {}:{}.", hostAndPort.getHost(), hostAndPort.getPort());
  }
  // Set the parallelism, required by UnboundedSourceWrapper to generate consistent splits.
  final int parallelism =
      determineParallelism(
          options.getParallelism(), flinkStreamEnv.getParallelism(), flinkConfiguration);
  flinkStreamEnv.setParallelism(parallelism);
  if (options.getMaxParallelism() > 0) {
    flinkStreamEnv.setMaxParallelism(options.getMaxParallelism());
  }
  // Set parallelism in the options (required by some execution code).
  options.setParallelism(parallelism);
  if (options.getObjectReuse()) {
    flinkStreamEnv.getConfig().enableObjectReuse();
  } else {
    flinkStreamEnv.getConfig().disableObjectReuse();
  }
  // Default to event time.
  flinkStreamEnv.setStreamTimeCharacteristic(TimeCharacteristic.EventTime);
  // For the following two parameters, a value of -1 means that Flink will use
  // the default values as specified in the configuration.
  int numRetries = options.getNumberOfExecutionRetries();
  if (numRetries != -1) {
    flinkStreamEnv.setNumberOfExecutionRetries(numRetries);
  }
  long retryDelay = options.getExecutionRetryDelay();
  if (retryDelay != -1) {
    flinkStreamEnv.getConfig().setExecutionRetryDelay(retryDelay);
  }
  configureCheckpointing(options, flinkStreamEnv);
  applyLatencyTrackingInterval(flinkStreamEnv.getConfig(), options);
  if (options.getAutoWatermarkInterval() != null) {
    flinkStreamEnv.getConfig().setAutoWatermarkInterval(options.getAutoWatermarkInterval());
  }
  configureStateBackend(options, flinkStreamEnv);
  return flinkStreamEnv;
}
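The stripHttpSchema helper called at the top of this method is not part of the excerpt. A minimal sketch of what such a helper would do, assuming it only needs to trim whitespace and drop a leading http:// or https:// prefix so that HostAndPort.fromString receives a bare host:port; this is a hypothetical reconstruction, not the actual Beam source:

// Hypothetical sketch: turns "http://localhost:8081" into "localhost:8081".
private static String stripHttpSchema(String url) {
  return url.trim().replaceFirst("^https?://", "");
}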
Use of org.apache.beam.vendor.guava.v26_0_jre.com.google.common.net.HostAndPort in project beam by apache.
The class FlinkExecutionEnvironments, method createBatchExecutionEnvironment.
static ExecutionEnvironment createBatchExecutionEnvironment(
    FlinkPipelineOptions options, List<String> filesToStage, @Nullable String confDir) {
  LOG.info("Creating a Batch Execution Environment.");
  // Although Flink uses REST, it expects the address not to contain an HTTP scheme.
  String flinkMasterHostPort = stripHttpSchema(options.getFlinkMaster());
  Configuration flinkConfiguration = getFlinkConfiguration(confDir);
  ExecutionEnvironment flinkBatchEnv;
  // Depending on the master, create the right environment.
  if ("[local]".equals(flinkMasterHostPort)) {
    setManagedMemoryByFraction(flinkConfiguration);
    disableClassLoaderLeakCheck(flinkConfiguration);
    flinkBatchEnv = ExecutionEnvironment.createLocalEnvironment(flinkConfiguration);
  } else if ("[collection]".equals(flinkMasterHostPort)) {
    flinkBatchEnv = new CollectionEnvironment();
  } else if ("[auto]".equals(flinkMasterHostPort)) {
    flinkBatchEnv = ExecutionEnvironment.getExecutionEnvironment();
    if (flinkBatchEnv instanceof LocalEnvironment) {
      disableClassLoaderLeakCheck(flinkConfiguration);
      flinkBatchEnv = ExecutionEnvironment.createLocalEnvironment(flinkConfiguration);
      flinkBatchEnv.setParallelism(getDefaultLocalParallelism());
    }
  } else {
    int defaultPort = flinkConfiguration.getInteger(RestOptions.PORT);
    HostAndPort hostAndPort =
        HostAndPort.fromString(flinkMasterHostPort).withDefaultPort(defaultPort);
    flinkConfiguration.setInteger(RestOptions.PORT, hostAndPort.getPort());
    flinkBatchEnv =
        ExecutionEnvironment.createRemoteEnvironment(
            hostAndPort.getHost(),
            hostAndPort.getPort(),
            flinkConfiguration,
            filesToStage.toArray(new String[filesToStage.size()]));
    LOG.info("Using Flink Master URL {}:{}.", hostAndPort.getHost(), hostAndPort.getPort());
  }
  // Set the execution mode for data exchange.
  flinkBatchEnv.getConfig().setExecutionMode(ExecutionMode.valueOf(options.getExecutionModeForBatch()));
  // Set the correct parallelism.
  if (options.getParallelism() != -1 && !(flinkBatchEnv instanceof CollectionEnvironment)) {
    flinkBatchEnv.setParallelism(options.getParallelism());
  }
  // Set the correct parallelism, required by UnboundedSourceWrapper to generate consistent
  // splits.
  final int parallelism;
  if (flinkBatchEnv instanceof CollectionEnvironment) {
    parallelism = 1;
  } else {
    parallelism =
        determineParallelism(
            options.getParallelism(), flinkBatchEnv.getParallelism(), flinkConfiguration);
  }
  flinkBatchEnv.setParallelism(parallelism);
  // Set parallelism in the options (required by some execution code).
  options.setParallelism(parallelism);
  if (options.getObjectReuse()) {
    flinkBatchEnv.getConfig().enableObjectReuse();
  } else {
    flinkBatchEnv.getConfig().disableObjectReuse();
  }
  applyLatencyTrackingInterval(flinkBatchEnv.getConfig(), options);
  return flinkBatchEnv;
}
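Both environment builders defer to determineParallelism, which is also outside this excerpt. A hedged sketch of the precedence it plausibly implements, with pipeline options taking priority over the environment's parallelism and the Flink configuration supplying the final fallback; the config key and the fallback value of 1 are assumptions, not taken from the Beam source:

// Sketch under stated assumptions: options > environment > flink-conf > 1.
static int determineParallelism(
    int pipelineOptionsParallelism, int envParallelism, Configuration configuration) {
  if (pipelineOptionsParallelism > 0) {
    return pipelineOptionsParallelism; // an explicit --parallelism wins
  }
  if (envParallelism > 0) {
    return envParallelism; // parallelism already set on the environment
  }
  // Assumed fallback: Flink's parallelism.default, else 1.
  int configured = configuration.getInteger(CoreOptions.DEFAULT_PARALLELISM);
  return configured > 0 ? configured : 1;
}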
Use of org.apache.beam.vendor.guava.v26_0_jre.com.google.common.net.HostAndPort in project beam by apache.
The class StreamingDataflowWorker, method getConfigFromDataflowService.
/**
* Sends a request to get configuration from Dataflow, either for a specific computation (if
* computation is not null) or global configuration (if computation is null).
*
* @throws IOException if the RPC fails.
*/
private void getConfigFromDataflowService(@Nullable String computation) throws IOException {
  Optional<WorkItem> workItem;
  if (computation != null) {
    workItem = workUnitClient.getStreamingConfigWorkItem(computation);
  } else {
    workItem = workUnitClient.getGlobalStreamingConfigWorkItem();
  }
  if (workItem == null || !workItem.isPresent() || workItem.get() == null) {
    return;
  }
  StreamingConfigTask config = workItem.get().getStreamingConfigTask();
  Preconditions.checkState(config != null);
  if (config.getUserStepToStateFamilyNameMap() != null) {
    stateNameMap.putAll(config.getUserStepToStateFamilyNameMap());
  }
  if (computation == null) {
    if (config.getMaxWorkItemCommitBytes() != null
        && config.getMaxWorkItemCommitBytes() > 0
        && config.getMaxWorkItemCommitBytes() <= Integer.MAX_VALUE) {
      setMaxWorkItemCommitBytes(config.getMaxWorkItemCommitBytes().intValue());
    } else {
      setMaxWorkItemCommitBytes(180 << 20);
    }
  }
  List<StreamingComputationConfig> configs = config.getStreamingComputationConfigs();
  if (configs != null) {
    for (StreamingComputationConfig computationConfig : configs) {
      MapTask mapTask = new MapTask();
      mapTask.setSystemName(computationConfig.getSystemName());
      mapTask.setStageName(computationConfig.getStageName());
      mapTask.setInstructions(computationConfig.getInstructions());
      addComputation(
          computationConfig.getComputationId(),
          mapTask,
          computationConfig.getTransformUserNameToStateFamily());
    }
  }
  if (config.getWindmillServiceEndpoint() != null
      && !config.getWindmillServiceEndpoint().isEmpty()) {
    int port = 443;
    if (config.getWindmillServicePort() != null && config.getWindmillServicePort() != 0) {
      port = config.getWindmillServicePort().intValue();
    }
    HashSet<HostAndPort> endpoints = new HashSet<>();
    for (String endpoint : Splitter.on(',').split(config.getWindmillServiceEndpoint())) {
      endpoints.add(HostAndPort.fromString(endpoint).withDefaultPort(port));
    }
    windmillServer.setWindmillServiceEndpoints(endpoints);
  }
}
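The final block above is the HostAndPort-relevant part: a comma-separated endpoint list in which each entry may or may not carry its own port. Isolated into a runnable sketch; the endpoint strings and class name are illustrative, and the imports mirror the vendored paths used in this file:

import java.util.HashSet;
import java.util.Set;
import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.base.Splitter;
import org.apache.beam.vendor.guava.v26_0_jre.com.google.common.net.HostAndPort;

public class EndpointParsingDemo {
  public static void main(String[] args) {
    // Illustrative input: one endpoint relying on the default port, one explicit.
    String raw = "windmill-a.example.com,windmill-b.example.com:9443";
    int defaultPort = 443;
    Set<HostAndPort> endpoints = new HashSet<>();
    for (String endpoint : Splitter.on(',').split(raw)) {
      // withDefaultPort fills in 443 only where no port was given.
      endpoints.add(HostAndPort.fromString(endpoint).withDefaultPort(defaultPort));
    }
    // Contains windmill-a.example.com:443 and windmill-b.example.com:9443.
    System.out.println(endpoints);
  }
}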