Use of org.apache.flink.kubernetes.kubeclient.parameters.KubernetesJobManagerParameters in project flink by apache.
The class Fabric8FlinkKubeClientTest, method onSetup.
@Override
protected void onSetup() throws Exception {
    super.onSetup();
    KubernetesTestUtils.createTemporyFile("some data", flinkConfDir, CONFIG_FILE_LOGBACK_NAME);
    KubernetesTestUtils.createTemporyFile("some data", flinkConfDir, CONFIG_FILE_LOG4J_NAME);
    final ClusterSpecification clusterSpecification =
            new ClusterSpecification.ClusterSpecificationBuilder()
                    .setMasterMemoryMB(JOB_MANAGER_MEMORY)
                    .setTaskManagerMemoryMB(1000)
                    .setSlotsPerTaskManager(3)
                    .createClusterSpecification();
    final KubernetesJobManagerParameters kubernetesJobManagerParameters =
            new KubernetesJobManagerParameters(flinkConfig, clusterSpecification);
    this.kubernetesJobManagerSpecification =
            KubernetesJobManagerFactory.buildKubernetesJobManagerSpecification(
                    new FlinkPod.Builder().build(), kubernetesJobManagerParameters);
}
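For reference, the same construction works outside the test harness. The following is a minimal, hypothetical sketch rather than code from the Flink sources: the cluster id, entrypoint class, and memory figures are placeholder values, and a real environment may need further options before the factory's decorators are satisfied.

import org.apache.flink.client.deployment.ClusterSpecification;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.kubernetes.configuration.KubernetesConfigOptions;
import org.apache.flink.kubernetes.configuration.KubernetesConfigOptionsInternal;
import org.apache.flink.kubernetes.kubeclient.FlinkPod;
import org.apache.flink.kubernetes.kubeclient.KubernetesJobManagerSpecification;
import org.apache.flink.kubernetes.kubeclient.factory.KubernetesJobManagerFactory;
import org.apache.flink.kubernetes.kubeclient.parameters.KubernetesJobManagerParameters;

public class JobManagerSpecificationSketch {

    public static void main(String[] args) throws Exception {
        final Configuration flinkConfig = new Configuration();
        // Placeholder values: the cluster id and the internal entrypoint class must be set
        // before the factory can assemble the JobManager specification.
        flinkConfig.set(KubernetesConfigOptions.CLUSTER_ID, "my-session-cluster");
        flinkConfig.setString(
                KubernetesConfigOptionsInternal.ENTRY_POINT_CLASS,
                "org.apache.flink.kubernetes.entrypoint.KubernetesSessionClusterEntrypoint");

        final ClusterSpecification clusterSpecification =
                new ClusterSpecification.ClusterSpecificationBuilder()
                        .setMasterMemoryMB(1024)
                        .setTaskManagerMemoryMB(1024)
                        .setSlotsPerTaskManager(2)
                        .createClusterSpecification();

        final KubernetesJobManagerParameters parameters =
                new KubernetesJobManagerParameters(flinkConfig, clusterSpecification);

        // Build the JobManager Deployment and its accompanying resources from an empty
        // pod template, mirroring the test setup above.
        final KubernetesJobManagerSpecification specification =
                KubernetesJobManagerFactory.buildKubernetesJobManagerSpecification(
                        new FlinkPod.Builder().build(), parameters);

        System.out.println("Deployment name: " + specification.getDeployment().getMetadata().getName());
    }
}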
Use of org.apache.flink.kubernetes.kubeclient.parameters.KubernetesJobManagerParameters in project flink by apache.
The class KubernetesJobManagerFactoryWithPodTemplateTest, method getResultPod.
@Override
protected Pod getResultPod(FlinkPod podTemplate) throws Exception {
    final KubernetesJobManagerParameters kubernetesJobManagerParameters =
            new KubernetesJobManagerParameters(
                    flinkConfig, new KubernetesClusterClientFactory().getClusterSpecification(flinkConfig));
    final KubernetesJobManagerSpecification kubernetesJobManagerSpecification =
            KubernetesJobManagerFactory.buildKubernetesJobManagerSpecification(podTemplate, kubernetesJobManagerParameters);
    final PodTemplateSpec podTemplateSpec =
            kubernetesJobManagerSpecification.getDeployment().getSpec().getTemplate();
    return new PodBuilder()
            .withMetadata(podTemplateSpec.getMetadata())
            .withSpec(podTemplateSpec.getSpec())
            .build();
}
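A pod-template test of this shape typically goes on to assert that settings from the template survive into the generated pod. The helper below is a hypothetical sketch that relies only on the fabric8 Pod model; the container name "flink-main-container" mirrors Constants.MAIN_CONTAINER_NAME used elsewhere on this page but is hard-coded here for illustration.

import java.util.Map;

import io.fabric8.kubernetes.api.model.Container;
import io.fabric8.kubernetes.api.model.Pod;

/** Hypothetical helpers for inspecting the pod produced from a template. */
public final class PodTemplateAssertions {

    private static final String MAIN_CONTAINER_NAME = "flink-main-container";

    /** True if the merged pod still contains the JobManager main container. */
    static boolean hasMainContainer(Pod resultPod) {
        return resultPod.getSpec().getContainers().stream()
                .map(Container::getName)
                .anyMatch(MAIN_CONTAINER_NAME::equals);
    }

    /** Labels on the merged pod; labels set on the template are expected to be carried over. */
    static Map<String, String> podLabels(Pod resultPod) {
        return resultPod.getMetadata().getLabels();
    }
}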
Use of org.apache.flink.kubernetes.kubeclient.parameters.KubernetesJobManagerParameters in project flink by apache.
The class InitJobManagerDecoratorWithPodTemplateTest, method getResultPod.
@Override
public FlinkPod getResultPod(FlinkPod podTemplate) {
    final KubernetesJobManagerParameters kubernetesJobManagerParameters =
            new KubernetesJobManagerParameters(
                    flinkConfig, new KubernetesClusterClientFactory().getClusterSpecification(flinkConfig));
    final InitJobManagerDecorator initJobManagerDecorator =
            new InitJobManagerDecorator(kubernetesJobManagerParameters);
    return initJobManagerDecorator.decorateFlinkPod(podTemplate);
}
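InitJobManagerDecorator is only one of several step decorators that the factory applies in sequence, each consuming the FlinkPod produced by the previous step. That chaining pattern is sketched below under some assumptions: the decorator list and its order are illustrative, and while InitJobManagerDecorator and CmdJobManagerDecorator exist in flink-kubernetes, the exact set used by KubernetesJobManagerFactory varies between Flink versions.

import java.util.Arrays;
import java.util.List;

import org.apache.flink.kubernetes.kubeclient.FlinkPod;
import org.apache.flink.kubernetes.kubeclient.decorators.CmdJobManagerDecorator;
import org.apache.flink.kubernetes.kubeclient.decorators.InitJobManagerDecorator;
import org.apache.flink.kubernetes.kubeclient.decorators.KubernetesStepDecorator;
import org.apache.flink.kubernetes.kubeclient.parameters.KubernetesJobManagerParameters;

public final class DecoratorChainSketch {

    /** Applies each decorator in turn, threading the FlinkPod through the chain. */
    static FlinkPod decorate(FlinkPod podTemplate, KubernetesJobManagerParameters parameters) {
        final List<KubernetesStepDecorator> decorators =
                Arrays.asList(
                        new InitJobManagerDecorator(parameters),
                        new CmdJobManagerDecorator(parameters));

        FlinkPod pod = podTemplate;
        for (KubernetesStepDecorator decorator : decorators) {
            pod = decorator.decorateFlinkPod(pod);
        }
        return pod;
    }
}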
Use of org.apache.flink.kubernetes.kubeclient.parameters.KubernetesJobManagerParameters in project flink by apache.
The class KubernetesJobManagerTestBase, method onSetup.
@Override
protected void onSetup() throws Exception {
    final ClusterSpecification clusterSpecification =
            new ClusterSpecification.ClusterSpecificationBuilder()
                    .setMasterMemoryMB(JOB_MANAGER_MEMORY)
                    .setTaskManagerMemoryMB(1024)
                    .setSlotsPerTaskManager(3)
                    .createClusterSpecification();
    this.kubernetesJobManagerParameters = new KubernetesJobManagerParameters(flinkConfig, clusterSpecification);
}
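Tests built on this base then read derived values back from the parameters object. The round trip is sketched below as a hypothetical example; the getter names getJobManagerMemoryMB() and getClusterId() are assumptions about the API surface, with the memory value expected to come from the ClusterSpecification and the cluster id from kubernetes.cluster-id in the Configuration.

import org.apache.flink.client.deployment.ClusterSpecification;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.kubernetes.configuration.KubernetesConfigOptions;
import org.apache.flink.kubernetes.kubeclient.parameters.KubernetesJobManagerParameters;

public class ParametersRoundTripSketch {

    public static void main(String[] args) {
        final Configuration flinkConfig = new Configuration();
        flinkConfig.set(KubernetesConfigOptions.CLUSTER_ID, "test-cluster"); // hypothetical id

        final ClusterSpecification clusterSpecification =
                new ClusterSpecification.ClusterSpecificationBuilder()
                        .setMasterMemoryMB(768)
                        .setTaskManagerMemoryMB(1024)
                        .setSlotsPerTaskManager(3)
                        .createClusterSpecification();

        final KubernetesJobManagerParameters parameters =
                new KubernetesJobManagerParameters(flinkConfig, clusterSpecification);

        // JobManager memory is taken from the ClusterSpecification, the cluster id from the
        // Configuration; both getter names are assumptions about the API surface.
        System.out.println("jobmanager memory MB: " + parameters.getJobManagerMemoryMB());
        System.out.println("cluster id: " + parameters.getClusterId());
    }
}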
Use of org.apache.flink.kubernetes.kubeclient.parameters.KubernetesJobManagerParameters in project flink by apache.
The class KubernetesClusterDescriptor, method deployClusterInternal.
private ClusterClientProvider<String> deployClusterInternal(
        String entryPoint, ClusterSpecification clusterSpecification, boolean detached)
        throws ClusterDeploymentException {
    final ClusterEntrypoint.ExecutionMode executionMode =
            detached ? ClusterEntrypoint.ExecutionMode.DETACHED : ClusterEntrypoint.ExecutionMode.NORMAL;
    flinkConfig.setString(ClusterEntrypoint.INTERNAL_CLUSTER_EXECUTION_MODE, executionMode.toString());
    flinkConfig.setString(KubernetesConfigOptionsInternal.ENTRY_POINT_CLASS, entryPoint);
    // Rpc, blob, rest, taskManagerRpc ports need to be exposed, so update them to fixed values.
    KubernetesUtils.checkAndUpdatePortConfigOption(flinkConfig, BlobServerOptions.PORT, Constants.BLOB_SERVER_PORT);
    KubernetesUtils.checkAndUpdatePortConfigOption(flinkConfig, TaskManagerOptions.RPC_PORT, Constants.TASK_MANAGER_RPC_PORT);
    KubernetesUtils.checkAndUpdatePortConfigOption(flinkConfig, RestOptions.BIND_PORT, Constants.REST_PORT);
    if (HighAvailabilityMode.isHighAvailabilityModeActivated(flinkConfig)) {
        flinkConfig.setString(HighAvailabilityOptions.HA_CLUSTER_ID, clusterId);
        KubernetesUtils.checkAndUpdatePortConfigOption(
                flinkConfig, HighAvailabilityOptions.HA_JOB_MANAGER_PORT_RANGE, flinkConfig.get(JobManagerOptions.PORT));
    }
    try {
        final KubernetesJobManagerParameters kubernetesJobManagerParameters =
                new KubernetesJobManagerParameters(flinkConfig, clusterSpecification);
        final FlinkPod podTemplate =
                kubernetesJobManagerParameters
                        .getPodTemplateFilePath()
                        .map(file -> KubernetesUtils.loadPodFromTemplateFile(client, file, Constants.MAIN_CONTAINER_NAME))
                        .orElse(new FlinkPod.Builder().build());
        final KubernetesJobManagerSpecification kubernetesJobManagerSpec =
                KubernetesJobManagerFactory.buildKubernetesJobManagerSpecification(podTemplate, kubernetesJobManagerParameters);
        client.createJobManagerComponent(kubernetesJobManagerSpec);
        return createClusterClientProvider(clusterId);
    } catch (Exception e) {
        try {
            LOG.warn("Failed to create the Kubernetes cluster \"{}\", try to clean up the residual resources.", clusterId);
            client.stopAndCleanupCluster(clusterId);
        } catch (Exception e1) {
            LOG.info("Failed to stop and clean up the Kubernetes cluster \"{}\".", clusterId, e1);
        }
        throw new ClusterDeploymentException("Could not create Kubernetes cluster \"" + clusterId + "\".", e);
    }
}
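deployClusterInternal is reached through the public deployment methods of KubernetesClusterDescriptor, such as deploySessionCluster. A minimal, hypothetical driver is sketched below; it assumes a reachable Kubernetes cluster, valid kubeconfig credentials, a placeholder cluster id, and that the JobManager and TaskManager memory options required by getClusterSpecification are already configured.

import org.apache.flink.client.deployment.ClusterDescriptor;
import org.apache.flink.client.deployment.ClusterSpecification;
import org.apache.flink.client.program.ClusterClient;
import org.apache.flink.client.program.ClusterClientProvider;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.kubernetes.KubernetesClusterClientFactory;
import org.apache.flink.kubernetes.configuration.KubernetesConfigOptions;

public class SessionClusterDeploySketch {

    public static void main(String[] args) throws Exception {
        final Configuration flinkConfig = new Configuration();
        flinkConfig.set(KubernetesConfigOptions.CLUSTER_ID, "my-session-cluster"); // hypothetical id

        final KubernetesClusterClientFactory factory = new KubernetesClusterClientFactory();
        final ClusterSpecification clusterSpecification = factory.getClusterSpecification(flinkConfig);

        // createClusterDescriptor builds a KubernetesClusterDescriptor; deploySessionCluster
        // eventually runs the deployClusterInternal logic shown above.
        try (ClusterDescriptor<String> descriptor = factory.createClusterDescriptor(flinkConfig)) {
            final ClusterClientProvider<String> provider = descriptor.deploySessionCluster(clusterSpecification);
            try (ClusterClient<String> clusterClient = provider.getClusterClient()) {
                System.out.println("Flink Web UI: " + clusterClient.getWebInterfaceURL());
            }
        }
    }
}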