Example 11 with ProgramInvocationException

Use of org.apache.flink.client.program.ProgramInvocationException in project flink by apache.

From the class RemoteExecutorHostnameResolutionTest, method testUnresolvableHostname2.

@Test
public void testUnresolvableHostname2() {
    InetSocketAddress add = new InetSocketAddress(nonExistingHostname, port);
    RemoteExecutor exec = new RemoteExecutor(add, new Configuration(), Collections.<URL>emptyList(), Collections.<URL>emptyList());
    try {
        exec.executePlan(getProgram());
        fail("This should fail with an ProgramInvocationException");
    } catch (ProgramInvocationException e) {
        // that is what we want!
        assertTrue(e.getCause() instanceof UnknownHostException);
    } catch (Exception e) {
        System.err.println("Wrong exception!");
        e.printStackTrace();
        fail(e.getMessage());
    }
}
Also used : Configuration(org.apache.flink.configuration.Configuration) UnknownHostException(java.net.UnknownHostException) InetSocketAddress(java.net.InetSocketAddress) ProgramInvocationException(org.apache.flink.client.program.ProgramInvocationException) Test(org.junit.Test)
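
The try/fail/catch pattern above predates JUnit's built-in support for expected exceptions. If the project is on JUnit 4.13 or newer, the same check can be written more compactly with Assert.assertThrows; a minimal sketch under that assumption, reusing the fixtures from the test above (nonExistingHostname, port, getProgram()) and statically importing assertThrows:

@Test
public void testUnresolvableHostname2() {
    InetSocketAddress add = new InetSocketAddress(nonExistingHostname, port);
    RemoteExecutor exec = new RemoteExecutor(add, new Configuration(), Collections.<URL>emptyList(), Collections.<URL>emptyList());
    // assertThrows returns the caught exception, so its cause can be inspected directly
    ProgramInvocationException e = assertThrows(ProgramInvocationException.class, () -> exec.executePlan(getProgram()));
    assertTrue(e.getCause() instanceof UnknownHostException);
}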

Example 12 with ProgramInvocationException

Use of org.apache.flink.client.program.ProgramInvocationException in project flink by apache.

From the class KafkaConsumerTestBase, method runSimpleConcurrentProducerConsumerTopology.

/**
	 * Ensure Kafka is working on both producer and consumer side.
	 * This executes a job that contains two Flink pipelines.
	 *
	 * <pre>
	 * (generator source) --> (kafka sink)-[KAFKA-TOPIC]-(kafka source) --> (validating sink)
	 * </pre>
	 * 
	 * We need to retry this test externally. We cannot rely on Flink's own retry mechanism, because the Kafka
	 * producer does not guarantee exactly-once output; a recovery would introduce duplicates that
	 * cause the test to fail.
	 *
	 * This test also ensures that FLINK-3156 does not happen again:
	 *
	 * The following situation caused an NPE in the FlinkKafkaConsumer:
	 *
	 * topic-1 <-- elements are only produced into topic-1.
	 * topic-2
	 *
	 * Therefore, this test also consumes from an empty topic.
	 *
	 */
@RetryOnException(times = 2, exception = kafka.common.NotLeaderForPartitionException.class)
public void runSimpleConcurrentProducerConsumerTopology() throws Exception {
    final String topic = "concurrentProducerConsumerTopic_" + UUID.randomUUID().toString();
    final String additionalEmptyTopic = "additionalEmptyTopic_" + UUID.randomUUID().toString();
    final int parallelism = 3;
    final int elementsPerPartition = 100;
    final int totalElements = parallelism * elementsPerPartition;
    createTestTopic(topic, parallelism, 2);
    // create an empty topic which will remain empty all the time
    createTestTopic(additionalEmptyTopic, parallelism, 1);
    final StreamExecutionEnvironment env = StreamExecutionEnvironment.createRemoteEnvironment("localhost", flinkPort);
    env.setParallelism(parallelism);
    env.enableCheckpointing(500);
    // fail immediately
    env.setRestartStrategy(RestartStrategies.noRestart());
    env.getConfig().disableSysoutLogging();
    TypeInformation<Tuple2<Long, String>> longStringType = TypeInfoParser.parse("Tuple2<Long, String>");
    TypeInformationSerializationSchema<Tuple2<Long, String>> sourceSchema = new TypeInformationSerializationSchema<>(longStringType, env.getConfig());
    TypeInformationSerializationSchema<Tuple2<Long, String>> sinkSchema = new TypeInformationSerializationSchema<>(longStringType, env.getConfig());
    // ----------- add producer dataflow ----------
    DataStream<Tuple2<Long, String>> stream = env.addSource(new RichParallelSourceFunction<Tuple2<Long, String>>() {

        private boolean running = true;

        @Override
        public void run(SourceContext<Tuple2<Long, String>> ctx) throws InterruptedException {
            int cnt = getRuntimeContext().getIndexOfThisSubtask() * elementsPerPartition;
            int limit = cnt + elementsPerPartition;
            while (running && cnt < limit) {
                ctx.collect(new Tuple2<>(1000L + cnt, "kafka-" + cnt));
                cnt++;
                // we delay data generation a bit so that we are sure that some checkpoints are
                // triggered (for FLINK-3156)
                Thread.sleep(50);
            }
        }

        @Override
        public void cancel() {
            running = false;
        }
    });
    Properties producerProperties = FlinkKafkaProducerBase.getPropertiesFromBrokerList(brokerConnectionStrings);
    producerProperties.setProperty("retries", "3");
    producerProperties.putAll(secureProps);
    kafkaServer.produceIntoKafka(stream, topic, new KeyedSerializationSchemaWrapper<>(sinkSchema), producerProperties, null);
    // ----------- add consumer dataflow ----------
    List<String> topics = new ArrayList<>();
    topics.add(topic);
    topics.add(additionalEmptyTopic);
    Properties props = new Properties();
    props.putAll(standardProps);
    props.putAll(secureProps);
    FlinkKafkaConsumerBase<Tuple2<Long, String>> source = kafkaServer.getConsumer(topics, sourceSchema, props);
    DataStreamSource<Tuple2<Long, String>> consuming = env.addSource(source).setParallelism(parallelism);
    consuming.addSink(new RichSinkFunction<Tuple2<Long, String>>() {

        private int elCnt = 0;

        private BitSet validator = new BitSet(totalElements);

        @Override
        public void invoke(Tuple2<Long, String> value) throws Exception {
            String[] sp = value.f1.split("-");
            int v = Integer.parseInt(sp[1]);
            assertEquals(value.f0 - 1000, (long) v);
            assertFalse("Received tuple twice", validator.get(v));
            validator.set(v);
            elCnt++;
            if (elCnt == totalElements) {
                // check if everything in the bitset is set to true
                int nc;
                if ((nc = validator.nextClearBit(0)) != totalElements) {
                    fail("The bitset was not set to 1 on all elements. Next clear:" + nc + " Set: " + validator);
                }
                throw new SuccessException();
            }
        }

        @Override
        public void close() throws Exception {
            super.close();
        }
    }).setParallelism(1);
    try {
        tryExecutePropagateExceptions(env, "runSimpleConcurrentProducerConsumerTopology");
    } catch (ProgramInvocationException | JobExecutionException e) {
        // search the cause chain (bounded depth) for a NotLeaderForPartitionException;
        // if found, rethrow it so that the @RetryOnException rule retries the test
        Throwable cause = e.getCause();
        int depth = 0;
        while (cause != null && depth++ < 20) {
            if (cause instanceof kafka.common.NotLeaderForPartitionException) {
                throw (Exception) cause;
            }
            cause = cause.getCause();
        }
        throw e;
    }
    deleteTestTopic(topic);
}
Also used : ArrayList(java.util.ArrayList) Properties(java.util.Properties) JobExecutionException(org.apache.flink.runtime.client.JobExecutionException) RichSinkFunction(org.apache.flink.streaming.api.functions.sink.RichSinkFunction) BitSet(java.util.BitSet) TypeHint(org.apache.flink.api.common.typeinfo.TypeHint) TypeInformationSerializationSchema(org.apache.flink.streaming.util.serialization.TypeInformationSerializationSchema) Tuple2(org.apache.flink.api.java.tuple.Tuple2) SuccessException(org.apache.flink.test.util.SuccessException) ProgramInvocationException(org.apache.flink.client.program.ProgramInvocationException) StreamExecutionEnvironment(org.apache.flink.streaming.api.environment.StreamExecutionEnvironment) RetryOnException(org.apache.flink.testutils.junit.RetryOnException)
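
The bounded cause-chain walk in the catch block above is a recurring pattern in these tests. A generic helper distilled from it might look like the sketch below; findCause and maxDepth are illustrative names, not Flink API, though later Flink versions ship a similar utility in org.apache.flink.util.ExceptionUtils:

// Walks the cause chain of 'root' up to 'maxDepth' links and returns the first
// throwable assignable to 'type', or null if none is found.
private static <T extends Throwable> T findCause(Throwable root, Class<T> type, int maxDepth) {
    Throwable cause = root.getCause();
    int depth = 0;
    while (cause != null && depth++ < maxDepth) {
        if (type.isInstance(cause)) {
            return type.cast(cause);
        }
        cause = cause.getCause();
    }
    return null;
}

With such a helper, the catch block reduces to: if findCause(e, kafka.common.NotLeaderForPartitionException.class, 20) returns non-null, rethrow that cause for the retry rule; otherwise rethrow the original exception.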

Example 13 with ProgramInvocationException

Use of org.apache.flink.client.program.ProgramInvocationException in project flink by apache.

From the class FlinkClient, method submitTopologyWithOpts.

/**
	 * Parameter {@code uploadedJarLocation} is actually used to point to the local jar, because Flink does not support
	 * uploading a jar file beforehand. Jar files are always uploaded directly when a program is submitted.
	 */
public void submitTopologyWithOpts(final String name, final String uploadedJarLocation, final FlinkTopology topology) throws AlreadyAliveException, InvalidTopologyException {
    if (this.getTopologyJobId(name) != null) {
        throw new AlreadyAliveException();
    }
    final URI uploadedJarUri;
    final URL uploadedJarUrl;
    try {
        uploadedJarUri = new File(uploadedJarLocation).getAbsoluteFile().toURI();
        uploadedJarUrl = uploadedJarUri.toURL();
        JobWithJars.checkJarFile(uploadedJarUrl);
    } catch (final IOException e) {
        throw new RuntimeException("Problem with jar file " + uploadedJarLocation, e);
    }
    try {
        FlinkClient.addStormConfigToTopology(topology, conf);
    } catch (ClassNotFoundException e) {
        LOG.error("Could not register class for Kryo serialization.", e);
        throw new InvalidTopologyException("Could not register class for Kryo serialization.");
    }
    final StreamGraph streamGraph = topology.getExecutionEnvironment().getStreamGraph();
    streamGraph.setJobName(name);
    final JobGraph jobGraph = streamGraph.getJobGraph();
    jobGraph.addJar(new Path(uploadedJarUri));
    final Configuration configuration = jobGraph.getJobConfiguration();
    configuration.setString(ConfigConstants.JOB_MANAGER_IPC_ADDRESS_KEY, jobManagerHost);
    configuration.setInteger(ConfigConstants.JOB_MANAGER_IPC_PORT_KEY, jobManagerPort);
    final ClusterClient client;
    try {
        client = new StandaloneClusterClient(configuration);
    } catch (final IOException e) {
        throw new RuntimeException("Could not establish a connection to the job manager", e);
    }
    try {
        ClassLoader classLoader = JobWithJars.buildUserCodeClassLoader(Collections.<URL>singletonList(uploadedJarUrl), Collections.<URL>emptyList(), this.getClass().getClassLoader());
        client.runDetached(jobGraph, classLoader);
    } catch (final ProgramInvocationException e) {
        throw new RuntimeException("Cannot execute job due to ProgramInvocationException", e);
    }
}
Also used : Path(org.apache.flink.core.fs.Path) Configuration(org.apache.flink.configuration.Configuration) GlobalConfiguration(org.apache.flink.configuration.GlobalConfiguration) InvalidTopologyException(org.apache.storm.generated.InvalidTopologyException) StandaloneClusterClient(org.apache.flink.client.program.StandaloneClusterClient) AlreadyAliveException(org.apache.storm.generated.AlreadyAliveException) IOException(java.io.IOException) URI(java.net.URI) URL(java.net.URL) JobGraph(org.apache.flink.runtime.jobgraph.JobGraph) ClusterClient(org.apache.flink.client.program.ClusterClient) StreamGraph(org.apache.flink.streaming.api.graph.StreamGraph) ProgramInvocationException(org.apache.flink.client.program.ProgramInvocationException) File(java.io.File)
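
Stripped of the Storm-compatibility specifics, the submission path above reduces to: point a StandaloneClusterClient at the JobManager and hand it the JobGraph. A minimal sketch of that core, assuming the same Flink version as the example; submitDetached is an illustrative name, and jobGraph and classLoader are assumed to be built as in the method above:

private static void submitDetached(JobGraph jobGraph, ClassLoader classLoader, String jobManagerHost, int jobManagerPort) {
    Configuration configuration = new Configuration();
    configuration.setString(ConfigConstants.JOB_MANAGER_IPC_ADDRESS_KEY, jobManagerHost);
    configuration.setInteger(ConfigConstants.JOB_MANAGER_IPC_PORT_KEY, jobManagerPort);
    try {
        ClusterClient client = new StandaloneClusterClient(configuration);
        // runDetached submits the job graph and returns without waiting for completion
        client.runDetached(jobGraph, classLoader);
    } catch (IOException e) {
        throw new RuntimeException("Could not establish a connection to the job manager", e);
    } catch (ProgramInvocationException e) {
        throw new RuntimeException("Cannot execute job due to ProgramInvocationException", e);
    }
}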

Example 14 with ProgramInvocationException

Use of org.apache.flink.client.program.ProgramInvocationException in project flink by apache.

From the class CliFrontend, method executeProgram.

// --------------------------------------------------------------------------------------------
//  Interaction with programs and JobManager
// --------------------------------------------------------------------------------------------
protected int executeProgram(PackagedProgram program, ClusterClient client, int parallelism) {
    logAndSysout("Starting execution of program");
    JobSubmissionResult result;
    try {
        result = client.run(program, parallelism);
    } catch (ProgramParametrizationException e) {
        return handleParametrizationException(e);
    } catch (ProgramMissingJobException e) {
        return handleMissingJobException();
    } catch (ProgramInvocationException e) {
        return handleError(e);
    } finally {
        program.deleteExtractedLibraries();
    }
    if (null == result) {
        logAndSysout("No JobSubmissionResult returned, please make sure you called " + "ExecutionEnvironment.execute()");
        return 1;
    }
    if (result.isJobExecutionResult()) {
        logAndSysout("Program execution finished");
        JobExecutionResult execResult = result.getJobExecutionResult();
        System.out.println("Job with JobID " + execResult.getJobID() + " has finished.");
        System.out.println("Job Runtime: " + execResult.getNetRuntime() + " ms");
        Map<String, Object> accumulatorsResult = execResult.getAllAccumulatorResults();
        if (accumulatorsResult.size() > 0) {
            System.out.println("Accumulator Results: ");
            System.out.println(AccumulatorHelper.getResultsFormated(accumulatorsResult));
        }
    } else {
        logAndSysout("Job has been submitted with JobID " + result.getJobID());
    }
    return 0;
}
Also used : JobSubmissionResult(org.apache.flink.api.common.JobSubmissionResult) JobExecutionResult(org.apache.flink.api.common.JobExecutionResult) ProgramParametrizationException(org.apache.flink.client.program.ProgramParametrizationException) ProgramInvocationException(org.apache.flink.client.program.ProgramInvocationException) ProgramMissingJobException(org.apache.flink.client.program.ProgramMissingJobException)
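
The branch on result.isJobExecutionResult() distinguishes blocking execution (the client waited for the job to finish and holds a full JobExecutionResult) from detached submission (only the JobID is known). A condensed sketch of consuming the result at a call site; reportResult is an illustrative name, not CliFrontend API:

private static void reportResult(JobSubmissionResult result) {
    if (result.isJobExecutionResult()) {
        // blocking mode: the job has finished and detailed results are available
        JobExecutionResult execResult = result.getJobExecutionResult();
        System.out.println("Finished in " + execResult.getNetRuntime() + " ms");
    } else {
        // detached mode: the job is still running; only its ID is known
        System.out.println("Submitted job with JobID " + result.getJobID());
    }
}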

Example 15 with ProgramInvocationException

Use of org.apache.flink.client.program.ProgramInvocationException in project flink by apache.

From the class CliFrontendPackageProgramTest, method testFileNotJarFile.

@Test
public void testFileNotJarFile() {
    try {
        CliFrontend frontend = new CliFrontend(CliFrontendTestUtils.getConfigDir());
        ProgramOptions options = mock(ProgramOptions.class);
        when(options.getJarFilePath()).thenReturn(getNonJarFilePath());
        try {
            frontend.buildProgram(options);
            fail("should throw an exception");
        } catch (ProgramInvocationException e) {
        // that's what we want
        }
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    }
}
Also used : ProgramInvocationException(org.apache.flink.client.program.ProgramInvocationException) ProgramOptions(org.apache.flink.client.cli.ProgramOptions) FileNotFoundException(java.io.FileNotFoundException) CompilerException(org.apache.flink.optimizer.CompilerException) Test(org.junit.Test)

Aggregations

ProgramInvocationException (org.apache.flink.client.program.ProgramInvocationException): 23 uses
Test (org.junit.Test): 13 uses
IOException (java.io.IOException): 8 uses
ExecutionEnvironment (org.apache.flink.api.java.ExecutionEnvironment): 8 uses
Configuration (org.apache.flink.configuration.Configuration): 7 uses
JobExecutionException (org.apache.flink.runtime.client.JobExecutionException): 5 uses
File (java.io.File): 4 uses
URL (java.net.URL): 4 uses
Properties (java.util.Properties): 3 uses
PackagedProgram (org.apache.flink.client.program.PackagedProgram): 3 uses
Path (org.apache.flink.core.fs.Path): 3 uses
CompilerException (org.apache.flink.optimizer.CompilerException): 3 uses
JobGraph (org.apache.flink.runtime.jobgraph.JobGraph): 3 uses
StreamExecutionEnvironment (org.apache.flink.streaming.api.environment.StreamExecutionEnvironment): 3 uses
FileNotFoundException (java.io.FileNotFoundException): 2 uses
StringWriter (java.io.StringWriter): 2 uses
MalformedURLException (java.net.MalformedURLException): 2 uses
ArrayList (java.util.ArrayList): 2 uses
JobSubmissionResult (org.apache.flink.api.common.JobSubmissionResult): 2 uses
ClusterClient (org.apache.flink.client.program.ClusterClient): 2 uses