Search in sources:

Example 1 with Tuple2

use of org.apache.flink.api.java.tuple.Tuple2 in project flink by apache.

From the class Graph500, method main:

public static void main(String[] args) throws Exception {
    // Set up the execution environment
    final ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
    env.getConfig().enableObjectReuse();
    ParameterTool parameters = ParameterTool.fromArgs(args);
    env.getConfig().setGlobalJobParameters(parameters);
    if (!parameters.has("directed")) {
        throw new ProgramParametrizationException(getUsage("must declare execution mode as '--directed true' or '--directed false'"));
    }
    boolean directed = parameters.getBoolean("directed");
    if (!parameters.has("simplify")) {
        throw new ProgramParametrizationException(getUsage("must declare '--simplify true' or '--simplify false'"));
    }
    boolean simplify = parameters.getBoolean("simplify");
    // Generate RMat graph
    int scale = parameters.getInt("scale", DEFAULT_SCALE);
    int edgeFactor = parameters.getInt("edge_factor", DEFAULT_EDGE_FACTOR);
    RandomGenerableFactory<JDKRandomGenerator> rnd = new JDKRandomGeneratorFactory();
    long vertexCount = 1L << scale;
    long edgeCount = vertexCount * edgeFactor;
    boolean clipAndFlip = parameters.getBoolean("clip_and_flip", DEFAULT_CLIP_AND_FLIP);
    Graph<LongValue, NullValue, NullValue> graph = new RMatGraph<>(env, rnd, vertexCount, edgeCount).generate();
    if (directed) {
        if (simplify) {
            graph = graph.run(new org.apache.flink.graph.asm.simple.directed.Simplify<LongValue, NullValue, NullValue>());
        }
    } else {
        if (simplify) {
            graph = graph.run(new org.apache.flink.graph.asm.simple.undirected.Simplify<LongValue, NullValue, NullValue>(clipAndFlip));
        } else {
            graph = graph.getUndirected();
        }
    }
    DataSet<Tuple2<LongValue, LongValue>> edges = graph.getEdges().project(0, 1);
    // Print, hash, or write RMat graph to disk
    switch(parameters.get("output", "")) {
        case "print":
            System.out.println();
            edges.print();
            break;
        case "hash":
            System.out.println();
            System.out.println(DataSetUtils.checksumHashCode(edges));
            break;
        case "csv":
            String filename = parameters.getRequired("output_filename");
            String lineDelimiter = StringEscapeUtils.unescapeJava(parameters.get("output_line_delimiter", CsvOutputFormat.DEFAULT_LINE_DELIMITER));
            String fieldDelimiter = StringEscapeUtils.unescapeJava(parameters.get("output_field_delimiter", CsvOutputFormat.DEFAULT_FIELD_DELIMITER));
            edges.writeAsCsv(filename, lineDelimiter, fieldDelimiter);
            env.execute("Graph500");
            break;
        default:
            throw new ProgramParametrizationException(getUsage("invalid output type"));
    }
    JobExecutionResult result = env.getLastJobExecutionResult();
    NumberFormat nf = NumberFormat.getInstance();
    System.out.println();
    System.out.println("Execution runtime: " + nf.format(result.getNetRuntime()) + " ms");
}
Also used : ParameterTool(org.apache.flink.api.java.utils.ParameterTool) ExecutionEnvironment(org.apache.flink.api.java.ExecutionEnvironment) JDKRandomGeneratorFactory(org.apache.flink.graph.generator.random.JDKRandomGeneratorFactory) JobExecutionResult(org.apache.flink.api.common.JobExecutionResult) NullValue(org.apache.flink.types.NullValue) ProgramParametrizationException(org.apache.flink.client.program.ProgramParametrizationException) Tuple2(org.apache.flink.api.java.tuple.Tuple2) LongValue(org.apache.flink.types.LongValue) JDKRandomGenerator(org.apache.commons.math3.random.JDKRandomGenerator) NumberFormat(java.text.NumberFormat)

Example 2 with Tuple2

use of org.apache.flink.api.java.tuple.Tuple2 in project flink by apache.

From the class JobMaster, method offerSlots:

@RpcMethod
public Future<Iterable<SlotOffer>> offerSlots(final ResourceID taskManagerId, final Iterable<SlotOffer> slots, final UUID leaderId) throws Exception {
    validateLeaderSessionId(leaderId);
    Tuple2<TaskManagerLocation, TaskExecutorGateway> taskManager = registeredTaskManagers.get(taskManagerId);
    if (taskManager == null) {
        throw new Exception("Unknown TaskManager " + taskManagerId);
    }
    final JobID jid = jobGraph.getJobID();
    final TaskManagerLocation taskManagerLocation = taskManager.f0;
    final TaskExecutorGateway taskExecutorGateway = taskManager.f1;
    final ArrayList<Tuple2<AllocatedSlot, SlotOffer>> slotsAndOffers = new ArrayList<>();
    final RpcTaskManagerGateway rpcTaskManagerGateway = new RpcTaskManagerGateway(taskExecutorGateway, leaderId);
    for (SlotOffer slotOffer : slots) {
        final AllocatedSlot slot = new AllocatedSlot(slotOffer.getAllocationId(), jid, taskManagerLocation, slotOffer.getSlotIndex(), slotOffer.getResourceProfile(), rpcTaskManagerGateway);
        slotsAndOffers.add(new Tuple2<>(slot, slotOffer));
    }
    return slotPoolGateway.offerSlots(slotsAndOffers);
}
Also used : AllocatedSlot(org.apache.flink.runtime.jobmanager.slots.AllocatedSlot) SlotOffer(org.apache.flink.runtime.taskexecutor.slot.SlotOffer) TaskManagerLocation(org.apache.flink.runtime.taskmanager.TaskManagerLocation) Tuple2(org.apache.flink.api.java.tuple.Tuple2) ArrayList(java.util.ArrayList) TaskExecutorGateway(org.apache.flink.runtime.taskexecutor.TaskExecutorGateway) TimeoutException(java.util.concurrent.TimeoutException) CheckpointException(org.apache.flink.runtime.checkpoint.CheckpointException) LeaderIdMismatchException(org.apache.flink.runtime.highavailability.LeaderIdMismatchException) PartitionProducerDisposedException(org.apache.flink.runtime.jobmanager.PartitionProducerDisposedException) JobExecutionException(org.apache.flink.runtime.client.JobExecutionException) IOException(java.io.IOException) JobID(org.apache.flink.api.common.JobID) RpcMethod(org.apache.flink.runtime.rpc.RpcMethod)

Example 3 with Tuple2

use of org.apache.flink.api.java.tuple.Tuple2 in project flink by apache.

From the class ExecutionGraphRestartTest, method createExecutionGraph:

private static Tuple2<ExecutionGraph, Instance> createExecutionGraph(RestartStrategy restartStrategy, boolean isSpy) throws Exception {
    Instance instance = ExecutionGraphTestUtils.getInstance(new ActorTaskManagerGateway(new SimpleActorGateway(TestingUtils.directExecutionContext())), NUM_TASKS);
    Scheduler scheduler = new Scheduler(TestingUtils.defaultExecutionContext());
    scheduler.newInstanceAvailable(instance);
    JobVertex sender = newJobVertex("Task", NUM_TASKS, NoOpInvokable.class);
    JobGraph jobGraph = new JobGraph("Pointwise job", sender);
    ExecutionGraph eg = newExecutionGraph(restartStrategy, scheduler);
    if (isSpy) {
        eg = spy(eg);
    }
    eg.attachJobGraph(jobGraph.getVerticesSortedTopologicallyFromSources());
    assertEquals(JobStatus.CREATED, eg.getState());
    eg.scheduleForExecution();
    assertEquals(JobStatus.RUNNING, eg.getState());
    return new Tuple2<>(eg, instance);
}
Also used : JobGraph(org.apache.flink.runtime.jobgraph.JobGraph) JobVertex(org.apache.flink.runtime.jobgraph.JobVertex) Instance(org.apache.flink.runtime.instance.Instance) Scheduler(org.apache.flink.runtime.jobmanager.scheduler.Scheduler) Tuple2(org.apache.flink.api.java.tuple.Tuple2) SimpleActorGateway(org.apache.flink.runtime.executiongraph.ExecutionGraphTestUtils.SimpleActorGateway) ActorTaskManagerGateway(org.apache.flink.runtime.jobmanager.slots.ActorTaskManagerGateway)

Example 4 with Tuple2

use of org.apache.flink.api.java.tuple.Tuple2 in project flink by apache.

From the class WordCountMapredITCase, method internalRun:

private void internalRun(boolean isTestDeprecatedAPI) throws Exception {
    final ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
    DataSet<Tuple2<LongWritable, Text>> input;
    if (isTestDeprecatedAPI) {
        input = env.readHadoopFile(new TextInputFormat(), LongWritable.class, Text.class, textPath);
    } else {
        input = env.createInput(readHadoopFile(new TextInputFormat(), LongWritable.class, Text.class, textPath));
    }
    DataSet<String> text = input.map(new MapFunction<Tuple2<LongWritable, Text>, String>() {

        @Override
        public String map(Tuple2<LongWritable, Text> value) throws Exception {
            return value.f1.toString();
        }
    });
    DataSet<Tuple2<String, Integer>> counts = // split up the lines in pairs (2-tuples) containing: (word,1)
    text.flatMap(new Tokenizer()).groupBy(0).sum(1);
    DataSet<Tuple2<Text, LongWritable>> words = counts.map(new MapFunction<Tuple2<String, Integer>, Tuple2<Text, LongWritable>>() {

        @Override
        public Tuple2<Text, LongWritable> map(Tuple2<String, Integer> value) throws Exception {
            return new Tuple2<Text, LongWritable>(new Text(value.f0), new LongWritable(value.f1));
        }
    });
    // Set up Hadoop Output Format
    HadoopOutputFormat<Text, LongWritable> hadoopOutputFormat = new HadoopOutputFormat<Text, LongWritable>(new TextOutputFormat<Text, LongWritable>(), new JobConf());
    hadoopOutputFormat.getJobConf().set("mapred.textoutputformat.separator", " ");
    TextOutputFormat.setOutputPath(hadoopOutputFormat.getJobConf(), new Path(resultPath));
    // Output & Execute
    words.output(hadoopOutputFormat);
    env.execute("Hadoop Compat WordCount");
}
Also used : Path(org.apache.hadoop.fs.Path) ExecutionEnvironment(org.apache.flink.api.java.ExecutionEnvironment) Text(org.apache.hadoop.io.Text) HadoopOutputFormat(org.apache.flink.api.java.hadoop.mapred.HadoopOutputFormat) TextInputFormat(org.apache.hadoop.mapred.TextInputFormat) Tuple2(org.apache.flink.api.java.tuple.Tuple2) LongWritable(org.apache.hadoop.io.LongWritable) Tokenizer(org.apache.flink.test.testfunctions.Tokenizer) JobConf(org.apache.hadoop.mapred.JobConf)

Example 5 with Tuple2

use of org.apache.flink.api.java.tuple.Tuple2 in project flink by apache.

From the class WordCountMapreduceITCase, method internalRun:

private void internalRun(boolean isTestDeprecatedAPI) throws Exception {
    final ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
    DataSet<Tuple2<LongWritable, Text>> input;
    if (isTestDeprecatedAPI) {
        input = env.readHadoopFile(new TextInputFormat(), LongWritable.class, Text.class, textPath);
    } else {
        input = env.createInput(readHadoopFile(new TextInputFormat(), LongWritable.class, Text.class, textPath));
    }
    DataSet<String> text = input.map(new MapFunction<Tuple2<LongWritable, Text>, String>() {

        @Override
        public String map(Tuple2<LongWritable, Text> value) throws Exception {
            return value.f1.toString();
        }
    });
    DataSet<Tuple2<String, Integer>> counts = // split up the lines in pairs (2-tuples) containing: (word,1)
    text.flatMap(new Tokenizer()).groupBy(0).sum(1);
    DataSet<Tuple2<Text, LongWritable>> words = counts.map(new MapFunction<Tuple2<String, Integer>, Tuple2<Text, LongWritable>>() {

        @Override
        public Tuple2<Text, LongWritable> map(Tuple2<String, Integer> value) throws Exception {
            return new Tuple2<Text, LongWritable>(new Text(value.f0), new LongWritable(value.f1));
        }
    });
    // Set up Hadoop Output Format
    Job job = Job.getInstance();
    HadoopOutputFormat<Text, LongWritable> hadoopOutputFormat = new HadoopOutputFormat<Text, LongWritable>(new TextOutputFormat<Text, LongWritable>(), job);
    job.getConfiguration().set("mapred.textoutputformat.separator", " ");
    TextOutputFormat.setOutputPath(job, new Path(resultPath));
    // Output & Execute
    words.output(hadoopOutputFormat);
    env.execute("Hadoop Compat WordCount");
}
Also used : Path(org.apache.hadoop.fs.Path) ExecutionEnvironment(org.apache.flink.api.java.ExecutionEnvironment) Text(org.apache.hadoop.io.Text) HadoopOutputFormat(org.apache.flink.api.java.hadoop.mapreduce.HadoopOutputFormat) TextInputFormat(org.apache.hadoop.mapreduce.lib.input.TextInputFormat) Tuple2(org.apache.flink.api.java.tuple.Tuple2) LongWritable(org.apache.hadoop.io.LongWritable) Job(org.apache.hadoop.mapreduce.Job) Tokenizer(org.apache.flink.test.testfunctions.Tokenizer)

Aggregations

Tuple2 (org.apache.flink.api.java.tuple.Tuple2)1159 Test (org.junit.Test)871 ExecutionEnvironment (org.apache.flink.api.java.ExecutionEnvironment)486 StreamExecutionEnvironment (org.apache.flink.streaming.api.environment.StreamExecutionEnvironment)266 Tuple3 (org.apache.flink.api.java.tuple.Tuple3)195 TimeWindow (org.apache.flink.streaming.api.windowing.windows.TimeWindow)137 ArrayList (java.util.ArrayList)136 ExecutionConfig (org.apache.flink.api.common.ExecutionConfig)103 Plan (org.apache.flink.api.common.Plan)103 TypeHint (org.apache.flink.api.common.typeinfo.TypeHint)103 OptimizedPlan (org.apache.flink.optimizer.plan.OptimizedPlan)99 Configuration (org.apache.flink.configuration.Configuration)87 List (java.util.List)82 IOException (java.io.IOException)79 OneInputTransformation (org.apache.flink.streaming.api.transformations.OneInputTransformation)77 ListStateDescriptor (org.apache.flink.api.common.state.ListStateDescriptor)74 HashMap (java.util.HashMap)72 SinkPlanNode (org.apache.flink.optimizer.plan.SinkPlanNode)66 Collection (java.util.Collection)61 ConcurrentLinkedQueue (java.util.concurrent.ConcurrentLinkedQueue)60