Example 21 with DataSet

use of org.apache.flink.api.java.DataSet in project flink by apache.

the class GSACompilerTest method testGSACompiler.

@Test
public void testGSACompiler() {
    try {
        ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(DEFAULT_PARALLELISM);
        // compose test program
        {
            DataSet<Edge<Long, NullValue>> edges = env.fromElements(new Tuple3<>(1L, 2L, NullValue.getInstance())).map(new Tuple3ToEdgeMap<Long, NullValue>());
            Graph<Long, Long, NullValue> graph = Graph.fromDataSet(edges, new InitVertices(), env);
            DataSet<Vertex<Long, Long>> result = graph.runGatherSumApplyIteration(new GatherNeighborIds(), new SelectMinId(), new UpdateComponentId(), 100).getVertices();
            result.output(new DiscardingOutputFormat<Vertex<Long, Long>>());
        }
        Plan p = env.createProgramPlan("GSA Connected Components");
        OptimizedPlan op = compileNoStats(p);
        // check the sink
        SinkPlanNode sink = op.getDataSinks().iterator().next();
        assertEquals(ShipStrategyType.FORWARD, sink.getInput().getShipStrategy());
        assertEquals(DEFAULT_PARALLELISM, sink.getParallelism());
        assertEquals(PartitioningProperty.HASH_PARTITIONED, sink.getGlobalProperties().getPartitioning());
        // check the iteration
        WorksetIterationPlanNode iteration = (WorksetIterationPlanNode) sink.getInput().getSource();
        assertEquals(DEFAULT_PARALLELISM, iteration.getParallelism());
        // check the solution set join and the delta
        PlanNode ssDelta = iteration.getSolutionSetDeltaPlanNode();
        // this is only true if the update function preserves the partitioning
        assertTrue(ssDelta instanceof DualInputPlanNode);
        DualInputPlanNode ssJoin = (DualInputPlanNode) ssDelta;
        assertEquals(DEFAULT_PARALLELISM, ssJoin.getParallelism());
        assertEquals(ShipStrategyType.PARTITION_HASH, ssJoin.getInput1().getShipStrategy());
        assertEquals(new FieldList(0), ssJoin.getInput1().getShipStrategyKeys());
        // check the workset join
        SingleInputPlanNode sumReducer = (SingleInputPlanNode) ssJoin.getInput1().getSource();
        SingleInputPlanNode gatherMapper = (SingleInputPlanNode) sumReducer.getInput().getSource();
        DualInputPlanNode edgeJoin = (DualInputPlanNode) gatherMapper.getInput().getSource();
        assertEquals(DEFAULT_PARALLELISM, edgeJoin.getParallelism());
        // input1 is the workset
        assertEquals(ShipStrategyType.FORWARD, edgeJoin.getInput1().getShipStrategy());
        // input2 is the edges
        assertEquals(ShipStrategyType.PARTITION_HASH, edgeJoin.getInput2().getShipStrategy());
        assertTrue(edgeJoin.getInput2().getTempMode().isCached());
        assertEquals(new FieldList(0), edgeJoin.getInput2().getShipStrategyKeys());
    } catch (Exception e) {
        System.err.println(e.getMessage());
        e.printStackTrace();
        fail(e.getMessage());
    }
}
Also used : ExecutionEnvironment(org.apache.flink.api.java.ExecutionEnvironment) DataSet(org.apache.flink.api.java.DataSet) WorksetIterationPlanNode(org.apache.flink.optimizer.plan.WorksetIterationPlanNode) Plan(org.apache.flink.api.common.Plan) OptimizedPlan(org.apache.flink.optimizer.plan.OptimizedPlan) DiscardingOutputFormat(org.apache.flink.api.java.io.DiscardingOutputFormat) FieldList(org.apache.flink.api.common.operators.util.FieldList) DualInputPlanNode(org.apache.flink.optimizer.plan.DualInputPlanNode) SingleInputPlanNode(org.apache.flink.optimizer.plan.SingleInputPlanNode) NullValue(org.apache.flink.types.NullValue) Graph(org.apache.flink.graph.Graph) PlanNode(org.apache.flink.optimizer.plan.PlanNode) SinkPlanNode(org.apache.flink.optimizer.plan.SinkPlanNode) Tuple3ToEdgeMap(org.apache.flink.graph.utils.Tuple3ToEdgeMap) Test(org.junit.Test)
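
The GSA user functions referenced above (InitVertices, GatherNeighborIds, SelectMinId, UpdateComponentId) are not part of this snippet. A minimal sketch of what they typically look like, modeled on Flink's GSA connected-components example; the method signatures follow the org.apache.flink.graph.gsa API, but the bodies are assumptions, not the test's own code.

// Hypothetical sketches of the user functions used by the GSA iteration above.
// Needed imports: org.apache.flink.api.common.functions.MapFunction,
// org.apache.flink.types.NullValue, and GatherFunction, SumFunction,
// ApplyFunction, Neighbor from org.apache.flink.graph.gsa.
@SuppressWarnings("serial")
private static final class InitVertices implements MapFunction<Long, Long> {
    @Override
    public Long map(Long vertexId) {
        // every vertex starts out as its own component
        return vertexId;
    }
}

@SuppressWarnings("serial")
private static final class GatherNeighborIds extends GatherFunction<Long, NullValue, Long> {
    @Override
    public Long gather(Neighbor<Long, NullValue> neighbor) {
        // propagate the neighbor's current component id
        return neighbor.getNeighborValue();
    }
}

@SuppressWarnings("serial")
private static final class SelectMinId extends SumFunction<Long, NullValue, Long> {
    @Override
    public Long sum(Long newValue, Long currentValue) {
        // keep the smallest component id seen so far
        return Math.min(newValue, currentValue);
    }
}

@SuppressWarnings("serial")
private static final class UpdateComponentId extends ApplyFunction<Long, Long, Long> {
    @Override
    public void apply(Long summedValue, Long origValue) {
        // emit an update only if the component id actually shrinks
        if (summedValue < origValue) {
            setResult(summedValue);
        }
    }
}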

Example 22 with DataSet

use of org.apache.flink.api.java.DataSet in project flink by apache.

the class GSATranslationTest method testTranslation.

@Test
public void testTranslation() {
    try {
        final String ITERATION_NAME = "Test Name";
        final String AGGREGATOR_NAME = "AggregatorName";
        final String BC_SET_GATHER_NAME = "gather messages";
        final String BC_SET_SUM_NAME = "sum updates";
        final String BC_SET_APPLY_NAME = "apply updates";
        final int NUM_ITERATIONS = 13;
        final int ITERATION_parallelism = 77;
        ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
        DataSet<Long> bcGather = env.fromElements(1L);
        DataSet<Long> bcSum = env.fromElements(1L);
        DataSet<Long> bcApply = env.fromElements(1L);
        DataSet<Vertex<Long, Long>> result;
        // ------------ construct the test program ------------------
        {
            DataSet<Edge<Long, NullValue>> edges = env.fromElements(new Tuple3<>(1L, 2L, NullValue.getInstance())).map(new Tuple3ToEdgeMap<Long, NullValue>());
            Graph<Long, Long, NullValue> graph = Graph.fromDataSet(edges, new InitVertices(), env);
            GSAConfiguration parameters = new GSAConfiguration();
            parameters.registerAggregator(AGGREGATOR_NAME, new LongSumAggregator());
            parameters.setName(ITERATION_NAME);
            parameters.setParallelism(ITERATION_parallelism);
            parameters.addBroadcastSetForGatherFunction(BC_SET_GATHER_NAME, bcGather);
            parameters.addBroadcastSetForSumFunction(BC_SET_SUM_NAME, bcSum);
            parameters.addBroadcastSetForApplyFunction(BC_SET_APPLY_NAME, bcApply);
            result = graph.runGatherSumApplyIteration(new GatherNeighborIds(), new SelectMinId(), new UpdateComponentId(), NUM_ITERATIONS, parameters).getVertices();
            result.output(new DiscardingOutputFormat<Vertex<Long, Long>>());
        }
        // ------------- validate the java program ----------------
        assertTrue(result instanceof DeltaIterationResultSet);
        DeltaIterationResultSet<?, ?> resultSet = (DeltaIterationResultSet<?, ?>) result;
        DeltaIteration<?, ?> iteration = resultSet.getIterationHead();
        // check the basic iteration properties
        assertEquals(NUM_ITERATIONS, resultSet.getMaxIterations());
        assertArrayEquals(new int[] { 0 }, resultSet.getKeyPositions());
        assertEquals(ITERATION_parallelism, iteration.getParallelism());
        assertEquals(ITERATION_NAME, iteration.getName());
        assertEquals(AGGREGATOR_NAME, iteration.getAggregators().getAllRegisteredAggregators().iterator().next().getName());
        // validate that the semantic properties are set as they should
        TwoInputUdfOperator<?, ?, ?, ?> solutionSetJoin = (TwoInputUdfOperator<?, ?, ?, ?>) resultSet.getNextWorkset();
        assertTrue(solutionSetJoin.getSemanticProperties().getForwardingTargetFields(0, 0).contains(0));
        assertTrue(solutionSetJoin.getSemanticProperties().getForwardingTargetFields(1, 0).contains(0));
        SingleInputUdfOperator<?, ?, ?> sumReduce = (SingleInputUdfOperator<?, ?, ?>) solutionSetJoin.getInput1();
        SingleInputUdfOperator<?, ?, ?> gatherMap = (SingleInputUdfOperator<?, ?, ?>) sumReduce.getInput();
        // validate that the broadcast sets are forwarded
        assertEquals(bcGather, gatherMap.getBroadcastSets().get(BC_SET_GATHER_NAME));
        assertEquals(bcSum, sumReduce.getBroadcastSets().get(BC_SET_SUM_NAME));
        assertEquals(bcApply, solutionSetJoin.getBroadcastSets().get(BC_SET_APPLY_NAME));
    } catch (Exception e) {
        System.err.println(e.getMessage());
        e.printStackTrace();
        fail(e.getMessage());
    }
}
Also used : Vertex(org.apache.flink.graph.Vertex) ExecutionEnvironment(org.apache.flink.api.java.ExecutionEnvironment) DataSet(org.apache.flink.api.java.DataSet) LongSumAggregator(org.apache.flink.api.common.aggregators.LongSumAggregator) DeltaIterationResultSet(org.apache.flink.api.java.operators.DeltaIterationResultSet) TwoInputUdfOperator(org.apache.flink.api.java.operators.TwoInputUdfOperator) DiscardingOutputFormat(org.apache.flink.api.java.io.DiscardingOutputFormat) NullValue(org.apache.flink.types.NullValue) SingleInputUdfOperator(org.apache.flink.api.java.operators.SingleInputUdfOperator) Graph(org.apache.flink.graph.Graph) Tuple3ToEdgeMap(org.apache.flink.graph.utils.Tuple3ToEdgeMap) Test(org.junit.Test)
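
Inside the iteration, the registered broadcast sets and the aggregator are read back from the user functions. A hedged sketch of how a gather function could consume the configuration set up above, assuming the Gelly accessors getBroadcastSet(...) and getIterationAggregator(...) are available on the GSA functions (the class name below is made up for illustration; verify the accessors against the Gelly version in use).

// Hypothetical gather function showing how the configuration above is consumed.
// Needs java.util.Collection in addition to the GSA imports.
@SuppressWarnings("serial")
private static final class GatherWithConfig extends GatherFunction<Long, NullValue, Long> {
    @Override
    public Long gather(Neighbor<Long, NullValue> neighbor) {
        // broadcast set registered under "gather messages" via the GSAConfiguration
        Collection<Long> gatherBc = getBroadcastSet("gather messages");
        // aggregator registered under "AggregatorName"
        LongSumAggregator agg = getIterationAggregator("AggregatorName");
        agg.aggregate(gatherBc.size());
        return neighbor.getNeighborValue();
    }
}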

Example 23 with DataSet

use of org.apache.flink.api.java.DataSet in project flink by apache.

the class TPCHQuery10 method main.

// *************************************************************************
//     PROGRAM
// *************************************************************************
public static void main(String[] args) throws Exception {
    if (!parseParameters(args)) {
        return;
    }
    final ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
    // get customer data set: (custkey, name, address, nationkey, acctbal) 
    DataSet<Tuple5<Integer, String, String, Integer, Double>> customers = getCustomerDataSet(env);
    // get orders data set: (orderkey, custkey, orderdate)
    DataSet<Tuple3<Integer, Integer, String>> orders = getOrdersDataSet(env);
    // get lineitem data set: (orderkey, extendedprice, discount, returnflag)
    DataSet<Tuple4<Integer, Double, Double, String>> lineitems = getLineitemDataSet(env);
    // get nation data set: (nationkey, name)
    DataSet<Tuple2<Integer, String>> nations = getNationsDataSet(env);
    // orders filtered by year: (orderkey, custkey)
    DataSet<Tuple2<Integer, Integer>> ordersFilteredByYear =
            // filter by year
            orders.filter(order -> Integer.parseInt(order.f2.substring(0, 4)) > 1990).project(0, 1);
    // lineitems filtered by flag: (orderkey, extendedprice, discount)
    DataSet<Tuple3<Integer, Double, Double>> lineitemsFilteredByFlag =
            // filter by flag
            lineitems.filter(lineitem -> lineitem.f3.equals("R")).project(0, 1, 2);
    // join orders with lineitems: (custkey, extendedprice, discount)
    DataSet<Tuple3<Integer, Double, Double>> lineitemsOfCustomerKey = ordersFilteredByYear.joinWithHuge(lineitemsFilteredByFlag).where(0).equalTo(0).projectFirst(1).projectSecond(1, 2);
    // aggregate for revenue: (custkey, revenue)
    DataSet<Tuple2<Integer, Double>> revenueOfCustomerKey = lineitemsOfCustomerKey.map(i -> new Tuple2<>(i.f0, i.f1 * (1 - i.f2))).groupBy(0).sum(1);
    // join customer with nation (custkey, name, address, nationname, acctbal)
    DataSet<Tuple5<Integer, String, String, String, Double>> customerWithNation = customers.joinWithTiny(nations).where(3).equalTo(0).projectFirst(0, 1, 2).projectSecond(1).projectFirst(4);
    // join customer (with nation) with revenue (custkey, name, address, nationname, acctbal, revenue)
    DataSet<Tuple6<Integer, String, String, String, Double, Double>> customerWithRevenue = customerWithNation.join(revenueOfCustomerKey).where(0).equalTo(0).projectFirst(0, 1, 2, 3, 4).projectSecond(1);
    // emit result
    customerWithRevenue.writeAsCsv(outputPath);
    // execute program
    env.execute("TPCH Query 10 Example");
}
Also used : DataSet(org.apache.flink.api.java.DataSet) ExecutionEnvironment(org.apache.flink.api.java.ExecutionEnvironment) Tuple2(org.apache.flink.api.java.tuple.Tuple2) Tuple3(org.apache.flink.api.java.tuple.Tuple3) Tuple4(org.apache.flink.api.java.tuple.Tuple4) Tuple5(org.apache.flink.api.java.tuple.Tuple5) Tuple6(org.apache.flink.api.java.tuple.Tuple6)
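
The getCustomerDataSet, getOrdersDataSet, getLineitemDataSet, and getNationsDataSet helpers (and the customerPath/outputPath fields) are defined elsewhere in the class. A plausible sketch of the customer reader, assuming a pipe-delimited TPC-H customer file and a static customerPath field; the column mask is an assumption based on the standard 8-column customer schema, not the original helper.

// Hypothetical sketch: reads (custkey, name, address, nationkey, acctbal)
// from a '|' delimited TPC-H customer file, keeping columns 1-4 and 6.
private static DataSet<Tuple5<Integer, String, String, Integer, Double>> getCustomerDataSet(ExecutionEnvironment env) {
    return env.readCsvFile(customerPath)
            .fieldDelimiter("|")
            .includeFields("11110100")
            .types(Integer.class, String.class, String.class, Integer.class, Double.class);
}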

Example 24 with DataSet

use of org.apache.flink.api.java.DataSet in project flink by apache.

the class FilterLambda1 method main.

public static void main(String[] args) throws Exception {
    ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
    DataSet<String> input = env.fromElements("Please filter", "the words", "but not this");
    FilterFunction<String> filter = (v) -> WordFilter.filter(v);
    DataSet<String> output = input.filter(filter);
    // print() triggers execution of the program and writes the result to
    // stdout, so a separate env.execute() call is not needed here
    output.print();
}
Also used : FilterFunction(org.apache.flink.api.common.functions.FilterFunction) DataSet(org.apache.flink.api.java.DataSet) ExecutionEnvironment(org.apache.flink.api.java.ExecutionEnvironment)
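
WordFilter is not included in the snippet. A minimal hypothetical stand-in that is consistent with the sample input; the real class in the Flink sources may differ.

// Hypothetical helper: keeps only the strings that do not contain "not",
// so "but not this" from the sample input above would be filtered out.
public class WordFilter {
    public static boolean filter(String value) {
        return !value.contains("not");
    }
}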

Example 25 with DataSet

use of org.apache.flink.api.java.DataSet in project flink by apache.

the class OperatorTranslation method translateSingleInputOperator.

private <I, O> org.apache.flink.api.common.operators.Operator<O> translateSingleInputOperator(SingleInputOperator<?, ?, ?> op) {
    @SuppressWarnings("unchecked") SingleInputOperator<I, O, ?> typedOp = (SingleInputOperator<I, O, ?>) op;
    @SuppressWarnings("unchecked") DataSet<I> typedInput = (DataSet<I>) op.getInput();
    Operator<I> input = translate(typedInput);
    org.apache.flink.api.common.operators.Operator<O> dataFlowOp = typedOp.translateToDataFlow(input);
    if (op instanceof UdfOperator<?>) {
        @SuppressWarnings("unchecked") SingleInputUdfOperator<I, O, ?> udfOp = (SingleInputUdfOperator<I, O, ?>) op;
        // set configuration parameters
        Configuration opParams = udfOp.getParameters();
        if (opParams != null) {
            dataFlowOp.getParameters().addAll(opParams);
        }
        if (dataFlowOp instanceof org.apache.flink.api.common.operators.SingleInputOperator) {
            org.apache.flink.api.common.operators.SingleInputOperator<?, O, ?> unaryOp = (org.apache.flink.api.common.operators.SingleInputOperator<?, O, ?>) dataFlowOp;
            // set the semantic properties
            unaryOp.setSemanticProperties(udfOp.getSemanticProperties());
        }
    }
    return dataFlowOp;
}
Also used : Configuration(org.apache.flink.configuration.Configuration) DataSet(org.apache.flink.api.java.DataSet) AbstractUdfOperator(org.apache.flink.api.common.operators.AbstractUdfOperator)
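
The semantic properties copied onto the data-flow operator in the last step usually come from forwarded-field declarations on the user function. A small illustration of how such properties are declared; the mapper itself is hypothetical, only the annotation comes from the Flink API.

// Declares that field f0 of the input tuple reaches field f0 of the output
// unchanged; getSemanticProperties() exposes exactly this information to the
// translation above. Needs org.apache.flink.api.java.functions.FunctionAnnotation,
// org.apache.flink.api.common.functions.MapFunction and
// org.apache.flink.api.java.tuple.Tuple2.
@FunctionAnnotation.ForwardedFields("f0")
public static class ScaleSecondField implements MapFunction<Tuple2<Long, Double>, Tuple2<Long, Double>> {
    @Override
    public Tuple2<Long, Double> map(Tuple2<Long, Double> value) {
        return new Tuple2<>(value.f0, value.f1 * 2.0);
    }
}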

Aggregations

DataSet (org.apache.flink.api.java.DataSet): 43
ExecutionEnvironment (org.apache.flink.api.java.ExecutionEnvironment): 18
Test (org.junit.Test): 15
Graph (org.apache.flink.graph.Graph): 14
DiscardingOutputFormat (org.apache.flink.api.java.io.DiscardingOutputFormat): 11
Tuple2 (org.apache.flink.api.java.tuple.Tuple2): 11
NullValue (org.apache.flink.types.NullValue): 11
Plan (org.apache.flink.api.common.Plan): 7
FieldList (org.apache.flink.api.common.operators.util.FieldList): 6
DualInputPlanNode (org.apache.flink.optimizer.plan.DualInputPlanNode): 6
OptimizedPlan (org.apache.flink.optimizer.plan.OptimizedPlan): 6
PlanNode (org.apache.flink.optimizer.plan.PlanNode): 6
SinkPlanNode (org.apache.flink.optimizer.plan.SinkPlanNode): 6
WorksetIterationPlanNode (org.apache.flink.optimizer.plan.WorksetIterationPlanNode): 6
PythonMapPartition (org.apache.flink.python.api.functions.PythonMapPartition): 6
LongSumAggregator (org.apache.flink.api.common.aggregators.LongSumAggregator): 5
MapFunction (org.apache.flink.api.common.functions.MapFunction): 5
Tuple3 (org.apache.flink.api.java.tuple.Tuple3): 5
Edge (org.apache.flink.graph.Edge): 5
Tuple2ToVertexMap (org.apache.flink.graph.utils.Tuple2ToVertexMap): 5