
Example 6 with IterativeDataSet

Use of org.apache.flink.api.java.operators.IterativeDataSet in project flink-mirror by flink-ci.

From the class BulkIterationTranslationTest, method testCorrectTranslation.

@Test
public void testCorrectTranslation() {
    final String jobName = "Test JobName";
    final int numIterations = 13;
    final int defaultParallelism = 133;
    final int iterationParallelism = 77;
    ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
    // ------------ construct the test program ------------------
    {
        env.setParallelism(defaultParallelism);
        @SuppressWarnings("unchecked") DataSet<Tuple3<Double, Long, String>> initialDataSet = env.fromElements(new Tuple3<>(3.44, 5L, "abc"));
        IterativeDataSet<Tuple3<Double, Long, String>> bulkIteration = initialDataSet.iterate(numIterations);
        bulkIteration.setParallelism(iterationParallelism);
        // test that multiple iteration consumers are supported
        DataSet<Tuple3<Double, Long, String>> identity = bulkIteration.map(new IdentityMapper<Tuple3<Double, Long, String>>());
        DataSet<Tuple3<Double, Long, String>> result = bulkIteration.closeWith(identity);
        result.output(new DiscardingOutputFormat<Tuple3<Double, Long, String>>());
        result.writeAsText("/dev/null");
    }
    Plan p = env.createProgramPlan(jobName);
    // ------------- validate the plan ----------------
    BulkIterationBase<?> iteration = (BulkIterationBase<?>) p.getDataSinks().iterator().next().getInput();
    assertEquals(jobName, p.getJobName());
    assertEquals(defaultParallelism, p.getDefaultParallelism());
    assertEquals(iterationParallelism, iteration.getParallelism());
}
Also used: ExecutionEnvironment(org.apache.flink.api.java.ExecutionEnvironment) DataSet(org.apache.flink.api.java.DataSet) IterativeDataSet(org.apache.flink.api.java.operators.IterativeDataSet) Plan(org.apache.flink.api.common.Plan) DiscardingOutputFormat(org.apache.flink.api.java.io.DiscardingOutputFormat) Tuple3(org.apache.flink.api.java.tuple.Tuple3) BulkIterationBase(org.apache.flink.api.common.operators.base.BulkIterationBase) Test(org.junit.Test)
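
For orientation, the bulk-iteration pattern this test validates reduces to a short, self-contained sketch. All names and values below are illustrative, not taken from the test:

import org.apache.flink.api.java.DataSet;
import org.apache.flink.api.java.ExecutionEnvironment;
import org.apache.flink.api.java.operators.IterativeDataSet;

public class BulkIterationSketch {

    public static void main(String[] args) throws Exception {
        ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
        // iterate(10) opens a bulk iteration that runs at most 10 supersteps
        IterativeDataSet<Long> loop = env.fromElements(0L).iterate(10);
        // the step function is applied once per superstep
        DataSet<Long> step = loop.map(x -> x + 1);
        // closeWith feeds the step result back as the next superstep's input
        DataSet<Long> result = loop.closeWith(step);
        // print() triggers execution and emits 10
        result.print();
    }
}

As in the test above, the iteration head may feed several consumers before closeWith is called; only the dataset passed to closeWith is fed back into the loop.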

Example 7 with IterativeDataSet

Use of org.apache.flink.api.java.operators.IterativeDataSet in project flink by splunk.

From the class DataSetAllroundTestProgram, method main.

@SuppressWarnings("Convert2Lambda")
public static void main(String[] args) throws Exception {
    // get parameters
    ParameterTool params = ParameterTool.fromArgs(args);
    int loadFactor = Integer.parseInt(params.getRequired("loadFactor"));
    String outputPath = params.getRequired("outputPath");
    boolean infinite = params.getBoolean("infinite", false);
    ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
    int numKeys = loadFactor * 128 * 1024;
    DataSet<Tuple2<String, Integer>> x1Keys;
    DataSet<Tuple2<String, Integer>> x2Keys = env.createInput(Generator.generate(numKeys * 32, 2)).setParallelism(4);
    DataSet<Tuple2<String, Integer>> x8Keys = env.createInput(Generator.generate(numKeys, 8)).setParallelism(4);
    if (infinite) {
        x1Keys = env.createInput(Generator.generateInfinitely(numKeys)).setParallelism(4);
    } else {
        x1Keys = env.createInput(Generator.generate(numKeys, 1)).setParallelism(4);
    }
    DataSet<Tuple2<String, Integer>> joined = x2Keys
            .map(x -> Tuple4.of("0-0", 0L, 1, x.f0))
            .returns(Types.TUPLE(Types.STRING, Types.LONG, Types.INT, Types.STRING))
            .join(x8Keys)
            .where(3)
            .equalTo(0)
            .with((l, r) -> Tuple2.of(l.f3, 1))
            .returns(Types.TUPLE(Types.STRING, Types.INT))
            .groupBy(new KeySelector<Tuple2<String, Integer>, String>() {

        @Override
        public String getKey(Tuple2<String, Integer> value) {
            return value.f0;
        }
    }).reduce((value1, value2) -> Tuple2.of(value1.f0, value1.f1 + value2.f1));
    // co-group two datasets on their primary keys.
    // we filter both inputs such that only 6.25% of the keys overlap.
    // result: (key, cnt), #keys records with unique keys, cnt = (6.25%: 2, 93.75%: 1)
    DataSet<Tuple2<String, Integer>> coGrouped = x1Keys.filter(x -> x.f1 > 59)
            .coGroup(x1Keys.filter(x -> x.f1 < 68))
            .where("f0")
            .equalTo("f0")
            .with((CoGroupFunction<Tuple2<String, Integer>, Tuple2<String, Integer>, Tuple2<String, Integer>>) (l, r, out) -> {
        int cnt = 0;
        String key = "";
        for (Tuple2<String, Integer> t : l) {
            cnt++;
            key = t.f0;
        }
        for (Tuple2<String, Integer> t : r) {
            cnt++;
            key = t.f0;
        }
        out.collect(Tuple2.of(key, cnt));
    }).returns(Types.TUPLE(Types.STRING, Types.INT));
    // join datasets on keys (1-1 join) and replicate by 16 (previously computed count)
    // result: (key, cnt), 16 * #keys records, all keys preserved, cnt = (6.25%: 2, 93.75%: 1)
    DataSet<Tuple2<String, Integer>> joined2 = joined
            .join(coGrouped, JoinOperatorBase.JoinHint.REPARTITION_SORT_MERGE)
            .where(0)
            .equalTo("f0")
            .flatMap((FlatMapFunction<Tuple2<Tuple2<String, Integer>, Tuple2<String, Integer>>, Tuple2<String, Integer>>) (p, out) -> {
        for (int i = 0; i < p.f0.f1; i++) {
            out.collect(Tuple2.of(p.f0.f0, p.f1.f1));
        }
    }).returns(Types.TUPLE(Types.STRING, Types.INT));
    // iteration. double the count field until all counts are at 32 or more
    // result: (key, cnt), 16 * #keys records, all keys preserved, cnt = (6.25%: 64, 93.75%: 32)
    IterativeDataSet<Tuple2<String, Integer>> initial = joined2.iterate(16);
    DataSet<Tuple2<String, Integer>> iteration = initial.map(x -> Tuple2.of(x.f0, x.f1 * 2)).returns(Types.TUPLE(Types.STRING, Types.INT));
    DataSet<Boolean> termination = iteration.flatMap((FlatMapFunction<Tuple2<String, Integer>, Boolean>) (x, out) -> {
        if (x.f1 < 32) {
            out.collect(false);
        }
    }).returns(Types.BOOLEAN);
    DataSet<Tuple2<Integer, Integer>> result = initial.closeWith(iteration, termination)
            .groupBy(1)
            .reduceGroup((GroupReduceFunction<Tuple2<String, Integer>, Tuple2<Integer, Integer>>) (g, out) -> {
        int key = 0;
        int cnt = 0;
        for (Tuple2<String, Integer> r : g) {
            key = r.f1;
            cnt++;
        }
        out.collect(Tuple2.of(key, cnt));
    }).returns(Types.TUPLE(Types.INT, Types.INT))
            .map(x -> Tuple2.of(x.f0, x.f1 / (loadFactor * 128)))
            .returns(Types.TUPLE(Types.INT, Types.INT));
    // sort and emit result
    result.sortPartition(0, Order.ASCENDING).setParallelism(1).writeAsText(outputPath, FileSystem.WriteMode.OVERWRITE).setParallelism(1);
    env.execute();
}
Also used: ParameterTool(org.apache.flink.api.java.utils.ParameterTool) Types(org.apache.flink.api.common.typeinfo.Types) KeySelector(org.apache.flink.api.java.functions.KeySelector) JoinOperatorBase(org.apache.flink.api.common.operators.base.JoinOperatorBase) Tuple2(org.apache.flink.api.java.tuple.Tuple2) Tuple4(org.apache.flink.api.java.tuple.Tuple4) GroupReduceFunction(org.apache.flink.api.common.functions.GroupReduceFunction) IterativeDataSet(org.apache.flink.api.java.operators.IterativeDataSet) FlatMapFunction(org.apache.flink.api.common.functions.FlatMapFunction) CoGroupFunction(org.apache.flink.api.common.functions.CoGroupFunction) DataSet(org.apache.flink.api.java.DataSet) ExecutionEnvironment(org.apache.flink.api.java.ExecutionEnvironment) FileSystem(org.apache.flink.core.fs.FileSystem) Order(org.apache.flink.api.common.operators.Order)
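
The two-argument closeWith(result, terminationCriterion) used above ends the loop early: the iteration stops as soon as the criterion dataset is empty at the end of a superstep. A minimal sketch of that pattern, with illustrative names and bounds:

import org.apache.flink.api.common.functions.FlatMapFunction;
import org.apache.flink.api.common.typeinfo.Types;
import org.apache.flink.api.java.DataSet;
import org.apache.flink.api.java.ExecutionEnvironment;
import org.apache.flink.api.java.operators.IterativeDataSet;

public class TerminationCriterionSketch {

    public static void main(String[] args) throws Exception {
        ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
        // allow up to 100 supersteps, but expect to stop much earlier
        IterativeDataSet<Integer> loop = env.fromElements(1).iterate(100);
        DataSet<Integer> doubled = loop.map(x -> x * 2);
        // emit a record only while a value is still below the threshold;
        // the loop terminates once this dataset is empty after a superstep
        DataSet<Boolean> criterion = doubled.flatMap((FlatMapFunction<Integer, Boolean>) (x, out) -> {
            if (x < 32) {
                out.collect(false);
            }
        }).returns(Types.BOOLEAN);
        // prints 32 after five supersteps, not 2^100
        loop.closeWith(doubled, criterion).print();
    }
}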

Example 8 with IterativeDataSet

Use of org.apache.flink.api.java.operators.IterativeDataSet in project gradoop by dbs-leipzig.

From the class CentroidFRLayouter, method execute.

@Override
public LogicalGraph execute(LogicalGraph g) {
    g = createInitialLayout(g);
    DataSet<EPGMVertex> gradoopVertices = g.getVertices();
    DataSet<EPGMEdge> gradoopEdges = g.getEdges();
    DataSet<LVertex> vertices = gradoopVertices.map(LVertex::new);
    DataSet<LEdge> edges = gradoopEdges.map(LEdge::new);
    centroids = chooseInitialCentroids(vertices);
    // Flink can only iterate over one dataset at a time, so create a dataset containing both
    // centroids and vertices, and split them again at the beginning of every iteration
    DataSet<SimpleGraphElement> graphElements = vertices.map(x -> x);
    graphElements = graphElements.union(centroids.map(x -> x));
    IterativeDataSet<SimpleGraphElement> loop = graphElements.iterate(iterations);
    vertices = loop.filter(x -> x instanceof LVertex).map(x -> (LVertex) x);
    centroids = loop.filter(x -> x instanceof Centroid).map(x -> (Centroid) x);
    centroids = calculateNewCentroids(centroids, vertices);
    center = calculateLayoutCenter(vertices);
    LGraph graph = new LGraph(vertices, edges);
    // we have overridden repulsionForces(), so layout() will use our new centroid-based solution
    layout(graph);
    graphElements = graph.getVertices().map(x -> x);
    graphElements = graphElements.union(centroids.map(x -> x));
    graphElements = loop.closeWith(graphElements);
    vertices = graphElements.filter(x -> x instanceof LVertex).map(x -> (LVertex) x);
    gradoopVertices = vertices.join(gradoopVertices).where(LVertex.ID_POSITION).equalTo(new Id<>()).with(new LVertexEPGMVertexJoinFunction());
    return g.getFactory().fromDataSets(gradoopVertices, gradoopEdges);
}
Also used: CentroidUpdater(org.gradoop.flink.model.impl.operators.layouting.functions.CentroidUpdater) LGraph(org.gradoop.flink.model.impl.operators.layouting.util.LGraph) LVertexEPGMVertexJoinFunction(org.gradoop.flink.model.impl.operators.layouting.functions.LVertexEPGMVertexJoinFunction) LVertex(org.gradoop.flink.model.impl.operators.layouting.util.LVertex) IterativeDataSet(org.apache.flink.api.java.operators.IterativeDataSet) CentroidRepulsionForceMapper(org.gradoop.flink.model.impl.operators.layouting.functions.CentroidRepulsionForceMapper) FRRepulsionFunction(org.gradoop.flink.model.impl.operators.layouting.functions.FRRepulsionFunction) Centroid(org.gradoop.flink.model.impl.operators.layouting.util.Centroid) Id(org.gradoop.flink.model.impl.functions.epgm.Id) SimpleGraphElement(org.gradoop.flink.model.impl.operators.layouting.util.SimpleGraphElement) AverageVertexPositionsFunction(org.gradoop.flink.model.impl.operators.layouting.functions.AverageVertexPositionsFunction) DataSet(org.apache.flink.api.java.DataSet) Vector(org.gradoop.flink.model.impl.operators.layouting.util.Vector) EPGMEdge(org.gradoop.common.model.impl.pojo.EPGMEdge) LEdge(org.gradoop.flink.model.impl.operators.layouting.util.LEdge) Force(org.gradoop.flink.model.impl.operators.layouting.util.Force) LogicalGraph(org.gradoop.flink.model.impl.epgm.LogicalGraph) EPGMVertex(org.gradoop.common.model.impl.pojo.EPGMVertex)
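
The union-and-split trick described in the comments above can be isolated into a self-contained sketch: tag two logical datasets so they can travel through a single iteration and be separated again at each superstep. CentroidFRLayouter does this with a SimpleGraphElement supertype and instanceof filters; the tuple tag used here is just a compact stand-in, and all names and values are illustrative:

import org.apache.flink.api.java.DataSet;
import org.apache.flink.api.java.ExecutionEnvironment;
import org.apache.flink.api.java.operators.IterativeDataSet;
import org.apache.flink.api.java.tuple.Tuple2;

public class UnionSplitIterationSketch {

    public static void main(String[] args) throws Exception {
        ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
        // f0 tags the element kind: "v" for vertices, "c" for centroids
        DataSet<Tuple2<String, Double>> vertices = env.fromElements(Tuple2.of("v", 1.0), Tuple2.of("v", 2.0));
        DataSet<Tuple2<String, Double>> centroids = env.fromElements(Tuple2.of("c", 0.0));
        // Flink iterates over a single dataset, so union both before the loop
        IterativeDataSet<Tuple2<String, Double>> loop = vertices.union(centroids).iterate(5);
        // split the combined dataset again at the beginning of each superstep
        DataSet<Tuple2<String, Double>> vs = loop.filter(t -> t.f0.equals("v"));
        DataSet<Tuple2<String, Double>> cs = loop.filter(t -> t.f0.equals("c"));
        // ... update vs and cs here, then re-unite them to close the loop
        loop.closeWith(vs.union(cs)).print();
    }
}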

Example 9 with IterativeDataSet

Use of org.apache.flink.api.java.operators.IterativeDataSet in project Alink by alibaba.

From the class BisectingKMeansTrainBatchOp, method linkFrom.

/**
 * The bisecting k-means algorithm has nested loops: in the outer loop, cluster centers
 * are split, and in the inner loop, the split centers are iteratively refined.
 * However, Flink has no nested-loop semantics, so we have to flatten the nested loops
 * in our implementation.
 */
@Override
public BisectingKMeansTrainBatchOp linkFrom(BatchOperator<?>... inputs) {
    BatchOperator<?> in = checkAndGetFirst(inputs);
    // get the input parameters' values
    final DistanceType distanceType = getDistanceType();
    final int k = this.getK();
    final int maxIter = this.getMaxIter();
    final String vectorColName = this.getVectorCol();
    final int minDivisibleClusterSize = this.getMinDivisibleClusterSize();
    ContinuousDistance distance = distanceType.getFastDistance();
    Tuple2<DataSet<Vector>, DataSet<BaseVectorSummary>> vectorsAndStat = StatisticsHelper.summaryHelper(in, null, vectorColName);
    DataSet<Integer> dim = vectorsAndStat.f1.map(new MapFunction<BaseVectorSummary, Integer>() {

        private static final long serialVersionUID = 5358843841535961680L;

        @Override
        public Integer map(BaseVectorSummary value) {
            Preconditions.checkArgument(value.count() > 0, "The train dataset is empty!");
            return value.vectorSize();
        }
    });
    // tuple: sampleId, features, assignment
    DataSet<Tuple3<Long, Vector, Long>> initialAssignment = DataSetUtils.zipWithUniqueId(vectorsAndStat.f0)
            .map(new RichMapFunction<Tuple2<Long, Vector>, Tuple3<Long, Vector, Long>>() {

        private static final long serialVersionUID = -6036596630416015773L;

        private int vectorSize;

        @Override
        public void open(Configuration params) {
            vectorSize = (int) this.getRuntimeContext().getBroadcastVariable(VECTOR_SIZE).get(0);
        }

        @Override
        public Tuple3<Long, Vector, Long> map(Tuple2<Long, Vector> value) {
            if (value.f1 instanceof SparseVector) {
                ((SparseVector) value.f1).setSize(vectorSize);
            }
            return Tuple3.of(value.f0, value.f1, ROOT_INDEX);
        }
    }).withBroadcastSet(dim, VECTOR_SIZE);
    DataSet<Tuple2<Long, ClusterSummary>> clustersSummaries = summary(initialAssignment.project(2, 1), dim, distanceType);
    DataSet<Tuple3<Long, ClusterSummary, IterInfo>> clustersSummariesAndIterInfo = clustersSummaries
            .map(new MapFunction<Tuple2<Long, ClusterSummary>, Tuple3<Long, ClusterSummary, IterInfo>>() {

        private static final long serialVersionUID = -3883958936263294331L;

        @Override
        public Tuple3<Long, ClusterSummary, IterInfo> map(Tuple2<Long, ClusterSummary> value) {
            return Tuple3.of(value.f0, value.f1, new IterInfo(maxIter));
        }
    }).withForwardedFields("f0;f1");
    IterativeDataSet<Tuple3<Long, ClusterSummary, IterInfo>> loop = clustersSummariesAndIterInfo.iterate(Integer.MAX_VALUE);
    DataSet<Tuple1<IterInfo>> iterInfo = loop.<Tuple1<IterInfo>>project(2).first(1);
    // Get all cluster summaries. Split clusters at the first step of the inner iterations.
    DataSet<Tuple3<Long, ClusterSummary, IterInfo>> allClusters = getOrSplitClusters(loop, k, minDivisibleClusterSize, getRandomSeed());
    DataSet<Long> divisibleClusterIndices = getDivisibleClusterIndices(allClusters);
    DataSet<Tuple2<Long, DenseVector>> newClusterCenters = getNewClusterCenters(allClusters);
    DataSet<Tuple3<Long, Vector, Long>> newAssignment = updateAssignment(initialAssignment, divisibleClusterIndices, newClusterCenters, distance, iterInfo);
    DataSet<Tuple2<Long, ClusterSummary>> newClusterSummaries = summary(newAssignment.project(2, 1), dim, distanceType);
    DataSet<Tuple3<Long, ClusterSummary, IterInfo>> updatedClusterSummariesWithIterInfo = updateClusterSummariesAndIterInfo(allClusters, newClusterSummaries);
    DataSet<Integer> stopCriterion = iterInfo.flatMap(new FlatMapFunction<Tuple1<IterInfo>, Integer>() {

        private static final long serialVersionUID = -4258243788034193744L;

        @Override
        public void flatMap(Tuple1<IterInfo> value, Collector<Integer> out) {
            if (!(value.f0.atLastInnerIterStep() && value.f0.atLastBisectionStep())) {
                out.collect(0);
            }
        }
    });
    DataSet<Tuple2<Long, ClusterSummary>> finalClusterSummaries = loop.closeWith(updatedClusterSummariesWithIterInfo, stopCriterion).project(0, 1);
    DataSet<Row> modelRows = finalClusterSummaries.mapPartition(new SaveModel(distanceType, vectorColName, k)).withBroadcastSet(dim, VECTOR_SIZE).setParallelism(1);
    this.setOutput(modelRows, new BisectingKMeansModelDataConverter().getModelSchema());
    return this;
}
Also used: Configuration(org.apache.flink.configuration.Configuration) DataSet(org.apache.flink.api.java.DataSet) IterativeDataSet(org.apache.flink.api.java.operators.IterativeDataSet) SparseVector(com.alibaba.alink.common.linalg.SparseVector) MapFunction(org.apache.flink.api.common.functions.MapFunction) FlatMapFunction(org.apache.flink.api.common.functions.FlatMapFunction) RichMapFunction(org.apache.flink.api.common.functions.RichMapFunction) BisectingKMeansModelDataConverter(com.alibaba.alink.operator.common.clustering.BisectingKMeansModelDataConverter) BaseVectorSummary(com.alibaba.alink.operator.common.statistics.basicstatistic.BaseVectorSummary) Vector(com.alibaba.alink.common.linalg.Vector) DenseVector(com.alibaba.alink.common.linalg.DenseVector) ClusterSummary(com.alibaba.alink.operator.common.clustering.BisectingKMeansModelData.ClusterSummary) ContinuousDistance(com.alibaba.alink.operator.common.distance.ContinuousDistance) Tuple1(org.apache.flink.api.java.tuple.Tuple1) Tuple2(org.apache.flink.api.java.tuple.Tuple2) Tuple3(org.apache.flink.api.java.tuple.Tuple3) Row(org.apache.flink.types.Row)
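
One detail worth isolating from this example is the broadcast-set pattern used to ship the vector dimension (dim) into the mapper: withBroadcastSet registers a dataset under a name, and a rich function reads it in open() via getRuntimeContext().getBroadcastVariable. A minimal sketch of the same pattern, with illustrative names and values:

import java.util.List;

import org.apache.flink.api.common.functions.RichMapFunction;
import org.apache.flink.api.java.DataSet;
import org.apache.flink.api.java.ExecutionEnvironment;
import org.apache.flink.configuration.Configuration;

public class BroadcastSketch {

    public static void main(String[] args) throws Exception {
        ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
        DataSet<Integer> vectorSize = env.fromElements(3);
        DataSet<Integer> data = env.fromElements(1, 2, 3);
        data.map(new RichMapFunction<Integer, Integer>() {

            private int size;

            @Override
            public void open(Configuration params) {
                // the broadcast variable is materialized once per task
                List<Integer> bc = getRuntimeContext().getBroadcastVariable("vectorSize");
                size = bc.get(0);
            }

            @Override
            public Integer map(Integer value) {
                return value * size;
            }
        }).withBroadcastSet(vectorSize, "vectorSize").print();
    }
}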

Aggregations

DataSet (org.apache.flink.api.java.DataSet): 9
IterativeDataSet (org.apache.flink.api.java.operators.IterativeDataSet): 9
ExecutionEnvironment (org.apache.flink.api.java.ExecutionEnvironment): 6
Tuple3 (org.apache.flink.api.java.tuple.Tuple3): 5
FlatMapFunction (org.apache.flink.api.common.functions.FlatMapFunction): 4
Tuple2 (org.apache.flink.api.java.tuple.Tuple2): 4
Tuple4 (org.apache.flink.api.java.tuple.Tuple4): 4
Plan (org.apache.flink.api.common.Plan): 3
CoGroupFunction (org.apache.flink.api.common.functions.CoGroupFunction): 3
GroupReduceFunction (org.apache.flink.api.common.functions.GroupReduceFunction): 3
Order (org.apache.flink.api.common.operators.Order): 3
BulkIterationBase (org.apache.flink.api.common.operators.base.BulkIterationBase): 3
JoinOperatorBase (org.apache.flink.api.common.operators.base.JoinOperatorBase): 3
Types (org.apache.flink.api.common.typeinfo.Types): 3
KeySelector (org.apache.flink.api.java.functions.KeySelector): 3
DiscardingOutputFormat (org.apache.flink.api.java.io.DiscardingOutputFormat): 3
ParameterTool (org.apache.flink.api.java.utils.ParameterTool): 3
FileSystem (org.apache.flink.core.fs.FileSystem): 3
Test (org.junit.Test): 3
DenseVector (com.alibaba.alink.common.linalg.DenseVector): 2