Example 1 with ArrayBuffer

Use of scala.collection.mutable.ArrayBuffer in project Gaffer by gchq.

From the class ImportKeyValuePairRDDToAccumuloHandlerTest, method checkImportRDDOfElements:

@Test
public void checkImportRDDOfElements() throws OperationException, IOException {
    final Graph graph1 = new Graph.Builder()
            .addSchema(getClass().getResourceAsStream("/schema/dataSchema.json"))
            .addSchema(getClass().getResourceAsStream("/schema/dataTypes.json"))
            .addSchema(getClass().getResourceAsStream("/schema/storeTypes.json"))
            .addSchema(getClass().getResourceAsStream("/schema/storeSchema.json"))
            .storeProperties(getClass().getResourceAsStream("/store.properties"))
            .build();
    final ArrayBuffer<Element> elements = new ArrayBuffer<>();
    for (int i = 0; i < 10; i++) {
        final Entity entity = new Entity(TestGroups.ENTITY);
        entity.setVertex("" + i);
        final Edge edge1 = new Edge(TestGroups.EDGE);
        edge1.setSource("" + i);
        edge1.setDestination("B");
        edge1.setDirected(false);
        edge1.putProperty(TestPropertyNames.COUNT, 2);
        final Edge edge2 = new Edge(TestGroups.EDGE);
        edge2.setSource("" + i);
        edge2.setDestination("C");
        edge2.setDirected(false);
        edge2.putProperty(TestPropertyNames.COUNT, 4);
        elements.$plus$eq(edge1);
        elements.$plus$eq(edge2);
        elements.$plus$eq(entity);
    }
    final User user = new User();
    final SparkConf sparkConf = new SparkConf()
            .setMaster("local")
            .setAppName("tests")
            .set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
            .set("spark.kryo.registrator", "uk.gov.gchq.gaffer.spark.serialisation.kryo.Registrator")
            .set("spark.driver.allowMultipleContexts", "true");
    final SparkContext sparkContext = new SparkContext(sparkConf);
    // Create Hadoop configuration and serialise to a string
    final Configuration configuration = new Configuration();
    final ByteArrayOutputStream baos = new ByteArrayOutputStream();
    configuration.write(new DataOutputStream(baos));
    final String configurationString = new String(baos.toByteArray(), CommonConstants.UTF_8);
    final String outputPath = getClass().getResource("/").getPath() + "load";
    final String failurePath = getClass().getResource("/").getPath() + "failure";
    final File file = new File(outputPath);
    if (file.exists()) {
        FileUtils.forceDelete(file);
    }
    final ElementConverterFunction func = new ElementConverterFunction(sparkContext.broadcast(
            new ByteEntityAccumuloElementConverter(graph1.getSchema()), ACCUMULO_ELEMENT_CONVERTER_CLASS_TAG));
    final RDD<Tuple2<Key, Value>> elementRDD = sparkContext.parallelize(elements, 1, ELEMENT_CLASS_TAG)
            .flatMap(func, TUPLE2_CLASS_TAG);
    final ImportKeyValuePairRDDToAccumulo addRdd = new ImportKeyValuePairRDDToAccumulo.Builder()
            .input(elementRDD)
            .outputPath(outputPath)
            .failurePath(failurePath)
            .build();
    graph1.execute(addRdd, user);
    FileUtils.forceDelete(file);
    // Check all elements were added
    final GetRDDOfAllElements rddQuery = new GetRDDOfAllElements.Builder()
            .sparkContext(sparkContext)
            .option(AbstractGetRDDHandler.HADOOP_CONFIGURATION_KEY, configurationString)
            .build();
    final RDD<Element> rdd = graph1.execute(rddQuery, user);
    if (rdd == null) {
        fail("No RDD returned");
    }
    final Set<Element> results = new HashSet<>();
    final Element[] returnedElements = (Element[]) rdd.collect();
    Collections.addAll(results, returnedElements);
    assertEquals(elements.size(), results.size());
    sparkContext.stop();
}
Also used: Entity(uk.gov.gchq.gaffer.data.element.Entity), User(uk.gov.gchq.gaffer.user.User), Configuration(org.apache.hadoop.conf.Configuration), DataOutputStream(java.io.DataOutputStream), Element(uk.gov.gchq.gaffer.data.element.Element), GetRDDOfAllElements(uk.gov.gchq.gaffer.spark.operation.scalardd.GetRDDOfAllElements), ImportKeyValuePairRDDToAccumulo(uk.gov.gchq.gaffer.sparkaccumulo.operation.scalardd.ImportKeyValuePairRDDToAccumulo), HashSet(java.util.HashSet), ByteArrayOutputStream(org.apache.commons.io.output.ByteArrayOutputStream), Graph(uk.gov.gchq.gaffer.graph.Graph), SparkContext(org.apache.spark.SparkContext), Tuple2(scala.Tuple2), ArrayBuffer(scala.collection.mutable.ArrayBuffer), ByteEntityAccumuloElementConverter(uk.gov.gchq.gaffer.accumulostore.key.core.impl.byteEntity.ByteEntityAccumuloElementConverter), ElementConverterFunction(uk.gov.gchq.gaffer.sparkaccumulo.operation.utils.scala.ElementConverterFunction), Edge(uk.gov.gchq.gaffer.data.element.Edge), SparkConf(org.apache.spark.SparkConf), File(java.io.File), Test(org.junit.Test)
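
Aside on the $plus$eq calls above: that is simply the JVM-encoded name of Scala's += operator, which is why Java code appending to a Scala ArrayBuffer has to spell it out. A minimal, self-contained sketch of this interop (the class name is hypothetical; it assumes only the Scala 2.x library on the classpath):

import scala.collection.mutable.ArrayBuffer;

public class ArrayBufferInteropSketch {
    public static void main(String[] args) {
        final ArrayBuffer<String> buf = new ArrayBuffer<>();
        // $plus$eq is the bytecode name of Scala's += operator.
        buf.$plus$eq("a");
        buf.$plus$eq("b");
        System.out.println(buf.size());   // 2
        System.out.println(buf.apply(0)); // "a", i.e. Scala's buf(0)
    }
}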

Example 2 with ArrayBuffer

Use of scala.collection.mutable.ArrayBuffer in project flink by apache.

From the class PythonCorrelateSplitRule, method createTopCalc:

private FlinkLogicalCalc createTopCalc(int primitiveLeftFieldCount, RexBuilder rexBuilder,
        ArrayBuffer<RexNode> extractedRexNodes, RelDataType calcRowType, FlinkLogicalCorrelate newCorrelate) {
    RexProgram rexProgram = new RexProgramBuilder(newCorrelate.getRowType(), rexBuilder).getProgram();
    int offset = extractedRexNodes.size() + primitiveLeftFieldCount;
    // Keep only the input refs that point at the original left fields or at the
    // correlate output, skipping the extracted Rex nodes in between.
    List<RexNode> newTopCalcProjects = rexProgram.getExprList().stream()
            .filter(x -> x instanceof RexInputRef)
            .filter(x -> {
                int index = ((RexInputRef) x).getIndex();
                return index < primitiveLeftFieldCount || index >= offset;
            })
            .collect(Collectors.toList());
    return new FlinkLogicalCalc(newCorrelate.getCluster(), newCorrelate.getTraitSet(), newCorrelate,
            RexProgram.create(newCorrelate.getRowType(), newTopCalcProjects, null, calcRowType, rexBuilder));
}
Also used: RexFieldAccess(org.apache.calcite.rex.RexFieldAccess), RexProgram(org.apache.calcite.rex.RexProgram), RexUtil(org.apache.calcite.rex.RexUtil), SqlValidatorUtil(org.apache.calcite.sql.validate.SqlValidatorUtil), RexNode(org.apache.calcite.rex.RexNode), LinkedList(java.util.LinkedList), ArrayBuffer(scala.collection.mutable.ArrayBuffer), PythonUtil(org.apache.flink.table.planner.plan.utils.PythonUtil), RelDataType(org.apache.calcite.rel.type.RelDataType), RexDefaultVisitor(org.apache.flink.table.planner.plan.utils.RexDefaultVisitor), RexBuilder(org.apache.calcite.rex.RexBuilder), Iterator(scala.collection.Iterator), FlinkLogicalTableFunctionScan(org.apache.flink.table.planner.plan.nodes.logical.FlinkLogicalTableFunctionScan), FlinkLogicalCalc(org.apache.flink.table.planner.plan.nodes.logical.FlinkLogicalCalc), RelNode(org.apache.calcite.rel.RelNode), Collectors(java.util.stream.Collectors), RelOptRuleCall(org.apache.calcite.plan.RelOptRuleCall), RexInputRef(org.apache.calcite.rex.RexInputRef), RelOptRule(org.apache.calcite.plan.RelOptRule), RexProgramBuilder(org.apache.calcite.rex.RexProgramBuilder), List(java.util.List), StreamPhysicalCorrelateRule(org.apache.flink.table.planner.plan.rules.physical.stream.StreamPhysicalCorrelateRule), RelDataTypeField(org.apache.calcite.rel.type.RelDataTypeField), RexCorrelVariable(org.apache.calcite.rex.RexCorrelVariable), HepRelVertex(org.apache.calcite.plan.hep.HepRelVertex), FlinkLogicalCorrelate(org.apache.flink.table.planner.plan.nodes.logical.FlinkLogicalCorrelate), RexCall(org.apache.calcite.rex.RexCall)
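
The rule above uses the ArrayBuffer only as a counted, ordered collection. For readers working with such a buffer from Java elsewhere, here is a small hypothetical sketch of the two operations it relies on, size() and traversal via scala.collection.Iterator (class and variable names are illustrative):

import scala.collection.Iterator;
import scala.collection.mutable.ArrayBuffer;

public class BufferIterationSketch {
    public static void main(String[] args) {
        final ArrayBuffer<Integer> nodes = new ArrayBuffer<>();
        nodes.$plus$eq(1);
        nodes.$plus$eq(2);
        // size() is a cheap call, which is what createTopCalc leans on
        // when computing its offset.
        final int offset = nodes.size();
        // Scala's Iterator follows the familiar hasNext/next protocol.
        final Iterator<Integer> it = nodes.iterator();
        while (it.hasNext()) {
            System.out.println(it.next());
        }
        System.out.println("offset: " + offset);
    }
}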

Example 3 with ArrayBuffer

Use of scala.collection.mutable.ArrayBuffer in project Gaffer by gchq.

From the class SplitStoreFromRDDOfElementsHandlerIT, method createElements:

private ArrayBuffer<Element> createElements() {
    final ArrayBuffer<Element> elements = new ArrayBuffer<>();
    for (int i = 0; i < 10; i++) {
        final Entity entity = new Entity.Builder().group(TestGroups.ENTITY).vertex("" + i).build();
        final Edge edge1 = new Edge.Builder().group(TestGroups.EDGE).source("" + i).dest("B")
                .directed(false).property(TestPropertyNames.COUNT, 2).build();
        final Edge edge2 = new Edge.Builder().group(TestGroups.EDGE).source("" + i).dest("C")
                .directed(false).property(TestPropertyNames.COUNT, 4).build();
        elements.$plus$eq(edge1);
        elements.$plus$eq(edge2);
        elements.$plus$eq(entity);
    }
    return elements;
}
Also used: Entity(uk.gov.gchq.gaffer.data.element.Entity), Element(uk.gov.gchq.gaffer.data.element.Element), ArrayBuffer(scala.collection.mutable.ArrayBuffer), Edge(uk.gov.gchq.gaffer.data.element.Edge)
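
A buffer built this way can also be handed to Java-collection APIs. A hedged sketch using scala.collection.JavaConverters, assuming Scala 2.12 (the object is still present, though deprecated, in 2.13); the class name is hypothetical:

import java.util.List;
import scala.collection.JavaConverters;
import scala.collection.mutable.ArrayBuffer;

public class BufferAsJavaListSketch {
    public static void main(String[] args) {
        final ArrayBuffer<String> elements = new ArrayBuffer<>();
        elements.$plus$eq("entity");
        elements.$plus$eq("edge");
        // bufferAsJavaList wraps the Scala buffer in a live java.util.List
        // view; no copying takes place.
        final List<String> view = JavaConverters.bufferAsJavaList(elements);
        System.out.println(view); // [entity, edge]
    }
}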

Example 4 with ArrayBuffer

Use of scala.collection.mutable.ArrayBuffer in project Gaffer by gchq.

From the class ElementConverterFunction, method apply:

@Override
public TraversableOnce<Tuple2<Key, Value>> apply(final Element element) {
    final ArrayBuffer<Tuple2<Key, Value>> buf = new ArrayBuffer<>();
    Pair<Key, Key> keys = new Pair<>();
    Value value = null;
    try {
        keys = converterBroadcast.value().getKeysFromElement(element);
        value = converterBroadcast.value().getValueFromElement(element);
    } catch (final AccumuloElementConversionException e) {
        LOGGER.error(e.getMessage(), e);
    }
    final Key first = keys.getFirst();
    if (null != first) {
        buf.$plus$eq(new Tuple2<>(first, value));
    }
    final Key second = keys.getSecond();
    if (null != second) {
        buf.$plus$eq(new Tuple2<>(second, value));
    }
    return buf;
}
Also used: Tuple2(scala.Tuple2), Value(org.apache.accumulo.core.data.Value), ArrayBuffer(scala.collection.mutable.ArrayBuffer), Key(org.apache.accumulo.core.data.Key), Pair(uk.gov.gchq.gaffer.commonutil.pair.Pair), AccumuloElementConversionException(uk.gov.gchq.gaffer.accumulostore.key.exception.AccumuloElementConversionException)
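
ElementConverterFunction can return the ArrayBuffer directly because, in Scala 2.12, ArrayBuffer implements TraversableOnce, the result type that the Scala RDD's flatMap expects. A minimal hypothetical sketch of the same pattern, a scala.Function1 written in Java by extending scala.runtime.AbstractFunction1 (SplitFunction is illustrative, not a Gaffer class):

import scala.Tuple2;
import scala.collection.TraversableOnce;
import scala.collection.mutable.ArrayBuffer;
import scala.runtime.AbstractFunction1;

// Functions shipped to Spark executors must be Serializable.
public class SplitFunction extends AbstractFunction1<String, TraversableOnce<Tuple2<String, Integer>>>
        implements java.io.Serializable {
    @Override
    public TraversableOnce<Tuple2<String, Integer>> apply(final String line) {
        final ArrayBuffer<Tuple2<String, Integer>> buf = new ArrayBuffer<>();
        for (final String token : line.split(" ")) {
            buf.$plus$eq(new Tuple2<>(token, 1));
        }
        // ArrayBuffer is a TraversableOnce, so it can be returned as-is.
        return buf;
    }
}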

Example 5 with ArrayBuffer

Use of scala.collection.mutable.ArrayBuffer in project Gaffer by gchq.

From the class ImportRDDOfElementsHandlerTest, method checkImportRDDOfElements:

@Test
public void checkImportRDDOfElements() throws OperationException, IOException {
    final Graph graph1 = new Graph.Builder()
            .config(new GraphConfig.Builder().graphId("graphId").build())
            .addSchema(getClass().getResourceAsStream("/schema/elements.json"))
            .addSchema(getClass().getResourceAsStream("/schema/types.json"))
            .addSchema(getClass().getResourceAsStream("/schema/serialisation.json"))
            .storeProperties(PROPERTIES)
            .build();
    final ArrayBuffer<Element> elements = new ArrayBuffer<>();
    for (int i = 0; i < 10; i++) {
        final Entity entity = new Entity.Builder().group(TestGroups.ENTITY).vertex("" + i).build();
        final Edge edge1 = new Edge.Builder().group(TestGroups.EDGE).source("" + i).dest("B")
                .directed(false).property(TestPropertyNames.COUNT, 2).build();
        final Edge edge2 = new Edge.Builder().group(TestGroups.EDGE).source("" + i).dest("C")
                .directed(false).property(TestPropertyNames.COUNT, 4).build();
        elements.$plus$eq(edge1);
        elements.$plus$eq(edge2);
        elements.$plus$eq(entity);
    }
    final User user = new User();
    final SparkSession sparkSession = SparkSessionProvider.getSparkSession();
    // Create Hadoop configuration and serialise to a string
    final Configuration configuration = new Configuration();
    final String configurationString = AbstractGetRDDHandler.convertConfigurationToString(configuration);
    final String outputPath = tempDir.resolve("output").toAbsolutePath().toString();
    final String failurePath = tempDir.resolve("failure").toAbsolutePath().toString();
    final RDD<Element> elementRDD = sparkSession.sparkContext().parallelize(elements, 8, ELEMENT_CLASS_TAG);
    final ImportRDDOfElements addRdd = new ImportRDDOfElements.Builder()
            .input(elementRDD)
            .option("outputPath", outputPath)
            .option("failurePath", failurePath)
            .build();
    graph1.execute(addRdd, user);
    // Check all elements were added
    final GetRDDOfAllElements rddQuery = new GetRDDOfAllElements.Builder()
            .option(AbstractGetRDDHandler.HADOOP_CONFIGURATION_KEY, configurationString)
            .build();
    final RDD<Element> rdd = graph1.execute(rddQuery, user);
    if (rdd == null) {
        fail("No RDD returned");
    }
    final Set<Element> results = new HashSet<>();
    final Element[] returnedElements = (Element[]) rdd.collect();
    for (final Element returnedElement : returnedElements) {
        results.add(returnedElement);
    }
    assertEquals(elements.size(), results.size());
}
Also used: Entity(uk.gov.gchq.gaffer.data.element.Entity), User(uk.gov.gchq.gaffer.user.User), SparkSession(org.apache.spark.sql.SparkSession), Configuration(org.apache.hadoop.conf.Configuration), Element(uk.gov.gchq.gaffer.data.element.Element), Graph(uk.gov.gchq.gaffer.graph.Graph), ImportRDDOfElements(uk.gov.gchq.gaffer.spark.operation.scalardd.ImportRDDOfElements), GetRDDOfAllElements(uk.gov.gchq.gaffer.spark.operation.scalardd.GetRDDOfAllElements), ArrayBuffer(scala.collection.mutable.ArrayBuffer), Edge(uk.gov.gchq.gaffer.data.element.Edge), HashSet(java.util.HashSet), Test(org.junit.jupiter.api.Test)
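
The ELEMENT_CLASS_TAG constant passed to parallelize above is defined outside the snippet; class tags like it can be built from Java through the ClassTag companion object. A hedged, self-contained sketch of the pattern (ParallelizeSketch and STRING_CLASS_TAG are illustrative names, assuming Spark and the Scala library on the classpath):

import org.apache.spark.SparkConf;
import org.apache.spark.SparkContext;
import org.apache.spark.rdd.RDD;
import scala.collection.mutable.ArrayBuffer;
import scala.reflect.ClassTag;
import scala.reflect.ClassTag$;

public class ParallelizeSketch {
    // Mirrors the ELEMENT_CLASS_TAG pattern: a ClassTag built from a
    // java.lang.Class via the ClassTag companion object.
    private static final ClassTag<String> STRING_CLASS_TAG = ClassTag$.MODULE$.apply(String.class);

    public static void main(String[] args) {
        final SparkContext sc = new SparkContext(new SparkConf().setMaster("local").setAppName("sketch"));
        final ArrayBuffer<String> data = new ArrayBuffer<>();
        data.$plus$eq("a");
        data.$plus$eq("b");
        // ArrayBuffer is a scala.collection.Seq, so it can be passed straight
        // to the Scala SparkContext's parallelize.
        final RDD<String> rdd = sc.parallelize(data, 1, STRING_CLASS_TAG);
        System.out.println(rdd.count()); // 2
        sc.stop();
    }
}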

Aggregations

ArrayBuffer (scala.collection.mutable.ArrayBuffer): 9 uses
Edge (uk.gov.gchq.gaffer.data.element.Edge): 5 uses
Element (uk.gov.gchq.gaffer.data.element.Element): 5 uses
Entity (uk.gov.gchq.gaffer.data.element.Entity): 5 uses
HashSet (java.util.HashSet): 3 uses
HepRelVertex (org.apache.calcite.plan.hep.HepRelVertex): 3 uses
RelNode (org.apache.calcite.rel.RelNode): 3 uses
RexBuilder (org.apache.calcite.rex.RexBuilder): 3 uses
RexNode (org.apache.calcite.rex.RexNode): 3 uses
FlinkLogicalCalc (org.apache.flink.table.planner.plan.nodes.logical.FlinkLogicalCalc): 3 uses
Configuration (org.apache.hadoop.conf.Configuration): 3 uses
Tuple2 (scala.Tuple2): 3 uses
Graph (uk.gov.gchq.gaffer.graph.Graph): 3 uses
GetRDDOfAllElements (uk.gov.gchq.gaffer.spark.operation.scalardd.GetRDDOfAllElements): 3 uses
User (uk.gov.gchq.gaffer.user.User): 3 uses
LinkedList (java.util.LinkedList): 2 uses
List (java.util.List): 2 uses
Collectors (java.util.stream.Collectors): 2 uses
RelOptRule (org.apache.calcite.plan.RelOptRule): 2 uses
RelOptRuleCall (org.apache.calcite.plan.RelOptRuleCall): 2 uses