
Example 51 with ByteArrayOutputStream

use of org.apache.commons.io.output.ByteArrayOutputStream in project Gaffer by gchq.

In class ImportJavaRDDOfElementsHandlerTest, the method checkImportJavaRDDOfElements:

@Test
public void checkImportJavaRDDOfElements() throws OperationException, IOException, InterruptedException {
    final Graph graph1 = new Graph.Builder()
            .addSchema(getClass().getResourceAsStream("/schema/dataSchema.json"))
            .addSchema(getClass().getResourceAsStream("/schema/dataTypes.json"))
            .addSchema(getClass().getResourceAsStream("/schema/storeTypes.json"))
            .storeProperties(getClass().getResourceAsStream("/store.properties"))
            .build();
    final List<Element> elements = new ArrayList<>();
    for (int i = 0; i < 10; i++) {
        final Entity entity = new Entity(TestGroups.ENTITY);
        entity.setVertex("" + i);
        final Edge edge1 = new Edge(TestGroups.EDGE);
        edge1.setSource("" + i);
        edge1.setDestination("B");
        edge1.setDirected(false);
        edge1.putProperty(TestPropertyNames.COUNT, 2);
        final Edge edge2 = new Edge(TestGroups.EDGE);
        edge2.setSource("" + i);
        edge2.setDestination("C");
        edge2.setDirected(false);
        edge2.putProperty(TestPropertyNames.COUNT, 4);
        elements.add(edge1);
        elements.add(edge2);
        elements.add(entity);
    }
    final User user = new User();
    final SparkConf sparkConf = new SparkConf()
            .setMaster("local")
            .setAppName("testCheckGetCorrectElementsInJavaRDDForEntitySeed")
            .set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
            .set("spark.kryo.registrator", "uk.gov.gchq.gaffer.spark.serialisation.kryo.Registrator")
            .set("spark.driver.allowMultipleContexts", "true");
    final JavaSparkContext sparkContext = new JavaSparkContext(sparkConf);
    // Create Hadoop configuration and serialise to a string
    final Configuration configuration = new Configuration();
    final ByteArrayOutputStream baos = new ByteArrayOutputStream();
    configuration.write(new DataOutputStream(baos));
    final String configurationString = new String(baos.toByteArray(), CommonConstants.UTF_8);
    final String outputPath = this.getClass().getResource("/").getPath().toString() + "load";
    final String failurePath = this.getClass().getResource("/").getPath().toString() + "failure";
    final File file = new File(outputPath);
    if (file.exists()) {
        FileUtils.forceDelete(file);
    }
    final JavaRDD<Element> elementJavaRDD = sparkContext.parallelize(elements);
    final ImportJavaRDDOfElements addRdd = new ImportJavaRDDOfElements.Builder()
            .javaSparkContext(sparkContext)
            .input(elementJavaRDD)
            .option("outputPath", outputPath)
            .option("failurePath", failurePath)
            .build();
    graph1.execute(addRdd, user);
    FileUtils.forceDelete(file);
    // Check all elements were added
    final GetJavaRDDOfAllElements rddQuery = new GetJavaRDDOfAllElements.Builder()
            .javaSparkContext(sparkContext)
            .option(AbstractGetRDDHandler.HADOOP_CONFIGURATION_KEY, configurationString)
            .build();
    final JavaRDD<Element> rdd = graph1.execute(rddQuery, user);
    if (rdd == null) {
        fail("No RDD returned");
    }
    final Set<Element> results = new HashSet<>(rdd.collect());
    assertEquals(elements.size(), results.size());
    sparkContext.stop();
}
Also used : Entity(uk.gov.gchq.gaffer.data.element.Entity) User(uk.gov.gchq.gaffer.user.User) Configuration(org.apache.hadoop.conf.Configuration) DataOutputStream(java.io.DataOutputStream) Element(uk.gov.gchq.gaffer.data.element.Element) ArrayList(java.util.ArrayList) GetJavaRDDOfAllElements(uk.gov.gchq.gaffer.spark.operation.javardd.GetJavaRDDOfAllElements) ByteArrayOutputStream(org.apache.commons.io.output.ByteArrayOutputStream) Graph(uk.gov.gchq.gaffer.graph.Graph) JavaSparkContext(org.apache.spark.api.java.JavaSparkContext) ImportJavaRDDOfElements(uk.gov.gchq.gaffer.spark.operation.javardd.ImportJavaRDDOfElements) Edge(uk.gov.gchq.gaffer.data.element.Edge) SparkConf(org.apache.spark.SparkConf) File(java.io.File) HashSet(java.util.HashSet) Test(org.junit.Test)
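
The part of this test that actually exercises the commons-io class is the configuration-to-string step: the Hadoop Configuration writes itself to a ByteArrayOutputStream through a DataOutputStream and the bytes become a UTF-8 string passed to the operation as an option. As a minimal sketch, that step could be pulled out into a standalone helper like the one below; the class and method names are illustrative and not part of Gaffer, and StandardCharsets.UTF_8 stands in for Gaffer's CommonConstants.UTF_8.

import java.io.DataOutputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import org.apache.commons.io.output.ByteArrayOutputStream;
import org.apache.hadoop.conf.Configuration;

// Illustrative helper, not Gaffer API.
public final class ConfigurationStrings {

    private ConfigurationStrings() {
    }

    // Serialise a Hadoop Configuration to a UTF-8 string, as done inline in the test above.
    public static String toConfigurationString(final Configuration configuration) throws IOException {
        final ByteArrayOutputStream baos = new ByteArrayOutputStream();
        // Configuration implements Hadoop's Writable, so it can write itself to any DataOutput.
        configuration.write(new DataOutputStream(baos));
        return new String(baos.toByteArray(), StandardCharsets.UTF_8);
    }
}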

Example 52 with ByteArrayOutputStream

use of org.apache.commons.io.output.ByteArrayOutputStream in project Gaffer by gchq.

In class ImportKeyValueJavaPairRDDToAccumuloHandlerTest, the method checkImportKeyValueJavaPairRDD:

@Test
public void checkImportKeyValueJavaPairRDD() throws OperationException, IOException, InterruptedException {
    final Graph graph1 = new Graph.Builder()
            .addSchema(getClass().getResourceAsStream("/schema/dataSchema.json"))
            .addSchema(getClass().getResourceAsStream("/schema/dataTypes.json"))
            .addSchema(getClass().getResourceAsStream("/schema/storeSchema.json"))
            .addSchema(getClass().getResourceAsStream("/schema/storeTypes.json"))
            .storeProperties(getClass().getResourceAsStream("/store.properties"))
            .build();
    final List<Element> elements = new ArrayList<>();
    for (int i = 0; i < 10; i++) {
        final Entity entity = new Entity(TestGroups.ENTITY);
        entity.setVertex("" + i);
        final Edge edge1 = new Edge(TestGroups.EDGE);
        edge1.setSource("" + i);
        edge1.setDestination("B");
        edge1.setDirected(false);
        edge1.putProperty(TestPropertyNames.COUNT, 2);
        final Edge edge2 = new Edge(TestGroups.EDGE);
        edge2.setSource("" + i);
        edge2.setDestination("C");
        edge2.setDirected(false);
        edge2.putProperty(TestPropertyNames.COUNT, 4);
        elements.add(edge1);
        elements.add(edge2);
        elements.add(entity);
    }
    final User user = new User();
    final SparkConf sparkConf = new SparkConf()
            .setMaster("local")
            .setAppName("testCheckGetCorrectElementsInJavaRDDForEntitySeed")
            .set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
            .set("spark.kryo.registrator", "uk.gov.gchq.gaffer.spark.serialisation.kryo.Registrator")
            .set("spark.driver.allowMultipleContexts", "true");
    final JavaSparkContext sparkContext = new JavaSparkContext(sparkConf);
    // Create Hadoop configuration and serialise to a string
    final Configuration configuration = new Configuration();
    final ByteArrayOutputStream baos = new ByteArrayOutputStream();
    configuration.write(new DataOutputStream(baos));
    final String configurationString = new String(baos.toByteArray(), CommonConstants.UTF_8);
    final String outputPath = this.getClass().getResource("/").getPath().toString() + "load";
    final String failurePath = this.getClass().getResource("/").getPath().toString() + "failure";
    final File file = new File(outputPath);
    if (file.exists()) {
        FileUtils.forceDelete(file);
    }
    final ElementConverterFunction func = new ElementConverterFunction(
            sparkContext.broadcast(new ByteEntityAccumuloElementConverter(graph1.getSchema())));
    final JavaPairRDD<Key, Value> elementJavaRDD = sparkContext.parallelize(elements).flatMapToPair(func);
    final ImportKeyValueJavaPairRDDToAccumulo addRdd = new ImportKeyValueJavaPairRDDToAccumulo.Builder()
            .input(elementJavaRDD)
            .outputPath(outputPath)
            .failurePath(failurePath)
            .build();
    graph1.execute(addRdd, user);
    FileUtils.forceDelete(file);
    // Check all elements were added
    final GetJavaRDDOfAllElements rddQuery = new GetJavaRDDOfAllElements.Builder()
            .javaSparkContext(sparkContext)
            .option(AbstractGetRDDHandler.HADOOP_CONFIGURATION_KEY, configurationString)
            .build();
    final JavaRDD<Element> rdd = graph1.execute(rddQuery, user);
    if (rdd == null) {
        fail("No RDD returned");
    }
    final Set<Element> results = new HashSet<>(rdd.collect());
    assertEquals(elements.size(), results.size());
    sparkContext.stop();
}
Also used : Entity(uk.gov.gchq.gaffer.data.element.Entity) User(uk.gov.gchq.gaffer.user.User) Configuration(org.apache.hadoop.conf.Configuration) DataOutputStream(java.io.DataOutputStream) Element(uk.gov.gchq.gaffer.data.element.Element) ArrayList(java.util.ArrayList) GetJavaRDDOfAllElements(uk.gov.gchq.gaffer.spark.operation.javardd.GetJavaRDDOfAllElements) JavaSparkContext(org.apache.spark.api.java.JavaSparkContext) HashSet(java.util.HashSet) ByteArrayOutputStream(org.apache.commons.io.output.ByteArrayOutputStream) ImportKeyValueJavaPairRDDToAccumulo(uk.gov.gchq.gaffer.sparkaccumulo.operation.javardd.ImportKeyValueJavaPairRDDToAccumulo) Graph(uk.gov.gchq.gaffer.graph.Graph) Value(org.apache.accumulo.core.data.Value) ByteEntityAccumuloElementConverter(uk.gov.gchq.gaffer.accumulostore.key.core.impl.byteEntity.ByteEntityAccumuloElementConverter) ElementConverterFunction(uk.gov.gchq.gaffer.sparkaccumulo.operation.utils.java.ElementConverterFunction) Edge(uk.gov.gchq.gaffer.data.element.Edge) SparkConf(org.apache.spark.SparkConf) File(java.io.File) Key(org.apache.accumulo.core.data.Key) Test(org.junit.Test)
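
The same configuration serialisation appears again in this test. For completeness, a companion method for the ConfigurationStrings sketch after Example 51 could cover the reverse direction, rebuilding a Configuration from the string, roughly what the operation handler has to do on the receiving side. This is illustrative rather than Gaffer code, relies only on Configuration implementing Hadoop's Writable interface, and additionally needs java.io.ByteArrayInputStream and java.io.DataInputStream imports.

    // Illustrative companion to toConfigurationString above; not Gaffer API.
    public static Configuration fromConfigurationString(final String configurationString) throws IOException {
        final Configuration configuration = new Configuration();
        // Writable.readFields repopulates the Configuration from its serialised form.
        configuration.readFields(new DataInputStream(
                new ByteArrayInputStream(configurationString.getBytes(StandardCharsets.UTF_8))));
        return configuration;
    }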

Example 53 with ByteArrayOutputStream

use of org.apache.commons.io.output.ByteArrayOutputStream in project Gaffer by gchq.

In class GetJavaRDDOfElementsHandlerTest, the method checkGetCorrectElementsInJavaRDDForEntitySeed:

@Test
public void checkGetCorrectElementsInJavaRDDForEntitySeed() throws OperationException, IOException {
    final Graph graph1 = new Graph.Builder()
            .addSchema(getClass().getResourceAsStream("/schema/dataSchema.json"))
            .addSchema(getClass().getResourceAsStream("/schema/dataTypes.json"))
            .addSchema(getClass().getResourceAsStream("/schema/storeTypes.json"))
            .storeProperties(getClass().getResourceAsStream("/store.properties"))
            .build();
    final List<Element> elements = new ArrayList<>();
    for (int i = 0; i < 10; i++) {
        final Entity entity = new Entity(ENTITY_GROUP);
        entity.setVertex("" + i);
        final Edge edge1 = new Edge(EDGE_GROUP);
        edge1.setSource("" + i);
        edge1.setDestination("B");
        edge1.setDirected(false);
        edge1.putProperty("count", 2);
        final Edge edge2 = new Edge(EDGE_GROUP);
        edge2.setSource("" + i);
        edge2.setDestination("C");
        edge2.setDirected(false);
        edge2.putProperty("count", 4);
        elements.add(edge1);
        elements.add(edge2);
        elements.add(entity);
    }
    final User user = new User();
    graph1.execute(new AddElements(elements), user);
    final SparkConf sparkConf = new SparkConf()
            .setMaster("local")
            .setAppName("testCheckGetCorrectElementsInJavaRDDForEntitySeed")
            .set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
            .set("spark.kryo.registrator", "uk.gov.gchq.gaffer.spark.serialisation.kryo.Registrator")
            .set("spark.driver.allowMultipleContexts", "true");
    final JavaSparkContext sparkContext = new JavaSparkContext(sparkConf);
    // Create Hadoop configuration and serialise to a string
    final Configuration configuration = new Configuration();
    final ByteArrayOutputStream baos = new ByteArrayOutputStream();
    configuration.write(new DataOutputStream(baos));
    final String configurationString = new String(baos.toByteArray(), CommonConstants.UTF_8);
    // Check get correct edges for "1"
    GetJavaRDDOfElements<EntitySeed> rddQuery = new GetJavaRDDOfElements.Builder<EntitySeed>()
            .javaSparkContext(sparkContext)
            .seeds(Collections.singleton(new EntitySeed("1")))
            .build();
    rddQuery.addOption(AbstractGetRDDHandler.HADOOP_CONFIGURATION_KEY, configurationString);
    JavaRDD<Element> rdd = graph1.execute(rddQuery, user);
    if (rdd == null) {
        fail("No RDD returned");
    }
    final Set<Element> results = new HashSet<>(rdd.collect());
    final Set<Element> expectedElements = new HashSet<>();
    final Entity entity1 = new Entity(ENTITY_GROUP);
    entity1.setVertex("1");
    final Edge edge1B = new Edge(EDGE_GROUP);
    edge1B.setSource("1");
    edge1B.setDestination("B");
    edge1B.setDirected(false);
    edge1B.putProperty("count", 2);
    final Edge edge1C = new Edge(EDGE_GROUP);
    edge1C.setSource("1");
    edge1C.setDestination("C");
    edge1C.setDirected(false);
    edge1C.putProperty("count", 4);
    expectedElements.add(entity1);
    expectedElements.add(edge1B);
    expectedElements.add(edge1C);
    assertEquals(expectedElements, results);
    // Check get correct edges for "1" when specify entities only
    rddQuery = new GetJavaRDDOfElements.Builder<EntitySeed>()
            .javaSparkContext(sparkContext)
            .seeds(Collections.singleton(new EntitySeed("1")))
            .view(new View.Builder()
                    .entity(ENTITY_GROUP)
                    .build())
            .build();
    rddQuery.addOption(AbstractGetRDDHandler.HADOOP_CONFIGURATION_KEY, configurationString);
    rdd = graph1.execute(rddQuery, user);
    if (rdd == null) {
        fail("No RDD returned");
    }
    results.clear();
    results.addAll(rdd.collect());
    expectedElements.clear();
    expectedElements.add(entity1);
    assertEquals(expectedElements, results);
    // Check get correct edges for "1" when specify edges only
    rddQuery = new GetJavaRDDOfElements.Builder<EntitySeed>()
            .javaSparkContext(sparkContext)
            .seeds(Collections.singleton(new EntitySeed("1")))
            .view(new View.Builder()
                    .edge(EDGE_GROUP)
                    .build())
            .build();
    rddQuery.addOption(AbstractGetRDDHandler.HADOOP_CONFIGURATION_KEY, configurationString);
    rdd = graph1.execute(rddQuery, user);
    if (rdd == null) {
        fail("No RDD returned");
    }
    results.clear();
    results.addAll(rdd.collect());
    expectedElements.clear();
    expectedElements.add(edge1B);
    expectedElements.add(edge1C);
    assertEquals(expectedElements, results);
    // Check get correct edges for "1" and "5"
    Set<EntitySeed> seeds = new HashSet<>();
    seeds.add(new EntitySeed("1"));
    seeds.add(new EntitySeed("5"));
    rddQuery = new GetJavaRDDOfElements.Builder<EntitySeed>()
            .javaSparkContext(sparkContext)
            .seeds(seeds)
            .build();
    rddQuery.addOption(AbstractGetRDDHandler.HADOOP_CONFIGURATION_KEY, configurationString);
    rdd = graph1.execute(rddQuery, user);
    if (rdd == null) {
        fail("No RDD returned");
    }
    results.clear();
    results.addAll(rdd.collect());
    final Entity entity5 = new Entity(ENTITY_GROUP);
    entity5.setVertex("5");
    final Edge edge5B = new Edge(EDGE_GROUP);
    edge5B.setSource("5");
    edge5B.setDestination("B");
    edge5B.setDirected(false);
    edge5B.putProperty("count", 2);
    final Edge edge5C = new Edge(EDGE_GROUP);
    edge5C.setSource("5");
    edge5C.setDestination("C");
    edge5C.setDirected(false);
    edge5C.putProperty("count", 4);
    expectedElements.clear();
    expectedElements.add(entity1);
    expectedElements.add(edge1B);
    expectedElements.add(edge1C);
    expectedElements.add(entity5);
    expectedElements.add(edge5B);
    expectedElements.add(edge5C);
    assertEquals(expectedElements, results);
    sparkContext.stop();
}
Also used : AddElements(uk.gov.gchq.gaffer.operation.impl.add.AddElements) Entity(uk.gov.gchq.gaffer.data.element.Entity) User(uk.gov.gchq.gaffer.user.User) Configuration(org.apache.hadoop.conf.Configuration) DataOutputStream(java.io.DataOutputStream) Element(uk.gov.gchq.gaffer.data.element.Element) ArrayList(java.util.ArrayList) GetJavaRDDOfElements(uk.gov.gchq.gaffer.spark.operation.javardd.GetJavaRDDOfElements) JavaSparkContext(org.apache.spark.api.java.JavaSparkContext) HashSet(java.util.HashSet) ByteArrayOutputStream(org.apache.commons.io.output.ByteArrayOutputStream) View(uk.gov.gchq.gaffer.data.elementdefinition.view.View) Graph(uk.gov.gchq.gaffer.graph.Graph) EntitySeed(uk.gov.gchq.gaffer.operation.data.EntitySeed) Edge(uk.gov.gchq.gaffer.data.element.Edge) SparkConf(org.apache.spark.SparkConf) Test(org.junit.Test)

Example 54 with ByteArrayOutputStream

use of org.apache.commons.io.output.ByteArrayOutputStream in project hudson-2.x by hudson.

In class ZFSInstaller, the method doStart:

/**
 * Called from the confirmation screen to actually initiate the migration.
 */
public void doStart(StaplerRequest req, StaplerResponse rsp, @QueryParameter String username, @QueryParameter String password) throws ServletException, IOException {
    requirePOST();
    Hudson hudson = Hudson.getInstance();
    hudson.checkPermission(Hudson.ADMINISTER);
    final String datasetName;
    ByteArrayOutputStream log = new ByteArrayOutputStream();
    StreamTaskListener listener = new StreamTaskListener(log);
    try {
        datasetName = createZfsFileSystem(listener, username, password);
    } catch (Exception e) {
        e.printStackTrace(listener.error(e.getMessage()));
        if (e instanceof ZFSException) {
            ZFSException ze = (ZFSException) e;
            if (ze.getCode() == ErrorCode.EZFS_PERM) {
                // permission problem. ask the user to give us the root password
                req.setAttribute("message", log.toString());
                rsp.forward(this, "askRootPassword", req);
                return;
            }
        }
        // for other kinds of problems, report and bail out
        req.setAttribute("pre", true);
        sendError(log.toString(), req, rsp);
        return;
    }
    // file system creation successful, so restart
    WebAppController.get().install(new HudsonIsRestarting());
    // redirect the user to the manage page
    rsp.sendRedirect2(req.getContextPath() + "/manage");
    // asynchronously restart, so that we can give a bit of time to the browser to load "restarting..." screen.
    new Thread("restart thread") {

        @Override
        public void run() {
            try {
                Thread.sleep(5000);
                // close all descriptors on exec except stdin,out,err
                int sz = LIBC.getdtablesize();
                for (int i = 3; i < sz; i++) {
                    int flags = LIBC.fcntl(i, F_GETFD);
                    if (flags < 0)
                        continue;
                    LIBC.fcntl(i, F_SETFD, flags | FD_CLOEXEC);
                }
                // re-exec with the system property to indicate where to migrate the data to.
                // the 2nd phase is implemented in the migrate method.
                JavaVMArguments args = JavaVMArguments.current();
                args.setSystemProperty(ZFSInstaller.class.getName() + ".migrate", datasetName);
                Daemon.selfExec(args);
            } catch (InterruptedException e) {
                LOGGER.log(Level.SEVERE, "Restart failed", e);
            } catch (IOException e) {
                LOGGER.log(Level.SEVERE, "Restart failed", e);
            }
        }
    }.start();
}
Also used : StreamTaskListener(hudson.util.StreamTaskListener) ZFSException(org.jvnet.solaris.libzfs.ZFSException) Hudson(hudson.model.Hudson) JavaVMArguments(com.sun.akuma.JavaVMArguments) ByteArrayOutputStream(org.apache.commons.io.output.ByteArrayOutputStream) IOException(java.io.IOException) HudsonIsRestarting(hudson.util.HudsonIsRestarting) ServletException(javax.servlet.ServletException) ZFSException(org.jvnet.solaris.libzfs.ZFSException) IOException(java.io.IOException)
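
Here the commons-io stream serves as an in-memory sink for a StreamTaskListener, so the entire log of createZfsFileSystem can be shown to the user if the ZFS setup fails. A stripped-down sketch of that capture pattern is below; the Task interface and the helper name are hypothetical, and only StreamTaskListener, TaskListener.error and ByteArrayOutputStream are real Hudson/commons-io API.

import org.apache.commons.io.output.ByteArrayOutputStream;
import hudson.util.StreamTaskListener;

// Illustrative sketch of the log-capture pattern, not Hudson code.
public class LogCaptureSketch {

    interface Task {
        // Hypothetical shape of the work to run against the listener.
        void run(StreamTaskListener listener) throws Exception;
    }

    static String runAndCapture(Task task) {
        // Back the listener with an in-memory buffer instead of a file or console.
        ByteArrayOutputStream log = new ByteArrayOutputStream();
        StreamTaskListener listener = new StreamTaskListener(log);
        try {
            task.run(listener);
        } catch (Exception e) {
            // Record the failure in the same buffer so callers see one combined log.
            e.printStackTrace(listener.error(e.getMessage()));
        }
        return log.toString();
    }
}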

Example 55 with ByteArrayOutputStream

use of org.apache.commons.io.output.ByteArrayOutputStream in project hive by apache.

In class TestLazyAccumuloMap, the method toBytes:

protected byte[] toBytes(int i) throws IOException {
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    DataOutputStream out = new DataOutputStream(baos);
    out.writeInt(i);
    out.close();
    return baos.toByteArray();
}
Also used : DataOutputStream(java.io.DataOutputStream) ByteArrayOutputStream(org.apache.commons.io.output.ByteArrayOutputStream)
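
A hypothetical counterpart that reads the value back (not part of the original test) would mirror toBytes with java.io.ByteArrayInputStream and java.io.DataInputStream:

// Hypothetical inverse of toBytes above; requires java.io.ByteArrayInputStream and java.io.DataInputStream.
protected int fromBytes(byte[] bytes) throws IOException {
    final DataInputStream in = new DataInputStream(new ByteArrayInputStream(bytes));
    try {
        // Read back the single int written by toBytes.
        return in.readInt();
    } finally {
        in.close();
    }
}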

Aggregations

ByteArrayOutputStream (org.apache.commons.io.output.ByteArrayOutputStream)92 Test (org.junit.Test)36 DataOutputStream (java.io.DataOutputStream)15 IOException (java.io.IOException)14 HashSet (java.util.HashSet)13 ByteArrayInputStream (java.io.ByteArrayInputStream)12 ArrayList (java.util.ArrayList)12 Configuration (org.apache.hadoop.conf.Configuration)12 PrintStream (java.io.PrintStream)10 SparkConf (org.apache.spark.SparkConf)10 Edge (uk.gov.gchq.gaffer.data.element.Edge)10 Element (uk.gov.gchq.gaffer.data.element.Element)10 Entity (uk.gov.gchq.gaffer.data.element.Entity)10 Graph (uk.gov.gchq.gaffer.graph.Graph)10 User (uk.gov.gchq.gaffer.user.User)10 File (java.io.File)9 HashMap (java.util.HashMap)8 InputStream (java.io.InputStream)7 OutputStream (java.io.OutputStream)6 JavaSparkContext (org.apache.spark.api.java.JavaSparkContext)6