
Example 1 with ByteArrayOutputStream

Use of org.apache.commons.io.output.ByteArrayOutputStream in project flink by apache.

From class StateBackendTestBase, method testValueStateNullUpdate:

/**
 * This test verifies that passing {@code null} to {@link ValueState#update(Object)} acts
 * the same as {@link ValueState#clear()}.
 *
 * @throws Exception
 */
@Test
@SuppressWarnings("unchecked")
public void testValueStateNullUpdate() throws Exception {
    // precondition: LongSerializer must fail on null values. This way the test would fail
    // later if null values were actually stored in the state instead of acting as clear()
    try {
        LongSerializer.INSTANCE.serialize(null, new DataOutputViewStreamWrapper(new ByteArrayOutputStream()));
        fail("Should fail with NullPointerException");
    } catch (NullPointerException e) {
        // expected: the serializer must reject null
    }
    CheckpointStreamFactory streamFactory = createStreamFactory();
    AbstractKeyedStateBackend<Integer> backend = createKeyedBackend(IntSerializer.INSTANCE);
    ValueStateDescriptor<Long> kvId = new ValueStateDescriptor<>("id", LongSerializer.INSTANCE, 42L);
    kvId.initializeSerializerUnlessSet(new ExecutionConfig());
    ValueState<Long> state = backend.getPartitionedState(VoidNamespace.INSTANCE, VoidNamespaceSerializer.INSTANCE, kvId);
    // some modifications to the state
    backend.setCurrentKey(1);
    // verify default value
    assertEquals(42L, (long) state.value());
    state.update(1L);
    assertEquals(1L, (long) state.value());
    backend.setCurrentKey(2);
    assertEquals(42L, (long) state.value());
    backend.setCurrentKey(1);
    state.clear();
    assertEquals(42L, (long) state.value());
    state.update(17L);
    assertEquals(17L, (long) state.value());
    state.update(null);
    assertEquals(42L, (long) state.value());
    // draw a snapshot
    KeyGroupsStateHandle snapshot1 = FutureUtil.runIfNotDoneAndGet(backend.snapshot(682375462378L, 2, streamFactory, CheckpointOptions.forFullCheckpoint()));
    backend.dispose();
    backend = restoreKeyedBackend(IntSerializer.INSTANCE, snapshot1);
    snapshot1.discardState();
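    // verify that the restored backend can still hand out the state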
    backend.getPartitionedState(VoidNamespace.INSTANCE, VoidNamespaceSerializer.INSTANCE, kvId);
    backend.dispose();
}
Also used: ValueStateDescriptor(org.apache.flink.api.common.state.ValueStateDescriptor) DataOutputViewStreamWrapper(org.apache.flink.core.memory.DataOutputViewStreamWrapper) BlockerCheckpointStreamFactory(org.apache.flink.runtime.util.BlockerCheckpointStreamFactory) ByteArrayOutputStream(org.apache.commons.io.output.ByteArrayOutputStream) ExecutionConfig(org.apache.flink.api.common.ExecutionConfig) Test(org.junit.Test)
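
The try/fail/catch block at the top of the test is the manual way to assert an exception; JUnit 4's expected attribute expresses the same precondition more compactly. A minimal sketch, assuming the same Flink serializer classes and commons-io are on the classpath (the class and method names here are ours):

import org.apache.commons.io.output.ByteArrayOutputStream;
import org.apache.flink.api.common.typeutils.base.LongSerializer;
import org.apache.flink.core.memory.DataOutputViewStreamWrapper;
import org.junit.Test;

public class NullSerializePreconditionTest {

    // Declaring the expected exception replaces the manual fail()/catch dance.
    @Test(expected = NullPointerException.class)
    public void longSerializerRejectsNull() throws Exception {
        LongSerializer.INSTANCE.serialize(
                null, new DataOutputViewStreamWrapper(new ByteArrayOutputStream()));
    }
}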

Example 2 with ByteArrayOutputStream

Use of org.apache.commons.io.output.ByteArrayOutputStream in project hadoop by apache.

From class TestDecommissioningStatus, method checkDFSAdminDecommissionStatus:

private void checkDFSAdminDecommissionStatus(List<DatanodeDescriptor> expectedDecomm, DistributedFileSystem dfs, DFSAdmin admin) throws IOException {
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    PrintStream ps = new PrintStream(baos);
    PrintStream oldOut = System.out;
    System.setOut(ps);
    try {
        // Parse DFSAdmin just to check the count
        admin.report(new String[] { "-decommissioning" }, 0);
        String[] lines = baos.toString().split("\n");
        Integer num = null;
        int count = 0;
        for (String line : lines) {
            if (line.startsWith("Decommissioning datanodes")) {
                // Pull out the "(num)" and parse it into an int
                String temp = line.split(" ")[2];
                num = Integer.parseInt(temp.substring(1, temp.length() - 2));
            }
            if (line.contains("Decommission in progress")) {
                count++;
            }
        }
        assertTrue("No decommissioning output", num != null);
        assertEquals("Unexpected number of decomming DNs", expectedDecomm.size(), num.intValue());
        assertEquals("Unexpected number of decomming DNs", expectedDecomm.size(), count);
        // Check Java API for correct contents
        List<DatanodeInfo> decomming = new ArrayList<DatanodeInfo>(Arrays.asList(dfs.getDataNodeStats(DatanodeReportType.DECOMMISSIONING)));
        assertEquals("Unexpected number of decomming DNs", expectedDecomm.size(), decomming.size());
        for (DatanodeID id : expectedDecomm) {
            assertTrue("Did not find expected decomming DN " + id, decomming.contains(id));
        }
    } finally {
        System.setOut(oldOut);
    }
}
Also used: PrintStream(java.io.PrintStream) DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) DatanodeID(org.apache.hadoop.hdfs.protocol.DatanodeID) ArrayList(java.util.ArrayList) ByteArrayOutputStream(org.apache.commons.io.output.ByteArrayOutputStream)
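
The redirect-and-restore dance around System.out is a reusable pattern on its own; a standalone sketch (the captureStdout helper name is ours, not Hadoop's):

import java.io.PrintStream;
import org.apache.commons.io.output.ByteArrayOutputStream;

public final class StdoutCapture {

    /** Runs the action while System.out is redirected, then returns what it printed. */
    static String captureStdout(Runnable action) {
        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        PrintStream oldOut = System.out;
        System.setOut(new PrintStream(baos));
        try {
            action.run();
        } finally {
            System.setOut(oldOut); // always restore, even if the action throws
        }
        return baos.toString();
    }

    public static void main(String[] args) {
        String out = captureStdout(() -> System.out.println("hello"));
        System.out.println("captured: " + out.trim());
    }
}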

Example 3 with ByteArrayOutputStream

Use of org.apache.commons.io.output.ByteArrayOutputStream in project hadoop by apache.

From class TestHsJobBlock, method testHsJobBlockForOversizeJobShouldDisplayWarningMessage:

@Test
public void testHsJobBlockForOversizeJobShouldDisplayWarningMessage() {
    int maxAllowedTaskNum = 100;
    Configuration config = new Configuration();
    config.setInt(JHAdminConfig.MR_HS_LOADED_JOBS_TASKS_MAX, maxAllowedTaskNum);
    JobHistory jobHistory = new JobHistoryStubWithAllOversizeJobs(maxAllowedTaskNum);
    jobHistory.init(config);
    HsJobBlock jobBlock = new HsJobBlock(jobHistory) {

        // override this so that the job block can fetch a job id
        @Override
        public Map<String, String> moreParams() {
            Map<String, String> map = new HashMap<>();
            map.put(AMParams.JOB_ID, "job_0000_0001");
            return map;
        }
    };
    // set up the test block for the HsJobBlock to render into
    OutputStream outputStream = new ByteArrayOutputStream();
    HtmlBlock.Block block = createBlockToCreateTo(outputStream);
    jobBlock.render(block);
    block.getWriter().flush();
    String out = outputStream.toString();
    Assert.assertTrue("Should display warning message for jobs that have too " + "many tasks", out.contains("Any job larger than " + maxAllowedTaskNum + " will not be loaded"));
}
Also used: Configuration(org.apache.hadoop.conf.Configuration) JobHistory(org.apache.hadoop.mapreduce.v2.hs.JobHistory) HashMap(java.util.HashMap) OutputStream(java.io.OutputStream) ByteArrayOutputStream(org.apache.commons.io.output.ByteArrayOutputStream) HtmlBlock(org.apache.hadoop.yarn.webapp.view.HtmlBlock) BlockForTest(org.apache.hadoop.yarn.webapp.view.BlockForTest) HtmlBlockForTest(org.apache.hadoop.yarn.webapp.view.HtmlBlockForTest) Test(org.junit.Test)
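
Note that outputStream is declared as plain java.io.OutputStream, so the final toString() call works only because ByteArrayOutputStream overrides it to decode the internal buffer. When the captured bytes need to be re-read as a stream, the commons-io class can also expose its internal buffers without the defensive copy that toByteArray() makes. A small sketch, assuming commons-io 2.5 or later for toInputStream():

import java.io.InputStream;
import java.nio.charset.StandardCharsets;
import org.apache.commons.io.IOUtils;
import org.apache.commons.io.output.ByteArrayOutputStream;

public class BaosRoundTrip {
    public static void main(String[] args) throws Exception {
        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        baos.write("rendered block output".getBytes(StandardCharsets.UTF_8));

        // toInputStream() wraps the existing internal buffers directly, whereas
        // toByteArray() must copy them into one contiguous array first.
        InputStream in = baos.toInputStream();
        System.out.println(IOUtils.toString(in, StandardCharsets.UTF_8));
    }
}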

Example 4 with ByteArrayOutputStream

Use of org.apache.commons.io.output.ByteArrayOutputStream in project hive by apache.

From class TestExplainTask, method explainToString:

private <K, V> String explainToString(Map<K, V> explainMap) throws Exception {
    ExplainWork work = new ExplainWork();
    ParseContext pCtx = new ParseContext();
    HashMap<String, TableScanOperator> topOps = new HashMap<>();
    TableScanOperator scanOp = new DummyOperator(new DummyExplainDesc<K, V>(explainMap));
    topOps.put("sample", scanOp);
    pCtx.setTopOps(topOps);
    work.setParseContext(pCtx);
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    work.setConfig(new ExplainConfiguration());
    new ExplainTask().getJSONLogicalPlan(new PrintStream(baos), work);
    baos.close();
    return baos.toString();
}
Also used: PrintStream(java.io.PrintStream) ExplainConfiguration(org.apache.hadoop.hive.ql.parse.ExplainConfiguration) HashMap(java.util.HashMap) LinkedHashMap(java.util.LinkedHashMap) ExplainWork(org.apache.hadoop.hive.ql.plan.ExplainWork) ByteArrayOutputStream(org.apache.commons.io.output.ByteArrayOutputStream) ParseContext(org.apache.hadoop.hive.ql.parse.ParseContext)
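
Two details make this round trip safe: closing a ByteArrayOutputStream is documented as a no-op, so calling toString() after close() is fine, and both new PrintStream(baos) and baos.toString() fall back to the platform default charset. A sketch that pins the encoding explicitly instead (the class name is ours):

import java.io.PrintStream;
import org.apache.commons.io.output.ByteArrayOutputStream;

public class ExplicitCharsetRoundTrip {
    public static void main(String[] args) throws Exception {
        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        // Name the charset on both ends so the output is platform-independent.
        PrintStream ps = new PrintStream(baos, true, "UTF-8");
        ps.println("{\"STAGE PLANS\": {}}");
        System.out.print(baos.toString("UTF-8"));
    }
}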

Example 5 with ByteArrayOutputStream

Use of org.apache.commons.io.output.ByteArrayOutputStream in project hive by apache.

From class KryoSerializer, method serialize:

public static byte[] serialize(Object object) {
    ByteArrayOutputStream stream = new ByteArrayOutputStream();
    Output output = new Output(stream);
    Kryo kryo = SerializationUtilities.borrowKryo();
    kryo.setClassLoader(Thread.currentThread().getContextClassLoader());
    try {
        kryo.writeObject(output, object);
    } finally {
        SerializationUtilities.releaseKryo(kryo);
    }
    // close() also calls flush()
    output.close();
    return stream.toByteArray();
}
Also used: Output(com.esotericsoftware.kryo.io.Output) ByteArrayOutputStream(org.apache.commons.io.output.ByteArrayOutputStream) Kryo(com.esotericsoftware.kryo.Kryo)
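
A matching deserializer follows the same borrow/release discipline, reading with a Kryo Input instead of writing with an Output. A sketch of the inverse operation, assuming the same Hive SerializationUtilities pool (the exact method in Hive may differ):

import com.esotericsoftware.kryo.Kryo;
import com.esotericsoftware.kryo.io.Input;

public static <T> T deserialize(byte[] bytes, Class<T> clazz) {
    Kryo kryo = SerializationUtilities.borrowKryo();
    kryo.setClassLoader(Thread.currentThread().getContextClassLoader());
    try {
        // Input can wrap the byte array directly; no intermediate stream is needed.
        return kryo.readObject(new Input(bytes), clazz);
    } finally {
        // Return the borrowed instance to the pool even if readObject throws.
        SerializationUtilities.releaseKryo(kryo);
    }
}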

Aggregations

ByteArrayOutputStream (org.apache.commons.io.output.ByteArrayOutputStream): 80 uses
Test (org.junit.Test): 36 uses
DataOutputStream (java.io.DataOutputStream): 15 uses
IOException (java.io.IOException): 14 uses
HashSet (java.util.HashSet): 13 uses
ArrayList (java.util.ArrayList): 12 uses
Configuration (org.apache.hadoop.conf.Configuration): 12 uses
ByteArrayInputStream (java.io.ByteArrayInputStream): 11 uses
SparkConf (org.apache.spark.SparkConf): 10 uses
Edge (uk.gov.gchq.gaffer.data.element.Edge): 10 uses
Element (uk.gov.gchq.gaffer.data.element.Element): 10 uses
Entity (uk.gov.gchq.gaffer.data.element.Entity): 10 uses
Graph (uk.gov.gchq.gaffer.graph.Graph): 10 uses
User (uk.gov.gchq.gaffer.user.User): 10 uses
PrintStream (java.io.PrintStream): 9 uses
HashMap (java.util.HashMap): 9 uses
File (java.io.File): 8 uses
InputStream (java.io.InputStream): 6 uses
OutputStream (java.io.OutputStream): 6 uses
JavaSparkContext (org.apache.spark.api.java.JavaSparkContext): 6 uses