Example 26 with Random

Use of java.util.Random in project flink by apache.

The class RecordWriterTest, method testBroadcastEventMixedRecords.

/**
 * Tests broadcasting events when records have been emitted. The emitted
 * records cover all three {@link SerializationResult} types.
 */
@Test
public void testBroadcastEventMixedRecords() throws Exception {
    Random rand = new XORShiftRandom();
    int numChannels = 4;
    int bufferSize = 32;
    // serialized length
    int lenBytes = 4;
    @SuppressWarnings("unchecked") Queue<BufferOrEvent>[] queues = new Queue[numChannels];
    for (int i = 0; i < numChannels; i++) {
        queues[i] = new ArrayDeque<>();
    }
    BufferProvider bufferProvider = createBufferProvider(bufferSize);
    ResultPartitionWriter partitionWriter = createCollectingPartitionWriter(queues, bufferProvider);
    RecordWriter<ByteArrayIO> writer = new RecordWriter<>(partitionWriter, new RoundRobin<ByteArrayIO>());
    CheckpointBarrier barrier = new CheckpointBarrier(Integer.MAX_VALUE + 1292L, Integer.MAX_VALUE + 199L, CheckpointOptions.forFullCheckpoint());
    // Emit records on some channels first (requesting buffers), then
    // broadcast the event. The record buffers should be emitted first, then
    // the event. After the event, no new buffer should be requested.
    // (i) Smaller than the buffer size (single buffer request => 1)
    byte[] bytes = new byte[bufferSize / 2];
    rand.nextBytes(bytes);
    writer.emit(new ByteArrayIO(bytes));
    // (ii) Larger than the buffer size (two buffer requests => 1 + 2)
    bytes = new byte[bufferSize + 1];
    rand.nextBytes(bytes);
    writer.emit(new ByteArrayIO(bytes));
    // (iii) Exactly the buffer size (single buffer request => 1 + 2 + 1)
    bytes = new byte[bufferSize - lenBytes];
    rand.nextBytes(bytes);
    writer.emit(new ByteArrayIO(bytes));
    // (iv) Nothing on the 4th channel (no buffer request => 1 + 2 + 1 + 0 = 4)
    // (v) Broadcast the event
    writer.broadcastEvent(barrier);
    verify(bufferProvider, times(4)).requestBufferBlocking();
    // 1 buffer + 1 event
    assertEquals(2, queues[0].size());
    // 2 buffers + 1 event
    assertEquals(3, queues[1].size());
    // 1 buffer + 1 event
    assertEquals(2, queues[2].size());
    // 0 buffers + 1 event
    assertEquals(1, queues[3].size());
}
Also used: Random (java.util.Random), XORShiftRandom (org.apache.flink.util.XORShiftRandom), CheckpointBarrier (org.apache.flink.runtime.io.network.api.CheckpointBarrier), BufferProvider (org.apache.flink.runtime.io.network.buffer.BufferProvider), TestInfiniteBufferProvider (org.apache.flink.runtime.io.network.util.TestInfiniteBufferProvider), Queue (java.util.Queue), PrepareForTest (org.powermock.core.classloader.annotations.PrepareForTest), Test (org.junit.Test)
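
To make the buffer accounting behind comments (i)-(iv) concrete, here is a minimal standalone sketch of the size arithmetic (plain Java, no Flink types; buffersNeeded is a hypothetical helper, not Flink API) that reproduces the expected 1 + 2 + 1 + 0 = 4 buffer requests, assuming a 4-byte length prefix per record:

// Illustrative sketch: how many fixed-size buffers a serialized record consumes
// when the serializer writes a 4-byte length prefix before the payload.
public final class BufferMath {

    static int buffersNeeded(int payloadBytes, int bufferSize) {
        int lenBytes = 4;                              // serialized length prefix
        int total = lenBytes + payloadBytes;           // bytes the serializer must place into buffers
        return (total + bufferSize - 1) / bufferSize;  // ceiling division
    }

    public static void main(String[] args) {
        int bufferSize = 32;
        System.out.println(buffersNeeded(bufferSize / 2, bufferSize)); // (i)   16 + 4 = 20 bytes -> 1 buffer
        System.out.println(buffersNeeded(bufferSize + 1, bufferSize)); // (ii)  33 + 4 = 37 bytes -> 2 buffers
        System.out.println(buffersNeeded(bufferSize - 4, bufferSize)); // (iii) 28 + 4 = 32 bytes -> 1 buffer, exactly full
    }
}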

Example 27 with Random

Use of java.util.Random in project flink by apache.

The class StateTableSnapshotCompatibilityTest, method checkCompatibleSerializationFormats.

/**
 * This test ensures that different implementations of {@link StateTable} are compatible in
 * their serialization format.
 */
@Test
public void checkCompatibleSerializationFormats() throws IOException {
    final Random r = new Random(42);
    RegisteredBackendStateMetaInfo<Integer, ArrayList<Integer>> metaInfo = new RegisteredBackendStateMetaInfo<>(StateDescriptor.Type.UNKNOWN, "test", IntSerializer.INSTANCE, new ArrayListSerializer<>(IntSerializer.INSTANCE));
    final CopyOnWriteStateTableTest.MockInternalKeyContext<Integer> keyContext = new CopyOnWriteStateTableTest.MockInternalKeyContext<>(IntSerializer.INSTANCE);
    CopyOnWriteStateTable<Integer, Integer, ArrayList<Integer>> cowStateTable = new CopyOnWriteStateTable<>(keyContext, metaInfo);
    for (int i = 0; i < 100; ++i) {
        ArrayList<Integer> list = new ArrayList<>(5);
        int end = r.nextInt(5);
        for (int j = 0; j < end; ++j) {
            list.add(r.nextInt(100));
        }
        cowStateTable.put(r.nextInt(10), r.nextInt(2), list);
    }
    StateTableSnapshot snapshot = cowStateTable.createSnapshot();
    final NestedMapsStateTable<Integer, Integer, ArrayList<Integer>> nestedMapsStateTable = new NestedMapsStateTable<>(keyContext, metaInfo);
    restoreStateTableFromSnapshot(nestedMapsStateTable, snapshot, keyContext.getKeyGroupRange());
    snapshot.release();
    Assert.assertEquals(cowStateTable.size(), nestedMapsStateTable.size());
    for (StateEntry<Integer, Integer, ArrayList<Integer>> entry : cowStateTable) {
        Assert.assertEquals(entry.getState(), nestedMapsStateTable.get(entry.getKey(), entry.getNamespace()));
    }
    snapshot = nestedMapsStateTable.createSnapshot();
    cowStateTable = new CopyOnWriteStateTable<>(keyContext, metaInfo);
    restoreStateTableFromSnapshot(cowStateTable, snapshot, keyContext.getKeyGroupRange());
    snapshot.release();
    Assert.assertEquals(nestedMapsStateTable.size(), cowStateTable.size());
    for (StateEntry<Integer, Integer, ArrayList<Integer>> entry : cowStateTable) {
        Assert.assertEquals(nestedMapsStateTable.get(entry.getKey(), entry.getNamespace()), entry.getState());
    }
}
Also used: RegisteredBackendStateMetaInfo (org.apache.flink.runtime.state.RegisteredBackendStateMetaInfo), ArrayList (java.util.ArrayList), Random (java.util.Random), Test (org.junit.Test)
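
A note on the seeded Random above: new Random(42) makes the generated lists deterministic, so both StateTable implementations are fed identical data on every run. A self-contained sketch of the same generation loop (plain JDK only; the printf layout is illustrative):

import java.util.ArrayList;
import java.util.Random;

public class SeededDataDemo {

    public static void main(String[] args) {
        Random r = new Random(42); // fixed seed -> identical "random" sequence on every run
        for (int i = 0; i < 3; i++) {
            ArrayList<Integer> list = new ArrayList<>(5);
            int end = r.nextInt(5); // list length in [0, 5)
            for (int j = 0; j < end; j++) {
                list.add(r.nextInt(100));
            }
            // key in [0, 10) and namespace in [0, 2), mirroring cowStateTable.put(...)
            System.out.printf("key=%d namespace=%d state=%s%n", r.nextInt(10), r.nextInt(2), list);
        }
    }
}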

Example 28 with Random

Use of java.util.Random in project flink by apache.

The class CopyOnWriteStateTableTest, method testRandomModificationsAndCopyOnWriteIsolation.

/**
 * This test applies random modifications to a state table and to a reference (a hash map),
 * then draws snapshots, performs more modifications, and checks snapshot integrity.
 */
@Test
public void testRandomModificationsAndCopyOnWriteIsolation() throws Exception {
    final RegisteredBackendStateMetaInfo<Integer, ArrayList<Integer>> metaInfo =
            new RegisteredBackendStateMetaInfo<>(
                    StateDescriptor.Type.UNKNOWN,
                    "test",
                    IntSerializer.INSTANCE,
                    // we use mutable state objects
                    new ArrayListSerializer<>(IntSerializer.INSTANCE));
    final MockInternalKeyContext<Integer> keyContext = new MockInternalKeyContext<>(IntSerializer.INSTANCE);
    final CopyOnWriteStateTable<Integer, Integer, ArrayList<Integer>> stateTable = new CopyOnWriteStateTable<>(keyContext, metaInfo);
    final HashMap<Tuple2<Integer, Integer>, ArrayList<Integer>> referenceMap = new HashMap<>();
    final Random random = new Random(42);
    // holds snapshots from the map under test
    CopyOnWriteStateTable.StateTableEntry<Integer, Integer, ArrayList<Integer>>[] snapshot = null;
    int snapshotSize = 0;
    // holds a reference snapshot from our reference map that we compare against
    Tuple3<Integer, Integer, ArrayList<Integer>>[] reference = null;
    int val = 0;
    int snapshotCounter = 0;
    int referencedSnapshotId = 0;
    final StateTransformationFunction<ArrayList<Integer>, Integer> transformationFunction = new StateTransformationFunction<ArrayList<Integer>, Integer>() {

        @Override
        public ArrayList<Integer> apply(ArrayList<Integer> previousState, Integer value) throws Exception {
            if (previousState == null) {
                previousState = new ArrayList<>();
            }
            previousState.add(value);
            // we give back the original object on purpose, attempting to expose errors in the copy-on-write logic
            return previousState;
        }
    };
    // the main loop for modifications
    for (int i = 0; i < 10_000_000; ++i) {
        int key = random.nextInt(20);
        int namespace = random.nextInt(4);
        Tuple2<Integer, Integer> compositeKey = new Tuple2<>(key, namespace);
        int op = random.nextInt(7);
        ArrayList<Integer> state = null;
        ArrayList<Integer> referenceState = null;
        switch(op) {
            case 0:
            case 1:
                {
                    state = stateTable.get(key, namespace);
                    referenceState = referenceMap.get(compositeKey);
                    if (null == state) {
                        state = new ArrayList<>();
                        stateTable.put(key, namespace, state);
                        referenceState = new ArrayList<>();
                        referenceMap.put(compositeKey, referenceState);
                    }
                    break;
                }
            case 2:
                {
                    stateTable.put(key, namespace, new ArrayList<Integer>());
                    referenceMap.put(compositeKey, new ArrayList<Integer>());
                    break;
                }
            case 3:
                {
                    state = stateTable.putAndGetOld(key, namespace, new ArrayList<Integer>());
                    referenceState = referenceMap.put(compositeKey, new ArrayList<Integer>());
                    break;
                }
            case 4:
                {
                    stateTable.remove(key, namespace);
                    referenceMap.remove(compositeKey);
                    break;
                }
            case 5:
                {
                    state = stateTable.removeAndGetOld(key, namespace);
                    referenceState = referenceMap.remove(compositeKey);
                    break;
                }
            case 6:
                {
                    final int updateValue = random.nextInt(1000);
                    stateTable.transform(key, namespace, updateValue, transformationFunction);
                    referenceMap.put(compositeKey, transformationFunction.apply(referenceMap.remove(compositeKey), updateValue));
                    break;
                }
            default:
                {
                    Assert.fail("Unknown op-code " + op);
                }
        }
        Assert.assertEquals(referenceMap.size(), stateTable.size());
        if (state != null) {
            // mutate the states a bit...
            if (random.nextBoolean() && !state.isEmpty()) {
                state.remove(state.size() - 1);
                referenceState.remove(referenceState.size() - 1);
            } else {
                state.add(val);
                referenceState.add(val);
                ++val;
            }
        }
        Assert.assertEquals(referenceState, state);
        // snapshot triggering / comparison / release
        if (i > 0 && i % 500 == 0) {
            if (snapshot != null) {
                // check our referenced snapshot
                deepCheck(reference, convert(snapshot, snapshotSize));
                if (i % 1_000 == 0) {
                    // draw and release some other snapshot while holding on the old snapshot
                    ++snapshotCounter;
                    stateTable.snapshotTableArrays();
                    stateTable.releaseSnapshot(snapshotCounter);
                }
                // release the snapshot after some time
                if (i % 5_000 == 0) {
                    snapshot = null;
                    reference = null;
                    snapshotSize = 0;
                    stateTable.releaseSnapshot(referencedSnapshotId);
                }
            } else {
                // if there is no more referenced snapshot, we create one
                ++snapshotCounter;
                referencedSnapshotId = snapshotCounter;
                snapshot = stateTable.snapshotTableArrays();
                snapshotSize = stateTable.size();
                reference = manualDeepDump(referenceMap);
            }
        }
    }
}
Also used: StateTransformationFunction (org.apache.flink.runtime.state.StateTransformationFunction), HashMap (java.util.HashMap), RegisteredBackendStateMetaInfo (org.apache.flink.runtime.state.RegisteredBackendStateMetaInfo), ArrayList (java.util.ArrayList), Random (java.util.Random), Tuple2 (org.apache.flink.api.java.tuple.Tuple2), Tuple3 (org.apache.flink.api.java.tuple.Tuple3), Test (org.junit.Test)
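
The reference snapshot taken via manualDeepDump only works because it deep-copies the mutable ArrayList values; copying just the map would let later in-place mutations leak into the snapshot. A minimal sketch of that isolation idea with plain JDK collections (all names hypothetical, no Flink types):

import java.util.ArrayList;
import java.util.HashMap;
import java.util.Map;

public class SnapshotIsolationDemo {

    public static void main(String[] args) {
        Map<Integer, ArrayList<Integer>> live = new HashMap<>();
        ArrayList<Integer> state = new ArrayList<>();
        state.add(10);
        state.add(20);
        live.put(1, state);

        // deep-copy "snapshot": the lists are copied, not just the map, so later
        // in-place mutation of the live state cannot reach the snapshot
        Map<Integer, ArrayList<Integer>> snapshot = new HashMap<>();
        live.forEach((k, v) -> snapshot.put(k, new ArrayList<>(v)));

        live.get(1).add(30); // mutate live state after the snapshot was taken

        System.out.println(live.get(1));     // [10, 20, 30]
        System.out.println(snapshot.get(1)); // [10, 20] -- isolation preserved
    }
}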

Example 29 with Random

Use of java.util.Random in project flink by apache.

The class StateBackendTestBase, method testKeyGroupSnapshotRestore.

/**
 * This test verifies that state is correctly assigned to key groups and that restoring
 * brings back the relevant key groups in the backend.
 *
 * <p>We have ten key groups. Initially, one backend is responsible for all ten key groups.
 * Then we snapshot, split up the state, and restore into two backends, each of which is
 * responsible for five key groups. Finally, we make sure that the state is only available
 * in the correct backend.
 *
 * @throws Exception
 */
@Test
public void testKeyGroupSnapshotRestore() throws Exception {
    final int MAX_PARALLELISM = 10;
    CheckpointStreamFactory streamFactory = createStreamFactory();
    AbstractKeyedStateBackend<Integer> backend = createKeyedBackend(IntSerializer.INSTANCE, MAX_PARALLELISM, new KeyGroupRange(0, MAX_PARALLELISM - 1), new DummyEnvironment("test", 1, 0));
    ValueStateDescriptor<String> kvId = new ValueStateDescriptor<>("id", String.class);
    kvId.initializeSerializerUnlessSet(new ExecutionConfig());
    ValueState<String> state = backend.getPartitionedState(VoidNamespace.INSTANCE, VoidNamespaceSerializer.INSTANCE, kvId);
    // keys that fall into the first half/second half of the key groups, respectively
    int keyInFirstHalf = 17;
    int keyInSecondHalf = 42;
    Random rand = new Random(0);
    // for each key, determine into which half of the key-group space they fall
    int firstKeyHalf = KeyGroupRangeAssignment.assignKeyToParallelOperator(keyInFirstHalf, MAX_PARALLELISM, 2);
    int secondKeyHalf = KeyGroupRangeAssignment.assignKeyToParallelOperator(keyInSecondHalf, MAX_PARALLELISM, 2);
    while (firstKeyHalf == secondKeyHalf) {
        keyInSecondHalf = rand.nextInt();
        secondKeyHalf = KeyGroupRangeAssignment.assignKeyToParallelOperator(keyInSecondHalf, MAX_PARALLELISM, 2);
    }
    backend.setCurrentKey(keyInFirstHalf);
    state.update("ShouldBeInFirstHalf");
    backend.setCurrentKey(keyInSecondHalf);
    state.update("ShouldBeInSecondHalf");
    KeyGroupsStateHandle snapshot = FutureUtil.runIfNotDoneAndGet(backend.snapshot(0, 0, streamFactory, CheckpointOptions.forFullCheckpoint()));
    List<KeyGroupsStateHandle> firstHalfKeyGroupStates = StateAssignmentOperation.getKeyGroupsStateHandles(Collections.singletonList(snapshot), KeyGroupRangeAssignment.computeKeyGroupRangeForOperatorIndex(MAX_PARALLELISM, 2, 0));
    List<KeyGroupsStateHandle> secondHalfKeyGroupStates = StateAssignmentOperation.getKeyGroupsStateHandles(Collections.singletonList(snapshot), KeyGroupRangeAssignment.computeKeyGroupRangeForOperatorIndex(MAX_PARALLELISM, 2, 1));
    backend.dispose();
    // backend for the first half of the key group range
    AbstractKeyedStateBackend<Integer> firstHalfBackend = restoreKeyedBackend(IntSerializer.INSTANCE, MAX_PARALLELISM, new KeyGroupRange(0, 4), firstHalfKeyGroupStates, new DummyEnvironment("test", 1, 0));
    // backend for the second half of the key group range
    AbstractKeyedStateBackend<Integer> secondHalfBackend = restoreKeyedBackend(IntSerializer.INSTANCE, MAX_PARALLELISM, new KeyGroupRange(5, 9), secondHalfKeyGroupStates, new DummyEnvironment("test", 1, 0));
    ValueState<String> firstHalfState = firstHalfBackend.getPartitionedState(VoidNamespace.INSTANCE, VoidNamespaceSerializer.INSTANCE, kvId);
    firstHalfBackend.setCurrentKey(keyInFirstHalf);
    assertEquals("ShouldBeInFirstHalf", firstHalfState.value());
    firstHalfBackend.setCurrentKey(keyInSecondHalf);
    assertNull(firstHalfState.value());
    ValueState<String> secondHalfState = secondHalfBackend.getPartitionedState(VoidNamespace.INSTANCE, VoidNamespaceSerializer.INSTANCE, kvId);
    secondHalfBackend.setCurrentKey(keyInFirstHalf);
    assertNull(secondHalfState.value());
    secondHalfBackend.setCurrentKey(keyInSecondHalf);
    assertEquals("ShouldBeInSecondHalf", secondHalfState.value());
    firstHalfBackend.dispose();
    secondHalfBackend.dispose();
}
Also used: BlockerCheckpointStreamFactory (org.apache.flink.runtime.util.BlockerCheckpointStreamFactory), DummyEnvironment (org.apache.flink.runtime.operators.testutils.DummyEnvironment), ExecutionConfig (org.apache.flink.api.common.ExecutionConfig), ValueStateDescriptor (org.apache.flink.api.common.state.ValueStateDescriptor), Random (java.util.Random), Test (org.junit.Test)
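
For intuition about assignKeyToParallelOperator and computeKeyGroupRangeForOperatorIndex, here is a simplified model of key-group routing (an assumption worth flagging: real Flink additionally murmur-hashes key.hashCode() before taking the modulo; the helper names below are hypothetical):

public class KeyGroupDemo {

    // Simplified: real Flink murmur-hashes key.hashCode() before taking the modulo.
    static int keyGroup(int keyHash, int maxParallelism) {
        return Math.floorMod(keyHash, maxParallelism);
    }

    // Each parallel operator instance owns a contiguous slice of the key-group range.
    static int operatorIndex(int keyGroup, int maxParallelism, int parallelism) {
        return keyGroup * parallelism / maxParallelism;
    }

    public static void main(String[] args) {
        int maxParallelism = 10; // ten key groups, as in the test
        int parallelism = 2;     // split across two backends
        for (int key : new int[] { 17, 42 }) {
            int kg = keyGroup(Integer.hashCode(key), maxParallelism);
            System.out.printf("key=%d -> keyGroup=%d -> operator=%d%n",
                    key, kg, operatorIndex(kg, maxParallelism, parallelism));
        }
    }
}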

Example 30 with Random

Use of java.util.Random in project hadoop by apache.

The class TestLocalFileSystem, method testBufferedFSInputStream.

/**
 * Regression test for HADOOP-9307: BufferedFSInputStream returning
 * wrong results after certain sequences of seeks and reads.
 */
@Test
public void testBufferedFSInputStream() throws IOException {
    Configuration conf = new Configuration();
    conf.setClass("fs.file.impl", RawLocalFileSystem.class, FileSystem.class);
    conf.setInt(CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY, 4096);
    FileSystem fs = FileSystem.newInstance(conf);
    byte[] buf = new byte[10 * 1024];
    new Random().nextBytes(buf);
    // Write random bytes to file
    FSDataOutputStream stream = fs.create(TEST_PATH);
    try {
        stream.write(buf);
    } finally {
        stream.close();
    }
    Random r = new Random();
    FSDataInputStream stm = fs.open(TEST_PATH);
    // Record the sequence of seeks and reads which trigger a failure.
    int[] seeks = new int[10];
    int[] reads = new int[10];
    try {
        for (int i = 0; i < 1000; i++) {
            int seekOff = r.nextInt(buf.length);
            int toRead = r.nextInt(Math.min(buf.length - seekOff, 32000));
            seeks[i % seeks.length] = seekOff;
            reads[i % reads.length] = toRead;
            verifyRead(stm, buf, seekOff, toRead);
        }
    } catch (AssertionError afe) {
        StringBuilder sb = new StringBuilder();
        sb.append("Sequence of actions:\n");
        for (int j = 0; j < seeks.length; j++) {
            sb.append("seek @ ").append(seeks[j]).append("  ").append("read ").append(reads[j]).append("\n");
        }
        System.err.println(sb.toString());
        throw afe;
    } finally {
        stm.close();
    }
}
Also used: Configuration (org.apache.hadoop.conf.Configuration), Random (java.util.Random), Test (org.junit.Test)
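
The same seek-then-read check can be reproduced without Hadoop. A self-contained sketch that uses RandomAccessFile in place of FSDataInputStream and inlines what verifyRead is assumed to do (compare the bytes read against the expected slice of buf):

import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.util.Arrays;
import java.util.Random;

public class SeekReadCheck {

    public static void main(String[] args) throws IOException {
        byte[] buf = new byte[10 * 1024];
        new Random(1).nextBytes(buf);

        File f = File.createTempFile("seekread", ".bin");
        try (FileOutputStream out = new FileOutputStream(f)) {
            out.write(buf); // write the random bytes once
        }

        Random r = new Random();
        try (RandomAccessFile in = new RandomAccessFile(f, "r")) {
            for (int i = 0; i < 1000; i++) {
                int seekOff = r.nextInt(buf.length);
                int toRead = r.nextInt(Math.min(buf.length - seekOff, 32000));
                byte[] got = new byte[toRead];
                in.seek(seekOff);
                in.readFully(got);
                // the "verifyRead" step: compare against the expected slice
                byte[] want = Arrays.copyOfRange(buf, seekOff, seekOff + toRead);
                if (!Arrays.equals(want, got)) {
                    throw new AssertionError("mismatch at seek=" + seekOff + " len=" + toRead);
                }
            }
        } finally {
            f.delete();
        }
    }
}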

Aggregations

Random (java.util.Random)4728 Test (org.junit.Test)1273 ArrayList (java.util.ArrayList)602 IOException (java.io.IOException)313 HashMap (java.util.HashMap)242 File (java.io.File)209 List (java.util.List)154 AtomicInteger (java.util.concurrent.atomic.AtomicInteger)151 ByteArrayInputStream (java.io.ByteArrayInputStream)134 HashSet (java.util.HashSet)129 ByteBuffer (java.nio.ByteBuffer)123 Test (org.testng.annotations.Test)121 Path (org.apache.hadoop.fs.Path)116 Map (java.util.Map)106 QuickTest (com.hazelcast.test.annotation.QuickTest)99 ParallelTest (com.hazelcast.test.annotation.ParallelTest)94 CountDownLatch (java.util.concurrent.CountDownLatch)93 Configuration (org.apache.hadoop.conf.Configuration)88 ByteArrayOutputStream (java.io.ByteArrayOutputStream)79 Before (org.junit.Before)78