Use of java.util.Random in project flink by apache.
The class CompactingHashTableTest, method testDoubleResize:
@Test
public void testDoubleResize() {
    // Only CompactingHashTable
    try {
        final int NUM_MEM_PAGES = 30 * NUM_PAIRS / PAGE_SIZE;
        final Random rnd = new Random(RANDOM_SEED);
        final IntPair[] pairs = getRandomizedIntPairs(NUM_PAIRS, rnd);
        List<MemorySegment> memory = getMemory(NUM_MEM_PAGES);
        CompactingHashTable<IntPair> table = new CompactingHashTable<IntPair>(intPairSerializer, intPairComparator, memory);
        table.open();
        for (int i = 0; i < NUM_PAIRS; i++) {
            table.insert(pairs[i]);
        }
        AbstractHashTableProber<IntPair, IntPair> prober = table.getProber(intPairComparator, new SameTypePairComparator<>(intPairComparator));
        IntPair target = new IntPair();
        for (int i = 0; i < NUM_PAIRS; i++) {
            assertNotNull(prober.getMatchFor(pairs[i], target));
            assertEquals(pairs[i].getValue(), target.getValue());
        }
        // make sure there is enough memory for resize
        memory.addAll(getMemory(ADDITIONAL_MEM));
        Boolean b = Whitebox.<Boolean>invokeMethod(table, "resizeHashTable");
        assertTrue(b);
        for (int i = 0; i < NUM_PAIRS; i++) {
            assertNotNull(pairs[i].getKey() + " " + pairs[i].getValue(), prober.getMatchFor(pairs[i], target));
            assertEquals(pairs[i].getValue(), target.getValue());
        }
        // make sure there is enough memory for resize
        memory.addAll(getMemory(ADDITIONAL_MEM));
        b = Whitebox.<Boolean>invokeMethod(table, "resizeHashTable");
        assertTrue(b);
        for (int i = 0; i < NUM_PAIRS; i++) {
            assertNotNull(pairs[i].getKey() + " " + pairs[i].getValue(), prober.getMatchFor(pairs[i], target));
            assertEquals(pairs[i].getValue(), target.getValue());
        }
        table.close();
        assertEquals("Memory lost", NUM_MEM_PAGES + ADDITIONAL_MEM + ADDITIONAL_MEM, table.getFreeMemory().size());
    } catch (Exception e) {
        e.printStackTrace();
        fail("Error: " + e.getMessage());
    }
}
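The test seeds Random with a fixed RANDOM_SEED so the generated input, and therefore any failure, is reproducible across runs. The helper getRandomizedIntPairs is not shown on this page; a minimal sketch of what such a generator could look like, assuming an IntPair(key, value) constructor (the actual Flink test utility may differ):

// Hypothetical sketch, not the actual Flink helper: produce `num` pseudo-random
// key/value pairs from an externally seeded Random, so the same seed always
// yields the same test input.
private static IntPair[] getRandomizedIntPairs(int num, Random rnd) {
    IntPair[] pairs = new IntPair[num];
    for (int i = 0; i < num; i++) {
        pairs[i] = new IntPair(rnd.nextInt(), rnd.nextInt());
    }
    return pairs;
}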
Use of java.util.Random in project flink by apache.
The class CompactingHashTableTest, method testResize:
@Test
public void testResize() {
    // Only CompactingHashTable
    try {
        final int NUM_MEM_PAGES = 30 * NUM_PAIRS / PAGE_SIZE;
        final Random rnd = new Random(RANDOM_SEED);
        final IntPair[] pairs = getRandomizedIntPairs(NUM_PAIRS, rnd);
        List<MemorySegment> memory = getMemory(NUM_MEM_PAGES);
        CompactingHashTable<IntPair> table = new CompactingHashTable<IntPair>(intPairSerializer, intPairComparator, memory);
        table.open();
        for (int i = 0; i < NUM_PAIRS; i++) {
            table.insert(pairs[i]);
        }
        AbstractHashTableProber<IntPair, IntPair> prober = table.getProber(intPairComparator, new SameTypePairComparator<>(intPairComparator));
        IntPair target = new IntPair();
        for (int i = 0; i < NUM_PAIRS; i++) {
            assertNotNull(prober.getMatchFor(pairs[i], target));
            assertEquals(pairs[i].getValue(), target.getValue());
        }
        // make sure there is enough memory for resize
        memory.addAll(getMemory(ADDITIONAL_MEM));
        Boolean b = Whitebox.<Boolean>invokeMethod(table, "resizeHashTable");
        assertTrue(b);
        for (int i = 0; i < NUM_PAIRS; i++) {
            assertNotNull(pairs[i].getKey() + " " + pairs[i].getValue(), prober.getMatchFor(pairs[i], target));
            assertEquals(pairs[i].getValue(), target.getValue());
        }
        table.close();
        assertEquals("Memory lost", NUM_MEM_PAGES + ADDITIONAL_MEM, table.getFreeMemory().size());
    } catch (Exception e) {
        e.printStackTrace();
        fail("Error: " + e.getMessage());
    }
}
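Both tests invoke the private resizeHashTable method through PowerMock's Whitebox, which calls it reflectively without widening its visibility for testing. A minimal illustration of the same reflective call on a hypothetical class (Counter and bump are made up for this sketch):

import org.powermock.reflect.Whitebox;

// Hypothetical class with a private method we want to exercise directly.
class Counter {
    private int value;
    private boolean bump(int by) {
        value += by;
        return value > 0;
    }
}

public class WhiteboxSketch {
    public static void main(String[] args) throws Exception {
        Counter c = new Counter();
        // Reflectively invoke the private method; the explicit type witness
        // tells Whitebox the expected return type.
        Boolean grew = Whitebox.<Boolean>invokeMethod(c, "bump", 5);
        System.out.println(grew); // true
    }
}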
Use of java.util.Random in project hadoop by apache.
The class MetricsSinkAdapter, method publishMetricsFromQueue:
void publishMetricsFromQueue() {
    int retryDelay = firstRetryDelay;
    int n = retryCount;
    int minDelay = Math.min(500, retryDelay * 1000); // millis
    Random rng = new Random(System.nanoTime());
    while (!stopping) {
        try {
            queue.consumeAll(this);
            refreshQueueSizeGauge();
            retryDelay = firstRetryDelay;
            n = retryCount;
            inError = false;
        } catch (InterruptedException e) {
            LOG.info(name + " thread interrupted.");
        } catch (Exception e) {
            if (n > 0) {
                int retryWindow = Math.max(0, 1000 / 2 * retryDelay - minDelay);
                int awhile = rng.nextInt(retryWindow) + minDelay;
                if (!inError) {
                    LOG.error("Got sink exception, retry in " + awhile + "ms", e);
                }
                retryDelay *= retryBackoff;
                try {
                    Thread.sleep(awhile);
                } catch (InterruptedException e2) {
                    LOG.info(name + " thread interrupted while waiting for retry", e2);
                }
                --n;
            } else {
                if (!inError) {
                    LOG.error("Got sink exception and over retry limit, " + "suppressing further error messages", e);
                }
                queue.clear();
                refreshQueueSizeGauge();
                inError = true; // Don't keep complaining ad infinitum
            }
        }
    }
}
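The catch block implements exponential backoff with random jitter: each failed delivery sleeps for a random duration drawn from a window that starts at minDelay and grows by retryBackoff after every consecutive failure, so a struggling sink is not hammered at a fixed cadence. A self-contained sketch of the same pattern follows; the names deliverWithRetries, maxRetries, and backoffFactor are illustrative, not Hadoop API:

import java.util.Random;

// Sketch of jittered exponential backoff, the pattern used above.
public final class JitteredBackoff {

    public static void deliverWithRetries(Runnable attemptDelivery,
                                          int maxRetries,
                                          int firstDelayMs,
                                          int backoffFactor) throws InterruptedException {
        Random rng = new Random(System.nanoTime());
        int delayMs = firstDelayMs;
        for (int attempt = 0; attempt <= maxRetries; attempt++) {
            try {
                attemptDelivery.run();
                return; // success
            } catch (RuntimeException e) {
                if (attempt == maxRetries) {
                    throw e; // out of retries; propagate the failure
                }
                // Sleep a random amount in [delayMs, 2 * delayMs) so that
                // concurrent retriers do not wake up in lockstep.
                int jittered = delayMs + rng.nextInt(Math.max(1, delayMs));
                Thread.sleep(jittered);
                delayMs *= backoffFactor; // exponential growth of the window
            }
        }
    }
}

For example, deliverWithRetries(() -> trySend(), 3, 500, 2) with a hypothetical trySend() would sleep roughly 0.5 to 1 s, then 1 to 2 s, then 2 to 4 s before propagating the failure.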
Use of java.util.Random in project flink by apache.
The class SavepointV1SerializerTest, method testSerializeDeserializeV1:
/**
 * Tests serialization and deserialization of a {@link SavepointV1} instance.
 */
@Test
public void testSerializeDeserializeV1() throws Exception {
    Random r = new Random(42);
    for (int i = 0; i < 100; ++i) {
        SavepointV1 expected = new SavepointV1(i + 123123, SavepointV1Test.createTaskStates(1 + r.nextInt(64), 1 + r.nextInt(64)));
        SavepointV1Serializer serializer = SavepointV1Serializer.INSTANCE;
        // Serialize
        ByteArrayOutputStreamWithPos baos = new ByteArrayOutputStreamWithPos();
        serializer.serialize(expected, new DataOutputViewStreamWrapper(baos));
        byte[] bytes = baos.toByteArray();
        // Deserialize
        ByteArrayInputStream bais = new ByteArrayInputStream(bytes);
        Savepoint actual = serializer.deserialize(new DataInputViewStreamWrapper(bais), Thread.currentThread().getContextClassLoader());
        assertEquals(expected, actual);
    }
}
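The test is a round-trip property check: serialize a randomly generated savepoint, deserialize it, and require equality, repeated 100 times with a fixed seed so a failing iteration replays deterministically. The same shape using only JDK classes; ObjectOutputStream stands in for the Flink-specific SavepointV1Serializer purely for illustration:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.util.ArrayList;
import java.util.List;
import java.util.Random;

// Sketch of a serialize/deserialize round-trip check with JDK serialization.
public final class RoundTripSketch {
    public static void main(String[] args) throws Exception {
        Random r = new Random(42); // fixed seed => reproducible failures
        for (int i = 0; i < 100; ++i) {
            // Build a randomly sized value to round-trip.
            List<Integer> expected = new ArrayList<>();
            for (int j = 0, n = 1 + r.nextInt(64); j < n; j++) {
                expected.add(r.nextInt());
            }
            // Serialize
            ByteArrayOutputStream baos = new ByteArrayOutputStream();
            try (ObjectOutputStream out = new ObjectOutputStream(baos)) {
                out.writeObject(expected);
            }
            // Deserialize and compare with equals, exactly like the test above
            try (ObjectInputStream in = new ObjectInputStream(new ByteArrayInputStream(baos.toByteArray()))) {
                if (!expected.equals(in.readObject())) {
                    throw new AssertionError("round trip changed the value at iteration " + i);
                }
            }
        }
    }
}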
Use of java.util.Random in project flink by apache.
The class SavepointV1Test, method createTaskStates:
static Collection<TaskState> createTaskStates(int numTaskStates, int numSubtasksPerTask) throws IOException {
    Random random = new Random(numTaskStates * 31 + numSubtasksPerTask);
    List<TaskState> taskStates = new ArrayList<>(numTaskStates);
    for (int stateIdx = 0; stateIdx < numTaskStates; ++stateIdx) {
        int chainLength = 1 + random.nextInt(8);
        TaskState taskState = new TaskState(new JobVertexID(), numSubtasksPerTask, 128, chainLength);
        int noNonPartitionableStateAtIndex = random.nextInt(chainLength);
        int noOperatorStateBackendAtIndex = random.nextInt(chainLength);
        int noOperatorStateStreamAtIndex = random.nextInt(chainLength);
        boolean hasKeyedBackend = random.nextInt(4) != 0;
        boolean hasKeyedStream = random.nextInt(4) != 0;
        for (int subtaskIdx = 0; subtaskIdx < numSubtasksPerTask; subtaskIdx++) {
            List<StreamStateHandle> nonPartitionableStates = new ArrayList<>(chainLength);
            List<OperatorStateHandle> operatorStatesBackend = new ArrayList<>(chainLength);
            List<OperatorStateHandle> operatorStatesStream = new ArrayList<>(chainLength);
            for (int chainIdx = 0; chainIdx < chainLength; ++chainIdx) {
                StreamStateHandle nonPartitionableState = new TestByteStreamStateHandleDeepCompare("a-" + chainIdx, ("Hi-" + chainIdx).getBytes(ConfigConstants.DEFAULT_CHARSET));
                StreamStateHandle operatorStateBackend = new TestByteStreamStateHandleDeepCompare("b-" + chainIdx, ("Beautiful-" + chainIdx).getBytes(ConfigConstants.DEFAULT_CHARSET));
                StreamStateHandle operatorStateStream = new TestByteStreamStateHandleDeepCompare("b-" + chainIdx, ("Beautiful-" + chainIdx).getBytes(ConfigConstants.DEFAULT_CHARSET));
                Map<String, OperatorStateHandle.StateMetaInfo> offsetsMap = new HashMap<>();
                offsetsMap.put("A", new OperatorStateHandle.StateMetaInfo(new long[] { 0, 10, 20 }, OperatorStateHandle.Mode.SPLIT_DISTRIBUTE));
                offsetsMap.put("B", new OperatorStateHandle.StateMetaInfo(new long[] { 30, 40, 50 }, OperatorStateHandle.Mode.SPLIT_DISTRIBUTE));
                offsetsMap.put("C", new OperatorStateHandle.StateMetaInfo(new long[] { 60, 70, 80 }, OperatorStateHandle.Mode.BROADCAST));
                if (chainIdx != noNonPartitionableStateAtIndex) {
                    nonPartitionableStates.add(nonPartitionableState);
                }
                if (chainIdx != noOperatorStateBackendAtIndex) {
                    OperatorStateHandle operatorStateHandleBackend = new OperatorStateHandle(offsetsMap, operatorStateBackend);
                    operatorStatesBackend.add(operatorStateHandleBackend);
                }
                if (chainIdx != noOperatorStateStreamAtIndex) {
                    OperatorStateHandle operatorStateHandleStream = new OperatorStateHandle(offsetsMap, operatorStateStream);
                    operatorStatesStream.add(operatorStateHandleStream);
                }
            }
            KeyGroupsStateHandle keyedStateBackend = null;
            KeyGroupsStateHandle keyedStateStream = null;
            if (hasKeyedBackend) {
                keyedStateBackend = new KeyGroupsStateHandle(new KeyGroupRangeOffsets(1, 1, new long[] { 42 }), new TestByteStreamStateHandleDeepCompare("c", "Hello".getBytes(ConfigConstants.DEFAULT_CHARSET)));
            }
            if (hasKeyedStream) {
                keyedStateStream = new KeyGroupsStateHandle(new KeyGroupRangeOffsets(1, 1, new long[] { 23 }), new TestByteStreamStateHandleDeepCompare("d", "World".getBytes(ConfigConstants.DEFAULT_CHARSET)));
            }
            taskState.putState(subtaskIdx, new SubtaskState(new ChainedStateHandle<>(nonPartitionableStates), new ChainedStateHandle<>(operatorStatesBackend), new ChainedStateHandle<>(operatorStatesStream), keyedStateStream, keyedStateBackend));
        }
        taskStates.add(taskState);
    }
    return taskStates;
}
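Here the seed itself is derived from the method arguments (numTaskStates * 31 + numSubtasksPerTask), so repeated calls with the same arguments regenerate identical fixtures without threading a seed parameter through the test. A tiny sketch of the same parameter-derived-seed idea; the names are illustrative:

import java.util.Random;

// Illustrative sketch: derive the Random seed from the fixture parameters
// (31 is the conventional hash multiplier), so equal arguments always
// regenerate exactly the same pseudo-random test data.
public final class DeterministicFixture {
    static int[] deterministicFixture(int rows, int cols) {
        Random random = new Random(rows * 31 + cols);
        int[] cells = new int[rows * cols];
        for (int i = 0; i < cells.length; i++) {
            cells[i] = random.nextInt(100);
        }
        return cells;
    }
}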