Use of java.util.Timer in project flink by apache.
The class LocalInputChannelTest, method testPartitionRequestExponentialBackoff.
@Test
public void testPartitionRequestExponentialBackoff() throws Exception {
    // Config
    Tuple2<Integer, Integer> backoff = new Tuple2<>(500, 3000);
    // Start with initial backoff, then keep doubling, and cap at max.
    int[] expectedDelays = { backoff._1(), 1000, 2000, backoff._2() };
    // Setup
    SingleInputGate inputGate = mock(SingleInputGate.class);
    BufferProvider bufferProvider = mock(BufferProvider.class);
    when(inputGate.getBufferProvider()).thenReturn(bufferProvider);
    ResultPartitionManager partitionManager = mock(ResultPartitionManager.class);
    LocalInputChannel ch = createLocalInputChannel(inputGate, partitionManager, backoff);
    when(partitionManager.createSubpartitionView(
            eq(ch.partitionId), eq(0), eq(bufferProvider), any(BufferAvailabilityListener.class)))
        .thenThrow(new PartitionNotFoundException(ch.partitionId));
    Timer timer = mock(Timer.class);
    doAnswer(new Answer<Void>() {
        @Override
        public Void answer(InvocationOnMock invocation) throws Throwable {
            ((TimerTask) invocation.getArguments()[0]).run();
            return null;
        }
    }).when(timer).schedule(any(TimerTask.class), anyLong());
    // Initial request
    ch.requestSubpartition(0);
    verify(partitionManager).createSubpartitionView(
        eq(ch.partitionId), eq(0), eq(bufferProvider), any(BufferAvailabilityListener.class));
    // Request subpartition and verify that the actual requests are delayed.
    for (long expected : expectedDelays) {
        ch.retriggerSubpartitionRequest(timer, 0);
        verify(timer).schedule(any(TimerTask.class), eq(expected));
    }
    // Exception after backoff is greater than the maximum backoff.
    try {
        ch.retriggerSubpartitionRequest(timer, 0);
        ch.getNextBuffer();
        fail("Did not throw expected exception.");
    } catch (Exception expected) {
    }
}
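The interesting move in this test is how it avoids real waiting: the mocked Timer runs every scheduled TimerTask immediately on the calling thread, so the exponential-backoff delays (500, 1000, 2000, capped at 3000 ms) can be asserted through verify without sleeping. A minimal, self-contained sketch of that stubbing pattern, assuming Mockito is on the classpath (the class name SynchronousTimerSketch is hypothetical):

    import java.util.Timer;
    import java.util.TimerTask;

    import org.mockito.invocation.InvocationOnMock;
    import org.mockito.stubbing.Answer;

    import static org.mockito.Mockito.any;
    import static org.mockito.Mockito.anyLong;
    import static org.mockito.Mockito.doAnswer;
    import static org.mockito.Mockito.mock;

    public class SynchronousTimerSketch {

        /** Returns a mock Timer that runs each scheduled task synchronously. */
        public static Timer synchronousTimer() {
            Timer timer = mock(Timer.class);
            doAnswer(new Answer<Void>() {
                @Override
                public Void answer(InvocationOnMock invocation) throws Throwable {
                    // Run the task immediately instead of after the requested delay.
                    ((TimerTask) invocation.getArguments()[0]).run();
                    return null;
                }
            }).when(timer).schedule(any(TimerTask.class), anyLong());
            return timer;
        }
    }

Because the stub accepts anyLong(), every delay is admitted at call time; the test then pins down the exact delays afterwards with verify(timer).schedule(any(TimerTask.class), eq(expected)).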
Use of java.util.Timer in project flink by apache.
The class StateBackendTestBase, method testValueStateRace.
/**
 * Tests {@link ValueState#value()} and {@link InternalKvState#getSerializedValue(byte[])}
 * accessing the state concurrently. They should not get in the way of each other.
 */
@Test
@SuppressWarnings("unchecked")
public void testValueStateRace() throws Exception {
    final AbstractKeyedStateBackend<Integer> backend = createKeyedBackend(IntSerializer.INSTANCE);
    final Integer namespace = 1;
    final ValueStateDescriptor<String> kvId = new ValueStateDescriptor<>("id", String.class);
    kvId.initializeSerializerUnlessSet(new ExecutionConfig());
    final TypeSerializer<Integer> keySerializer = IntSerializer.INSTANCE;
    final TypeSerializer<Integer> namespaceSerializer = IntSerializer.INSTANCE;
    final TypeSerializer<String> valueSerializer = kvId.getSerializer();
    final ValueState<String> state = backend.getPartitionedState(namespace, IntSerializer.INSTANCE, kvId);
    @SuppressWarnings("unchecked")
    final InternalKvState<Integer> kvState = (InternalKvState<Integer>) state;
    /*
     * 1) Test that ValueState#value() before and after
     * KvState#getSerializedValue(byte[]) return the same value.
     */
    // set some key and namespace
    final int key1 = 1;
    backend.setCurrentKey(key1);
    kvState.setCurrentNamespace(2);
    state.update("2");
    assertEquals("2", state.value());
    // query another key and namespace
    assertNull(getSerializedValue(kvState, 3, keySerializer, namespace, IntSerializer.INSTANCE, valueSerializer));
    // the state should not have changed!
    assertEquals("2", state.value());
    // re-set values
    kvState.setCurrentNamespace(namespace);
    /*
     * 2) Test two threads concurrently using ValueState#value() and
     * KvState#getSerializedValue(byte[]).
     */
    // some modifications to the state
    final int key2 = 10;
    backend.setCurrentKey(key2);
    assertNull(state.value());
    assertNull(getSerializedValue(kvState, key2, keySerializer, namespace, namespaceSerializer, valueSerializer));
    state.update("1");
    final CheckedThread getter = new CheckedThread("State getter") {
        @Override
        public void go() throws Exception {
            while (!isInterrupted()) {
                assertEquals("1", state.value());
            }
        }
    };
    final CheckedThread serializedGetter = new CheckedThread("Serialized state getter") {
        @Override
        public void go() throws Exception {
            while (!isInterrupted() && getter.isAlive()) {
                final String serializedValue = getSerializedValue(kvState, key2, keySerializer, namespace, namespaceSerializer, valueSerializer);
                assertEquals("1", serializedValue);
            }
        }
    };
    getter.start();
    serializedGetter.start();
    // run both threads for max 100ms
    Timer t = new Timer("stopper");
    t.schedule(new TimerTask() {
        @Override
        public void run() {
            getter.interrupt();
            serializedGetter.interrupt();
            this.cancel();
        }
    }, 100);
    // wait for both threads to finish
    try {
        // serializedGetter will finish if its assertion fails or if
        // getter is not alive any more
        serializedGetter.sync();
        // if serializedGetter crashed, getter will not know -> interrupt just in case
        getter.interrupt();
        getter.sync();
        // if not executed yet
        t.cancel();
    } finally {
        // clean up
        backend.dispose();
    }
}
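The "stopper" Timer is what bounds this race test: a one-shot TimerTask fires after 100 ms and interrupts both reader threads, so the loops terminate instead of spinning forever. A stripped-down sketch of the same pattern, with no Flink dependencies (the class name TimedInterruptSketch is hypothetical):

    import java.util.Timer;
    import java.util.TimerTask;

    public class TimedInterruptSketch {
        public static void main(String[] args) throws InterruptedException {
            final Thread worker = new Thread(() -> {
                // Spin until interrupted, standing in for the assertion loops above.
                while (!Thread.currentThread().isInterrupted()) {
                    // work under test goes here
                }
            }, "worker");
            worker.start();

            // One-shot "stopper": interrupt the worker after at most 100 ms.
            Timer stopper = new Timer("stopper");
            stopper.schedule(new TimerTask() {
                @Override
                public void run() {
                    worker.interrupt();
                }
            }, 100);

            worker.join();
            stopper.cancel(); // let the (non-daemon) timer thread exit
        }
    }

Note that this.cancel() inside run() only unschedules that one task; Timer.cancel() is what actually terminates the timer's background thread.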
Use of java.util.Timer in project hadoop by apache.
The class Shell, method runCommand.
/** Run the command. */
private void runCommand() throws IOException {
    ProcessBuilder builder = new ProcessBuilder(getExecString());
    Timer timeOutTimer = null;
    ShellTimeoutTimerTask timeoutTimerTask = null;
    timedOut.set(false);
    completed.set(false);
    // Remove all env vars from the Builder to get a clean env from
    // the parent process.
    if (!inheritParentEnv) {
        builder.environment().clear();
    }
    if (environment != null) {
        builder.environment().putAll(this.environment);
    }
    if (dir != null) {
        builder.directory(this.dir);
    }
    builder.redirectErrorStream(redirectErrorStream);
    if (Shell.WINDOWS) {
        synchronized (WindowsProcessLaunchLock) {
            // To workaround the race condition issue with child processes
            // inheriting unintended handles during process launch that can
            // lead to hangs on reading output and error streams, we
            // serialize process creation. More info available at:
            // http://support.microsoft.com/kb/315939
            process = builder.start();
        }
    } else {
        process = builder.start();
    }
    waitingThread = Thread.currentThread();
    CHILD_SHELLS.put(this, null);
    if (timeOutInterval > 0) {
        timeOutTimer = new Timer("Shell command timeout");
        timeoutTimerTask = new ShellTimeoutTimerTask(this);
        // One-time scheduling.
        timeOutTimer.schedule(timeoutTimerTask, timeOutInterval);
    }
    final BufferedReader errReader = new BufferedReader(
        new InputStreamReader(process.getErrorStream(), Charset.defaultCharset()));
    BufferedReader inReader = new BufferedReader(
        new InputStreamReader(process.getInputStream(), Charset.defaultCharset()));
    final StringBuffer errMsg = new StringBuffer();
    // read error and input streams as this would free up the buffers
    // free the error stream buffer
    Thread errThread = new Thread() {
        @Override
        public void run() {
            try {
                String line = errReader.readLine();
                while ((line != null) && !isInterrupted()) {
                    errMsg.append(line);
                    errMsg.append(System.getProperty("line.separator"));
                    line = errReader.readLine();
                }
            } catch (IOException ioe) {
                // A "Stream closed" IOException is expected when the command
                // times out and the process is destroyed, so only log a WARN
                // if the command didn't time out.
                if (!isTimedOut()) {
                    LOG.warn("Error reading the error stream", ioe);
                } else {
                    LOG.debug("Error reading the error stream due to shell command timeout", ioe);
                }
            }
        }
    };
    try {
        errThread.start();
    } catch (IllegalStateException ise) {
    } catch (OutOfMemoryError oe) {
        LOG.error("Caught " + oe + ". One possible reason is that ulimit"
            + " setting of 'max user processes' is too low. If so, do"
            + " 'ulimit -u <largerNum>' and try again.");
        throw oe;
    }
    try {
        // parse the output
        parseExecResult(inReader);
        // clear the input stream buffer
        String line = inReader.readLine();
        while (line != null) {
            line = inReader.readLine();
        }
        // wait for the process to finish and check the exit code
        exitCode = process.waitFor();
        // make sure that the error thread exits
        joinThread(errThread);
        completed.set(true);
        // the timeout thread handling is taken care of in the finally block
        if (exitCode != 0) {
            throw new ExitCodeException(exitCode, errMsg.toString());
        }
    } catch (InterruptedException ie) {
        InterruptedIOException iie = new InterruptedIOException(ie.toString());
        iie.initCause(ie);
        throw iie;
    } finally {
        if (timeOutTimer != null) {
            timeOutTimer.cancel();
        }
        // close the input stream
        try {
            // JDK 7 tries to automatically drain the input streams for us
            // when the process exits, but since close is not synchronized,
            // it creates a race if we close the stream first and the same
            // fd is recycled. the stream draining thread will attempt to
            // drain that fd!! it may block, OOM, or cause bizarre behavior
            // see: https://bugs.openjdk.java.net/browse/JDK-8024521
            // issue is fixed in build 7u60
            InputStream stdout = process.getInputStream();
            synchronized (stdout) {
                inReader.close();
            }
        } catch (IOException ioe) {
            LOG.warn("Error while closing the input stream", ioe);
        }
        if (!completed.get()) {
            errThread.interrupt();
            joinThread(errThread);
        }
        try {
            InputStream stderr = process.getErrorStream();
            synchronized (stderr) {
                errReader.close();
            }
        } catch (IOException ioe) {
            LOG.warn("Error while closing the error stream", ioe);
        }
        process.destroy();
        waitingThread = null;
        CHILD_SHELLS.remove(this);
        lastTime = Time.monotonicNow();
    }
}
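The timeout machinery here has three cooperating parts: a one-shot Timer ("Shell command timeout"), a ShellTimeoutTimerTask that flags the shell as timed out and destroys the process, and the timeOutTimer.cancel() in the finally block so the task never fires for commands that finish in time. A reduced sketch of that interplay (the class name ProcessTimeoutSketch is hypothetical, and it assumes a POSIX sleep binary so the timeout actually triggers):

    import java.io.IOException;
    import java.util.Timer;
    import java.util.TimerTask;
    import java.util.concurrent.atomic.AtomicBoolean;

    public class ProcessTimeoutSketch {
        public static void main(String[] args) throws IOException, InterruptedException {
            final Process process = new ProcessBuilder("sleep", "30").start();
            final AtomicBoolean timedOut = new AtomicBoolean(false);

            // One-time scheduling, analogous to the "Shell command timeout" timer.
            Timer timeOutTimer = new Timer("process timeout", true);
            timeOutTimer.schedule(new TimerTask() {
                @Override
                public void run() {
                    timedOut.set(true);
                    process.destroy(); // unblocks waitFor() below
                }
            }, 2000);

            int exitCode = process.waitFor();
            timeOutTimer.cancel(); // harmless if the task already fired
            System.out.println("exit=" + exitCode + ", timedOut=" + timedOut.get());
        }
    }

Destroying the process rather than interrupting the waiting thread is the key design choice: waitFor() returns normally with a nonzero exit code, and the timedOut flag lets the stream-reading code distinguish a timeout from an ordinary I/O failure, exactly as isTimedOut() does in the method above.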
Use of java.util.Timer in project hadoop by apache.
The class MetricsSystemImpl, method startTimer.
private synchronized void startTimer() {
    if (timer != null) {
        LOG.warn(prefix + " metrics system timer already started!");
        return;
    }
    logicalTime = 0;
    long millis = period;
    timer = new Timer("Timer for '" + prefix + "' metrics system", true);
    timer.scheduleAtFixedRate(new TimerTask() {
        @Override
        public void run() {
            try {
                onTimerEvent();
            } catch (Exception e) {
                LOG.warn("Error invoking metrics timer", e);
            }
        }
    }, millis, millis);
    LOG.info("Scheduled Metric snapshot period at " + (period / 1000) + " second(s).");
}
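Two details carry the weight here: the Timer is created as a daemon (the boolean second constructor argument), so it can never block JVM shutdown, and onTimerEvent() is wrapped in try/catch because an exception that escapes TimerTask.run() cancels the Timer and silently kills all future executions. A self-contained sketch of the same shape (the class name PeriodicSnapshotSketch is hypothetical):

    import java.util.Timer;
    import java.util.TimerTask;

    public class PeriodicSnapshotSketch {
        public static void main(String[] args) throws InterruptedException {
            long millis = 1000;
            // Daemon timer: will not keep the JVM alive on its own.
            Timer timer = new Timer("metrics-snapshot", true);
            timer.scheduleAtFixedRate(new TimerTask() {
                @Override
                public void run() {
                    try {
                        System.out.println("snapshot at " + System.currentTimeMillis());
                    } catch (Exception e) {
                        // Never let an exception escape run(): an uncaught
                        // exception terminates the Timer thread for good.
                        System.err.println("snapshot failed: " + e);
                    }
                }
            }, millis, millis);

            Thread.sleep(3500); // let a few snapshots fire before the JVM exits
        }
    }

scheduleAtFixedRate targets a constant rate (delayed runs are followed by catch-up executions); if the snapshots should instead keep a constant gap between runs, schedule(task, delay, period) is the fixed-delay alternative.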
Use of java.util.Timer in project hadoop by apache.
The class RollingFileSystemSink, method initFs.
/**
 * Initialize the connection to HDFS and create the base directory. Also
 * launch the flush thread.
 */
private boolean initFs() {
    boolean success = false;
    fileSystem = getFileSystem();
    // This step isn't strictly necessary, but it makes debugging issues
    // much easier. We try to create the base directory eagerly and fail
    // with copious debug info if it fails.
    try {
        fileSystem.mkdirs(basePath);
        success = true;
    } catch (Exception ex) {
        if (!ignoreError) {
            throw new MetricsException("Failed to create " + basePath + "["
                + SOURCE_KEY + "=" + source + ", "
                + ALLOW_APPEND_KEY + "=" + allowAppend + ", "
                + stringifySecurityProperty(KEYTAB_PROPERTY_KEY) + ", "
                + stringifySecurityProperty(USERNAME_PROPERTY_KEY)
                + "] -- " + ex.toString(), ex);
        }
    }
    if (success) {
        // If we're permitted to append, check if we actually can
        if (allowAppend) {
            allowAppend = checkAppend(fileSystem);
        }
        flushTimer = new Timer("RollingFileSystemSink Flusher", true);
        setInitialFlushTime(new Date());
    }
    return success;
}
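The flusher here is again a daemon Timer, and the first flush is anchored to wall-clock time through setInitialFlushTime(new Date()); java.util.Timer supports that directly via schedule(TimerTask, Date). A simplified sketch that aligns the first flush to the next five-second boundary (the class name AlignedFlushSketch and the alignment arithmetic are illustrative stand-ins, not the sink's actual computation):

    import java.util.Date;
    import java.util.Timer;
    import java.util.TimerTask;

    public class AlignedFlushSketch {
        public static void main(String[] args) throws InterruptedException {
            Timer flushTimer = new Timer("RollingFileSystemSink Flusher", true);

            // Align the first flush to the next 5-second wall-clock boundary.
            long now = System.currentTimeMillis();
            Date firstFlush = new Date(now - (now % 5000) + 5000);

            flushTimer.schedule(new TimerTask() {
                @Override
                public void run() {
                    System.out.println("flush at " + new Date());
                }
            }, firstFlush);

            // The timer is a daemon, so keep the JVM alive long enough to see it fire.
            Thread.sleep(6000);
        }
    }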