Use of org.apache.flink.api.common.time.Deadline in project flink by apache.
From class SavepointReaderITTestBase, method takeSavepoint.
private String takeSavepoint(JobGraph jobGraph) throws Exception {
    SavepointSource.initializeForTest();

    ClusterClient<?> client = MINI_CLUSTER_RESOURCE.getClusterClient();
    JobID jobId = jobGraph.getJobID();

    // Overall time budget for the whole savepoint round-trip.
    Deadline deadline = Deadline.fromNow(Duration.ofMinutes(5));
    String dirPath = getTempDirPath(new AbstractID().toHexString());

    try {
        JobID jobID = client.submitJob(jobGraph).get();
        waitForAllRunningOrSomeTerminal(jobID, MINI_CLUSTER_RESOURCE);

        // Poll until the source has initialized its state or the deadline expires.
        boolean finished = false;
        while (deadline.hasTimeLeft()) {
            if (SavepointSource.isFinished()) {
                finished = true;
                break;
            }
            try {
                Thread.sleep(2L);
            } catch (InterruptedException ignored) {
                Thread.currentThread().interrupt();
            }
        }

        if (!finished) {
            Assert.fail("Failed to initialize state within deadline");
        }

        // Charge the remaining budget against the savepoint trigger.
        CompletableFuture<String> path =
                client.triggerSavepoint(jobID, dirPath, SavepointFormatType.CANONICAL);
        return path.get(deadline.timeLeft().toMillis(), TimeUnit.MILLISECONDS);
    } finally {
        client.cancel(jobId).get();
    }
}
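Both savepoint helpers in this section lean on the same small Deadline surface: fromNow(Duration) fixes an overall budget, hasTimeLeft() drives the polling loop, and timeLeft() converts whatever remains into a timeout for blocking calls. A minimal, self-contained sketch of that pattern, assuming only flink-core on the classpath (the waitFor helper is illustrative, not a Flink API):

import java.time.Duration;
import java.util.concurrent.TimeUnit;
import java.util.function.BooleanSupplier;

import org.apache.flink.api.common.time.Deadline;

public class DeadlinePollingSketch {

    // Illustrative helper: poll a condition until it holds or the deadline runs out.
    static boolean waitFor(BooleanSupplier condition, Deadline deadline)
            throws InterruptedException {
        while (deadline.hasTimeLeft()) {
            if (condition.getAsBoolean()) {
                return true;
            }
            Thread.sleep(2L);
        }
        return false;
    }

    public static void main(String[] args) throws Exception {
        Deadline deadline = Deadline.fromNow(Duration.ofSeconds(1));
        long start = System.nanoTime();

        // Condition that becomes true after roughly 100 ms.
        boolean met = waitFor(
                () -> System.nanoTime() - start > TimeUnit.MILLISECONDS.toNanos(100),
                deadline);

        System.out.println("condition met: " + met);
        System.out.println("budget left (ms): " + deadline.timeLeft().toMillis());
    }
}

Note that timeLeft() shrinks as time passes, so successive blocking calls all share the one budget instead of each getting a fresh timeout.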
Use of org.apache.flink.api.common.time.Deadline in project flink by apache.
From class DataSetSavepointReaderITTestBase, method takeSavepoint.
private String takeSavepoint(JobGraph jobGraph) throws Exception {
    SavepointSource.initializeForTest();

    ClusterClient<?> client = MINI_CLUSTER_RESOURCE.getClusterClient();
    JobID jobId = jobGraph.getJobID();

    Deadline deadline = Deadline.fromNow(Duration.ofMinutes(5));
    String dirPath = getTempDirPath(new AbstractID().toHexString());

    try {
        JobID jobID = client.submitJob(jobGraph).get();
        waitForAllRunningOrSomeTerminal(jobID, MINI_CLUSTER_RESOURCE);

        // Same pattern as the streaming variant above: poll until the source has
        // initialized its state, then trigger the savepoint with the remaining budget.
        boolean finished = false;
        while (deadline.hasTimeLeft()) {
            if (SavepointSource.isFinished()) {
                finished = true;
                break;
            }
            try {
                Thread.sleep(2L);
            } catch (InterruptedException ignored) {
                Thread.currentThread().interrupt();
            }
        }

        if (!finished) {
            Assert.fail("Failed to initialize state within deadline");
        }

        CompletableFuture<String> path =
                client.triggerSavepoint(jobID, dirPath, SavepointFormatType.CANONICAL);
        return path.get(deadline.timeLeft().toMillis(), TimeUnit.MILLISECONDS);
    } finally {
        client.cancel(jobId).get();
    }
}
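The DataSet variant above is byte-for-byte identical to the streaming one, so the trigger-and-await tail of the method is a natural candidate for a shared helper. A hedged sketch of such an extraction (SavepointTestUtil and takeSavepointWithin are hypothetical names, not part of Flink):

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.TimeUnit;

import org.apache.flink.api.common.JobID;
import org.apache.flink.api.common.time.Deadline;
import org.apache.flink.client.program.ClusterClient;
import org.apache.flink.core.execution.SavepointFormatType;

final class SavepointTestUtil {

    private SavepointTestUtil() {}

    // Hypothetical helper: trigger a savepoint and wait for it using whatever
    // time is left on the caller's deadline.
    static String takeSavepointWithin(
            ClusterClient<?> client, JobID jobId, String dirPath, Deadline deadline)
            throws Exception {
        CompletableFuture<String> path =
                client.triggerSavepoint(jobId, dirPath, SavepointFormatType.CANONICAL);
        return path.get(deadline.timeLeft().toMillis(), TimeUnit.MILLISECONDS);
    }
}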
Use of org.apache.flink.api.common.time.Deadline in project flink by apache.
From class AbstractQueryableStateTestBase, method testWrongJobIdAndWrongQueryableStateName.
/**
 * Tests that the correct exception is thrown if the query contains a wrong JobID or a wrong
 * queryable state name.
 */
@Test
@Ignore
public void testWrongJobIdAndWrongQueryableStateName() throws Exception {
    final Deadline deadline = Deadline.now().plus(TEST_TIMEOUT);
    final long numElements = 1024L;

    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setStateBackend(stateBackend);
    env.setParallelism(maxParallelism);
    env.setRestartStrategy(RestartStrategies.fixedDelayRestart(Integer.MAX_VALUE, 1000L));

    DataStream<Tuple2<Integer, Long>> source =
            env.addSource(new TestAscendingValueSource(numElements));

    ValueStateDescriptor<Tuple2<Integer, Long>> valueState =
            new ValueStateDescriptor<>("any", source.getType());

    source.keyBy(new KeySelector<Tuple2<Integer, Long>, Integer>() {
        private static final long serialVersionUID = 7662520075515707428L;

        @Override
        public Integer getKey(Tuple2<Integer, Long> value) {
            return value.f0;
        }
    }).asQueryableState("hakuna", valueState);

    try (AutoCancellableJob closableJobGraph =
            new AutoCancellableJob(deadline, clusterClient, env)) {

        clusterClient.submitJob(closableJobGraph.getJobGraph()).get();

        // Poll the job status until it is RUNNING or the deadline expires.
        CompletableFuture<JobStatus> jobStatusFuture =
                clusterClient.getJobStatus(closableJobGraph.getJobId());
        while (deadline.hasTimeLeft()
                && !jobStatusFuture
                        .get(deadline.timeLeft().toMillis(), TimeUnit.MILLISECONDS)
                        .equals(JobStatus.RUNNING)) {
            Thread.sleep(50);
            jobStatusFuture = clusterClient.getJobStatus(closableJobGraph.getJobId());
        }
        assertEquals(
                JobStatus.RUNNING,
                jobStatusFuture.get(deadline.timeLeft().toMillis(), TimeUnit.MILLISECONDS));

        // Query with the wrong job id.
        final JobID wrongJobId = new JobID();
        CompletableFuture<ValueState<Tuple2<Integer, Long>>> unknownJobFuture =
                client.getKvState(
                        wrongJobId, "hakuna", 0, BasicTypeInfo.INT_TYPE_INFO, valueState);
        try {
            unknownJobFuture.get(deadline.timeLeft().toMillis(), TimeUnit.MILLISECONDS);
            fail(); // by now the request must have failed
        } catch (ExecutionException e) {
            Assert.assertTrue(
                    "GOT: " + e.getCause().getMessage(),
                    e.getCause() instanceof RuntimeException);
            Assert.assertTrue(
                    "GOT: " + e.getCause().getMessage(),
                    e.getCause().getMessage().contains(
                            "FlinkJobNotFoundException: Could not find Flink job ("
                                    + wrongJobId + ")"));
        } catch (Exception f) {
            fail("Unexpected type of exception: " + f.getMessage());
        }

        // Query with the wrong queryable state name.
        CompletableFuture<ValueState<Tuple2<Integer, Long>>> unknownQSName =
                client.getKvState(
                        closableJobGraph.getJobId(),
                        "wrong-hakuna",
                        0,
                        BasicTypeInfo.INT_TYPE_INFO,
                        valueState);
        try {
            unknownQSName.get(deadline.timeLeft().toMillis(), TimeUnit.MILLISECONDS);
            fail(); // by now the request must have failed
        } catch (ExecutionException e) {
            Assert.assertTrue(
                    "GOT: " + e.getCause().getMessage(),
                    e.getCause() instanceof RuntimeException);
            Assert.assertTrue(
                    "GOT: " + e.getCause().getMessage(),
                    e.getCause().getMessage().contains(
                            "UnknownKvStateLocation: No KvStateLocation found for KvState instance with name 'wrong-hakuna'."));
        } catch (Exception f) {
            fail("Unexpected type of exception: " + f.getMessage());
        }
    }
}
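Both negative queries follow the same shape: block on the future with the remaining deadline budget, expect an ExecutionException, and assert on the cause's message. That shape factors out cleanly; a hedged sketch (assertFailsWithin is a hypothetical helper, not part of the test base):

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;

import org.apache.flink.api.common.time.Deadline;
import org.junit.Assert;

final class FutureAssertions {

    private FutureAssertions() {}

    // Hypothetical helper: assert that the future fails within the remaining
    // deadline budget and that the failure message contains the given snippet.
    static void assertFailsWithin(
            CompletableFuture<?> future, Deadline deadline, String expectedMessagePart)
            throws Exception {
        try {
            future.get(deadline.timeLeft().toMillis(), TimeUnit.MILLISECONDS);
            Assert.fail("Expected the request to fail");
        } catch (ExecutionException e) {
            String message = e.getCause().getMessage();
            Assert.assertTrue("GOT: " + message, message.contains(expectedMessagePart));
        }
    }
}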
Use of org.apache.flink.api.common.time.Deadline in project flink by apache.
From class AbstractQueryableStateTestBase, method testQueryableState.
/**
 * Runs a simple topology producing random (key, 1) pairs at the sources (where the number of
 * keys is fixed in the range 0...numKeys). The records are keyed and a reducing queryable
 * state instance is created, which sums up the records.
 *
 * <p>After submitting the job in detached mode, the QueryableStateClient is used to query the
 * counts of each key in rounds until all keys have non-zero counts.
 */
@Test
public void testQueryableState() throws Exception {
    final Deadline deadline = Deadline.now().plus(TEST_TIMEOUT);
    final int numKeys = 256;

    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setStateBackend(stateBackend);
    env.setParallelism(maxParallelism);
    // Very important, because the cluster is shared between tests and we
    // don't explicitly check that all slots are available before submitting.
    env.setRestartStrategy(RestartStrategies.fixedDelayRestart(Integer.MAX_VALUE, 1000L));

    DataStream<Tuple2<Integer, Long>> source = env.addSource(new TestKeyRangeSource(numKeys));

    ReducingStateDescriptor<Tuple2<Integer, Long>> reducingState =
            new ReducingStateDescriptor<>("any-name", new SumReduce(), source.getType());

    final String queryName = "hakuna-matata";

    source.keyBy(new KeySelector<Tuple2<Integer, Long>, Integer>() {
        private static final long serialVersionUID = 7143749578983540352L;

        @Override
        public Integer getKey(Tuple2<Integer, Long> value) {
            return value.f0;
        }
    }).asQueryableState(queryName, reducingState);

    try (AutoCancellableJob autoCancellableJob =
            new AutoCancellableJob(deadline, clusterClient, env)) {

        final JobID jobId = autoCancellableJob.getJobId();
        final JobGraph jobGraph = autoCancellableJob.getJobGraph();
        clusterClient.submitJob(jobGraph).get();

        final AtomicLongArray counts = new AtomicLongArray(numKeys);
        final List<CompletableFuture<ReducingState<Tuple2<Integer, Long>>>> futures =
                new ArrayList<>(numKeys);

        // Query in rounds: only re-query keys whose count is still zero.
        boolean allNonZero = false;
        while (!allNonZero && deadline.hasTimeLeft()) {
            allNonZero = true;
            futures.clear();

            for (int i = 0; i < numKeys; i++) {
                final int key = i;

                if (counts.get(key) > 0L) {
                    // Skip this one
                    continue;
                } else {
                    allNonZero = false;
                }

                CompletableFuture<ReducingState<Tuple2<Integer, Long>>> result =
                        getKvState(
                                deadline,
                                client,
                                jobId,
                                queryName,
                                key,
                                BasicTypeInfo.INT_TYPE_INFO,
                                reducingState,
                                false,
                                executor);

                result.thenAccept(response -> {
                    try {
                        Tuple2<Integer, Long> res = response.get();
                        counts.set(key, res.f1);
                        assertEquals("Key mismatch", key, res.f0.intValue());
                    } catch (Exception e) {
                        Assert.fail(e.getMessage());
                    }
                });

                futures.add(result);
            }

            // Wait for all the futures of this round to complete.
            CompletableFuture.allOf(futures.toArray(new CompletableFuture<?>[futures.size()]))
                    .get(deadline.timeLeft().toMillis(), TimeUnit.MILLISECONDS);
        }

        assertTrue("Not all keys are non-zero", allNonZero);

        // All should be non-zero
        for (int i = 0; i < numKeys; i++) {
            long count = counts.get(i);
            assertTrue("Count at position " + i + " is " + count, count > 0);
        }
    }
}
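The round structure above (fire one future per still-zero key, then block on CompletableFuture.allOf with the remaining budget) keeps every round inside one overall deadline, since timeLeft() shrinks from round to round. A minimal, Flink-free sketch of the same idea (only the Deadline class is Flink's):

import java.time.Duration;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.TimeUnit;

import org.apache.flink.api.common.time.Deadline;

public class AllOfWithDeadlineSketch {

    public static void main(String[] args) throws Exception {
        Deadline deadline = Deadline.fromNow(Duration.ofSeconds(5));

        // One async task per "key"; here they just compute a value.
        List<CompletableFuture<Integer>> round = new ArrayList<>();
        for (int key = 0; key < 4; key++) {
            final int k = key;
            round.add(CompletableFuture.supplyAsync(() -> k * k));
        }

        // Bound the whole round by whatever budget is left on the deadline.
        CompletableFuture.allOf(round.toArray(new CompletableFuture<?>[0]))
                .get(deadline.timeLeft().toMillis(), TimeUnit.MILLISECONDS);

        for (CompletableFuture<Integer> f : round) {
            System.out.println(f.join());
        }
    }
}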
Use of org.apache.flink.api.common.time.Deadline in project flink by apache.
From class AbstractQueryableStateTestBase, method testValueStateShortcut.
/**
 * Tests a simple value state queryable state instance. Each source emits (subtaskIndex,
 * 0)..(subtaskIndex, numElements) tuples, which are then queried. The test succeeds after each
 * subtask index is queried with value numElements (the latest element updated the state).
 *
 * <p>This is the same as the simple value state test, but uses the API shortcut.
 */
@Test
public void testValueStateShortcut() throws Exception {
    final Deadline deadline = Deadline.now().plus(TEST_TIMEOUT);
    final long numElements = 1024L;

    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setStateBackend(stateBackend);
    env.setParallelism(maxParallelism);
    // Very important, because the cluster is shared between tests and we
    // don't explicitly check that all slots are available before submitting.
    env.setRestartStrategy(RestartStrategies.fixedDelayRestart(Integer.MAX_VALUE, 1000L));

    DataStream<Tuple2<Integer, Long>> source =
            env.addSource(new TestAscendingValueSource(numElements));

    // Value state shortcut
    final QueryableStateStream<Integer, Tuple2<Integer, Long>> queryableState =
            source.keyBy(new KeySelector<Tuple2<Integer, Long>, Integer>() {
                private static final long serialVersionUID = 9168901838808830068L;

                @Override
                public Integer getKey(Tuple2<Integer, Long> value) {
                    return value.f0;
                }
            }).asQueryableState("matata");

    @SuppressWarnings("unchecked")
    final ValueStateDescriptor<Tuple2<Integer, Long>> stateDesc =
            (ValueStateDescriptor<Tuple2<Integer, Long>>) queryableState.getStateDescriptor();

    try (AutoCancellableJob autoCancellableJob =
            new AutoCancellableJob(deadline, clusterClient, env)) {

        final JobID jobId = autoCancellableJob.getJobId();
        final JobGraph jobGraph = autoCancellableJob.getJobGraph();
        clusterClient.submitJob(jobGraph).get();

        executeValueQuery(deadline, client, jobId, "matata", stateDesc, numElements);
    }
}
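The single-argument asQueryableState shortcut builds the ValueStateDescriptor internally from the stream's type, which is why the test has to cast getStateDescriptor() back to ValueStateDescriptor. Spelled out with an explicit descriptor, the non-shortcut equivalent would look roughly like this sketch (the internal state name "matata-state" is illustrative; the shortcut generates its own):

// Sketch; assumes the same `source` stream as in the test above.
ValueStateDescriptor<Tuple2<Integer, Long>> explicitDesc =
        new ValueStateDescriptor<>("matata-state", source.getType());

QueryableStateStream<Integer, Tuple2<Integer, Long>> explicitQueryableState =
        source.keyBy(new KeySelector<Tuple2<Integer, Long>, Integer>() {
            @Override
            public Integer getKey(Tuple2<Integer, Long> value) {
                return value.f0;
            }
        }).asQueryableState("matata", explicitDesc);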