Use of org.apache.flink.api.common.time.Deadline in project flink by apache.
The class BatchShuffleReadBufferPool, method requestBuffers.
/**
* Requests a collection of buffers (determined by {@link #numBuffersPerRequest}) from this
* buffer pool.
*/
public List<MemorySegment> requestBuffers() throws Exception {
    List<MemorySegment> allocated = new ArrayList<>(numBuffersPerRequest);
    synchronized (buffers) {
        checkState(!destroyed, "Buffer pool is already destroyed.");
        if (!initialized) {
            initialize();
        }
        Deadline deadline = Deadline.fromNow(WAITING_TIME);
        while (buffers.size() < numBuffersPerRequest) {
            checkState(!destroyed, "Buffer pool is already destroyed.");
            buffers.wait(WAITING_TIME.toMillis());
            if (!deadline.hasTimeLeft()) {
                // return the empty list
                return allocated;
            }
        }
        while (allocated.size() < numBuffersPerRequest) {
            allocated.add(buffers.poll());
        }
        lastBufferOperationTimestamp = System.nanoTime();
    }
    return allocated;
}
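The core pattern here is waiting on a monitor in a loop bounded by a Deadline, so that wait() neither blocks forever nor relies on a single timeout in the face of spurious wakeups. A minimal, self-contained sketch of the same idea, in a slight variation that waits only for the remaining budget (the class, queue, and method names are illustrative, not Flink code):

import java.time.Duration;
import java.util.ArrayDeque;
import java.util.Queue;
import org.apache.flink.api.common.time.Deadline;

public class DeadlineBoundedWait {

    private final Queue<Integer> items = new ArrayDeque<>();

    /** Waits up to maxWait for an item; returns null if the deadline expires first. */
    public Integer pollWithDeadline(Duration maxWait) throws InterruptedException {
        Deadline deadline = Deadline.fromNow(maxWait);
        synchronized (items) {
            while (items.isEmpty()) {
                if (!deadline.hasTimeLeft()) {
                    return null; // mirrors requestBuffers() returning the empty list
                }
                // Wait at most the remaining budget; the enclosing loop re-checks the
                // condition, which guards against spurious wakeups.
                items.wait(Math.max(1, deadline.timeLeft().toMillis()));
            }
            return items.poll();
        }
    }

    public void offer(int value) {
        synchronized (items) {
            items.add(value);
            items.notifyAll();
        }
    }
}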
Use of org.apache.flink.api.common.time.Deadline in project flink by apache.
The class ElasticsearchDynamicSinkBaseITCase, method testWritingDocumentsNoPrimaryKey.
@Test
public void testWritingDocumentsNoPrimaryKey() throws Exception {
    TableEnvironment tableEnvironment =
            TableEnvironment.create(EnvironmentSettings.inStreamingMode());
    String index = "no-primary-key";
    tableEnvironment.executeSql(
            "CREATE TABLE esTable ("
                    + "a BIGINT NOT NULL,\n"
                    + "b TIME,\n"
                    + "c STRING NOT NULL,\n"
                    + "d FLOAT,\n"
                    + "e TINYINT NOT NULL,\n"
                    + "f DATE,\n"
                    + "g TIMESTAMP NOT NULL\n"
                    + ")\n"
                    + "WITH (\n"
                    + getConnectorSql(index)
                    + ")");
    tableEnvironment
            .fromValues(
                    row(1L, LocalTime.ofNanoOfDay(12345L * 1_000_000L), "ABCDE", 12.12f,
                            (byte) 2, LocalDate.ofEpochDay(12345),
                            LocalDateTime.parse("2012-12-12T12:12:12")),
                    row(2L, LocalTime.ofNanoOfDay(12345L * 1_000_000L), "FGHIJK", 13.13f,
                            (byte) 4, LocalDate.ofEpochDay(12345),
                            LocalDateTime.parse("2013-12-12T13:13:13")))
            .executeInsert("esTable")
            .await();
    RestHighLevelClient client = getClient();
    // The search API does not return documents that are not yet indexed, so we might need
    // to query the index a few times.
    Deadline deadline = Deadline.fromNow(Duration.ofSeconds(30));
    SearchHits hits;
    do {
        hits = makeSearchRequest(client, index);
        if (getTotalSearchHits(hits) < 2) {
            Thread.sleep(200);
        }
    } while (getTotalSearchHits(hits) < 2 && deadline.hasTimeLeft());
    if (getTotalSearchHits(hits) < 2) {
        throw new AssertionError("Could not retrieve results from Elasticsearch.");
    }
    HashSet<Map<String, Object>> resultSet = new HashSet<>();
    resultSet.add(hits.getAt(0).getSourceAsMap());
    resultSet.add(hits.getAt(1).getSourceAsMap());
    Map<Object, Object> expectedMap1 = new HashMap<>();
    expectedMap1.put("a", 1);
    expectedMap1.put("b", "00:00:12");
    expectedMap1.put("c", "ABCDE");
    expectedMap1.put("d", 12.12d);
    expectedMap1.put("e", 2);
    expectedMap1.put("f", "2003-10-20");
    expectedMap1.put("g", "2012-12-12 12:12:12");
    Map<Object, Object> expectedMap2 = new HashMap<>();
    expectedMap2.put("a", 2);
    expectedMap2.put("b", "00:00:12");
    expectedMap2.put("c", "FGHIJK");
    expectedMap2.put("d", 13.13d);
    expectedMap2.put("e", 4);
    expectedMap2.put("f", "2003-10-20");
    expectedMap2.put("g", "2013-12-12 13:13:13");
    HashSet<Map<Object, Object>> expectedSet = new HashSet<>();
    expectedSet.add(expectedMap1);
    expectedSet.add(expectedMap2);
    Assertions.assertEquals(resultSet, expectedSet);
}
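The query-then-sleep retry loop above is a recurring shape in these tests: re-run a query until the result is acceptable or the deadline expires, and let the caller re-check the result afterwards. A hedged generalization (the helper class and its signature are mine, not part of Flink):

import java.time.Duration;
import java.util.concurrent.Callable;
import java.util.function.Predicate;
import org.apache.flink.api.common.time.Deadline;

public final class PollUntil {

    private PollUntil() {}

    /** Re-runs the query until the result is acceptable or the deadline expires. */
    public static <T> T pollUntil(
            Callable<T> query, Predicate<T> accept, Duration timeout, Duration pause)
            throws Exception {
        Deadline deadline = Deadline.fromNow(timeout);
        T result = query.call();
        while (!accept.test(result) && deadline.hasTimeLeft()) {
            Thread.sleep(pause.toMillis());
            result = query.call();
        }
        // May still be unacceptable if the deadline expired; the caller re-checks,
        // just as the test does before building resultSet.
        return result;
    }
}

With such a helper, the loop in the test would collapse to roughly: hits = PollUntil.pollUntil(() -> makeSearchRequest(client, index), h -> getTotalSearchHits(h) >= 2, Duration.ofSeconds(30), Duration.ofMillis(200));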
Use of org.apache.flink.api.common.time.Deadline in project flink by apache.
The class JobMasterStopWithSavepointITCase, method waitForJob.
private void waitForJob() throws Exception {
    Deadline deadline = Deadline.fromNow(Duration.ofMinutes(5));
    JobID jobID = jobGraph.getJobID();
    CommonTestUtils.waitForAllTaskRunning(
            () ->
                    MINI_CLUSTER_RESOURCE
                            .getMiniCluster()
                            .getExecutionGraph(jobID)
                            .get(60, TimeUnit.SECONDS),
            deadline,
            false);
}
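Here the Deadline is handed to a utility that consumes the budget on the test's behalf. Conceptually, such a utility polls a condition and gives up when the deadline runs out; a reduced sketch of that idea, not Flink's actual implementation of CommonTestUtils:

import java.time.Duration;
import java.util.concurrent.TimeoutException;
import java.util.function.BooleanSupplier;
import org.apache.flink.api.common.time.Deadline;

public final class WaitUtil {

    private WaitUtil() {}

    /** Polls the condition until it holds or the given deadline expires. */
    public static void waitUntil(BooleanSupplier condition, Deadline deadline, Duration pause)
            throws InterruptedException, TimeoutException {
        while (!condition.getAsBoolean()) {
            if (!deadline.hasTimeLeft()) {
                throw new TimeoutException("Condition was not met before the deadline expired.");
            }
            Thread.sleep(pause.toMillis());
        }
    }
}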
Use of org.apache.flink.api.common.time.Deadline in project flink by apache.
The class AccumulatorLiveITCase, method submitJobAndVerifyResults.
private static void submitJobAndVerifyResults(JobGraph jobGraph) throws Exception {
    Deadline deadline = Deadline.now().plus(Duration.ofSeconds(30));
    final ClusterClient<?> client = MINI_CLUSTER_RESOURCE.getClusterClient();
    final CheckedThread submissionThread =
            new CheckedThread() {
                @Override
                public void go() throws Exception {
                    submitJobAndWaitForResult(client, jobGraph, getClass().getClassLoader());
                }
            };
    submissionThread.start();
    try {
        NotifyingMapper.notifyLatch.await();
        // verify using the ClusterClient
        verifyResults(jobGraph, deadline, client);
        // verify using the MiniClusterJobClient
        verifyResults(jobGraph, deadline, null);
        NotifyingMapper.shutdownLatch.trigger();
    } finally {
        NotifyingMapper.shutdownLatch.trigger();
        // wait for the job to have terminated
        submissionThread.sync();
    }
}
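Note the construction used here: Deadline.now().plus(Duration.ofSeconds(30)) is equivalent to Deadline.fromNow(Duration.ofSeconds(30)). Because a Deadline is an absolute point in time, passing the same instance to both verifyResults calls makes the two verification passes share one 30-second budget rather than getting 30 seconds each. A small illustration:

import java.time.Duration;
import org.apache.flink.api.common.time.Deadline;

public class DeadlineConstruction {
    public static void main(String[] args) {
        // Two equivalent ways to obtain "30 seconds from now".
        Deadline viaFromNow = Deadline.fromNow(Duration.ofSeconds(30));
        Deadline viaPlus = Deadline.now().plus(Duration.ofSeconds(30));

        // A Deadline is absolute, so threading one instance through consecutive
        // steps (as with verifyResults above) makes them draw down one budget.
        System.out.println(viaFromNow.hasTimeLeft() && viaPlus.hasTimeLeft());
    }
}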
Use of org.apache.flink.api.common.time.Deadline in project flink by apache.
The class ZooKeeperLeaderElectionITCase, method testJobExecutionOnClusterWithLeaderChange.
/**
 * Tests that a job can be executed after a new leader has been elected. For all leaders except
 * the last, the job is blocking. The JobManager is terminated while executing the blocking
 * job. Once only one JobManager is left, it is checked that a non-blocking job can be
 * successfully executed.
 */
@Test
@Ignore("FLINK-25235")
public void testJobExecutionOnClusterWithLeaderChange() throws Exception {
    final int numDispatchers = 3;
    final int numTMs = 2;
    final int numSlotsPerTM = 2;
    final Configuration configuration =
            ZooKeeperTestUtils.createZooKeeperHAConfig(
                    zkServer.getConnectString(), tempFolder.newFolder().getAbsolutePath());
    // speed up refused registration retries
    configuration.setLong(ClusterOptions.REFUSED_REGISTRATION_DELAY, 50L);
    final TestingMiniClusterConfiguration miniClusterConfiguration =
            TestingMiniClusterConfiguration.newBuilder()
                    .setConfiguration(configuration)
                    .setNumberDispatcherResourceManagerComponents(numDispatchers)
                    .setNumTaskManagers(numTMs)
                    .setNumSlotsPerTaskManager(numSlotsPerTM)
                    .build();
    final Deadline timeout = Deadline.fromNow(TEST_TIMEOUT);
    try (TestingMiniCluster miniCluster =
                    TestingMiniCluster.newBuilder(miniClusterConfiguration).build();
            final CuratorFrameworkWithUnhandledErrorListener curatorFramework =
                    ZooKeeperUtils.startCuratorFramework(
                            configuration,
                            exception -> fail("Fatal error in curator framework."))) {
        // We need to watch for resource manager leader changes to avoid race conditions.
        final DefaultLeaderRetrievalService resourceManagerLeaderRetrieval =
                ZooKeeperUtils.createLeaderRetrievalService(
                        curatorFramework.asCuratorFramework(),
                        ZooKeeperUtils.getLeaderPathForResourceManager(),
                        configuration);
        @SuppressWarnings("unchecked")
        final CompletableFuture<String>[] resourceManagerLeaderFutures =
                (CompletableFuture<String>[]) new CompletableFuture[numDispatchers];
        for (int i = 0; i < numDispatchers; i++) {
            resourceManagerLeaderFutures[i] = new CompletableFuture<>();
        }
        resourceManagerLeaderRetrieval.start(
                new TestLeaderRetrievalListener(resourceManagerLeaderFutures));
        miniCluster.start();
        final int parallelism = numTMs * numSlotsPerTM;
        JobGraph jobGraph = createJobGraph(parallelism);
        miniCluster.submitJob(jobGraph).get();
        String previousLeaderAddress = null;
        for (int i = 0; i < numDispatchers - 1; i++) {
            final DispatcherGateway leaderDispatcherGateway =
                    getNextLeadingDispatcherGateway(miniCluster, previousLeaderAddress, timeout);
            // Make sure resource manager has also changed leadership.
            resourceManagerLeaderFutures[i].get();
            previousLeaderAddress = leaderDispatcherGateway.getAddress();
            awaitRunningStatus(leaderDispatcherGateway, jobGraph, timeout);
            leaderDispatcherGateway.shutDownCluster();
        }
        final DispatcherGateway leaderDispatcherGateway =
                getNextLeadingDispatcherGateway(miniCluster, previousLeaderAddress, timeout);
        // Make sure resource manager has also changed leadership.
        resourceManagerLeaderFutures[numDispatchers - 1].get();
        awaitRunningStatus(leaderDispatcherGateway, jobGraph, timeout);
        CompletableFuture<JobResult> jobResultFuture =
                leaderDispatcherGateway.requestJobResult(jobGraph.getJobID(), RPC_TIMEOUT);
        BlockingOperator.unblock();
        assertThat(jobResultFuture.get().isSuccess(), is(true));
        resourceManagerLeaderRetrieval.stop();
    }
}
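Worth noting here is that the single timeout Deadline is threaded through every leader change, so the entire failover sequence is bounded by TEST_TIMEOUT rather than each step getting its own allowance. A common companion pattern is converting the remaining budget into a blocking-call timeout via timeLeft(); a rough, self-contained sketch (the async calls are placeholders, not Flink APIs):

import java.time.Duration;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.TimeUnit;
import org.apache.flink.api.common.time.Deadline;

public class SharedDeadlineExample {
    public static void main(String[] args) throws Exception {
        Deadline timeout = Deadline.fromNow(Duration.ofSeconds(10));

        // Step 1 consumes part of the overall budget.
        CompletableFuture<String> firstLeader = CompletableFuture.supplyAsync(() -> "leader-0");
        firstLeader.get(timeout.timeLeft().toMillis(), TimeUnit.MILLISECONDS);

        // Step 2 only gets whatever time is left, keeping the whole sequence bounded.
        CompletableFuture<String> nextLeader = CompletableFuture.supplyAsync(() -> "leader-1");
        nextLeader.get(timeout.timeLeft().toMillis(), TimeUnit.MILLISECONDS);
    }
}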