Example use of com.hazelcast.jet.core.DAG in project hazelcast (by hazelcast): class StreamKafkaP_StandaloneKafkaTest, method when_cancelledAfterBrokerDown_then_cancelsPromptly.
@Test
public void when_cancelledAfterBrokerDown_then_cancelsPromptly() throws IOException {
    // Stand up a dedicated broker with a single-partition topic for this test.
    KafkaTestSupport kafka = new KafkaTestSupport();
    kafka.createKafkaCluster();
    kafka.createTopic("topic", 1);

    // Single Kafka-source vertex, no event-time handling needed here.
    DAG dag = new DAG();
    dag.newVertex("src",
            KafkaProcessors.streamKafkaP(
                    getProperties(kafka.getBrokerConnectionString()),
                    FunctionEx.identity(),
                    EventTimePolicy.noEventTime(),
                    "topic"))
       .localParallelism(1);

    Job job = createHazelcastInstance().getJet().newJob(dag);
    assertJobStatusEventually(job, RUNNING);
    sleepSeconds(1);

    // Kill the broker and give the consumer a moment to notice the outage.
    kafka.shutdownKafkaCluster();
    sleepSeconds(3);

    long startNanos = System.nanoTime();
    job.cancel();
    try {
        job.join();
    } catch (CancellationException ignored) {
        // expected: join() reports the cancellation we just requested
    }

    // There was an issue claimed that when the broker was down, job did not cancel.
    // Let's assert the cancellation didn't take too long.
    long durationSeconds = NANOSECONDS.toSeconds(System.nanoTime() - startNanos);
    assertTrue("durationSeconds=" + durationSeconds, durationSeconds < 10);
    logger.info("Job cancelled in " + durationSeconds + " seconds");
}
Example use of com.hazelcast.jet.core.DAG in project hazelcast (by hazelcast): class ShowStatementTest, method createJobInJava.
/**
 * Submits a named streaming job (a single mock streaming vertex) through the Jet engine.
 *
 * @param jobName name to assign via {@link JobConfig#setName}
 */
private void createJobInJava(String jobName) {
    DAG dag = new DAG();
    dag.newVertex("v", () -> new TestProcessors.MockP().streaming());
    JobConfig config = new JobConfig().setName(jobName);
    instance().getJet().newJob(dag, config);
}
Example use of com.hazelcast.jet.core.DAG in project hazelcast (by hazelcast): class TestAbstractSqlConnector, method fullScanReader.
@Nonnull
@Override
public Vertex fullScanReader(
        @Nonnull DAG dag,
        @Nonnull Table table_,
        @Nullable Expression<Boolean> predicate,
        @Nonnull List<Expression<?>> projection,
        @Nullable FunctionEx<ExpressionEvalContext, EventTimePolicy<JetSqlRow>> eventTimePolicyProvider
) {
    TestTable testTable = (TestTable) table_;
    List<Object[]> data = testTable.rows;
    boolean isStreaming = testTable.streaming;

    // Built lazily on each member: resolves the eval context and event-time policy
    // at processor-creation time, then wraps the static rows in a generator.
    FunctionEx<Context, TestDataGenerator> createContextFn = procCtx -> {
        ExpressionEvalContext eec = ExpressionEvalContext.from(procCtx);
        EventTimePolicy<JetSqlRow> timePolicy;
        if (eventTimePolicyProvider == null) {
            timePolicy = EventTimePolicy.noEventTime();
        } else {
            timePolicy = eventTimePolicyProvider.apply(eec);
        }
        return new TestDataGenerator(data, predicate, projection, eec, timePolicy, isStreaming);
    };

    ProcessorMetaSupplier pms = createProcessorSupplier(createContextFn);
    return dag.newUniqueVertex(testTable.toString(), pms);
}
Example use of com.hazelcast.jet.core.DAG in project hazelcast (by hazelcast): class TestAllTypesSqlConnector, method fullScanReader.
/**
 * Creates a batch source vertex emitting the connector's fixed {@code VALUES} row,
 * after applying the given predicate and projection.
 * <p>
 * Event-time (ordering) functions are rejected: this connector produces a single
 * batch row, so watermarking is meaningless for it.
 *
 * @throws QueryException if an event-time policy provider is supplied
 */
@Nonnull
@Override
public Vertex fullScanReader(
        @Nonnull DAG dag,
        @Nonnull Table table,
        @Nullable Expression<Boolean> predicate,
        @Nonnull List<Expression<?>> projection,
        @Nullable FunctionEx<ExpressionEvalContext, EventTimePolicy<JetSqlRow>> eventTimePolicyProvider
) {
    if (eventTimePolicyProvider != null) {
        // Fixed grammar of the user-facing message ("function are" -> "functions are").
        throw QueryException.error("Ordering functions are not supported for " + TYPE_NAME + " mappings");
    }
    // One-shot buffer fill: evaluate the single static row, emit it if the
    // predicate keeps it, then close the buffer to end the batch.
    BatchSource<JetSqlRow> source = SourceBuilder
            .batch("batch", ExpressionEvalContext::from)
            .<JetSqlRow>fillBufferFn((ctx, buf) -> {
                JetSqlRow row = ExpressionUtil.evaluate(predicate, projection, VALUES, ctx);
                if (row != null) {
                    buf.add(row);
                }
                buf.close();
            })
            .build();
    ProcessorMetaSupplier pms = ((BatchSourceTransform<JetSqlRow>) source).metaSupplier;
    return dag.newUniqueVertex(table.toString(), pms);
}
Example use of com.hazelcast.jet.core.DAG in project hazelcast (by hazelcast): class WriteFilePTest, method test_rollByDate.
@Test
public void test_rollByDate() {
    int itemCount = 10;

    // Source releases one item per semaphore permit; sink rolls files on the
    // "SSS" pattern driven by the test-controlled clock.
    DAG dag = new DAG();
    Vertex source = dag.newVertex("src", () -> new SlowSourceP(semaphore, itemCount))
                       .localParallelism(1);
    @SuppressWarnings("Convert2MethodRef")
    Vertex sink = dag.newVertex("sink", WriteFileP.metaSupplier(
            directory.toString(), Objects::toString, "utf-8", "SSS", DISABLE_ROLLING, true,
            (LongSupplier & Serializable) () -> clock.get()));
    dag.edge(between(source, sink));

    Job job = instance().getJet().newJob(dag);

    for (int item = 0; item < itemCount; item++) {
        // When: allow the source to emit exactly one item
        semaphore.release();
        String expectedContent = item + System.lineSeparator();

        // Then: a file named after the current clock value appears and holds that item
        Path file = directory.resolve(String.format("%03d-0", item));
        assertTrueEventually(() -> assertTrue("file not found: " + file, Files.exists(file)), 5);
        assertTrueEventually(() -> assertEquals(expectedContent,
                new String(Files.readAllBytes(file), StandardCharsets.UTF_8)), 5);

        // Advance the "date" so the next item rolls into a new file.
        clock.incrementAndGet();
    }
    job.join();
}
Aggregations