Use of org.apache.kafka.trogdor.rest.WorkerRunning in project kafka by apache.
The class CoordinatorTest, method testCreateTask.
@Test
public void testCreateTask() throws Exception {
    MockTime time = new MockTime(0, 0, 0);
    Scheduler scheduler = new MockScheduler(time);
    try (MiniTrogdorCluster cluster = new MiniTrogdorCluster.Builder()
            .addCoordinator("node01")
            .addAgent("node02")
            .scheduler(scheduler)
            .build()) {
        new ExpectedTasks().waitFor(cluster.coordinatorClient());
        NoOpTaskSpec fooSpec = new NoOpTaskSpec(1, 2);
        cluster.coordinatorClient().createTask(new CreateTaskRequest("foo", fooSpec));
        new ExpectedTasks()
            .addTask(new ExpectedTaskBuilder("foo").taskState(new TaskPending(fooSpec)).build())
            .waitFor(cluster.coordinatorClient());
        // Re-creating a task with the same arguments is not an error.
        cluster.coordinatorClient().createTask(new CreateTaskRequest("foo", fooSpec));
        // Re-creating a task with different arguments gives a RequestConflictException.
        NoOpTaskSpec barSpec = new NoOpTaskSpec(1000, 2000);
        assertThrows(RequestConflictException.class,
            () -> cluster.coordinatorClient().createTask(new CreateTaskRequest("foo", barSpec)),
            "Recreating task with different task spec is not allowed");
        time.sleep(2);
        new ExpectedTasks()
            .addTask(new ExpectedTaskBuilder("foo")
                .taskState(new TaskRunning(fooSpec, 2, new TextNode("active")))
                .workerState(new WorkerRunning("foo", fooSpec, 2, new TextNode("active")))
                .build())
            .waitFor(cluster.coordinatorClient())
            .waitFor(cluster.agentClient("node02"));
        time.sleep(3);
        new ExpectedTasks()
            .addTask(new ExpectedTaskBuilder("foo")
                .taskState(new TaskDone(fooSpec, 2, 5, "", false, new TextNode("done")))
                .build())
            .waitFor(cluster.coordinatorClient());
    }
}
Use of org.apache.kafka.trogdor.rest.WorkerRunning in project kafka by apache.
The class AgentTest, method testWorkerCompletions.
@Test
public void testWorkerCompletions() throws Exception {
    MockTime time = new MockTime(0, 0, 0);
    MockScheduler scheduler = new MockScheduler(time);
    Agent agent = createAgent(scheduler);
    AgentClient client = new AgentClient.Builder().maxTries(10).target("localhost", agent.port()).build();
    new ExpectedTasks().waitFor(client);
    // foo completes after 1 ms with no error.
    SampleTaskSpec fooSpec = new SampleTaskSpec(0, 900000, Collections.singletonMap("node01", 1L), "");
    client.createWorker(new CreateWorkerRequest(0, "foo", fooSpec));
    new ExpectedTasks()
        .addTask(new ExpectedTaskBuilder("foo")
            .workerState(new WorkerRunning("foo", fooSpec, 0, new TextNode("active"))).build())
        .waitFor(client);
    // bar completes after 2 ms with the error string "baz".
    SampleTaskSpec barSpec = new SampleTaskSpec(0, 900000, Collections.singletonMap("node01", 2L), "baz");
    client.createWorker(new CreateWorkerRequest(1, "bar", barSpec));
    time.sleep(1);
    new ExpectedTasks()
        .addTask(new ExpectedTaskBuilder("foo")
            .workerState(new WorkerDone("foo", fooSpec, 0, 1, new TextNode("halted"), "")).build())
        .addTask(new ExpectedTaskBuilder("bar")
            .workerState(new WorkerRunning("bar", barSpec, 0, new TextNode("active"))).build())
        .waitFor(client);
    time.sleep(1);
    new ExpectedTasks()
        .addTask(new ExpectedTaskBuilder("foo")
            .workerState(new WorkerDone("foo", fooSpec, 0, 1, new TextNode("halted"), "")).build())
        .addTask(new ExpectedTaskBuilder("bar")
            .workerState(new WorkerDone("bar", barSpec, 0, 2, new TextNode("halted"), "baz")).build())
        .waitFor(client);
}
Use of org.apache.kafka.trogdor.rest.WorkerRunning in project kafka by apache.
The class AgentTest, method testDestroyWorkers.
@Test
public void testDestroyWorkers() throws Exception {
    MockTime time = new MockTime(0, 0, 0);
    MockScheduler scheduler = new MockScheduler(time);
    Agent agent = createAgent(scheduler);
    AgentClient client = new AgentClient.Builder().maxTries(10).target("localhost", agent.port()).build();
    new ExpectedTasks().waitFor(client);
    final NoOpTaskSpec fooSpec = new NoOpTaskSpec(0, 5);
    client.createWorker(new CreateWorkerRequest(0, "foo", fooSpec));
    new ExpectedTasks()
        .addTask(new ExpectedTaskBuilder("foo")
            .workerState(new WorkerRunning("foo", fooSpec, 0, new TextNode("active"))).build())
        .waitFor(client);
    time.sleep(1);
    // Destroying the same worker twice, or a worker that does not exist, is not an error.
    client.destroyWorker(new DestroyWorkerRequest(0));
    client.destroyWorker(new DestroyWorkerRequest(0));
    client.destroyWorker(new DestroyWorkerRequest(1));
    new ExpectedTasks().waitFor(client);
    time.sleep(1);
    // The task ID "foo" can be reused once the previous worker has been destroyed.
    final NoOpTaskSpec fooSpec2 = new NoOpTaskSpec(2, 1);
    client.createWorker(new CreateWorkerRequest(1, "foo", fooSpec2));
    new ExpectedTasks()
        .addTask(new ExpectedTaskBuilder("foo")
            .workerState(new WorkerRunning("foo", fooSpec2, 2, new TextNode("active"))).build())
        .waitFor(client);
    time.sleep(2);
    new ExpectedTasks()
        .addTask(new ExpectedTaskBuilder("foo")
            .workerState(new WorkerDone("foo", fooSpec2, 2, 4, new TextNode("done"), "")).build())
        .waitFor(client);
    time.sleep(1);
    client.destroyWorker(new DestroyWorkerRequest(1));
    new ExpectedTasks().waitFor(client);
    agent.beginShutdown();
    agent.waitForShutdown();
}
Use of org.apache.kafka.trogdor.rest.WorkerRunning in project kafka by apache.
The class JsonSerializationTest, method testDeserializationDoesNotProduceNulls.
@Test
public void testDeserializationDoesNotProduceNulls() throws Exception {
    // Each object is built with null / zero arguments; deserializing it should never yield null fields.
    verify(new FilesUnreadableFaultSpec(0, 0, null, null, null, 0));
    verify(new Kibosh.KiboshControlFile(null));
    verify(new NetworkPartitionFaultSpec(0, 0, null));
    verify(new ProcessStopFaultSpec(0, 0, null, null));
    verify(new AgentStatusResponse(0, null));
    verify(new TasksResponse(null));
    verify(new WorkerDone(null, null, 0, 0, null, null));
    verify(new WorkerRunning(null, null, 0, null));
    verify(new WorkerStopping(null, null, 0, null));
    verify(new ProduceBenchSpec(0, 0, null, null, 0, 0, null, null,
        Optional.empty(), null, null, null, null, null, false, false));
    verify(new RoundTripWorkloadSpec(0, 0, null, null, null, null, null, null, 0, null, null, 0));
    verify(new TopicsSpec());
    verify(new PartitionsSpec(0, (short) 0, null, null));
    Map<Integer, List<Integer>> partitionAssignments = new HashMap<>();
    partitionAssignments.put(0, Arrays.asList(1, 2, 3));
    partitionAssignments.put(1, Arrays.asList(1, 2, 3));
    verify(new PartitionsSpec(0, (short) 0, partitionAssignments, null));
    verify(new PartitionsSpec(0, (short) 0, null, null));
}
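The verify helper called throughout this test is not included in the excerpt. As a rough illustration only, a check along the following lines would match the test's intent; it is a sketch assuming a plain Jackson ObjectMapper and a reflection walk over declared instance fields, not the actual Kafka helper, which may differ in detail.

import com.fasterxml.jackson.databind.ObjectMapper;
import java.lang.reflect.Field;
import java.lang.reflect.Modifier;
import static org.junit.jupiter.api.Assertions.assertNotNull;

// Hypothetical stand-in for the verify helper: serialize the value to JSON,
// read it back, and assert that no declared instance field of the result is null.
private static void verify(Object val) throws Exception {
    ObjectMapper mapper = new ObjectMapper();
    Object roundTripped = mapper.readValue(mapper.writeValueAsString(val), val.getClass());
    for (Field field : roundTripped.getClass().getDeclaredFields()) {
        if (Modifier.isStatic(field.getModifiers())) {
            continue; // ignore constants and other static members
        }
        field.setAccessible(true);
        assertNotNull(field.get(roundTripped),
            "field " + field.getName() + " deserialized to null");
    }
}

The real test presumably deserializes with Trogdor's configured ObjectMapper rather than a bare one, so treat this purely as an outline of what "does not produce nulls" is checking.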
Use of org.apache.kafka.trogdor.rest.WorkerRunning in project kafka by apache.
The class CoordinatorTest, method testAgentFailureAndTaskExpiry.
/**
* If an agent fails in the middle of a task and comes back up when the task is considered expired,
* we want the task to be marked as DONE and not re-sent should a second failure happen.
*/
@Test
public void testAgentFailureAndTaskExpiry() throws Exception {
    MockTime time = new MockTime(0, 0, 0);
    Scheduler scheduler = new MockScheduler(time);
    try (MiniTrogdorCluster cluster = new MiniTrogdorCluster.Builder()
            .addCoordinator("node01")
            .addAgent("node02")
            .scheduler(scheduler)
            .build()) {
        CoordinatorClient coordinatorClient = cluster.coordinatorClient();
        NoOpTaskSpec fooSpec = new NoOpTaskSpec(1, 500);
        coordinatorClient.createTask(new CreateTaskRequest("foo", fooSpec));
        TaskState expectedState = new ExpectedTaskBuilder("foo").taskState(new TaskPending(fooSpec)).build().taskState();
        TaskState resp = coordinatorClient.task(new TaskRequest("foo"));
        assertEquals(expectedState, resp);
        time.sleep(2);
        new ExpectedTasks()
            .addTask(new ExpectedTaskBuilder("foo")
                .taskState(new TaskRunning(fooSpec, 2, new TextNode("active")))
                .workerState(new WorkerRunning("foo", fooSpec, 2, new TextNode("active")))
                .build())
            .waitFor(coordinatorClient)
            .waitFor(cluster.agentClient("node02"));
        cluster.restartAgent("node02");
        time.sleep(550);
        // The coordinator heartbeat sees that the agent is back up and re-schedules the task, but the agent expires it.
        new ExpectedTasks()
            .addTask(new ExpectedTaskBuilder("foo")
                .taskState(new TaskDone(fooSpec, 2, 552, "worker expired", false, null))
                .workerState(new WorkerDone("foo", fooSpec, 552, 552, null, "worker expired"))
                .build())
            .waitFor(coordinatorClient)
            .waitFor(cluster.agentClient("node02"));
        cluster.restartAgent("node02");
        // The coordinator heartbeat sees that the agent is back up, but does not re-schedule the task as it is DONE.
        new ExpectedTasks()
            .addTask(new ExpectedTaskBuilder("foo")
                .taskState(new TaskDone(fooSpec, 2, 552, "worker expired", false, null))
                .build())
            .waitFor(coordinatorClient)
            .waitFor(cluster.agentClient("node02"));
    }
}