Use of org.apache.kafka.trogdor.rest.TasksResponse in project kafka by apache.
The class CoordinatorTest, method testTasksRequest.
@Test
public void testTasksRequest() throws Exception {
    MockTime time = new MockTime(0, 0, 0);
    Scheduler scheduler = new MockScheduler(time);
    try (MiniTrogdorCluster cluster = new MiniTrogdorCluster.Builder().
            addCoordinator("node01").addAgent("node02").scheduler(scheduler).build()) {
        CoordinatorClient coordinatorClient = cluster.coordinatorClient();
        new ExpectedTasks().waitFor(coordinatorClient);
        // fooSpec starts at t=1 and runs for 10 ms; barSpec starts at t=3 and runs for 1 ms.
        NoOpTaskSpec fooSpec = new NoOpTaskSpec(1, 10);
        NoOpTaskSpec barSpec = new NoOpTaskSpec(3, 1);
        coordinatorClient.createTask(new CreateTaskRequest("foo", fooSpec));
        coordinatorClient.createTask(new CreateTaskRequest("bar", barSpec));
        new ExpectedTasks().
            addTask(new ExpectedTaskBuilder("foo").taskState(new TaskPending(fooSpec)).build()).
            addTask(new ExpectedTaskBuilder("bar").taskState(new TaskPending(barSpec)).build()).
            waitFor(coordinatorClient);
        // Neither task has started yet, so a window requiring a start time >= 10 ms matches nothing.
        assertEquals(0, coordinatorClient.tasks(
            new TasksRequest(null, 10, 0, 10, 0, Optional.empty())).tasks().size());
        // Filtering by ID returns only the tasks that exist ("baz" does not).
        TasksResponse resp1 = coordinatorClient.tasks(
            new TasksRequest(Arrays.asList("foo", "baz"), 0, 0, 0, 0, Optional.empty()));
        assertTrue(resp1.tasks().containsKey("foo"));
        assertFalse(resp1.tasks().containsKey("bar"));
        assertEquals(1, resp1.tasks().size());
        // Advance the mock clock to t=2: foo starts running; bar is still pending.
        time.sleep(2);
        new ExpectedTasks().
            addTask(new ExpectedTaskBuilder("foo").
                taskState(new TaskRunning(fooSpec, 2, new TextNode("active"))).
                workerState(new WorkerRunning("foo", fooSpec, 2, new TextNode("active"))).build()).
            addTask(new ExpectedTaskBuilder("bar").taskState(new TaskPending(barSpec)).build()).
            waitFor(coordinatorClient).
            waitFor(cluster.agentClient("node02"));
        // foo started at t=2, so it matches firstStartMs=1 but not firstStartMs=3.
        TasksResponse resp2 = coordinatorClient.tasks(
            new TasksRequest(null, 1, 0, 0, 0, Optional.empty()));
        assertTrue(resp2.tasks().containsKey("foo"));
        assertFalse(resp2.tasks().containsKey("bar"));
        assertEquals(1, resp2.tasks().size());
        assertEquals(0, coordinatorClient.tasks(
            new TasksRequest(null, 3, 0, 0, 0, Optional.empty())).tasks().size());
    }
}
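For comparison, here is a minimal sketch of the same time-window filtering used outside the test harness. It assumes an already-built CoordinatorClient named client, the six-argument TasksRequest constructor used above (task IDs, firstStartMs, lastStartMs, firstEndMs, lastEndMs, optional state), and that TaskStateType defines a RUNNING constant; a bound of 0 leaves that filter unset.

// Hypothetical sketch: list tasks that started at or after t=1 ms and are currently RUNNING.
// "client" is assumed to be a CoordinatorClient pointed at a live coordinator.
TasksResponse runningSince1 = client.tasks(
    new TasksRequest(null, 1, 0, 0, 0, Optional.of(TaskStateType.RUNNING)));
for (Map.Entry<String, TaskState> entry : runningSince1.tasks().entrySet()) {
    System.out.printf("task %s is %s%n", entry.getKey(), entry.getValue().stateType());
}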
Use of org.apache.kafka.trogdor.rest.TasksResponse in project kafka by apache.
The class CoordinatorClient, method tasks.
public TasksResponse tasks(TasksRequest request) throws Exception {
    UriBuilder uriBuilder = UriBuilder.fromPath(url("/coordinator/tasks"));
    uriBuilder.queryParam("taskId", request.taskIds().toArray(new Object[0]));
    uriBuilder.queryParam("firstStartMs", request.firstStartMs());
    uriBuilder.queryParam("lastStartMs", request.lastStartMs());
    uriBuilder.queryParam("firstEndMs", request.firstEndMs());
    uriBuilder.queryParam("lastEndMs", request.lastEndMs());
    if (request.state().isPresent()) {
        uriBuilder.queryParam("state", request.state().get().toString());
    }
    HttpResponse<TasksResponse> resp = JsonRestServer.httpRequest(log, uriBuilder.build().toString(),
        "GET", null, new TypeReference<TasksResponse>() { }, maxTries);
    return resp.body();
}
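A short usage sketch of this method follows. It assumes the Builder shown in main() below is CoordinatorClient's own nested builder (referenced here by its qualified name); the target address is illustrative, not taken from the source.

// Minimal sketch, assuming a coordinator is reachable at the given (hypothetical) host:port.
CoordinatorClient client = new CoordinatorClient.Builder()
    .maxTries(3)
    .target("localhost:8889")   // assumed coordinator address
    .build();
TasksResponse all = client.tasks(new TasksRequest(null, 0, 0, 0, 0, Optional.empty()));
all.tasks().forEach((id, state) -> System.out.printf("%s -> %s%n", id, state.stateType()));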
Use of org.apache.kafka.trogdor.rest.TasksResponse in project kafka by apache.
The class CoordinatorClient, method main.
public static void main(String[] args) throws Exception {
    ArgumentParser rootParser = ArgumentParsers.newArgumentParser("trogdor-coordinator-client").
        description("The Trogdor coordinator client.");
    Subparsers subParsers = rootParser.addSubparsers().dest("command");
    Subparser uptimeParser = subParsers.addParser("uptime").help("Get the coordinator uptime.");
    addTargetArgument(uptimeParser);
    addJsonArgument(uptimeParser);
    Subparser statusParser = subParsers.addParser("status").help("Get the coordinator status.");
    addTargetArgument(statusParser);
    addJsonArgument(statusParser);
    Subparser showTaskParser = subParsers.addParser("showTask").help("Show a coordinator task.");
    addTargetArgument(showTaskParser);
    addJsonArgument(showTaskParser);
    showTaskParser.addArgument("--id", "-i").action(store()).required(true).type(String.class).
        dest("taskId").metavar("TASK_ID").help("The task ID to show.");
    showTaskParser.addArgument("--verbose", "-v").action(storeTrue()).
        dest("verbose").metavar("VERBOSE").help("Print out everything.");
    showTaskParser.addArgument("--show-status", "-S").action(storeTrue()).
        dest("showStatus").metavar("SHOW_STATUS").help("Show the task status.");
    Subparser showTasksParser = subParsers.addParser("showTasks").
        help("Show many coordinator tasks. By default, all tasks are shown, but " +
            "command-line options can be specified as filters.");
    addTargetArgument(showTasksParser);
    addJsonArgument(showTasksParser);
    MutuallyExclusiveGroup idGroup = showTasksParser.addMutuallyExclusiveGroup();
    idGroup.addArgument("--id", "-i").action(append()).type(String.class).
        dest("taskIds").metavar("TASK_IDS").
        help("Show only this task ID. This option may be specified multiple times.");
    idGroup.addArgument("--id-pattern").action(store()).type(String.class).
        dest("taskIdPattern").metavar("TASK_ID_PATTERN").
        help("Only display tasks which match the given ID pattern.");
    showTasksParser.addArgument("--state", "-s").type(TaskStateType.class).
        dest("taskStateType").metavar("TASK_STATE_TYPE").help("Show only tasks in this state.");
    Subparser createTaskParser = subParsers.addParser("createTask").help("Create a new task.");
    addTargetArgument(createTaskParser);
    createTaskParser.addArgument("--id", "-i").action(store()).required(true).type(String.class).
        dest("taskId").metavar("TASK_ID").help("The task ID to create.");
    createTaskParser.addArgument("--spec", "-s").action(store()).required(true).type(String.class).
        dest("taskSpec").metavar("TASK_SPEC").
        help("The task spec to create, or a path to a file containing the task spec.");
    Subparser stopTaskParser = subParsers.addParser("stopTask").help("Stop a task.");
    addTargetArgument(stopTaskParser);
    stopTaskParser.addArgument("--id", "-i").action(store()).required(true).type(String.class).
        dest("taskId").metavar("TASK_ID").help("The task ID to stop.");
    Subparser destroyTaskParser = subParsers.addParser("destroyTask").help("Destroy a task.");
    addTargetArgument(destroyTaskParser);
    destroyTaskParser.addArgument("--id", "-i").action(store()).required(true).type(String.class).
        dest("taskId").metavar("TASK_ID").help("The task ID to destroy.");
    Subparser shutdownParser = subParsers.addParser("shutdown").help("Shut down the coordinator.");
    addTargetArgument(shutdownParser);
    Namespace res = rootParser.parseArgsOrFail(args);
    String target = res.getString("target");
    CoordinatorClient client = new Builder().maxTries(3).target(target).build();
    ZoneOffset localOffset = OffsetDateTime.now().getOffset();
    switch (res.getString("command")) {
        case "uptime": {
            UptimeResponse uptime = client.uptime();
            if (res.getBoolean("json")) {
                System.out.println(JsonUtil.toJsonString(uptime));
            } else {
                System.out.printf("Coordinator is running at %s.%n", target);
                System.out.printf("\tStart time: %s%n", dateString(uptime.serverStartMs(), localOffset));
                System.out.printf("\tCurrent server time: %s%n", dateString(uptime.nowMs(), localOffset));
                System.out.printf("\tUptime: %s%n", durationString(uptime.nowMs() - uptime.serverStartMs()));
            }
            break;
        }
        case "status": {
            CoordinatorStatusResponse response = client.status();
            if (res.getBoolean("json")) {
                System.out.println(JsonUtil.toJsonString(response));
            } else {
                System.out.printf("Coordinator is running at %s.%n", target);
                System.out.printf("\tStart time: %s%n", dateString(response.serverStartMs(), localOffset));
            }
            break;
        }
        case "showTask": {
            String taskId = res.getString("taskId");
            TaskRequest req = new TaskRequest(taskId);
            TaskState taskState = null;
            try {
                taskState = client.task(req);
            } catch (NotFoundException e) {
                System.out.printf("Task %s was not found.%n", taskId);
                Exit.exit(1);
            }
            if (res.getBoolean("json")) {
                System.out.println(JsonUtil.toJsonString(taskState));
            } else {
                System.out.printf("Task %s of type %s is %s. %s%n", taskId,
                    taskState.spec().getClass().getCanonicalName(), taskState.stateType(),
                    prettyPrintTaskInfo(taskState, localOffset));
                if (taskState instanceof TaskDone) {
                    TaskDone taskDone = (TaskDone) taskState;
                    if ((taskDone.error() != null) && (!taskDone.error().isEmpty())) {
                        System.out.printf("Error: %s%n", taskDone.error());
                    }
                }
                if (res.getBoolean("verbose")) {
                    System.out.printf("Spec: %s%n%n", JsonUtil.toPrettyJsonString(taskState.spec()));
                }
                if (res.getBoolean("verbose") || res.getBoolean("showStatus")) {
                    System.out.printf("Status: %s%n%n", JsonUtil.toPrettyJsonString(taskState.status()));
                }
            }
            break;
        }
        case "showTasks": {
            TaskStateType taskStateType = res.<TaskStateType>get("taskStateType");
            List<String> taskIds = new ArrayList<>();
            Pattern taskIdPattern = null;
            if (res.getList("taskIds") != null) {
                for (Object taskId : res.getList("taskIds")) {
                    taskIds.add((String) taskId);
                }
            } else if (res.getString("taskIdPattern") != null) {
                try {
                    taskIdPattern = Pattern.compile(res.getString("taskIdPattern"));
                } catch (PatternSyntaxException e) {
                    System.out.println("Invalid task ID regular expression " + res.getString("taskIdPattern"));
                    e.printStackTrace();
                    Exit.exit(1);
                }
            }
            TasksRequest req = new TasksRequest(taskIds, 0, 0, 0, 0, Optional.ofNullable(taskStateType));
            TasksResponse response = client.tasks(req);
            if (taskIdPattern != null) {
                TreeMap<String, TaskState> filteredTasks = new TreeMap<>();
                for (Map.Entry<String, TaskState> entry : response.tasks().entrySet()) {
                    if (taskIdPattern.matcher(entry.getKey()).matches()) {
                        filteredTasks.put(entry.getKey(), entry.getValue());
                    }
                }
                response = new TasksResponse(filteredTasks);
            }
            if (res.getBoolean("json")) {
                System.out.println(JsonUtil.toJsonString(response));
            } else {
                System.out.println(prettyPrintTasksResponse(response, localOffset));
            }
            if (response.tasks().isEmpty()) {
                Exit.exit(1);
            }
            break;
        }
        case "createTask": {
            String taskId = res.getString("taskId");
            TaskSpec taskSpec = JsonUtil.objectFromCommandLineArgument(res.getString("taskSpec"), TaskSpec.class);
            CreateTaskRequest req = new CreateTaskRequest(taskId, taskSpec);
            try {
                client.createTask(req);
                System.out.printf("Sent CreateTaskRequest for task %s.%n", req.id());
            } catch (RequestConflictException rce) {
                System.out.printf("CreateTaskRequest for task %s got a 409 status code - " +
                    "a task with the same ID but a different specification already exists.%nException: %s%n",
                    req.id(), rce.getMessage());
                Exit.exit(1);
            }
            break;
        }
        case "stopTask": {
            String taskId = res.getString("taskId");
            StopTaskRequest req = new StopTaskRequest(taskId);
            client.stopTask(req);
            System.out.printf("Sent StopTaskRequest for task %s.%n", taskId);
            break;
        }
        case "destroyTask": {
            String taskId = res.getString("taskId");
            DestroyTaskRequest req = new DestroyTaskRequest(taskId);
            client.destroyTask(req);
            System.out.printf("Sent DestroyTaskRequest for task %s.%n", taskId);
            break;
        }
        case "shutdown": {
            client.shutdown();
            System.out.println("Sent ShutdownRequest.");
            break;
        }
        default: {
            System.out.println("You must choose an action. Type --help for help.");
            Exit.exit(1);
        }
    }
}
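A hedged example of how these subcommands might be invoked from a shell. The --target and --json options are assumed to come from the addTargetArgument and addJsonArgument helpers, which are not shown in this excerpt, and the coordinator address, task IDs, and state value are illustrative only.

# Show only RUNNING tasks (assumes TaskStateType defines a RUNNING constant).
trogdor-coordinator-client showTasks --target localhost:8889 --state RUNNING
# Show two specific tasks as JSON; --id may be repeated, per the help text above.
trogdor-coordinator-client showTasks --target localhost:8889 --id foo --id bar --json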
Use of org.apache.kafka.trogdor.rest.TasksResponse in project kafka by apache.
The class ExpectedTasks, method waitFor.
public ExpectedTasks waitFor(final CoordinatorClient client) throws InterruptedException {
    TestUtils.waitForCondition(() -> {
        TasksResponse tasks = null;
        try {
            tasks = client.tasks(new TasksRequest(null, 0, 0, 0, 0, Optional.empty()));
        } catch (Exception e) {
            log.info("Unable to get coordinator tasks", e);
            throw new RuntimeException(e);
        }
        StringBuilder errors = new StringBuilder();
        for (Map.Entry<String, ExpectedTask> entry : expected.entrySet()) {
            String id = entry.getKey();
            ExpectedTask task = entry.getValue();
            String differences = task.compare(tasks.tasks().get(id));
            if (differences != null) {
                errors.append(differences);
            }
        }
        String errorString = errors.toString();
        if (!errorString.isEmpty()) {
            log.info("EXPECTED TASKS: {}", JsonUtil.toJsonString(expected));
            log.info("ACTUAL TASKS : {}", JsonUtil.toJsonString(tasks.tasks()));
            log.info(errorString);
            return false;
        }
        return true;
    }, "Timed out waiting for expected tasks " + JsonUtil.toJsonString(expected));
    return this;
}
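waitFor polls the coordinator until every registered ExpectedTask matches the coordinator's view of that task, or the wait times out. A typical composition, lifted from the CoordinatorTest excerpt above (fooSpec and coordinatorClient as defined there):

new ExpectedTasks()
    .addTask(new ExpectedTaskBuilder("foo")
        .taskState(new TaskPending(fooSpec))
        .build())
    .waitFor(coordinatorClient);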
Use of org.apache.kafka.trogdor.rest.TasksResponse in project apache-kafka-on-k8s by banzaicloud.
The class JsonSerializationTest, method testDeserializationDoesNotProduceNulls.
@Test
public void testDeserializationDoesNotProduceNulls() throws Exception {
    verify(new FilesUnreadableFaultSpec(0, 0, null, null, null, 0));
    verify(new Kibosh.KiboshControlFile(null));
    verify(new NetworkPartitionFaultSpec(0, 0, null));
    verify(new ProcessStopFaultSpec(0, 0, null, null));
    verify(new AgentStatusResponse(0, null));
    verify(new TasksResponse(null));
    verify(new WorkerDone(null, 0, 0, null, null));
    verify(new WorkerRunning(null, 0, null));
    verify(new WorkerStopping(null, 0, null));
    verify(new ProduceBenchSpec(0, 0, null, null, 0, 0, null, null, null, 0, 0, "test-topic", 1, (short) 3));
    verify(new RoundTripWorkloadSpec(0, 0, null, null, 0, null, null, 0));
    verify(new SampleTaskSpec(0, 0, 0, null));
}
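The verify helper is not included in this excerpt. A plausible sketch of what it needs to do, given the test's name, is to round-trip each value through JSON and then check reflectively that deserialization filled in every field; the code below is an assumption, not the project's actual implementation (it needs java.lang.reflect.Field, java.lang.reflect.Modifier, and Jackson's ObjectMapper on the classpath).

// Hypothetical sketch of a verify(..) helper: serialize, deserialize, then fail on any null field.
private static <T> void verify(T value) throws Exception {
    @SuppressWarnings("unchecked")
    Class<T> clazz = (Class<T>) value.getClass();
    ObjectMapper mapper = new ObjectMapper();
    T roundTripped = mapper.readValue(mapper.writeValueAsString(value), clazz);
    for (Field field : clazz.getDeclaredFields()) {
        if (Modifier.isStatic(field.getModifiers())) {
            continue;   // ignore constants such as serialVersionUID
        }
        field.setAccessible(true);
        if (field.get(roundTripped) == null) {
            throw new AssertionError("Field " + field.getName() + " of " +
                clazz.getSimpleName() + " is null after deserialization.");
        }
    }
}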