Use of org.apache.kafka.trogdor.rest.TasksRequest in project kafka by apache.
From the class CoordinatorTest, method testTasksRequestMatches.
@Test
public void testTasksRequestMatches() throws Exception {
    // No ID list, no time bounds, no state filter: everything matches.
    TasksRequest req1 = new TasksRequest(null, 0, 0, 0, 0, Optional.empty());
    assertTrue(req1.matches("foo1", -1, -1, TaskStateType.PENDING));
    assertTrue(req1.matches("bar1", 100, 200, TaskStateType.DONE));
    assertTrue(req1.matches("baz1", 100, -1, TaskStateType.RUNNING));

    // firstStartMs=100: only tasks that started at or after t=100 match.
    TasksRequest req2 = new TasksRequest(null, 100, 0, 0, 0, Optional.empty());
    assertFalse(req2.matches("foo1", -1, -1, TaskStateType.PENDING));
    assertTrue(req2.matches("bar1", 100, 200, TaskStateType.DONE));
    assertFalse(req2.matches("bar1", 99, 200, TaskStateType.DONE));
    assertFalse(req2.matches("baz1", 99, -1, TaskStateType.RUNNING));

    // Both the start time and the end time must fall within [200, 900].
    TasksRequest req3 = new TasksRequest(null, 200, 900, 200, 900, Optional.empty());
    assertFalse(req3.matches("foo1", -1, -1, TaskStateType.PENDING));
    assertFalse(req3.matches("bar1", 100, 200, TaskStateType.DONE));
    assertFalse(req3.matches("bar1", 200, 1000, TaskStateType.DONE));
    assertTrue(req3.matches("bar1", 200, 700, TaskStateType.DONE));
    assertFalse(req3.matches("baz1", 101, -1, TaskStateType.RUNNING));

    // Restrict to an explicit ID list; firstStartMs=1000 still applies,
    // while the non-positive (-1) bounds are ignored.
    List<String> taskIds = new ArrayList<>();
    taskIds.add("foo1");
    taskIds.add("bar1");
    taskIds.add("baz1");
    TasksRequest req4 = new TasksRequest(taskIds, 1000, -1, -1, -1, Optional.empty());
    assertFalse(req4.matches("foo1", -1, -1, TaskStateType.PENDING));
    assertTrue(req4.matches("foo1", 1000, -1, TaskStateType.RUNNING));
    assertFalse(req4.matches("foo1", 900, -1, TaskStateType.RUNNING));
    assertFalse(req4.matches("baz2", 2000, -1, TaskStateType.RUNNING));
    assertFalse(req4.matches("baz2", -1, -1, TaskStateType.PENDING));

    // Filter by task state only.
    TasksRequest req5 = new TasksRequest(null, 0, 0, 0, 0, Optional.of(TaskStateType.RUNNING));
    assertTrue(req5.matches("foo1", -1, -1, TaskStateType.RUNNING));
    assertFalse(req5.matches("bar1", -1, -1, TaskStateType.DONE));
    assertFalse(req5.matches("baz1", -1, -1, TaskStateType.STOPPING));
    assertFalse(req5.matches("baz1", -1, -1, TaskStateType.PENDING));
}
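The assertions above suggest the filter semantics of the constructor arguments: a null or empty task-ID collection matches every ID, each of the four time bounds (firstStartMs, lastStartMs, firstEndMs, lastEndMs) is applied only when it is positive, and an empty Optional disables the state filter. A minimal sketch of building such filters on the client side, with hypothetical IDs and times:

    // Match only RUNNING tasks, regardless of when they started or finished.
    TasksRequest runningOnly =
        new TasksRequest(null, 0, 0, 0, 0, Optional.of(TaskStateType.RUNNING));

    // Match only tasks named "foo1" or "bar1" that started at or after t=500 ms
    // (the lower bound appears to be inclusive, judging by the assertions above).
    TasksRequest namedAndRecent = new TasksRequest(
        Arrays.asList("foo1", "bar1"), 500, 0, 0, 0, Optional.empty());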
Use of org.apache.kafka.trogdor.rest.TasksRequest in project kafka by apache.
From the class CoordinatorClient, method main.
public static void main(String[] args) throws Exception {
    ArgumentParser rootParser = ArgumentParsers.newArgumentParser("trogdor-coordinator-client").description("The Trogdor coordinator client.");
    Subparsers subParsers = rootParser.addSubparsers().dest("command");
    Subparser uptimeParser = subParsers.addParser("uptime").help("Get the coordinator uptime.");
    addTargetArgument(uptimeParser);
    addJsonArgument(uptimeParser);
    Subparser statusParser = subParsers.addParser("status").help("Get the coordinator status.");
    addTargetArgument(statusParser);
    addJsonArgument(statusParser);
    Subparser showTaskParser = subParsers.addParser("showTask").help("Show a coordinator task.");
    addTargetArgument(showTaskParser);
    addJsonArgument(showTaskParser);
    showTaskParser.addArgument("--id", "-i").action(store()).required(true).type(String.class).dest("taskId").metavar("TASK_ID").help("The task ID to show.");
    showTaskParser.addArgument("--verbose", "-v").action(storeTrue()).dest("verbose").metavar("VERBOSE").help("Print out everything.");
    showTaskParser.addArgument("--show-status", "-S").action(storeTrue()).dest("showStatus").metavar("SHOW_STATUS").help("Show the task status.");
    Subparser showTasksParser = subParsers.addParser("showTasks").help("Show many coordinator tasks. By default, all tasks are shown, but " + "command-line options can be specified as filters.");
    addTargetArgument(showTasksParser);
    addJsonArgument(showTasksParser);
    MutuallyExclusiveGroup idGroup = showTasksParser.addMutuallyExclusiveGroup();
    idGroup.addArgument("--id", "-i").action(append()).type(String.class).dest("taskIds").metavar("TASK_IDS").help("Show only this task ID. This option may be specified multiple times.");
    idGroup.addArgument("--id-pattern").action(store()).type(String.class).dest("taskIdPattern").metavar("TASK_ID_PATTERN").help("Only display tasks which match the given ID pattern.");
    showTasksParser.addArgument("--state", "-s").type(TaskStateType.class).dest("taskStateType").metavar("TASK_STATE_TYPE").help("Show only tasks in this state.");
    Subparser createTaskParser = subParsers.addParser("createTask").help("Create a new task.");
    addTargetArgument(createTaskParser);
    createTaskParser.addArgument("--id", "-i").action(store()).required(true).type(String.class).dest("taskId").metavar("TASK_ID").help("The task ID to create.");
    createTaskParser.addArgument("--spec", "-s").action(store()).required(true).type(String.class).dest("taskSpec").metavar("TASK_SPEC").help("The task spec to create, or a path to a file containing the task spec.");
    Subparser stopTaskParser = subParsers.addParser("stopTask").help("Stop a task.");
    addTargetArgument(stopTaskParser);
    stopTaskParser.addArgument("--id", "-i").action(store()).required(true).type(String.class).dest("taskId").metavar("TASK_ID").help("The task ID to stop.");
    Subparser destroyTaskParser = subParsers.addParser("destroyTask").help("Destroy a task.");
    addTargetArgument(destroyTaskParser);
    destroyTaskParser.addArgument("--id", "-i").action(store()).required(true).type(String.class).dest("taskId").metavar("TASK_ID").help("The task ID to destroy.");
    Subparser shutdownParser = subParsers.addParser("shutdown").help("Shut down the coordinator.");
    addTargetArgument(shutdownParser);
    Namespace res = rootParser.parseArgsOrFail(args);
    String target = res.getString("target");
    CoordinatorClient client = new Builder().maxTries(3).target(target).build();
    ZoneOffset localOffset = OffsetDateTime.now().getOffset();
    switch (res.getString("command")) {
        case "uptime": {
            UptimeResponse uptime = client.uptime();
            if (res.getBoolean("json")) {
                System.out.println(JsonUtil.toJsonString(uptime));
            } else {
                System.out.printf("Coordinator is running at %s.%n", target);
                System.out.printf("\tStart time: %s%n", dateString(uptime.serverStartMs(), localOffset));
                System.out.printf("\tCurrent server time: %s%n", dateString(uptime.nowMs(), localOffset));
                System.out.printf("\tUptime: %s%n", durationString(uptime.nowMs() - uptime.serverStartMs()));
            }
            break;
        }
        case "status": {
            CoordinatorStatusResponse response = client.status();
            if (res.getBoolean("json")) {
                System.out.println(JsonUtil.toJsonString(response));
            } else {
                System.out.printf("Coordinator is running at %s.%n", target);
                System.out.printf("\tStart time: %s%n", dateString(response.serverStartMs(), localOffset));
            }
            break;
        }
        case "showTask": {
            String taskId = res.getString("taskId");
            TaskRequest req = new TaskRequest(taskId);
            TaskState taskState = null;
            try {
                taskState = client.task(req);
            } catch (NotFoundException e) {
                System.out.printf("Task %s was not found.%n", taskId);
                Exit.exit(1);
            }
            if (res.getBoolean("json")) {
                System.out.println(JsonUtil.toJsonString(taskState));
            } else {
                System.out.printf("Task %s of type %s is %s. %s%n", taskId, taskState.spec().getClass().getCanonicalName(), taskState.stateType(), prettyPrintTaskInfo(taskState, localOffset));
                if (taskState instanceof TaskDone) {
                    TaskDone taskDone = (TaskDone) taskState;
                    if ((taskDone.error() != null) && (!taskDone.error().isEmpty())) {
                        System.out.printf("Error: %s%n", taskDone.error());
                    }
                }
                if (res.getBoolean("verbose")) {
                    System.out.printf("Spec: %s%n%n", JsonUtil.toPrettyJsonString(taskState.spec()));
                }
                if (res.getBoolean("verbose") || res.getBoolean("showStatus")) {
                    System.out.printf("Status: %s%n%n", JsonUtil.toPrettyJsonString(taskState.status()));
                }
            }
            break;
        }
case "showTasks":
{
TaskStateType taskStateType = res.<TaskStateType>get("taskStateType");
List<String> taskIds = new ArrayList<>();
Pattern taskIdPattern = null;
if (res.getList("taskIds") != null) {
for (Object taskId : res.getList("taskIds")) {
taskIds.add((String) taskId);
}
} else if (res.getString("taskIdPattern") != null) {
try {
taskIdPattern = Pattern.compile(res.getString("taskIdPattern"));
} catch (PatternSyntaxException e) {
System.out.println("Invalid task ID regular expression " + res.getString("taskIdPattern"));
e.printStackTrace();
Exit.exit(1);
}
}
TasksRequest req = new TasksRequest(taskIds, 0, 0, 0, 0, Optional.ofNullable(taskStateType));
TasksResponse response = client.tasks(req);
if (taskIdPattern != null) {
TreeMap<String, TaskState> filteredTasks = new TreeMap<>();
for (Map.Entry<String, TaskState> entry : response.tasks().entrySet()) {
if (taskIdPattern.matcher(entry.getKey()).matches()) {
filteredTasks.put(entry.getKey(), entry.getValue());
}
}
response = new TasksResponse(filteredTasks);
}
if (res.getBoolean("json")) {
System.out.println(JsonUtil.toJsonString(response));
} else {
System.out.println(prettyPrintTasksResponse(response, localOffset));
}
if (response.tasks().isEmpty()) {
Exit.exit(1);
}
break;
}
case "createTask":
{
String taskId = res.getString("taskId");
TaskSpec taskSpec = JsonUtil.objectFromCommandLineArgument(res.getString("taskSpec"), TaskSpec.class);
CreateTaskRequest req = new CreateTaskRequest(taskId, taskSpec);
try {
client.createTask(req);
System.out.printf("Sent CreateTaskRequest for task %s.%n", req.id());
} catch (RequestConflictException rce) {
System.out.printf("CreateTaskRequest for task %s got a 409 status code - " + "a task with the same ID but a different specification already exists.%nException: %s%n", req.id(), rce.getMessage());
Exit.exit(1);
}
break;
}
case "stopTask":
{
String taskId = res.getString("taskId");
StopTaskRequest req = new StopTaskRequest(taskId);
client.stopTask(req);
System.out.printf("Sent StopTaskRequest for task %s.%n", taskId);
break;
}
case "destroyTask":
{
String taskId = res.getString("taskId");
DestroyTaskRequest req = new DestroyTaskRequest(taskId);
client.destroyTask(req);
System.out.printf("Sent DestroyTaskRequest for task %s.%n", taskId);
break;
}
case "shutdown":
{
client.shutdown();
System.out.println("Sent ShutdownRequest.");
break;
}
default:
{
System.out.println("You must choose an action. Type --help for help.");
Exit.exit(1);
}
}
}
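For reference, the core of the showTasks branch can also be driven directly from Java rather than through the argument parser. A minimal sketch, assuming the Builder used in main() is the nested CoordinatorClient.Builder and that a coordinator is reachable at localhost:8889 (the address is a placeholder, and error handling is omitted):

    // Build a client with retries, as main() does above.
    CoordinatorClient client = new CoordinatorClient.Builder()
        .maxTries(3)
        .target("localhost:8889")   // hypothetical coordinator address
        .build();

    // Equivalent of "showTasks --state RUNNING": no ID or time filters, state filter only.
    TasksResponse response = client.tasks(
        new TasksRequest(null, 0, 0, 0, 0, Optional.of(TaskStateType.RUNNING)));
    for (Map.Entry<String, TaskState> entry : response.tasks().entrySet()) {
        System.out.printf("%s: %s%n", entry.getKey(), entry.getValue().stateType());
    }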
Use of org.apache.kafka.trogdor.rest.TasksRequest in project kafka by apache.
From the class CoordinatorTest, method testTasksRequest.
@Test
public void testTasksRequest() throws Exception {
    MockTime time = new MockTime(0, 0, 0);
    Scheduler scheduler = new MockScheduler(time);
    try (MiniTrogdorCluster cluster = new MiniTrogdorCluster.Builder()
            .addCoordinator("node01")
            .addAgent("node02")
            .scheduler(scheduler)
            .build()) {
        CoordinatorClient coordinatorClient = cluster.coordinatorClient();
        new ExpectedTasks().waitFor(coordinatorClient);
        NoOpTaskSpec fooSpec = new NoOpTaskSpec(1, 10);
        NoOpTaskSpec barSpec = new NoOpTaskSpec(3, 1);
        coordinatorClient.createTask(new CreateTaskRequest("foo", fooSpec));
        coordinatorClient.createTask(new CreateTaskRequest("bar", barSpec));
        new ExpectedTasks()
            .addTask(new ExpectedTaskBuilder("foo").taskState(new TaskPending(fooSpec)).build())
            .addTask(new ExpectedTaskBuilder("bar").taskState(new TaskPending(barSpec)).build())
            .waitFor(coordinatorClient);
        assertEquals(0, coordinatorClient.tasks(new TasksRequest(null, 10, 0, 10, 0, Optional.empty())).tasks().size());
        TasksResponse resp1 = coordinatorClient.tasks(new TasksRequest(Arrays.asList("foo", "baz"), 0, 0, 0, 0, Optional.empty()));
        assertTrue(resp1.tasks().containsKey("foo"));
        assertFalse(resp1.tasks().containsKey("bar"));
        assertEquals(1, resp1.tasks().size());
        time.sleep(2);
        new ExpectedTasks()
            .addTask(new ExpectedTaskBuilder("foo")
                .taskState(new TaskRunning(fooSpec, 2, new TextNode("active")))
                .workerState(new WorkerRunning("foo", fooSpec, 2, new TextNode("active")))
                .build())
            .addTask(new ExpectedTaskBuilder("bar").taskState(new TaskPending(barSpec)).build())
            .waitFor(coordinatorClient)
            .waitFor(cluster.agentClient("node02"));
        TasksResponse resp2 = coordinatorClient.tasks(new TasksRequest(null, 1, 0, 0, 0, Optional.empty()));
        assertTrue(resp2.tasks().containsKey("foo"));
        assertFalse(resp2.tasks().containsKey("bar"));
        assertEquals(1, resp2.tasks().size());
        assertEquals(0, coordinatorClient.tasks(new TasksRequest(null, 3, 0, 0, 0, Optional.empty())).tasks().size());
    }
}
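End-time bounds can be combined with the start-time bounds in the same way. A minimal sketch that could sit inside the try-with-resources block above, with a hypothetical window chosen against the MockTime clock: only tasks that started at or after t=2 ms and had already finished by t=12 ms would be returned.

    // Hypothetical window: startedMs >= 2 and doneMs <= 12 on the mock clock.
    TasksResponse inWindow = coordinatorClient.tasks(
        new TasksRequest(null, 2, 0, 0, 12, Optional.empty()));
    System.out.println(inWindow.tasks().keySet());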
Use of org.apache.kafka.trogdor.rest.TasksRequest in project kafka by apache.
From the class ExpectedTasks, method waitFor.
public ExpectedTasks waitFor(final CoordinatorClient client) throws InterruptedException {
    TestUtils.waitForCondition(() -> {
        TasksResponse tasks = null;
        try {
            tasks = client.tasks(new TasksRequest(null, 0, 0, 0, 0, Optional.empty()));
        } catch (Exception e) {
            log.info("Unable to get coordinator tasks", e);
            throw new RuntimeException(e);
        }
        StringBuilder errors = new StringBuilder();
        for (Map.Entry<String, ExpectedTask> entry : expected.entrySet()) {
            String id = entry.getKey();
            ExpectedTask task = entry.getValue();
            String differences = task.compare(tasks.tasks().get(id));
            if (differences != null) {
                errors.append(differences);
            }
        }
        String errorString = errors.toString();
        if (!errorString.isEmpty()) {
            log.info("EXPECTED TASKS: {}", JsonUtil.toJsonString(expected));
            log.info("ACTUAL TASKS : {}", JsonUtil.toJsonString(tasks.tasks()));
            log.info(errorString);
            return false;
        }
        return true;
    }, "Timed out waiting for expected tasks " + JsonUtil.toJsonString(expected));
    return this;
}
Use of org.apache.kafka.trogdor.rest.TasksRequest in project kafka by apache.
From the class CoordinatorRestResource, method tasks.
@GET
@Path("/tasks/")
public Response tasks(@QueryParam("taskId") List<String> taskId,
        @DefaultValue("0") @QueryParam("firstStartMs") long firstStartMs,
        @DefaultValue("0") @QueryParam("lastStartMs") long lastStartMs,
        @DefaultValue("0") @QueryParam("firstEndMs") long firstEndMs,
        @DefaultValue("0") @QueryParam("lastEndMs") long lastEndMs,
        @DefaultValue("") @QueryParam("state") String state) throws Throwable {
    boolean isEmptyState = state.equals("");
    if (!isEmptyState && !TaskStateType.Constants.VALUES.contains(state)) {
        return Response.status(400).entity(
            String.format("State %s is invalid. Must be one of %s", state, TaskStateType.Constants.VALUES)).build();
    }
    Optional<TaskStateType> givenState = Optional.ofNullable(isEmptyState ? null : TaskStateType.valueOf(state));
    TasksResponse resp = coordinator().tasks(new TasksRequest(taskId, firstStartMs, lastStartMs, firstEndMs, lastEndMs, givenState));
    return Response.status(200).entity(resp).build();
}
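Because every filter is exposed as a query parameter, the same request can be issued with any HTTP client. A minimal sketch using java.net.http, assuming the resource is mounted under /coordinator and the coordinator listens on localhost:8889 (both the mount path and the address are assumptions about the deployment, not shown in the snippet above):

    import java.net.URI;
    import java.net.http.HttpClient;
    import java.net.http.HttpRequest;
    import java.net.http.HttpResponse;

    public class TasksQueryExample {
        public static void main(String[] args) throws Exception {
            // Hypothetical base URL; /coordinator is assumed to be the resource's mount point.
            URI uri = URI.create("http://localhost:8889/coordinator/tasks?state=RUNNING&firstStartMs=1000");
            HttpResponse<String> response = HttpClient.newHttpClient()
                .send(HttpRequest.newBuilder(uri).GET().build(), HttpResponse.BodyHandlers.ofString());
            // On success the body is the JSON form of TasksResponse: a map keyed by task ID.
            System.out.println(response.statusCode() + " " + response.body());
        }
    }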