Use of org.apache.hadoop.yarn.util.Clock in project hadoop by apache.
Class TestYarnClient, method testReservationDelete:
@Test
public void testReservationDelete() throws Exception {
  MiniYARNCluster cluster = setupMiniYARNCluster();
  YarnClient client = setupYarnClient(cluster);
  try {
    Clock clock = new UTCClock();
    long arrival = clock.getTime();
    long duration = 60000;
    long deadline = (long) (arrival + 1.05 * duration);
    ReservationSubmissionRequest sRequest =
        submitReservationTestHelper(client, arrival, deadline, duration);
    ReservationId reservationID = sRequest.getReservationId();
    // Delete the reservation
    ReservationDeleteRequest dRequest =
        ReservationDeleteRequest.newInstance(reservationID);
    ReservationDeleteResponse dResponse = client.deleteReservation(dRequest);
    Assert.assertNotNull(dResponse);
    System.out.println("Delete reservation response: " + dResponse);
    // List reservations, search by non-existent reservationID
    ReservationListRequest request = ReservationListRequest.newInstance(
        ReservationSystemTestUtil.reservationQ, reservationID.toString(),
        -1, -1, false);
    ReservationListResponse response = client.listReservations(request);
    Assert.assertNotNull(response);
    Assert.assertEquals(0, response.getReservationAllocationState().size());
  } finally {
    // clean-up
    if (client != null) {
      client.stop();
    }
    cluster.stop();
  }
}
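Note that setupMiniYARNCluster() and setupYarnClient(cluster) are helpers of TestYarnClient that this excerpt does not show. A minimal sketch of what they plausibly look like, assuming a reservation-enabled CapacityScheduler; the queue-setup call, container counts, and cluster name are assumptions:

// Hypothetical reconstruction of the setup helpers referenced above.
private MiniYARNCluster setupMiniYARNCluster() throws Exception {
  CapacitySchedulerConfiguration conf = new CapacitySchedulerConfiguration();
  ReservationSystemTestUtil.setupQueueConfiguration(conf); // assumed helper
  conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class,
      ResourceScheduler.class);
  conf.setBoolean(YarnConfiguration.RM_RESERVATION_SYSTEM_ENABLE, true);
  MiniYARNCluster cluster = new MiniYARNCluster("reservation-test", 1, 1, 1);
  cluster.init(conf);
  cluster.start();
  return cluster;
}

private YarnClient setupYarnClient(MiniYARNCluster cluster) {
  // Point the client at the mini cluster's ResourceManager.
  YarnClient client = YarnClient.createYarnClient();
  client.init(cluster.getConfig());
  client.start();
  return client;
}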
Use of org.apache.hadoop.yarn.util.Clock in project hadoop by apache.
Class TestYarnClient, method testListReservationsByTimeIntervalContainingNoReservations:
@Test
public void testListReservationsByTimeIntervalContainingNoReservations() throws Exception {
  MiniYARNCluster cluster = setupMiniYARNCluster();
  YarnClient client = setupYarnClient(cluster);
  try {
    Clock clock = new UTCClock();
    long arrival = clock.getTime();
    long duration = 60000;
    long deadline = (long) (arrival + 1.05 * duration);
    ReservationSubmissionRequest sRequest =
        submitReservationTestHelper(client, arrival, deadline, duration);
    // List reservations, search by very large start time.
    ReservationListRequest request = ReservationListRequest.newInstance(
        ReservationSystemTestUtil.reservationQ, "", Long.MAX_VALUE, -1, false);
    ReservationListResponse response = client.listReservations(request);
    // Ensure all reservations are filtered out.
    Assert.assertNotNull(response);
    Assert.assertEquals(0, response.getReservationAllocationState().size());
    duration = 30000;
    deadline = sRequest.getReservationDefinition().getDeadline();
    // List reservations, search by start time after the reservation end time.
    request = ReservationListRequest.newInstance(
        ReservationSystemTestUtil.reservationQ, "",
        deadline + duration, deadline + 2 * duration, false);
    response = client.listReservations(request);
    // Ensure all reservations are filtered out.
    Assert.assertNotNull(response);
    Assert.assertEquals(0, response.getReservationAllocationState().size());
    arrival = clock.getTime();
    // List reservations, search by end time before the reservation start time.
    request = ReservationListRequest.newInstance(
        ReservationSystemTestUtil.reservationQ, "", 0, arrival - duration, false);
    response = client.listReservations(request);
    // Ensure all reservations are filtered out.
    Assert.assertNotNull(response);
    Assert.assertEquals(0, response.getReservationAllocationState().size());
    // List reservations, search by very small end time.
    request = ReservationListRequest.newInstance(
        ReservationSystemTestUtil.reservationQ, "", 0, 1, false);
    response = client.listReservations(request);
    // Ensure all reservations are filtered out.
    Assert.assertNotNull(response);
    Assert.assertEquals(0, response.getReservationAllocationState().size());
  } finally {
    // clean-up
    if (client != null) {
      client.stop();
    }
    cluster.stop();
  }
}
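All four queries above use the same five-argument factory. Its parameters, as inferred from these calls, are the queue to search, an optional reservationId filter, the start and end of the search interval in milliseconds (-1 disables a bound), and a flag selecting whether per-allocation resource detail is returned. For contrast, an unfiltered query that should match the one submitted reservation (illustration only):

// List everything in the reservable queue, with no filters.
ReservationListRequest all = ReservationListRequest.newInstance(
    ReservationSystemTestUtil.reservationQ, // queue to search
    "",                                     // no reservationId filter
    -1, -1,                                 // no time-interval bounds
    false);                                 // skip per-allocation detail
ReservationListResponse everything = client.listReservations(all);
Assert.assertEquals(1, everything.getReservationAllocationState().size());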
Use of org.apache.hadoop.yarn.util.Clock in project hadoop by apache.
Class TestYarnClient, method testUpdateReservation:
@Test
public void testUpdateReservation() throws Exception {
  MiniYARNCluster cluster = setupMiniYARNCluster();
  YarnClient client = setupYarnClient(cluster);
  try {
    Clock clock = new UTCClock();
    long arrival = clock.getTime();
    long duration = 60000;
    long deadline = (long) (arrival + 1.05 * duration);
    ReservationSubmissionRequest sRequest =
        submitReservationTestHelper(client, arrival, deadline, duration);
    ReservationDefinition rDef = sRequest.getReservationDefinition();
    ReservationRequest rr =
        rDef.getReservationRequests().getReservationResources().get(0);
    ReservationId reservationID = sRequest.getReservationId();
    rr.setNumContainers(5);
    arrival = clock.getTime();
    duration = 30000;
    deadline = (long) (arrival + 1.05 * duration);
    rr.setDuration(duration);
    rDef.setArrival(arrival);
    rDef.setDeadline(deadline);
    ReservationUpdateRequest uRequest =
        ReservationUpdateRequest.newInstance(rDef, reservationID);
    ReservationUpdateResponse uResponse = client.updateReservation(uRequest);
    Assert.assertNotNull(uResponse);
    System.out.println("Update reservation response: " + uResponse);
  } finally {
    // clean-up
    if (client != null) {
      client.stop();
    }
    cluster.stop();
  }
}
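Each of these tests starts with submitReservationTestHelper, also omitted from this excerpt. A plausible shape, assuming the reservation id is obtained from the ResourceManager and a createSimpleReservationRequest helper builds the definition; both the helper name and the container count are assumptions:

// Hypothetical reconstruction of the shared submission helper.
private ReservationSubmissionRequest submitReservationTestHelper(
    YarnClient client, long arrival, long deadline, long duration)
    throws IOException, YarnException {
  ReservationId reservationID =
      client.createReservation().getReservationId(); // RM allocates the id
  ReservationSubmissionRequest sRequest = createSimpleReservationRequest(
      reservationID, 4, arrival, deadline, duration); // assumed helper
  ReservationSubmissionResponse sResponse = client.submitReservation(sRequest);
  Assert.assertNotNull(sResponse);
  return sRequest;
}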
Use of org.apache.hadoop.yarn.util.Clock in project apex-core by apache.
Class CheckpointTest, method testUpdateRecoveryCheckpoint:
@Test
public void testUpdateRecoveryCheckpoint() throws Exception {
  Clock clock = new SystemClock();
  dag.setAttribute(com.datatorrent.api.Context.OperatorContext.STORAGE_AGENT,
      new MemoryStorageAgent());
  GenericTestOperator o1 = dag.addOperator("o1", GenericTestOperator.class);
  GenericTestOperator o2 = dag.addOperator("o2", GenericTestOperator.class);
  GenericTestOperator o3SL = dag.addOperator("o3SL", StatelessOperator.class);
  dag.addStream("o1.output1", o1.outport1, o2.inport1);
  dag.addStream("o2.output1", o2.outport1, o3SL.inport1);
  StreamingContainerManager dnm = new StreamingContainerManager(dag);
  PhysicalPlan plan = dnm.getPhysicalPlan();
  for (PTOperator oper : plan.getAllOperators().values()) {
    Assert.assertEquals("activation windowId " + oper,
        Checkpoint.INITIAL_CHECKPOINT, oper.getRecoveryCheckpoint());
    Assert.assertEquals("checkpoints " + oper,
        Collections.emptyList(), oper.checkpoints);
  }
  List<PTOperator> nodes1 = plan.getOperators(dag.getMeta(o1));
  Assert.assertNotNull(nodes1);
  Assert.assertEquals(1, nodes1.size());
  PTOperator o1p1 = nodes1.get(0);
  PTOperator o2p1 = plan.getOperators(dag.getMeta(o2)).get(0);
  PTOperator o3SLp1 = plan.getOperators(dag.getMeta(o3SL)).get(0);
  // recovery checkpoint won't update in deploy state
  for (PTOperator oper : plan.getAllOperators().values()) {
    Assert.assertEquals("state " + oper,
        PTOperator.State.PENDING_DEPLOY, oper.getState());
  }
  dnm.updateRecoveryCheckpoints(o2p1, new UpdateCheckpointsContext(clock), false);
  Assert.assertEquals("no checkpoints " + o2p1,
      Checkpoint.INITIAL_CHECKPOINT, o2p1.getRecoveryCheckpoint());
  UpdateCheckpointsContext ctx = new UpdateCheckpointsContext(clock);
  dnm.updateRecoveryCheckpoints(o1p1, ctx, false);
  Assert.assertEquals("no checkpoints " + o1p1,
      Checkpoint.INITIAL_CHECKPOINT, o1p1.getRecoveryCheckpoint());
  Assert.assertEquals("number dependencies " + ctx.visited, 3, ctx.visited.size());
  // adding checkpoints to upstream only does not move recovery checkpoint
  Checkpoint cp3 = new Checkpoint(3L, 0, 0);
  Checkpoint cp5 = new Checkpoint(5L, 0, 0);
  Checkpoint cp4 = new Checkpoint(4L, 0, 0);
  o1p1.checkpoints.add(cp3);
  o1p1.checkpoints.add(cp5);
  dnm.updateRecoveryCheckpoints(o1p1, new UpdateCheckpointsContext(clock), false);
  Assert.assertEquals("checkpoint " + o1p1,
      Checkpoint.INITIAL_CHECKPOINT, o1p1.getRecoveryCheckpoint());
  o2p1.checkpoints.add(new Checkpoint(3L, 0, 0));
  dnm.updateRecoveryCheckpoints(o1p1, new UpdateCheckpointsContext(clock), false);
  Assert.assertEquals("checkpoint " + o1p1,
      Checkpoint.INITIAL_CHECKPOINT, o1p1.getRecoveryCheckpoint());
  Assert.assertEquals("checkpoint " + o2p1,
      Checkpoint.INITIAL_CHECKPOINT, o2p1.getRecoveryCheckpoint());
  // set leaf operator checkpoint
  dnm.addCheckpoint(o3SLp1, cp5);
  dnm.updateRecoveryCheckpoints(o1p1, new UpdateCheckpointsContext(clock), false);
  Assert.assertEquals("checkpoint " + o1p1,
      Checkpoint.INITIAL_CHECKPOINT, o1p1.getRecoveryCheckpoint());
  Assert.assertEquals("checkpoint " + o2p1,
      Checkpoint.INITIAL_CHECKPOINT, o2p1.getRecoveryCheckpoint());
  // set all operators as active to enable recovery window id update
  for (PTOperator oper : plan.getAllOperators().values()) {
    oper.setState(PTOperator.State.ACTIVE);
  }
  dnm.updateRecoveryCheckpoints(o1p1, new UpdateCheckpointsContext(clock), false);
  Assert.assertEquals("checkpoint " + o1p1, cp3, o1p1.getRecoveryCheckpoint());
  Assert.assertEquals("checkpoint " + o2p1, cp3, o2p1.getRecoveryCheckpoint());
  Assert.assertEquals("checkpoint " + o3SLp1, cp5, o3SLp1.getRecoveryCheckpoint());
  Assert.assertNull("checkpoint null for stateless operator " + o3SLp1,
      o3SLp1.stats.checkpointStats);
  o2p1.checkpoints.add(cp4);
  dnm.updateRecoveryCheckpoints(o1p1, new UpdateCheckpointsContext(clock), false);
  Assert.assertEquals("checkpoint " + o1p1, cp3, o1p1.getRecoveryCheckpoint());
  Assert.assertEquals("checkpoint " + o2p1, cp4, o2p1.getRecoveryCheckpoint());
  o1p1.checkpoints.add(1, cp4);
  Assert.assertEquals(getCheckpoints(3L, 4L, 5L), o1p1.checkpoints);
  dnm.updateRecoveryCheckpoints(o1p1, new UpdateCheckpointsContext(clock), false);
  Assert.assertEquals("checkpoint " + o1p1, cp4, o1p1.getRecoveryCheckpoint());
  Assert.assertEquals(getCheckpoints(4L, 5L), o1p1.checkpoints);
  // out of sequence windowIds should be sorted
  dnm.addCheckpoint(o2p1, new Checkpoint(2L, 0, 0));
  Assert.assertEquals("add first", getCheckpoints(2L, 4L), o2p1.checkpoints);
  dnm.addCheckpoint(o2p1, new Checkpoint(3L, 0, 0));
  Assert.assertEquals("add middle", getCheckpoints(2L, 3L, 4L), o2p1.checkpoints);
  dnm.addCheckpoint(o2p1, new Checkpoint(4L, 0, 0));
  Assert.assertEquals("ignore duplicate", getCheckpoints(2L, 3L, 4L), o2p1.checkpoints);
  dnm.addCheckpoint(o2p1, new Checkpoint(5L, 0, 0));
  Assert.assertEquals("add latest", getCheckpoints(2L, 3L, 4L, 5L), o2p1.checkpoints);
}
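The expectations above are built with getCheckpoints(...), a helper this excerpt omits. Since every checkpoint in the test is constructed as new Checkpoint(windowId, 0, 0), a minimal reconstruction; the exact signature is an assumption:

// Hypothetical reconstruction: build the expected checkpoint list
// from a sequence of window ids.
public static List<Checkpoint> getCheckpoints(Long... windowIds) {
  List<Checkpoint> checkpoints = new ArrayList<>(windowIds.length);
  for (Long windowId : windowIds) {
    checkpoints.add(new Checkpoint(windowId, 0, 0));
  }
  return checkpoints;
}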
Use of org.apache.hadoop.yarn.util.Clock in project apex-core by apache.
Class CheckpointTest, method testUpdateRecoveryCheckpointWithCycle:
@Test
public void testUpdateRecoveryCheckpointWithCycle() throws Exception {
  Clock clock = new SystemClock();
  dag.setAttribute(com.datatorrent.api.Context.OperatorContext.STORAGE_AGENT,
      new MemoryStorageAgent());
  // Simulate a DAG with a loop which has a unifier operator
  TestGeneratorInputOperator o1 = dag.addOperator("o1", TestGeneratorInputOperator.class);
  GenericTestOperator o2 = dag.addOperator("o2", GenericTestOperator.class);
  GenericTestOperator o3 = dag.addOperator("o3", GenericTestOperator.class);
  GenericTestOperator o4 = dag.addOperator("o4", GenericTestOperator.class);
  DefaultDelayOperator d = dag.addOperator("d", DefaultDelayOperator.class);
  dag.addStream("o1.output1", o1.outport, o2.inport1);
  dag.addStream("o2.output1", o2.outport1, o3.inport1);
  dag.addStream("o3.output1", o3.outport1, o4.inport1);
  dag.addStream("o4.output1", o4.outport1, d.input);
  dag.addStream("d.output", d.output, o2.inport2);
  dag.setOperatorAttribute(o3, Context.OperatorContext.PARTITIONER,
      new StatelessPartitioner<Operator>(2));
  dag.validate();
  StreamingContainerManager dnm = new StreamingContainerManager(dag);
  PhysicalPlan plan = dnm.getPhysicalPlan();
  for (PTOperator oper : plan.getAllOperators().values()) {
    Assert.assertEquals("Initial activation windowId " + oper,
        Checkpoint.INITIAL_CHECKPOINT, oper.getRecoveryCheckpoint());
    Assert.assertEquals("Checkpoints empty " + oper,
        Collections.emptyList(), oper.checkpoints);
  }
  Checkpoint cp1 = new Checkpoint(1L, 0, 0);
  Checkpoint cp2 = new Checkpoint(2L, 0, 0);
  Map<OperatorMeta, Set<OperatorMeta>> checkpointGroups = dnm.getCheckpointGroups();
  Map<Integer, PTOperator> allOperators = plan.getAllOperators();
  for (PTOperator operator : allOperators.values()) {
    operator.setState(PTOperator.State.ACTIVE);
    operator.checkpoints.add(cp1);
    dnm.updateRecoveryCheckpoints(operator,
        new UpdateCheckpointsContext(clock, false, checkpointGroups), false);
  }
  List<PTOperator> physicalO1 = plan.getOperators(dag.getOperatorMeta("o1"));
  physicalO1.get(0).checkpoints.add(cp2);
  dnm.updateRecoveryCheckpoints(physicalO1.get(0),
      new UpdateCheckpointsContext(clock, false, checkpointGroups), false);
  Assert.assertEquals("Recovery checkpoint updated",
      cp1, physicalO1.get(0).getRecoveryCheckpoint());
}
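In both projects the tests inject org.apache.hadoop.yarn.util.Clock rather than calling System.currentTimeMillis() directly, which is what lets time-sensitive logic be exercised deterministically. A minimal sketch of the pattern; ControlledClock lives in Hadoop's YARN test utilities, and assuming it is on the classpath here:

// Production code reads time only through the Clock interface.
Clock wallClock = new SystemClock(); // delegates to System.currentTimeMillis()
long now = wallClock.getTime();

// A test can substitute a fixed, manually advanced time source.
ControlledClock testClock = new ControlledClock(new SystemClock());
testClock.setTime(100000L); // every getTime() call now returns 100000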