Usage example of org.apache.hadoop.ozone.protocol.commands.CommandStatus in the Apache Ozone project:
the testCommandStatusPublisher method of the TestReportPublisher class.
/**
 * Verifies that {@link CommandStatusReportPublisher} reports only command
 * statuses that are no longer PENDING, and returns {@code null} when there
 * is nothing to report.
 */
@Test
public void testCommandStatusPublisher() throws InterruptedException {
  StateContext dummyContext = Mockito.mock(StateContext.class);
  ReportPublisher publisher = new CommandStatusReportPublisher();
  final Map<Long, CommandStatus> cmdStatusMap = new ConcurrentHashMap<>();
  when(dummyContext.getCommandStatusMap()).thenReturn(cmdStatusMap);
  publisher.setConf(config);
  ScheduledExecutorService executorService = HadoopExecutors.newScheduledThreadPool(
      1,
      new ThreadFactoryBuilder()
          .setDaemon(true)
          .setNameFormat("Unit test ReportManager Thread - %d")
          .build());
  publisher.init(dummyContext, executorService);
  // No statuses have been inserted yet, so the publisher has nothing to report.
  Assert.assertNull(((CommandStatusReportPublisher) publisher).getReport());

  // Insert two status objects into the state context map and then get the report.
  CommandStatus obj1 = CommandStatus.CommandStatusBuilder.newBuilder()
      .setCmdId(HddsIdFactory.getLongId())
      .setType(Type.deleteBlocksCommand)
      .setStatus(Status.PENDING)
      .build();
  CommandStatus obj2 = CommandStatus.CommandStatusBuilder.newBuilder()
      .setCmdId(HddsIdFactory.getLongId())
      .setType(Type.closeContainerCommand)
      .setStatus(Status.EXECUTED)
      .build();
  cmdStatusMap.put(obj1.getCmdId(), obj1);
  cmdStatusMap.put(obj2.getCmdId(), obj2);
  // Commands whose status is PENDING are not sent, so only obj2 (EXECUTED)
  // appears in the report even though two objects were inserted.
  Assert.assertEquals("Should publish report with 1 non-pending status object",
      1,
      ((CommandStatusReportPublisher) publisher).getReport().getCmdStatusCount());
  executorService.shutdown();
}
Usage example of org.apache.hadoop.ozone.protocol.commands.CommandStatus in the Apache Ozone project:
the processCmd method of the DeleteBlocksCommandHandler class.
/**
 * Processes a block-deletion command: marks the referenced blocks as
 * deleting (the physical deletion happens later in a recycling thread),
 * builds an ACK for SCM, and records the command status plus timing metrics.
 *
 * @param cmd the delete command wrapper carrying the command and its context
 */
private void processCmd(DeleteCmdInfo cmd) {
  LOG.debug("Processing block deletion command.");
  ContainerBlocksDeletionACKProto blockDeletionACK = null;
  long startTime = Time.monotonicNow();
  boolean cmdExecuted = false;
  try {
    // Move blocks to the deleting state. This is a metadata update only;
    // the actual deletion happens in another recycling thread.
    List<DeletedBlocksTransaction> containerBlocks = cmd.getCmd().blocksTobeDeleted();
    DeletedContainerBlocksSummary summary = DeletedContainerBlocksSummary.getFrom(containerBlocks);
    LOG.info("Start to delete container blocks, TXIDs={}, "
            + "numOfContainers={}, numOfBlocks={}",
        summary.getTxIDSummary(), summary.getNumOfContainers(), summary.getNumOfBlocks());
    ContainerBlocksDeletionACKProto.Builder resultBuilder =
        ContainerBlocksDeletionACKProto.newBuilder();
    // Submit one task per transaction; presize the list to avoid regrowth.
    List<Future<?>> futures = new ArrayList<>(containerBlocks.size());
    for (int i = 0; i < containerBlocks.size(); i++) {
      DeletedBlocksTransaction tx = containerBlocks.get(i);
      Future<?> future = executor.submit(new ProcessTransactionTask(tx, resultBuilder));
      futures.add(future);
    }
    // Wait for all tasks to finish before building the ACK.
    futures.forEach(f -> {
      try {
        f.get();
      } catch (InterruptedException | ExecutionException e) {
        LOG.error("task failed.", e);
        // Restore the interrupt status so callers can observe it.
        Thread.currentThread().interrupt();
      }
    });
    resultBuilder.setDnId(
        cmd.getContext().getParent().getDatanodeDetails().getUuid().toString());
    blockDeletionACK = resultBuilder.build();
    // TODO Or we should wait until the blocks are actually deleted?
    if (!containerBlocks.isEmpty()) {
      if (LOG.isDebugEnabled()) {
        LOG.debug("Sending following block deletion ACK to SCM");
        for (DeleteBlockTransactionResult result : blockDeletionACK.getResultsList()) {
          LOG.debug("{} : {}", result.getTxID(), result.getSuccess());
        }
      }
    }
    cmdExecuted = true;
  } finally {
    // Always record command status and metrics, even on failure.
    final ContainerBlocksDeletionACKProto deleteAck = blockDeletionACK;
    final boolean status = cmdExecuted;
    Consumer<CommandStatus> statusUpdater = (cmdStatus) -> {
      cmdStatus.setStatus(status);
      ((DeleteBlockCommandStatus) cmdStatus).setBlocksDeletionAck(deleteAck);
    };
    updateCommandStatus(cmd.getContext(), cmd.getCmd(), statusUpdater, LOG);
    long endTime = Time.monotonicNow();
    totalTime += endTime - startTime;
    invocationCount++;
  }
}
Usage example of org.apache.hadoop.ozone.protocol.commands.CommandStatus in the Apache Ozone project:
the getReport method of the CommandStatusReportPublisher class.
/**
 * Builds a report of all command statuses that are no longer PENDING and
 * removes the reported entries from the state-context map.
 *
 * @return the report, or {@code null} when there is nothing to report
 */
@Override
protected CommandStatusReportsProto getReport() {
  Map<Long, CommandStatus> map = this.getContext().getCommandStatusMap();
  CommandStatusReportsProto.Builder builder = CommandStatusReportsProto.newBuilder();
  // Use iterator.remove() instead of map.remove(key) while iterating, so the
  // method is safe even if the context ever supplies a non-concurrent map
  // (removing through the map during iteration would then throw
  // ConcurrentModificationException).
  Iterator<CommandStatus> iterator = map.values().iterator();
  while (iterator.hasNext()) {
    CommandStatus cmdStatus = iterator.next();
    // CommandHandler will change its status when it works on this command.
    if (!cmdStatus.getStatus().equals(Status.PENDING)) {
      builder.addCmdStatus(cmdStatus.getProtoBufMessage());
      iterator.remove();
    }
  }
  return builder.getCmdStatusCount() > 0 ? builder.build() : null;
}
Usage example of org.apache.hadoop.ozone.protocol.commands.CommandStatus in the Apache Ozone project:
the testHeartbeatWithCommandStatusReport method of the TestEndPoint class.
/**
 * Verifies that SCM commands delivered in a heartbeat response are tracked
 * in the state context's command-status map, and that only the
 * deleteBlocksCommand remains registered as PENDING after the heartbeat
 * task processes the commands.
 */
@Test
public void testHeartbeatWithCommandStatusReport() throws Exception {
  DatanodeDetails dataNode = randomDatanodeDetails();
  try (EndpointStateMachine rpcEndPoint =
      createEndpoint(SCMTestUtils.getConf(), serverAddress, 1000)) {
    // Add some scmCommands for heartbeat response.
    addScmCommands();
    SCMHeartbeatRequestProto request = SCMHeartbeatRequestProto.newBuilder()
        .setDatanodeDetails(dataNode.getProtoBufMessage())
        .setNodeReport(HddsTestUtils.createNodeReport(
            Arrays.asList(getStorageReports(dataNode.getUuid())),
            Arrays.asList(getMetadataStorageReports(dataNode.getUuid()))))
        .build();
    SCMHeartbeatResponseProto responseProto =
        rpcEndPoint.getEndPoint().sendHeartbeat(request);
    assertNotNull(responseProto);
    assertEquals(3, responseProto.getCommandsCount());
    // No status report has been sent back to SCM yet.
    assertEquals(0, scmServerImpl.getCommandStatusReportCount());

    // Send heartbeat again from the heartbeat endpoint task.
    final StateContext stateContext = heartbeatTaskHelper(serverAddress, 3000);
    Map<Long, CommandStatus> map = stateContext.getCommandStatusMap();
    assertNotNull(map);
    // Only the deleteBlocksCommand (id 3) should remain tracked as PENDING.
    assertEquals("Should have 1 object", 1, map.size());
    assertTrue(map.containsKey(3L));
    assertEquals(Type.deleteBlocksCommand, map.get(3L).getType());
    assertEquals(Status.PENDING, map.get(3L).getStatus());
    scmServerImpl.clearScmCommandRequests();
  }
}
End of aggregated usage examples.