Example usage of org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.SCMBlockLocationResponse in the Apache Ozone project: class ScmBlockLocationProtocolClientSideTranslatorPB, method addSCM.
/**
 * Requests that a new SCM be added to the existing SCM HA group.
 *
 * @param request details of the SCM to add, converted to protobuf for the wire.
 * @return true if SCM reported the add as successful.
 * @throws IOException on transport failure or an error response from SCM.
 */
@Override
public boolean addSCM(AddSCMRequest request) throws IOException {
  // Wrap the protobuf payload in the generic block-location request envelope.
  final SCMBlockLocationRequest wrappedRequest =
      createSCMBlockRequest(Type.AddScm)
          .setAddScmRequestProto(request.getProtobuf())
          .build();
  // handleError() raises IOException if the response carries a failure status.
  final SCMBlockLocationResponse wrappedResponse =
      handleError(submitRequest(wrappedRequest));
  return wrappedResponse.getAddScmResponse().getSuccess();
}
Example usage of org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.SCMBlockLocationResponse in the Apache Ozone project: class ScmBlockLocationProtocolClientSideTranslatorPB, method getScmInfo.
/**
 * Fetches the cluster id and SCM id from SCM.
 *
 * @return ScmInfo populated with the cluster id and SCM id.
 * @throws IOException on transport failure or an error response from SCM.
 */
@Override
public ScmInfo getScmInfo() throws IOException {
  // The GetScmInfo request carries no fields, so the default instance suffices.
  final SCMBlockLocationRequest wrappedRequest =
      createSCMBlockRequest(Type.GetScmInfo)
          .setGetScmInfoRequest(HddsProtos.GetScmInfoRequestProto.getDefaultInstance())
          .build();
  // handleError() raises IOException if the response carries a failure status.
  final HddsProtos.GetScmInfoResponseProto info =
      handleError(submitRequest(wrappedRequest)).getGetScmInfoResponse();
  return new ScmInfo.Builder()
      .setClusterId(info.getClusterId())
      .setScmId(info.getScmId())
      .build();
}
Example usage of org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.SCMBlockLocationResponse in the Apache Ozone project: class ScmBlockLocationProtocolClientSideTranslatorPB, method deleteKeyBlocks.
/**
 * Delete the set of keys specified.
 *
 * @param keyBlocksInfoList batch of block keys to delete.
 * @return list of block deletion results, one per input block group.
 * @throws IOException if there is any failure.
 */
@Override
public List<DeleteBlockGroupResult> deleteKeyBlocks(List<BlockGroup> keyBlocksInfoList) throws IOException {
  List<KeyBlocks> keyBlocksProto = keyBlocksInfoList.stream()
      .map(BlockGroup::getProto)
      .collect(Collectors.toList());
  DeleteScmKeyBlocksRequestProto request = DeleteScmKeyBlocksRequestProto.newBuilder()
      .addAllKeyBlocks(keyBlocksProto)
      .build();
  SCMBlockLocationRequest wrapper = createSCMBlockRequest(Type.DeleteScmKeyBlocks)
      .setDeleteScmKeyBlocksRequest(request)
      .build();
  // handleError() raises IOException if the response carries a failure status.
  final SCMBlockLocationResponse wrappedResponse = handleError(submitRequest(wrapper));
  final DeleteScmKeyBlocksResponseProto resp = wrappedResponse.getDeleteScmKeyBlocksResponse();
  // Collect directly into the result list; the previous version collected into
  // a temporary list and then copied it into a second pre-sized ArrayList via
  // addAll(), which was a redundant O(n) copy.
  return resp.getResultsList().stream()
      .map(result -> new DeleteBlockGroupResult(
          result.getObjectKey(),
          DeleteBlockGroupResult.convertBlockResultProto(result.getBlockResultsList())))
      .collect(Collectors.toList());
}
Example usage of org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.SCMBlockLocationResponse in the Apache Ozone project: class ScmBlockLocationProtocolClientSideTranslatorPB, method allocateBlock.
/**
 * Asks SCM where a block should be allocated. SCM responds with the
 * set of datanodes that should be used creating this block.
 *
 * @param size - size of the block; must be positive.
 * @param num - number of blocks.
 * @param replicationConfig - replication configuration of the blocks;
 *     only STAND_ALONE and RATIS types are supported here.
 * @param owner - owner recorded for the allocated blocks.
 * @param excludeList - exclude list while allocating blocks.
 * @return allocated block accessing info (key, pipeline).
 * @throws IOException on transport failure or an error response from SCM.
 */
@Override
public List<AllocatedBlock> allocateBlock(long size, int num, ReplicationConfig replicationConfig, String owner, ExcludeList excludeList) throws IOException {
  Preconditions.checkArgument(size > 0, "block size must be greater than 0");
  final AllocateScmBlockRequestProto.Builder blockRequest =
      AllocateScmBlockRequestProto.newBuilder()
          .setSize(size)
          .setNumBlocks(num)
          .setType(replicationConfig.getReplicationType())
          .setOwner(owner)
          .setExcludeList(excludeList.getProtoBuf());
  // The replication factor lives on the concrete config subtype, so the cast
  // is chosen per replication type.
  switch (replicationConfig.getReplicationType()) {
  case STAND_ALONE:
    blockRequest.setFactor(
        ((StandaloneReplicationConfig) replicationConfig).getReplicationFactor());
    break;
  case RATIS:
    blockRequest.setFactor(
        ((RatisReplicationConfig) replicationConfig).getReplicationFactor());
    break;
  default:
    throw new IllegalArgumentException(
        "Unsupported replication type " + replicationConfig.getReplicationType());
  }
  final SCMBlockLocationRequest wrappedRequest =
      createSCMBlockRequest(Type.AllocateScmBlock)
          .setAllocateScmBlockRequest(blockRequest.build())
          .build();
  // handleError() raises IOException if the response carries a failure status.
  final AllocateScmBlockResponseProto allocated =
      handleError(submitRequest(wrappedRequest)).getAllocateScmBlockResponse();
  return allocated.getBlocksList().stream()
      .map(block -> new AllocatedBlock.Builder()
          .setContainerBlockID(ContainerBlockID.getFromProtobuf(block.getContainerBlockID()))
          .setPipeline(Pipeline.getFromProtobuf(block.getPipeline()))
          .build())
      .collect(Collectors.toList());
}
Example usage of org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.SCMBlockLocationResponse in the Apache Ozone project: class ScmBlockLocationProtocolClientSideTranslatorPB, method sortDatanodes.
/**
 * Sort the datanodes based on distance from client.
 *
 * @param nodes network names of the datanodes to sort.
 * @param clientMachine client address — presumably its network location;
 *     TODO(review) confirm expected format against callers.
 * @return sorted {@code List<DatanodeDetails>}, nearest first.
 * @throws IOException on transport failure or an error response from SCM.
 */
@Override
public List<DatanodeDetails> sortDatanodes(List<String> nodes, String clientMachine) throws IOException {
  SortDatanodesRequestProto request = SortDatanodesRequestProto.newBuilder()
      .addAllNodeNetworkName(nodes)
      .setClient(clientMachine)
      .build();
  SCMBlockLocationRequest wrapper = createSCMBlockRequest(Type.SortDatanodes)
      .setSortDatanodesRequest(request)
      .build();
  // handleError() raises IOException if the response carries a failure status.
  final SCMBlockLocationResponse wrappedResponse = handleError(submitRequest(wrapper));
  SortDatanodesResponseProto resp = wrappedResponse.getSortDatanodesResponse();
  // Collect directly into the result list; the previous version collected into
  // a temporary list and then copied it into a second pre-sized ArrayList via
  // addAll(), which was a redundant O(n) copy.
  return resp.getNodeList().stream()
      .map(DatanodeDetails::getFromProtoBuf)
      .collect(Collectors.toList());
}
Aggregations