use of org.apache.hadoop.ozone.container.common.helpers.ChunkInfo in project ozone by apache.
the class ChunkKeyHandler method execute.
@Override
protected void execute(OzoneClient client, OzoneAddress address)
    throws IOException, OzoneClientException {
  containerOperationClient =
      new ContainerOperationClient(createOzoneConfiguration());
  xceiverClientManager = containerOperationClient.getXceiverClientManager();
  ozoneManagerClient =
      client.getObjectStore().getClientProxy().getOzoneManagerClient();
  address.ensureKeyAddress();
  JsonElement element;
  JsonObject result = new JsonObject();
  String volumeName = address.getVolumeName();
  String bucketName = address.getBucketName();
  String keyName = address.getKeyName();
  List<ContainerProtos.ChunkInfo> tempchunks = null;
  List<ChunkDetails> chunkDetailsList = new ArrayList<ChunkDetails>();
  HashSet<String> chunkPaths = new HashSet<>();
  OmKeyArgs keyArgs = new OmKeyArgs.Builder()
      .setVolumeName(volumeName)
      .setBucketName(bucketName)
      .setKeyName(keyName)
      .setRefreshPipeline(true)
      .build();
  OmKeyInfo keyInfo = ozoneManagerClient.lookupKey(keyArgs);
  // Query the key locations: the OM is asked for the containerID and
  // localID pertaining to the given key.
  List<OmKeyLocationInfo> locationInfos =
      keyInfo.getLatestVersionLocations().getBlocksLatestVersionOnly();
  // Handle a zero-sized key.
  if (locationInfos.isEmpty()) {
    System.out.println("No Key Locations Found");
    return;
  }
  ContainerLayoutVersion containerLayoutVersion =
      ContainerLayoutVersion.getConfiguredVersion(getConf());
  JsonArray responseArrayList = new JsonArray();
  for (OmKeyLocationInfo keyLocation : locationInfos) {
    ContainerChunkInfo containerChunkInfoVerbose = new ContainerChunkInfo();
    ContainerChunkInfo containerChunkInfo = new ContainerChunkInfo();
    long containerId = keyLocation.getContainerID();
    chunkPaths.clear();
    Pipeline pipeline = keyLocation.getPipeline();
    if (pipeline.getType() != HddsProtos.ReplicationType.STAND_ALONE) {
      pipeline = Pipeline.newBuilder(pipeline)
          .setReplicationConfig(StandaloneReplicationConfig.getInstance(ONE))
          .build();
    }
    xceiverClient = xceiverClientManager.acquireClientForReadData(pipeline);
    // The datanode is queried for chunk information; querying the OM, SCM
    // and datanode together yields the chunk location information.
    ContainerProtos.DatanodeBlockID datanodeBlockID =
        keyLocation.getBlockID().getDatanodeBlockIDProtobuf();
    // Do a getBlock on all nodes of the pipeline.
    HashMap<DatanodeDetails, ContainerProtos.GetBlockResponseProto> responses = null;
    try {
      responses = ContainerProtocolCalls.getBlockFromAllNodes(
          xceiverClient, datanodeBlockID, keyLocation.getToken());
    } catch (InterruptedException e) {
      LOG.error("Execution interrupted due to " + e);
      Thread.currentThread().interrupt();
      // Bail out: 'responses' is null here and cannot be iterated below.
      return;
    }
    JsonArray responseFromAllNodes = new JsonArray();
    for (Map.Entry<DatanodeDetails, ContainerProtos.GetBlockResponseProto>
        entry : responses.entrySet()) {
      JsonObject jsonObj = new JsonObject();
      if (entry.getValue() == null) {
        LOG.error("Can't execute getBlock on this node");
        continue;
      }
      tempchunks = entry.getValue().getBlockData().getChunksList();
      ContainerProtos.ContainerDataProto containerData =
          containerOperationClient.readContainer(
              keyLocation.getContainerID(), keyLocation.getPipeline());
      for (ContainerProtos.ChunkInfo chunkInfo : tempchunks) {
        String fileName = containerLayoutVersion.getChunkFile(
            new File(getChunkLocationPath(containerData.getContainerPath())),
            keyLocation.getBlockID(),
            ChunkInfo.getFromProtoBuf(chunkInfo)).toString();
        chunkPaths.add(fileName);
        ChunkDetails chunkDetails = new ChunkDetails();
        chunkDetails.setChunkName(fileName);
        chunkDetails.setChunkOffset(chunkInfo.getOffset());
        chunkDetailsList.add(chunkDetails);
      }
      containerChunkInfoVerbose.setContainerPath(containerData.getContainerPath());
      containerChunkInfoVerbose.setPipeline(keyLocation.getPipeline());
      containerChunkInfoVerbose.setChunkInfos(chunkDetailsList);
      containerChunkInfo.setFiles(chunkPaths);
      containerChunkInfo.setPipelineID(keyLocation.getPipeline().getId().getId());
      Gson gson = new GsonBuilder().create();
      if (isVerbose()) {
        element = gson.toJsonTree(containerChunkInfoVerbose);
      } else {
        element = gson.toJsonTree(containerChunkInfo);
      }
      jsonObj.addProperty("Datanode-HostName", entry.getKey().getHostName());
      jsonObj.addProperty("Datanode-IP", entry.getKey().getIpAddress());
      jsonObj.addProperty("Container-ID", containerId);
      jsonObj.addProperty("Block-ID", keyLocation.getLocalID());
      jsonObj.add("Locations", element);
      responseFromAllNodes.add(jsonObj);
    }
    // Release the client once per pipeline, after all nodes have been
    // queried (releasing inside the loop would release once per node).
    xceiverClientManager.releaseClientForReadData(xceiverClient, false);
    responseArrayList.add(responseFromAllNodes);
  }
  result.add("KeyLocations", responseArrayList);
  Gson gson2 = new GsonBuilder().setPrettyPrinting().create();
  String prettyJson = gson2.toJson(result);
  System.out.println(prettyJson);
}
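The command prints one JSON array per key location, each element describing a single datanode's getBlock result. Below is a minimal, hypothetical sketch of consuming that output with Gson; the field names mirror the jsonObj.addProperty calls above, but the sample string and the reader class itself are illustrative, not taken from the Ozone codebase:

import com.google.gson.JsonElement;
import com.google.gson.JsonObject;
import com.google.gson.JsonParser;

public class ChunkInfoOutputReader {
  public static void main(String[] args) {
    // Illustrative sample shaped like the output built above (assumed,
    // not captured from a real run).
    String json = "{\"KeyLocations\":[[{\"Datanode-HostName\":\"dn1.example.com\","
        + "\"Datanode-IP\":\"10.0.0.1\",\"Container-ID\":1,\"Block-ID\":104,"
        + "\"Locations\":{\"files\":[\"/data/hdds/chunks/1.block\"]}}]]}";
    JsonObject root = JsonParser.parseString(json).getAsJsonObject();
    for (JsonElement location : root.getAsJsonArray("KeyLocations")) {
      for (JsonElement node : location.getAsJsonArray()) {
        JsonObject obj = node.getAsJsonObject();
        System.out.printf("container %d, block %d on %s%n",
            obj.get("Container-ID").getAsLong(),
            obj.get("Block-ID").getAsLong(),
            obj.get("Datanode-HostName").getAsString());
      }
    }
  }
}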
use of org.apache.hadoop.ozone.container.common.helpers.ChunkInfo in project ozone by apache.
the class ContainerTestHelper method newWriteChunkRequestBuilder.
public static Builder newWriteChunkRequestBuilder(
    Pipeline pipeline, BlockID blockID, int datalen, int seq)
    throws IOException {
  LOG.trace("writeChunk {} (blockID={}) to pipeline={}",
      datalen, blockID, pipeline);
  ContainerProtos.WriteChunkRequestProto.Builder writeRequest =
      ContainerProtos.WriteChunkRequestProto.newBuilder();
  writeRequest.setBlockID(blockID.getDatanodeBlockIDProtobuf());
  ChunkBuffer data = getData(datalen);
  ChunkInfo info = getChunk(blockID.getLocalID(), seq, 0, datalen);
  setDataChecksum(info, data);
  writeRequest.setChunkData(info.getProtoBufMessage());
  writeRequest.setData(data.toByteString());
  Builder request = ContainerCommandRequestProto.newBuilder();
  request.setCmdType(ContainerProtos.Type.WriteChunk);
  request.setContainerID(blockID.getContainerID());
  request.setWriteChunk(writeRequest);
  request.setDatanodeUuid(pipeline.getFirstNode().getUuidString());
  return request;
}
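A hedged usage sketch: callers typically finish the builder with build() and hand the request to the datanode dispatcher. The dispatcher variable and the datalen/seq values below are assumptions for illustration, not part of the helper:

// Hypothetical test fragment building and dispatching a WriteChunk request.
ContainerCommandRequestProto writeRequest =
    ContainerTestHelper.newWriteChunkRequestBuilder(pipeline, blockID, 1024, 0)
        .build();
// 'dispatcher' is an assumed HddsDispatcher wired up elsewhere in the test.
ContainerCommandResponseProto response = dispatcher.dispatch(writeRequest, null);
Assert.assertEquals(ContainerProtos.Result.SUCCESS, response.getResult());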
use of org.apache.hadoop.ozone.container.common.helpers.ChunkInfo in project ozone by apache.
the class KeyValueHandler method handleReadChunk.
/**
 * Handle Read Chunk operation. Calls ChunkManager to process the request.
 */
ContainerCommandResponseProto handleReadChunk(
    ContainerCommandRequestProto request, KeyValueContainer kvContainer,
    DispatcherContext dispatcherContext) {
  if (!request.hasReadChunk()) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("Malformed Read Chunk request. trace ID: {}",
          request.getTraceID());
    }
    return malformedRequest(request);
  }
  ChunkBuffer data;
  try {
    BlockID blockID = BlockID.getFromProtobuf(
        request.getReadChunk().getBlockID());
    ChunkInfo chunkInfo = ChunkInfo.getFromProtoBuf(
        request.getReadChunk().getChunkData());
    Preconditions.checkNotNull(chunkInfo);
    checkContainerIsHealthy(kvContainer, blockID, Type.ReadChunk);
    BlockUtils.verifyBCSId(kvContainer, blockID);
    if (dispatcherContext == null) {
      dispatcherContext = new DispatcherContext.Builder().build();
    }
    boolean isReadChunkV0 = getReadChunkVersion(request.getReadChunk())
        .equals(ContainerProtos.ReadChunkVersion.V0);
    if (isReadChunkV0) {
      // For older clients, set ReadDataIntoSingleBuffer to true so that
      // all the data read from the chunk file is returned as a single
      // ByteString. Older clients cannot process data returned as a list
      // of ByteStrings.
      chunkInfo.setReadDataIntoSingleBuffer(true);
    }
    data = chunkManager.readChunk(kvContainer, blockID, chunkInfo,
        dispatcherContext);
    // For client reads, the client is expected to validate the checksum.
    if (dispatcherContext.isReadFromTmpFile()) {
      validateChunkChecksumData(data, chunkInfo);
    }
    metrics.incContainerBytesStats(Type.ReadChunk, chunkInfo.getLen());
  } catch (StorageContainerException ex) {
    return ContainerUtils.logAndReturnError(LOG, ex, request);
  } catch (IOException ex) {
    return ContainerUtils.logAndReturnError(LOG,
        new StorageContainerException("Read Chunk failed", ex, IO_EXCEPTION),
        request);
  }
  Preconditions.checkNotNull(data, "Chunk data is null");
  return getReadChunkResponse(request, data, byteBufferToByteString);
}
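On the client side, the V0/V1 distinction above only changes how the payload is framed. Here is a hedged sketch of coalescing a V1 response (a list of buffers) into the single ByteString a V0 reader would have received; the ReadChunkResponseProto accessor names follow the datanode client protocol but should be treated as assumptions here:

// Hedged sketch: normalize a read-chunk response to one ByteString.
ContainerProtos.ReadChunkResponseProto readChunk = response.getReadChunk();
ByteString payload;
if (readChunk.hasData()) {
  // V0: the whole chunk arrives as a single buffer.
  payload = readChunk.getData();
} else {
  // V1: the chunk arrives as a list of buffers; concatenate them.
  payload = ByteString.EMPTY;
  for (ByteString buffer : readChunk.getDataBuffers().getBuffersList()) {
    payload = payload.concat(buffer);
  }
}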
use of org.apache.hadoop.ozone.container.common.helpers.ChunkInfo in project ozone by apache.
the class KeyValueHandler method deleteBlock.
/**
 * Called by BlockDeletingService to delete all the chunks in a block
 * before proceeding to delete the block info from DB.
 */
@Override
public void deleteBlock(Container container, BlockData blockData)
    throws IOException {
  chunkManager.deleteChunks(container, blockData);
  if (LOG.isDebugEnabled()) {
    for (ContainerProtos.ChunkInfo chunkInfo : blockData.getChunks()) {
      ChunkInfo info = ChunkInfo.getFromProtoBuf(chunkInfo);
      LOG.debug("block {} chunk {} deleted", blockData.getBlockID(), info);
    }
  }
}
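For orientation, a hedged sketch of the calling side: the deleting service resolves each pending-delete block to its BlockData and delegates to this handler. The loop and variable names are illustrative, not the actual BlockDeletingService code:

// Illustrative fragment of a block-deleting loop.
for (BlockData blockData : blocksPendingDeletion) {
  try {
    handler.deleteBlock(container, blockData);
  } catch (IOException e) {
    LOG.error("Failed to delete chunks for block {}",
        blockData.getBlockID(), e);
  }
}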
use of org.apache.hadoop.ozone.container.common.helpers.ChunkInfo in project ozone by apache.
the class TestContainerPersistence method testPutBlockWithLotsOfChunks.
/**
 * Tests putting a block with multiple chunks and reading it back.
 *
 * @throws IOException
 * @throws NoSuchAlgorithmException
 */
@Test
public void testPutBlockWithLotsOfChunks() throws IOException,
    NoSuchAlgorithmException {
  final int chunkCount = 2;
  final int datalen = 1024;
  long totalSize = 0L;
  long testContainerID = getTestContainerID();
  Container container = addContainer(containerSet, testContainerID);
  BlockID blockID = ContainerTestHelper.getTestBlockID(testContainerID);
  List<ChunkInfo> chunkList = new LinkedList<>();
  for (int x = 0; x < chunkCount; x++) {
    ChunkInfo info = new ChunkInfo(
        String.format("%d.data", blockID.getLocalID()), x * datalen, datalen);
    ChunkBuffer data = getData(datalen);
    setDataChecksum(info, data);
    chunkManager.writeChunk(container, blockID, info, data,
        getDispatcherContext());
    totalSize += datalen;
    chunkList.add(info);
  }
  long bytesUsed = container.getContainerData().getBytesUsed();
  Assert.assertEquals(totalSize, bytesUsed);
  long writeBytes = container.getContainerData().getWriteBytes();
  Assert.assertEquals(chunkCount * datalen, writeBytes);
  long readCount = container.getContainerData().getReadCount();
  Assert.assertEquals(0, readCount);
  long writeCount = container.getContainerData().getWriteCount();
  Assert.assertEquals(chunkCount, writeCount);
  BlockData blockData = new BlockData(blockID);
  List<ContainerProtos.ChunkInfo> chunkProtoList = new LinkedList<>();
  for (ChunkInfo i : chunkList) {
    chunkProtoList.add(i.getProtoBufMessage());
  }
  blockData.setChunks(chunkProtoList);
  blockManager.putBlock(container, blockData);
  BlockData readBlockData = blockManager.getBlock(
      container, blockData.getBlockID());
  ChunkInfo lastChunk = chunkList.get(chunkList.size() - 1);
  ChunkInfo readChunk = ChunkInfo.getFromProtoBuf(
      readBlockData.getChunks().get(readBlockData.getChunks().size() - 1));
  Assert.assertEquals(lastChunk.getChecksumData(), readChunk.getChecksumData());
}
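The test leans on setDataChecksum to make the written chunks verifiable on read-back. A plausible sketch of what such a helper does, assuming it uses the org.apache.hadoop.ozone.common.Checksum utility; the CRC32 type and 1 MB bytes-per-checksum are assumptions, not verified against ContainerTestHelper:

// Hedged sketch: compute a checksum over the buffer and attach it to the
// ChunkInfo so the datanode can validate the data on read.
Checksum checksum =
    new Checksum(ContainerProtos.ChecksumType.CRC32, 1024 * 1024);
info.setChecksumData(checksum.computeChecksum(data));
data.rewind(); // leave the buffer readable for the subsequent writeChunk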