Use of org.apache.hadoop.ozone.common.ChecksumData in project ozone by apache.
The class DatanodeChunkValidator, method validateChunk.
private void validateChunk(long stepNo) throws Exception {
  ContainerCommandRequestProto request = createReadChunkRequest(stepNo);

  timer.time(() -> {
    try {
      ContainerCommandResponseProto response =
          xceiverClient.sendCommand(request);

      ChecksumData checksumOfChunk = computeChecksum(response);

      if (!checksumReference.equals(checksumOfChunk)) {
        throw new IllegalStateException(
            "Reference (=first) message checksum doesn't match "
                + "with checksum of chunk "
                + response.getReadChunk().getChunkData().getChunkName());
      }
    } catch (IOException e) {
      LOG.warn("Could not read chunk due to IOException: ", e);
    }
  });
}
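For context, a minimal sketch of what the computeChecksum helper used above could look like; the getData() accessor and the CRC32 / 1 MB checksum parameters are assumptions for illustration, not taken from the snippet.

private ChecksumData computeChecksum(ContainerCommandResponseProto response)
    throws OzoneChecksumException {
  // Assumed: the ReadChunk response carries the chunk payload as a ByteString.
  byte[] data = response.getReadChunk().getData().toByteArray();
  // Assumed checksum parameters; a real validator would reuse the same
  // Checksum configuration that produced the reference checksum.
  Checksum checksum = new Checksum(ChecksumType.CRC32, 1024 * 1024);
  return checksum.computeChecksum(data);
}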
Use of org.apache.hadoop.ozone.common.ChecksumData in project ozone by apache.
The class KeyValueContainerCheck, method verifyChecksum.
private static void verifyChecksum(BlockData block,
    ContainerProtos.ChunkInfo chunk, File chunkFile,
    ContainerLayoutVersion layout, DataTransferThrottler throttler,
    Canceler canceler) throws IOException {
  ChecksumData checksumData =
      ChecksumData.getFromProtoBuf(chunk.getChecksumData());
  int checksumCount = checksumData.getChecksums().size();
  int bytesPerChecksum = checksumData.getBytesPerChecksum();
  Checksum cal = new Checksum(checksumData.getChecksumType(),
      bytesPerChecksum);
  ByteBuffer buffer = ByteBuffer.allocate(bytesPerChecksum);
  long bytesRead = 0;

  try (FileChannel channel = FileChannel.open(chunkFile.toPath(),
      ChunkUtils.READ_OPTIONS, ChunkUtils.NO_ATTRIBUTES)) {
    if (layout == ContainerLayoutVersion.FILE_PER_BLOCK) {
      channel.position(chunk.getOffset());
    }
    for (int i = 0; i < checksumCount; i++) {
      // limit last read for FILE_PER_BLOCK, to avoid reading next chunk
      if (layout == ContainerLayoutVersion.FILE_PER_BLOCK
          && i == checksumCount - 1
          && chunk.getLen() % bytesPerChecksum != 0) {
        buffer.limit((int) (chunk.getLen() % bytesPerChecksum));
      }

      int v = channel.read(buffer);
      if (v == -1) {
        break;
      }
      bytesRead += v;
      buffer.flip();

      throttler.throttle(v, canceler);

      ByteString expected = checksumData.getChecksums().get(i);
      ByteString actual = cal.computeChecksum(buffer)
          .getChecksums().get(0);
      if (!expected.equals(actual)) {
        throw new OzoneChecksumException(String.format(
            "Inconsistent read for chunk=%s"
                + " checksum item %d"
                + " expected checksum %s"
                + " actual checksum %s"
                + " for block %s",
            ChunkInfo.getFromProtoBuf(chunk),
            i,
            Arrays.toString(expected.toByteArray()),
            Arrays.toString(actual.toByteArray()),
            block.getBlockID()));
      }
    }
    if (bytesRead != chunk.getLen()) {
      throw new OzoneChecksumException(String.format(
          "Inconsistent read for chunk=%s expected length=%d"
              + " actual length=%d for block %s",
          chunk.getChunkName(), chunk.getLen(),
          bytesRead, block.getBlockID()));
    }
  }
}
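To make the loop bounds concrete, a small worked example with assumed numbers (not taken from the snippet): a chunk whose length is not a multiple of bytesPerChecksum gets one final, shorter read, which is exactly what the buffer.limit(...) adjustment above handles for FILE_PER_BLOCK layouts.

long chunkLen = 3_500_000;                 // assumed chunk length in bytes
int bytesPerChecksum = 1024 * 1024;        // assumed 1 MB per checksum entry
int checksumCount =
    (int) ((chunkLen + bytesPerChecksum - 1) / bytesPerChecksum);  // 4 entries
long lastRead = chunkLen % bytesPerChecksum;                       // 354_272 bytes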
Use of org.apache.hadoop.ozone.common.ChecksumData in project ozone by apache.
The class TestKeyValueContainerIntegrityChecks, method createContainerWithBlocks.
/**
* Creates a container with normal and deleted blocks.
* First it will insert normal blocks, and then it will insert
* deleted blocks.
*/
protected KeyValueContainer createContainerWithBlocks(long containerId,
    int normalBlocks, int deletedBlocks) throws Exception {
  String strBlock = "block";
  String strChunk = "-chunkFile";
  long totalBlocks = normalBlocks + deletedBlocks;
  int bytesPerChecksum = 2 * UNIT_LEN;
  Checksum checksum = new Checksum(ContainerProtos.ChecksumType.SHA256,
      bytesPerChecksum);
  byte[] chunkData = RandomStringUtils.randomAscii(CHUNK_LEN).getBytes(UTF_8);
  ChecksumData checksumData = checksum.computeChecksum(chunkData);
  DispatcherContext writeStage = new DispatcherContext.Builder()
      .setStage(DispatcherContext.WriteChunkStage.WRITE_DATA).build();
  DispatcherContext commitStage = new DispatcherContext.Builder()
      .setStage(DispatcherContext.WriteChunkStage.COMMIT_DATA).build();

  KeyValueContainerData containerData = new KeyValueContainerData(containerId,
      containerLayoutTestInfo.getLayout(),
      (long) CHUNKS_PER_BLOCK * CHUNK_LEN * totalBlocks,
      UUID.randomUUID().toString(), UUID.randomUUID().toString());
  KeyValueContainer container = new KeyValueContainer(containerData, conf);
  container.create(volumeSet, new RoundRobinVolumeChoosingPolicy(),
      UUID.randomUUID().toString());

  try (ReferenceCountedDB metadataStore = BlockUtils.getDB(containerData,
      conf)) {
    assertNotNull(containerData.getChunksPath());
    File chunksPath = new File(containerData.getChunksPath());
    containerLayoutTestInfo.validateFileCount(chunksPath, 0, 0);

    List<ContainerProtos.ChunkInfo> chunkList = new ArrayList<>();
    for (int i = 0; i < totalBlocks; i++) {
      BlockID blockID = new BlockID(containerId, i);
      BlockData blockData = new BlockData(blockID);

      chunkList.clear();
      for (long chunkCount = 0; chunkCount < CHUNKS_PER_BLOCK; chunkCount++) {
        String chunkName = strBlock + i + strChunk + chunkCount;
        long offset = chunkCount * CHUNK_LEN;
        ChunkInfo info = new ChunkInfo(chunkName, offset, CHUNK_LEN);
        info.setChecksumData(checksumData);
        chunkList.add(info.getProtoBufMessage());
        chunkManager.writeChunk(container, blockID, info,
            ByteBuffer.wrap(chunkData), writeStage);
        chunkManager.writeChunk(container, blockID, info,
            ByteBuffer.wrap(chunkData), commitStage);
      }
      blockData.setChunks(chunkList);

      // normal key
      String key = Long.toString(blockID.getLocalID());
      if (i >= normalBlocks) {
        // deleted key
        key = OzoneConsts.DELETING_KEY_PREFIX + blockID.getLocalID();
      }
      metadataStore.getStore().getBlockDataTable().put(key, blockData);
    }

    containerLayoutTestInfo.validateFileCount(chunksPath, totalBlocks,
        totalBlocks * CHUNKS_PER_BLOCK);
  }

  return container;
}
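A hedged usage sketch of this helper in a test body; the container id and the normal/deleted block counts are arbitrary values chosen for illustration.

// Build a container with 5 normal and 5 deleted blocks, then inspect it.
KeyValueContainer container = createContainerWithBlocks(1L, 5, 5);
KeyValueContainerData containerData = container.getContainerData();
assertNotNull(containerData.getChunksPath());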
Use of org.apache.hadoop.ozone.common.ChecksumData in project ozone by apache.
The class ContainerProtocolCalls, method writeSmallFile.
/**
 * Allows writing a small file using a single RPC. It takes the container
 * name, the block name, and the data to write, and sends it all to the
 * container in one RPC. This API is designed for files smaller than 1 MB.
 *
 * @param client - client that communicates with the container.
 * @param blockID - ID of the block
 * @param data - Data to be written into the container.
 * @param token a token for this block (may be null)
 * @return container protocol writeSmallFile response
 * @throws IOException
 */
public static PutSmallFileResponseProto writeSmallFile(
    XceiverClientSpi client, BlockID blockID, byte[] data,
    Token<OzoneBlockTokenIdentifier> token) throws IOException {

  BlockData containerBlockData = BlockData.newBuilder()
      .setBlockID(blockID.getDatanodeBlockIDProtobuf())
      .build();
  PutBlockRequestProto.Builder createBlockRequest =
      PutBlockRequestProto.newBuilder()
          .setBlockData(containerBlockData);

  KeyValue keyValue = KeyValue.newBuilder()
      .setKey("OverWriteRequested")
      .setValue("true")
      .build();

  Checksum checksum = new Checksum(ChecksumType.CRC32, 256);
  final ChecksumData checksumData = checksum.computeChecksum(data);
  ChunkInfo chunk = ChunkInfo.newBuilder()
      .setChunkName(blockID.getLocalID() + "_chunk")
      .setOffset(0)
      .setLen(data.length)
      .addMetadata(keyValue)
      .setChecksumData(checksumData.getProtoBufMessage())
      .build();

  PutSmallFileRequestProto putSmallFileRequest =
      PutSmallFileRequestProto.newBuilder()
          .setChunkInfo(chunk)
          .setBlock(createBlockRequest)
          .setData(ByteString.copyFrom(data))
          .build();

  String id = client.getPipeline().getFirstNode().getUuidString();
  ContainerCommandRequestProto.Builder builder =
      ContainerCommandRequestProto.newBuilder()
          .setCmdType(Type.PutSmallFile)
          .setContainerID(blockID.getContainerID())
          .setDatanodeUuid(id)
          .setPutSmallFile(putSmallFileRequest);
  if (token != null) {
    builder.setEncodedToken(token.encodeToUrlString());
  }
  ContainerCommandRequestProto request = builder.build();
  ContainerCommandResponseProto response =
      client.sendCommand(request, getValidatorList());
  return response.getPutSmallFile();
}
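A hedged usage sketch; the xceiver client and block ID are assumed to come from the caller's context, and passing null for the token skips block-token authentication.

// Write a small payload to the datanode in a single PutSmallFile RPC.
byte[] payload = "small file payload".getBytes(StandardCharsets.UTF_8);
PutSmallFileResponseProto reply =
    ContainerProtocolCalls.writeSmallFile(client, blockID, payload, null);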
Use of org.apache.hadoop.ozone.common.ChecksumData in project ozone by apache.
The class TestContainerPersistence, method testWritReadManyChunks.
/**
* Writes many chunks of the same block into different chunk files and
* verifies that we have that data in many files.
*
* @throws IOException
* @throws NoSuchAlgorithmException
*/
@Test
public void testWritReadManyChunks() throws IOException {
  final int datalen = 1024;
  final int chunkCount = 1024;

  long testContainerID = getTestContainerID();
  Container container = addContainer(containerSet, testContainerID);

  BlockID blockID = ContainerTestHelper.getTestBlockID(testContainerID);
  List<ChunkInfo> chunks = new ArrayList<>(chunkCount);
  BlockData blockData = new BlockData(blockID);
  for (int x = 0; x < chunkCount; x++) {
    ChunkInfo info = getChunk(blockID.getLocalID(), x, x * datalen, datalen);
    ChunkBuffer data = getData(datalen);
    setDataChecksum(info, data);
    chunkManager.writeChunk(container, blockID, info, data,
        getDispatcherContext());
    chunks.add(info);
    blockData.addChunk(info.getProtoBufMessage());
  }
  blockManager.putBlock(container, blockData);

  KeyValueContainerData cNewData =
      (KeyValueContainerData) container.getContainerData();
  Assert.assertNotNull(cNewData);
  Path dataDir = Paths.get(cNewData.getChunksPath());

  // Read chunk via file system and verify.
  Checksum checksum = new Checksum(ChecksumType.CRC32, 1024 * 1024);

  // Read chunk via ReadChunk call.
  for (int x = 0; x < chunkCount; x++) {
    ChunkInfo info = chunks.get(x);
    ChunkBuffer data = chunkManager.readChunk(container, blockID, info,
        getDispatcherContext());
    ChecksumData checksumData = checksum.computeChecksum(data);
    Assert.assertEquals(info.getChecksumData(), checksumData);
  }
}
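For completeness, a hypothetical version of the setDataChecksum helper the write loop above relies on (the actual helper lives in the test utilities); the CRC32 / 1 MB parameters and the trailing rewind are assumptions.

static void setDataChecksum(ChunkInfo info, ChunkBuffer data)
    throws OzoneChecksumException {
  Checksum checksum = new Checksum(ChecksumType.CRC32, 1024 * 1024);
  // Attach the computed checksum so the later readChunk comparison can use it.
  info.setChecksumData(checksum.computeChecksum(data));
  // Assumed: rewind so the subsequent writeChunk sees the full buffer again.
  data.rewind();
}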