Use of org.apache.hadoop.ozone.common.Checksum in project ozone by apache.
The class TestKeyValueContainerIntegrityChecks, method createContainerWithBlocks.
/**
* Creates a container with normal and deleted blocks.
* First it will insert normal blocks, and then it will insert
* deleted blocks.
*/
protected KeyValueContainer createContainerWithBlocks(long containerId,
    int normalBlocks, int deletedBlocks) throws Exception {
  String strBlock = "block";
  String strChunk = "-chunkFile";
  long totalBlocks = normalBlocks + deletedBlocks;
  int bytesPerChecksum = 2 * UNIT_LEN;
  Checksum checksum = new Checksum(ContainerProtos.ChecksumType.SHA256,
      bytesPerChecksum);
  byte[] chunkData = RandomStringUtils.randomAscii(CHUNK_LEN).getBytes(UTF_8);
  ChecksumData checksumData = checksum.computeChecksum(chunkData);
  DispatcherContext writeStage = new DispatcherContext.Builder()
      .setStage(DispatcherContext.WriteChunkStage.WRITE_DATA)
      .build();
  DispatcherContext commitStage = new DispatcherContext.Builder()
      .setStage(DispatcherContext.WriteChunkStage.COMMIT_DATA)
      .build();
  KeyValueContainerData containerData = new KeyValueContainerData(containerId,
      containerLayoutTestInfo.getLayout(),
      (long) CHUNKS_PER_BLOCK * CHUNK_LEN * totalBlocks,
      UUID.randomUUID().toString(), UUID.randomUUID().toString());
  KeyValueContainer container = new KeyValueContainer(containerData, conf);
  container.create(volumeSet, new RoundRobinVolumeChoosingPolicy(),
      UUID.randomUUID().toString());
  try (ReferenceCountedDB metadataStore = BlockUtils.getDB(containerData, conf)) {
    assertNotNull(containerData.getChunksPath());
    File chunksPath = new File(containerData.getChunksPath());
    containerLayoutTestInfo.validateFileCount(chunksPath, 0, 0);
    List<ContainerProtos.ChunkInfo> chunkList = new ArrayList<>();
    for (int i = 0; i < totalBlocks; i++) {
      BlockID blockID = new BlockID(containerId, i);
      BlockData blockData = new BlockData(blockID);
      chunkList.clear();
      for (long chunkCount = 0; chunkCount < CHUNKS_PER_BLOCK; chunkCount++) {
        String chunkName = strBlock + i + strChunk + chunkCount;
        long offset = chunkCount * CHUNK_LEN;
        ChunkInfo info = new ChunkInfo(chunkName, offset, CHUNK_LEN);
        info.setChecksumData(checksumData);
        chunkList.add(info.getProtoBufMessage());
        chunkManager.writeChunk(container, blockID, info,
            ByteBuffer.wrap(chunkData), writeStage);
        chunkManager.writeChunk(container, blockID, info,
            ByteBuffer.wrap(chunkData), commitStage);
      }
      blockData.setChunks(chunkList);
      // normal key
      String key = Long.toString(blockID.getLocalID());
      if (i >= normalBlocks) {
        // deleted key
        key = OzoneConsts.DELETING_KEY_PREFIX + blockID.getLocalID();
      }
      metadataStore.getStore().getBlockDataTable().put(key, blockData);
    }
    containerLayoutTestInfo.validateFileCount(chunksPath, totalBlocks,
        totalBlocks * CHUNKS_PER_BLOCK);
  }
  return container;
}
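The only thing that distinguishes a deleted block from a normal one above is the metadata key it is stored under. A minimal sketch of that naming convention; the helper and its name are our own illustration, not an Ozone API:

import org.apache.hadoop.ozone.OzoneConsts;

// Hypothetical helper: live blocks are keyed by their local ID, blocks
// pending deletion by the same ID behind the deleting prefix.
static String blockKey(long localId, boolean deleting) {
  return deleting
      ? OzoneConsts.DELETING_KEY_PREFIX + localId
      : Long.toString(localId);
}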
Use of org.apache.hadoop.ozone.common.Checksum in project ozone by apache.
The class TestBlockInputStream, method setup.
@Before
public void setup() throws Exception {
  BlockID blockID = new BlockID(new ContainerBlockID(1, 1));
  checksum = new Checksum(ChecksumType.NONE, CHUNK_SIZE);
  createChunkList(5);
  blockStream = new DummyBlockInputStream(blockID, blockSize, null, null,
      false, null, refreshPipeline, chunks, chunkDataMap);
}
Use of org.apache.hadoop.ozone.common.Checksum in project ozone by apache.
The class ContainerProtocolCalls, method writeSmallFile.
/**
 * Allows writing a small file using a single RPC. It takes the container
 * name, the block name, and the data to write, and sends all of it to the
 * container in one RPC. This API is designed for files smaller than 1 MB.
 *
 * @param client - client that communicates with the container.
 * @param blockID - ID of the block
 * @param data - data to be written into the container.
 * @param token a token for this block (may be null)
 * @return container protocol writeSmallFile response
 * @throws IOException
 */
public static PutSmallFileResponseProto writeSmallFile(XceiverClientSpi client,
    BlockID blockID, byte[] data, Token<OzoneBlockTokenIdentifier> token)
    throws IOException {
  BlockData containerBlockData = BlockData.newBuilder()
      .setBlockID(blockID.getDatanodeBlockIDProtobuf())
      .build();
  PutBlockRequestProto.Builder createBlockRequest =
      PutBlockRequestProto.newBuilder()
          .setBlockData(containerBlockData);
  KeyValue keyValue = KeyValue.newBuilder()
      .setKey("OverWriteRequested")
      .setValue("true")
      .build();
  Checksum checksum = new Checksum(ChecksumType.CRC32, 256);
  final ChecksumData checksumData = checksum.computeChecksum(data);
  ChunkInfo chunk = ChunkInfo.newBuilder()
      .setChunkName(blockID.getLocalID() + "_chunk")
      .setOffset(0)
      .setLen(data.length)
      .addMetadata(keyValue)
      .setChecksumData(checksumData.getProtoBufMessage())
      .build();
  PutSmallFileRequestProto putSmallFileRequest =
      PutSmallFileRequestProto.newBuilder()
          .setChunkInfo(chunk)
          .setBlock(createBlockRequest)
          .setData(ByteString.copyFrom(data))
          .build();
  String id = client.getPipeline().getFirstNode().getUuidString();
  ContainerCommandRequestProto.Builder builder =
      ContainerCommandRequestProto.newBuilder()
          .setCmdType(Type.PutSmallFile)
          .setContainerID(blockID.getContainerID())
          .setDatanodeUuid(id)
          .setPutSmallFile(putSmallFileRequest);
  if (token != null) {
    builder.setEncodedToken(token.encodeToUrlString());
  }
  ContainerCommandRequestProto request = builder.build();
  ContainerCommandResponseProto response =
      client.sendCommand(request, getValidatorList());
  return response.getPutSmallFile();
}
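A minimal caller sketch for the method above. The client and block setup are assumed to exist already, and passing a null token leans on the documented "may be null" contract; the wrapper and its name are hypothetical:

import java.io.IOException;
import org.apache.hadoop.hdds.client.BlockID;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.PutSmallFileResponseProto;
import org.apache.hadoop.hdds.scm.XceiverClientSpi;
import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls;

// Hypothetical wrapper; acquiring the client and allocating the block are
// out of scope here.
static PutSmallFileResponseProto putSmallFile(XceiverClientSpi client,
    BlockID blockID, byte[] payload) throws IOException {
  // A null token is accepted per the javadoc above ("may be null").
  return ContainerProtocolCalls.writeSmallFile(client, blockID, payload, null);
}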
Use of org.apache.hadoop.ozone.common.Checksum in project ozone by apache.
The class TestContainerPersistence, method testWritReadManyChunks.
/**
 * Writes many chunks of the same block into different chunk files and
 * verifies that the data ends up in that many files.
 *
 * @throws IOException
 */
@Test
public void testWritReadManyChunks() throws IOException {
  final int datalen = 1024;
  final int chunkCount = 1024;
  long testContainerID = getTestContainerID();
  Container container = addContainer(containerSet, testContainerID);
  BlockID blockID = ContainerTestHelper.getTestBlockID(testContainerID);
  List<ChunkInfo> chunks = new ArrayList<>(chunkCount);
  BlockData blockData = new BlockData(blockID);
  for (int x = 0; x < chunkCount; x++) {
    ChunkInfo info = getChunk(blockID.getLocalID(), x, x * datalen, datalen);
    ChunkBuffer data = getData(datalen);
    setDataChecksum(info, data);
    chunkManager.writeChunk(container, blockID, info, data,
        getDispatcherContext());
    chunks.add(info);
    blockData.addChunk(info.getProtoBufMessage());
  }
  blockManager.putBlock(container, blockData);
  KeyValueContainerData cNewData =
      (KeyValueContainerData) container.getContainerData();
  Assert.assertNotNull(cNewData);
  Path dataDir = Paths.get(cNewData.getChunksPath());
  Checksum checksum = new Checksum(ChecksumType.CRC32, 1024 * 1024);
  // Read each chunk back via a ReadChunk call and verify its checksum.
  for (int x = 0; x < chunkCount; x++) {
    ChunkInfo info = chunks.get(x);
    ChunkBuffer data = chunkManager.readChunk(container, blockID, info,
        getDispatcherContext());
    ChecksumData checksumData = checksum.computeChecksum(data);
    Assert.assertEquals(info.getChecksumData(), checksumData);
  }
}
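The verification idiom above boils down to recomputing a ChecksumData over the bytes read back and comparing it, by value equality, with the one recorded at write time. A small self-contained sketch of that round trip, using only the Checksum API exercised in these examples:

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChecksumType;
import org.apache.hadoop.ozone.common.Checksum;
import org.apache.hadoop.ozone.common.ChecksumData;

// Sketch only: computeChecksum is declared to throw an IOException subtype,
// hence the throws clause.
static void checksumRoundTrip() throws IOException {
  Checksum checksum = new Checksum(ChecksumType.CRC32, 1024 * 1024);
  byte[] payload = "chunk payload".getBytes(StandardCharsets.UTF_8);
  ChecksumData written = checksum.computeChecksum(payload);  // at write time
  ChecksumData readBack = checksum.computeChecksum(payload); // after reading
  // ChecksumData implements value equality, which is what the
  // Assert.assertEquals call in the test relies on.
  if (!written.equals(readBack)) {
    throw new IllegalStateException("checksum mismatch");
  }
}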
Use of org.apache.hadoop.ozone.common.Checksum in project ozone by apache.
The class DatanodeChunkGenerator, method runTest.
private void runTest() throws IOException {
  timer = getMetrics().timer("chunk-write");
  byte[] data = RandomStringUtils.randomAscii(chunkSize)
      .getBytes(StandardCharsets.UTF_8);
  dataToWrite = ByteString.copyFrom(data);
  Checksum checksum = new Checksum(ChecksumType.CRC32, chunkSize);
  checksumProtobuf = checksum.computeChecksum(data).getProtoBufMessage();
  runTests(this::writeChunk);
}
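Because every chunk written by the generator carries the same random payload, the checksum is computed once and its protobuf form reused in each chunk's metadata. A hedged sketch of that reuse, borrowing the ChunkInfo builder calls from writeSmallFile above; chunkIndex and the chunk name are illustrative assumptions:

// Illustrative only: chunkIndex is an assumed loop variable; the builder
// calls mirror those used in writeSmallFile above.
ContainerProtos.ChunkInfo chunkProto = ContainerProtos.ChunkInfo.newBuilder()
    .setChunkName("generated_chunk_" + chunkIndex)
    .setOffset((long) chunkIndex * chunkSize)
    .setLen(chunkSize)
    .setChecksumData(checksumProtobuf)
    .build();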