Use of org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChunkInfo in project Ozone by Apache.
The class DatanodeChunkGenerator, method writeChunk.
private void writeChunk(long stepNo) throws Exception {
  // Always reuse the same 20 fake block IDs; stepNo % 20 round-robins over them.
  DatanodeBlockID blockId = DatanodeBlockID.newBuilder()
      .setContainerID(1L).setLocalID(stepNo % 20).build();
  // stepNo / 20 counts the chunks already written to this block,
  // so (stepNo / 20) * chunkSize is the next free offset in it.
  ChunkInfo chunkInfo = ChunkInfo.newBuilder()
      .setChunkName(getPrefix() + "_testdata_chunk_" + stepNo)
      .setOffset((stepNo / 20) * chunkSize).setLen(chunkSize)
      .setChecksumData(checksumProtobuf).build();
  WriteChunkRequestProto.Builder writeChunkRequest = WriteChunkRequestProto.newBuilder()
      .setBlockID(blockId).setChunkData(chunkInfo).setData(dataToWrite);
  // Spread the load round-robin across the available datanode clients as well.
  XceiverClientSpi clientSpi = xceiverClients.get((int) (stepNo % xceiverClients.size()));
  sendWriteChunkRequest(blockId, writeChunkRequest, clientSpi);
}
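The interplay of stepNo % 20 and stepNo / 20 is the core of the generator: steps round-robin across 20 block IDs, and each full pass over the set advances the write offset by one chunk. A minimal standalone sketch of that arithmetic (the class name and chunk size below are illustrative, not part of Ozone):

// Hypothetical demo of the addressing scheme used in writeChunk above.
public class ChunkAddressingDemo {
  public static void main(String[] args) {
    final long chunkSize = 4096; // assumed value; configurable in the real tool
    for (long stepNo : new long[] {0, 1, 19, 20, 21, 40}) {
      long localId = stepNo % 20;              // which of the 20 fake blocks
      long offset = (stepNo / 20) * chunkSize; // next free offset in that block
      System.out.printf("step=%d -> localID=%d, offset=%d%n", stepNo, localId, offset);
    }
  }
}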
Use of org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChunkInfo in project Ozone by Apache.
The class TestChunkInputStream, method connectsToNewPipeline.
@Test
public void connectsToNewPipeline() throws Exception {
  // GIVEN: two distinct pipelines; the factory only serves read clients.
  Pipeline pipeline = MockPipeline.createSingleNodePipeline();
  Pipeline newPipeline = MockPipeline.createSingleNodePipeline();
  XceiverClientFactory clientFactory = mock(XceiverClientFactory.class);
  XceiverClientSpi client = mock(XceiverClientSpi.class);
  when(clientFactory.acquireClientForReadData(pipeline)).thenReturn(client);
  AtomicReference<Pipeline> pipelineRef = new AtomicReference<>(pipeline);

  ChunkInputStream subject = new ChunkInputStream(chunkInfo, null,
      clientFactory, pipelineRef::get, false, null) {
    @Override
    protected ByteBuffer[] readChunk(ChunkInfo readChunkInfo) {
      // Serve canned data instead of contacting a real datanode.
      return ByteString.copyFrom(chunkData)
          .asReadOnlyByteBufferList().toArray(new ByteBuffer[0]);
    }
  };

  try {
    // WHEN: unbuffer releases the client, then the pipeline is swapped.
    subject.unbuffer();
    pipelineRef.set(newPipeline);
    int b = subject.read();

    // THEN: the read succeeds and a client was acquired for the new pipeline.
    Assert.assertNotEquals(-1, b);
    verify(clientFactory).acquireClientForReadData(newPipeline);
  } finally {
    subject.close();
  }
}
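What the test verifies is that ChunkInputStream resolves its pipeline lazily through the supplier (pipelineRef::get) rather than capturing it at construction time. A minimal sketch of that supplier-backed indirection, independent of Ozone (all names below are illustrative):

import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Supplier;

public class SupplierRefreshDemo {
  public static void main(String[] args) {
    AtomicReference<String> ref = new AtomicReference<>("pipeline-1");
    Supplier<String> current = ref::get; // handed to the long-lived reader once
    System.out.println(current.get());   // pipeline-1
    ref.set("pipeline-2");                // refresh happens elsewhere
    System.out.println(current.get());   // pipeline-2; the reader needed no changes
  }
}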
Use of org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChunkInfo in project Ozone by Apache.
The class TestContainerCommandRequestMessage, method newPutSmallFile.
static ContainerCommandRequestProto newPutSmallFile(BlockID blockID, ByteString data) {
  final BlockData.Builder blockData = BlockData.newBuilder()
      .setBlockID(blockID.getDatanodeBlockIDProtobuf());
  final PutBlockRequestProto.Builder putBlockRequest =
      PutBlockRequestProto.newBuilder().setBlockData(blockData);
  final KeyValue keyValue = KeyValue.newBuilder()
      .setKey("OverWriteRequested").setValue("true").build();
  final ChunkInfo chunk = ChunkInfo.newBuilder()
      .setChunkName(blockID.getLocalID() + "_chunk").setOffset(0).setLen(data.size())
      .addMetadata(keyValue).setChecksumData(checksum(data).getProtoBufMessage()).build();
  final PutSmallFileRequestProto putSmallFileRequest = PutSmallFileRequestProto.newBuilder()
      .setChunkInfo(chunk).setBlock(putBlockRequest).setData(data).build();
  return ContainerCommandRequestProto.newBuilder()
      .setCmdType(Type.PutSmallFile).setContainerID(blockID.getContainerID())
      .setDatanodeUuid(UUID.randomUUID().toString()).setPutSmallFile(putSmallFileRequest).build();
}
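The checksum(data) call is a local helper of this test class that the listing does not show. A hedged sketch of what such a helper can look like using Ozone's Checksum utility; the CRC32 type and 1 MB bytes-per-checksum are assumptions here, and import paths may differ between Ozone versions:

import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChecksumType;
import org.apache.hadoop.ozone.common.Checksum;
import org.apache.hadoop.ozone.common.ChecksumData;
import org.apache.hadoop.ozone.common.OzoneChecksumException;

static ChecksumData checksum(ByteString data) throws OzoneChecksumException {
  // Assumed parameters: CRC32 checksums computed over 1 MB windows.
  return new Checksum(ChecksumType.CRC32, 1024 * 1024)
      .computeChecksum(data.toByteArray());
}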
Use of org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChunkInfo in project Ozone by Apache.
The class TestContainerCommandRequestMessage, method newWriteChunk.
static ContainerCommandRequestProto newWriteChunk(BlockID blockID, ByteString data) {
  final ChunkInfo chunk = ChunkInfo.newBuilder()
      .setChunkName(blockID.getLocalID() + "_chunk_" + 1).setOffset(0).setLen(data.size())
      .setChecksumData(checksum(data).getProtoBufMessage()).build();
  final WriteChunkRequestProto.Builder writeChunkRequest = WriteChunkRequestProto.newBuilder()
      .setBlockID(blockID.getDatanodeBlockIDProtobuf()).setChunkData(chunk).setData(data);
  return ContainerCommandRequestProto.newBuilder()
      .setCmdType(Type.WriteChunk).setContainerID(blockID.getContainerID())
      .setDatanodeUuid(UUID.randomUUID().toString()).setWriteChunk(writeChunkRequest).build();
}
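Both helpers produce a self-contained ContainerCommandRequestProto; the difference is only the command type and payload wrapper (PutSmallFile carries block metadata plus data in one round trip, while WriteChunk carries the chunk alone). A hedged sketch of submitting such a request in a test, assuming an already-connected XceiverClientSpi; the surrounding variables are assumptions, not shown in the listing:

// 'client', 'blockID' and 'data' are assumed to exist in the enclosing test.
ContainerCommandRequestProto request = newWriteChunk(blockID, data);
ContainerCommandResponseProto response = client.sendCommand(request);
if (response.getResult() != ContainerProtos.Result.SUCCESS) {
  throw new IOException("WriteChunk failed: " + response.getMessage());
}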
Use of org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChunkInfo in project Ozone by Apache.
The class TestBlockInputStream, method testRefreshOnReadFailure.
@Test
public void testRefreshOnReadFailure() throws Exception {
  // GIVEN
  BlockID blockID = new BlockID(new ContainerBlockID(1, 1));
  Pipeline pipeline = MockPipeline.createSingleNodePipeline();
  Pipeline newPipeline = MockPipeline.createSingleNodePipeline();
  final int len = 200;
  final ChunkInputStream stream = mock(ChunkInputStream.class);
  // First read fails with CONTAINER_NOT_FOUND; the retry succeeds.
  when(stream.read(any(), anyInt(), anyInt()))
      .thenThrow(new StorageContainerException("test", CONTAINER_NOT_FOUND))
      .thenReturn(len);
  when(stream.getRemaining()).thenReturn((long) len);
  when(refreshPipeline.apply(blockID)).thenReturn(newPipeline);

  BlockInputStream subject = new DummyBlockInputStream(blockID, blockSize,
      pipeline, null, false, null, refreshPipeline, chunks, null) {
    @Override
    protected ChunkInputStream createChunkInputStream(ChunkInfo chunkInfo) {
      return stream;
    }
  };

  try {
    subject.initialize();
    // WHEN
    byte[] b = new byte[len];
    int bytesRead = subject.read(b, 0, len);
    // THEN: the failed read triggered exactly one pipeline refresh.
    Assert.assertEquals(len, bytesRead);
    verify(refreshPipeline).apply(blockID);
  } finally {
    subject.close();
  }
}
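The refresh behaviour hinges on Mockito's consecutive stubbing: chaining thenThrow(...).thenReturn(...) makes the first read(...) call fail and every later one succeed, which is exactly the failure-then-retry sequence BlockInputStream must survive. A minimal standalone illustration of the idiom (the names below are illustrative):

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import java.util.concurrent.Callable;

public class ConsecutiveStubDemo {
  public static void main(String[] args) throws Exception {
    @SuppressWarnings("unchecked")
    Callable<Integer> source = mock(Callable.class);
    when(source.call())
        .thenThrow(new RuntimeException("transient failure"))
        .thenReturn(200);
    try {
      source.call(); // first call: throws
    } catch (RuntimeException e) {
      System.out.println("retrying after: " + e.getMessage());
    }
    System.out.println("second call returns " + source.call()); // 200
  }
}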