Use of org.apache.hadoop.ozone.common.ChunkBuffer in project ozone by apache.
From the class TestChunkManagerDummyImpl, method dummyManagerReadsAnyChunk.
@Test
public void dummyManagerReadsAnyChunk() throws Exception {
ChunkManager dummy = createTestSubject();
// The dummy implementation serves reads without requiring the chunk to have
// been written first, so a read of an arbitrary chunk must still yield a buffer.
ChunkBuffer dataRead = dummy.readChunk(getKeyValueContainer(), getBlockID(),
    getChunkInfo(), getDispatcherContext());
assertNotNull(dataRead);
}
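For orientation, ChunkBuffer is Ozone's abstraction over one or more NIO ByteBuffers. A minimal sketch of building one directly, using the same ChunkBuffer.wrap factory that appears in serialRead below; the assertion on remaining() reflects my assumption about a freshly wrapped buffer's state:

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.ozone.common.ChunkBuffer;

// Wrap an existing ByteBuffer; the bytes are shared, not copied.
ByteBuffer raw = ByteBuffer.wrap("payload".getBytes(StandardCharsets.UTF_8));
ChunkBuffer chunk = ChunkBuffer.wrap(raw);
// A freshly wrapped buffer should expose all 7 bytes between position and limit.
assert chunk.remaining() == 7;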
Use of org.apache.hadoop.ozone.common.ChunkBuffer in project ozone by apache.
From the class TestChunkUtils, method serialRead.
@Test
public void serialRead() throws Exception {
String s = "Hello World";
byte[] array = s.getBytes(UTF_8);
ChunkBuffer data = ChunkBuffer.wrap(ByteBuffer.wrap(array));
Path tempFile = Files.createTempFile(PREFIX, "serial");
try {
File file = tempFile.toFile();
long len = data.limit();
long offset = 0;
// Write the whole buffer to the file at offset 0.
ChunkUtils.writeData(file, data, offset, len, null, true);
// A buffer capacity equal to the total length yields exactly one read buffer.
ByteBuffer[] readBuffers = BufferUtils.assignByteBuffers(len, len);
ChunkUtils.readData(file, readBuffers, offset, len, null);
// There should be only one element in readBuffers
Assert.assertEquals(1, readBuffers.length);
ByteBuffer readBuffer = readBuffers[0];
assertArrayEquals(array, readBuffer.array());
assertEquals(len, readBuffer.remaining());
} catch (Exception e) {
LOG.error("Failed to read data", e);
// Fail explicitly: merely logging would let the test pass even when I/O fails.
Assert.fail("Failed to read data: " + e);
} finally {
Files.deleteIfExists(tempFile);
}
}
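A note on BufferUtils.assignByteBuffers(len, bufferCapacity): it pre-allocates enough ByteBuffers to cover len bytes in pieces of bufferCapacity each, i.e. ceil(len / bufferCapacity) buffers, which is why passing the full length as the capacity above returns exactly one buffer. A hedged sketch of the multi-buffer case, reusing the same BufferUtils helper and imports as the test above with illustrative sizes:

// 25 bytes split into 10-byte buffers should yield ceil(25 / 10) = 3 buffers.
ByteBuffer[] buffers = BufferUtils.assignByteBuffers(25, 10);
Assert.assertEquals(3, buffers.length);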
Use of org.apache.hadoop.ozone.common.ChunkBuffer in project ozone by apache.
From the class BufferPool, method releaseBuffer.
void releaseBuffer(ChunkBuffer chunkBuffer) {
// Buffers are recycled FIFO: remove from the head of the list, re-append at the tail.
final ChunkBuffer buffer = bufferList.remove(0);
// The buffer being released must be the one that was at the head of the list.
Preconditions.checkArgument(buffer == chunkBuffer);
buffer.clear();
bufferList.add(buffer);
// The index must still be non-negative before it is moved back by one.
Preconditions.checkArgument(currentBufferIndex >= 0);
currentBufferIndex--;
}
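The invariant being enforced is that bufferList acts as a FIFO ring: only the oldest buffer (the head) may be released, and it is cleared and recycled to the tail for the writer to reuse. A standalone sketch of that rotation using only java.util types, no Ozone classes:

import java.util.ArrayList;
import java.util.List;

List<String> pool = new ArrayList<>(List.of("a", "b", "c"));
// Releasing always recycles the head to the tail, as releaseBuffer does.
String released = pool.remove(0);
pool.add(released);
// pool is now [b, c, a]: "a" waits at the end until the writer wraps around.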
Use of org.apache.hadoop.ozone.common.ChunkBuffer in project ozone by apache.
From the class TestBlockDeletingService, method putChunksInBlock.
private void putChunksInBlock(int numOfChunksPerBlock, int i,
    List<ContainerProtos.ChunkInfo> chunks, ChunkBuffer buffer,
    ChunkManager chunkManager, KeyValueContainer container, BlockID blockID) {
long chunkLength = 100;
try {
for (int k = 0; k < numOfChunksPerBlock; k++) {
final String chunkName = String.format("block.%d.chunk.%d", i, k);
final long offset = k * chunkLength;
ContainerProtos.ChunkInfo info = ContainerProtos.ChunkInfo.newBuilder()
    .setChunkName(chunkName)
    .setLen(chunkLength)
    .setOffset(offset)
    .setChecksumData(Checksum.getNoChecksumDataProto())
    .build();
chunks.add(info);
ChunkInfo chunkInfo = new ChunkInfo(chunkName, offset, chunkLength);
// duplicate() gives this chunk an independent view over the shared buffer.
ChunkBuffer chunkData = buffer.duplicate(0, (int) chunkLength);
// Chunk writes are two-phase: first persist the bytes, then commit them to the block.
chunkManager.writeChunk(container, blockID, chunkInfo, chunkData, WRITE_STAGE);
chunkManager.writeChunk(container, blockID, chunkInfo, chunkData, COMMIT_STAGE);
}
} catch (IOException ex) {
LOG.warn("Putting chunks in blocks was not successful for BlockID: " + blockID, ex);
}
}
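Two details worth calling out: duplicate(0, len) hands each writeChunk call its own view over the shared buffer's bytes, and the paired writeChunk calls mirror the two-phase write path, with WRITE_STAGE persisting the data and COMMIT_STAGE committing it to the block. A small sketch of the duplicate behavior, assuming ChunkBuffer.allocate as the factory:

// duplicate() shares the backing bytes but carries its own position and limit,
// so draining the view during WRITE_STAGE leaves the original buffer untouched.
ChunkBuffer original = ChunkBuffer.allocate(100);
ChunkBuffer view = original.duplicate(0, 100);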
Use of org.apache.hadoop.ozone.common.ChunkBuffer in project ozone by apache.
From the class TestContainerPersistence, method writeChunkHelper.
private ChunkInfo writeChunkHelper(BlockID blockID) throws IOException {
final int datalen = 1024;
long testContainerID = blockID.getContainerID();
Container container = containerSet.getContainer(testContainerID);
if (container == null) {
container = addContainer(containerSet, testContainerID);
}
ChunkInfo info = getChunk(blockID.getLocalID(), 0, 0, datalen);
ChunkBuffer data = getData(datalen);
setDataChecksum(info, data);
final long commitBytesBefore = container.getContainerData().getVolume().getCommittedBytes();
chunkManager.writeChunk(container, blockID, info, data, getDispatcherContext());
final long commitBytesAfter = container.getContainerData().getVolume().getCommittedBytes();
final long commitDecrement = commitBytesBefore - commitBytesAfter;
// Did we decrement committed bytes by exactly the amount of data we wrote?
Assert.assertEquals(info.getLen(), commitDecrement);
return info;
}
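For the accounting intuition: the volume pre-reserves ("commits") space for open containers, and each chunk write is expected to release exactly the written length from that reservation. A worked example with illustrative numbers:

long commitBytesBefore = 1_048_576;                   // reserved before the write
long written = 1024;                                  // datalen in this helper
long commitBytesAfter = commitBytesBefore - written;  // 1_047_552
// commitDecrement == written, which is exactly what the assertion above checks.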