Use of org.apache.hadoop.ozone.common.ChunkBuffer in project ozone by apache.
The class TestContainerPersistence, method testWritReadManyChunks.
/**
 * Writes many chunks of the same block into different chunk files and
 * verifies that we have that data in many files.
 *
 * @throws IOException
 */
@Test
public void testWritReadManyChunks() throws IOException {
  final int datalen = 1024;
  final int chunkCount = 1024;
  long testContainerID = getTestContainerID();
  Container container = addContainer(containerSet, testContainerID);
  BlockID blockID = ContainerTestHelper.getTestBlockID(testContainerID);
  List<ChunkInfo> chunks = new ArrayList<>(chunkCount);
  BlockData blockData = new BlockData(blockID);
  // Write chunkCount chunks, each at its own offset within the block.
  for (int x = 0; x < chunkCount; x++) {
    ChunkInfo info = getChunk(blockID.getLocalID(), x, x * datalen, datalen);
    ChunkBuffer data = getData(datalen);
    setDataChecksum(info, data);
    chunkManager.writeChunk(container, blockID, info, data,
        getDispatcherContext());
    chunks.add(info);
    blockData.addChunk(info.getProtoBufMessage());
  }
  blockManager.putBlock(container, blockData);

  KeyValueContainerData cNewData =
      (KeyValueContainerData) container.getContainerData();
  Assert.assertNotNull(cNewData);
  // Chunk files live under the container's chunks directory.
  Path dataDir = Paths.get(cNewData.getChunksPath());

  Checksum checksum = new Checksum(ChecksumType.CRC32, 1024 * 1024);
  // Read each chunk back via the ReadChunk call and verify its checksum.
  for (int x = 0; x < chunkCount; x++) {
    ChunkInfo info = chunks.get(x);
    ChunkBuffer data = chunkManager.readChunk(container, blockID, info,
        getDispatcherContext());
    ChecksumData checksumData = checksum.computeChecksum(data);
    Assert.assertEquals(info.getChecksumData(), checksumData);
  }
}
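The verification loop above is the recurring checksum pattern in these tests: recompute a ChecksumData over the ChunkBuffer returned by readChunk and compare it with the ChecksumData recorded in the ChunkInfo at write time. A minimal sketch of that pattern in isolation; the import paths are assumed from the Ozone source tree and the ChunkVerifier class name is hypothetical:

import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChecksumType;
import org.apache.hadoop.ozone.common.Checksum;
import org.apache.hadoop.ozone.common.ChecksumData;
import org.apache.hadoop.ozone.common.ChunkBuffer;
import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;

final class ChunkVerifier {
  // bytesPerChecksum must match the value used when the chunk was written
  // (1 MB in the test above).
  private final Checksum checksum = new Checksum(ChecksumType.CRC32, 1024 * 1024);

  /** Returns true if the data read back matches the checksum stored in info. */
  boolean verify(ChunkInfo info, ChunkBuffer data) throws Exception {
    ChecksumData actual = checksum.computeChecksum(data);
    return info.getChecksumData().equals(actual);
  }
}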
Use of org.apache.hadoop.ozone.common.ChunkBuffer in project ozone by apache.
The class TestChunkUtils, method concurrentReadOfSameFile.
@Test
public void concurrentReadOfSameFile() throws Exception {
  String s = "Hello World";
  byte[] array = s.getBytes(UTF_8);
  ChunkBuffer data = ChunkBuffer.wrap(ByteBuffer.wrap(array));
  Path tempFile = Files.createTempFile(PREFIX, "concurrent");
  try {
    long len = data.limit();
    long offset = 0;
    File file = tempFile.toFile();
    ChunkUtils.writeData(file, data, offset, len, null, true);
    int threads = 10;
    ExecutorService executor = new ThreadPoolExecutor(threads, threads,
        0, TimeUnit.SECONDS, new LinkedBlockingQueue<>());
    AtomicInteger processed = new AtomicInteger();
    AtomicBoolean failed = new AtomicBoolean();
    for (int i = 0; i < threads; i++) {
      final int threadNumber = i;
      executor.execute(() -> {
        try {
          ByteBuffer[] readBuffers = BufferUtils.assignByteBuffers(len, len);
          ChunkUtils.readData(file, readBuffers, offset, len, null);
          // There should be only one element in readBuffers
          Assert.assertEquals(1, readBuffers.length);
          ByteBuffer readBuffer = readBuffers[0];
          LOG.info("Read data ({}): {}", threadNumber,
              new String(readBuffer.array(), UTF_8));
          if (!Arrays.equals(array, readBuffer.array())) {
            failed.set(true);
          }
          assertEquals(len, readBuffer.remaining());
        } catch (Exception e) {
          LOG.error("Failed to read data ({})", threadNumber, e);
          failed.set(true);
        }
        processed.incrementAndGet();
      });
    }
    try {
      GenericTestUtils.waitFor(() -> processed.get() == threads,
          100, (int) TimeUnit.SECONDS.toMillis(5));
    } finally {
      executor.shutdownNow();
    }
    assertFalse(failed.get());
  } finally {
    Files.deleteIfExists(tempFile);
  }
}
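The point of the test is that concurrent readers of the same file must not interfere with one another. A JDK-only sketch of one way to get that property (an illustrative analogue, not Ozone's actual ChunkUtils implementation): FileChannel's positional read never touches a shared cursor, so a single channel can serve many threads.

import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

final class ConcurrentReadSketch {
  static void readConcurrently(Path file, int fileLen, int threads)
      throws Exception {
    ExecutorService executor = Executors.newFixedThreadPool(threads);
    try (FileChannel channel = FileChannel.open(file, StandardOpenOption.READ)) {
      for (int i = 0; i < threads; i++) {
        executor.execute(() -> {
          try {
            ByteBuffer buf = ByteBuffer.allocate(fileLen);
            // FileChannel.read(ByteBuffer, long) is a positional read: it
            // never moves the channel's shared cursor, so one channel can be
            // shared by all reader threads. A robust caller would loop until
            // the buffer is full, since a single read may return fewer bytes.
            channel.read(buf, 0);
            buf.flip();
          } catch (Exception e) {
            throw new RuntimeException(e);
          }
        });
      }
      executor.shutdown();
      executor.awaitTermination(5, TimeUnit.SECONDS);
    }
  }
}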
Use of org.apache.hadoop.ozone.common.ChunkBuffer in project ozone by apache.
The class TestContainerPersistence, method testDeleteChunk.
/**
 * Writes a chunk, deletes it, then re-reads it to make sure it is gone.
 *
 * @throws IOException
 * @throws NoSuchAlgorithmException
 */
@Test
public void testDeleteChunk() throws IOException, NoSuchAlgorithmException {
  final int datalen = 1024;
  long testContainerID = getTestContainerID();
  Container container = addContainer(containerSet, testContainerID);
  BlockID blockID = ContainerTestHelper.getTestBlockID(testContainerID);
  ChunkInfo info = getChunk(blockID.getLocalID(), 1, 0, datalen);
  ChunkBuffer data = getData(datalen);
  setDataChecksum(info, data);
  chunkManager.writeChunk(container, blockID, info, data,
      getDispatcherContext());
  chunkManager.deleteChunk(container, blockID, info);
  exception.expect(StorageContainerException.class);
  chunkManager.readChunk(container, blockID, info, getDispatcherContext());
}
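Here, exception is a JUnit 4 org.junit.rules.ExpectedException rule declared on the test class (not shown in this excerpt). On JUnit 4.13 and later the same expectation can be written inline with Assert.assertThrows; a sketch against the same fixture:

// Equivalent assertion with org.junit.Assert.assertThrows (JUnit 4.13+);
// the surrounding test fixture is assumed unchanged.
StorageContainerException thrown = Assert.assertThrows(
    StorageContainerException.class,
    () -> chunkManager.readChunk(container, blockID, info,
        getDispatcherContext()));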
Use of org.apache.hadoop.ozone.common.ChunkBuffer in project ozone by apache.
The class TestContainerPersistence, method testOverWrite.
/**
 * Writes a single chunk, overwrites it without the overwrite flag,
 * then re-tries with the overwrite flag set.
 *
 * @throws IOException
 * @throws NoSuchAlgorithmException
 */
@Test
public void testOverWrite() throws IOException, NoSuchAlgorithmException {
  final int datalen = 1024;
  long testContainerID = getTestContainerID();
  KeyValueContainer container = addContainer(containerSet, testContainerID);
  BlockID blockID = ContainerTestHelper.getTestBlockID(testContainerID);
  ChunkInfo info = getChunk(blockID.getLocalID(), 0, 0, datalen);
  ChunkBuffer data = getData(datalen);
  setDataChecksum(info, data);
  chunkManager.writeChunk(container, blockID, info, data,
      getDispatcherContext());
  data.rewind();
  // Re-write the same chunk without the overwrite flag.
  chunkManager.writeChunk(container, blockID, info, data,
      getDispatcherContext());
  data.rewind();
  // With the overwrite flag it should work now.
  info.addMetadata(OzoneConsts.CHUNK_OVERWRITE, "true");
  chunkManager.writeChunk(container, blockID, info, data,
      getDispatcherContext());
  // Only one chunk's worth of space is used on disk...
  long bytesUsed = container.getContainerData().getBytesUsed();
  Assert.assertEquals(datalen, bytesUsed);
  // ...but all three writes are counted in the write statistics.
  long bytesWrite = container.getContainerData().getWriteBytes();
  Assert.assertEquals(datalen * 3, bytesWrite);
}
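Note the data.rewind() calls between writes: each writeChunk consumes the ChunkBuffer, advancing its position to the limit, so the buffer must be rewound before it can be written again. ChunkBuffer follows java.nio.ByteBuffer position/limit semantics (it exposes limit() and rewind() in these tests); a standalone ByteBuffer sketch of those semantics:

import java.nio.ByteBuffer;
import static java.nio.charset.StandardCharsets.UTF_8;

final class RewindSketch {
  public static void main(String[] args) {
    ByteBuffer buf = ByteBuffer.wrap("chunk-data".getBytes(UTF_8));
    byte[] sink = new byte[buf.remaining()];
    buf.get(sink);                          // consuming moves position to limit
    assert buf.remaining() == 0;            // nothing left to read/write
    buf.rewind();                           // position back to 0, limit unchanged
    assert buf.remaining() == sink.length;  // fully consumable again (run with -ea)
  }
}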
Use of org.apache.hadoop.ozone.common.ChunkBuffer in project ozone by apache.
The class TestFilePerBlockStrategy, method testPartialRead.
/**
 * Tests a partial read within a single chunk.
 */
@Test
public void testPartialRead() throws Exception {
  final int datalen = 1024;
  final int start = datalen / 4;
  final int length = datalen / 2;
  KeyValueContainer container = getKeyValueContainer();
  BlockID blockID = getBlockID();
  ChunkInfo info = getChunk(blockID.getLocalID(), 0, 0, datalen);
  ChunkBuffer data = ContainerTestHelper.getData(datalen);
  setDataChecksum(info, data);
  DispatcherContext ctx = getDispatcherContext();
  ChunkManager subject = createTestSubject();
  subject.writeChunk(container, blockID, info, data, ctx);
  ChunkBuffer readData = subject.readChunk(container, blockID, info, ctx);
  // data will be a ChunkBufferImplWithByteBuffer and readData will be a
  // ChunkBufferImplWithByteBufferList. Hence, convert both to ByteStrings
  // before comparing.
  assertEquals(data.rewind().toByteString(), readData.rewind().toByteString());
  // Read back only [start, start + length) of the chunk.
  ChunkInfo info2 = getChunk(blockID.getLocalID(), 0, start, length);
  ChunkBuffer readData2 = subject.readChunk(container, blockID, info2, ctx);
  assertEquals(length, info2.getLen());
  assertEquals(data.rewind().toByteString().substring(start, start + length),
      readData2.rewind().toByteString());
}
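The expected slice is computed with ByteString.substring(from, to), where the upper bound is exclusive and the result is a view over the same backing data in the common case. A standalone sketch of that comparison idiom using only protobuf's ByteString:

import com.google.protobuf.ByteString;
import static java.nio.charset.StandardCharsets.UTF_8;

final class PartialReadSketch {
  public static void main(String[] args) {
    ByteString full = ByteString.copyFrom("0123456789ABCDEF".getBytes(UTF_8));
    int start = full.size() / 4;   // 4, mirroring datalen / 4 in the test
    int length = full.size() / 2;  // 8, mirroring datalen / 2
    // substring(from, to) uses an exclusive upper bound, matching the
    // expected-value computation in testPartialRead above.
    ByteString expected = full.substring(start, start + length);
    System.out.println(expected.toStringUtf8());  // prints "456789AB"
  }
}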