Search in sources :

Example 11 with ChunkBuffer

use of org.apache.hadoop.ozone.common.ChunkBuffer in project ozone by apache.

The method testWritReadManyChunks from the class TestContainerPersistence.

/**
 * Writes many chunks of the same block and verifies that each chunk can be
 * read back via the ReadChunk call and that its checksum matches the one
 * recorded at write time.
 *
 * @throws IOException if writing or reading a chunk fails
 */
@Test
public void testWritReadManyChunks() throws IOException {
    final int datalen = 1024;
    final int chunkCount = 1024;
    long testContainerID = getTestContainerID();
    Container container = addContainer(containerSet, testContainerID);
    BlockID blockID = ContainerTestHelper.getTestBlockID(testContainerID);
    List<ChunkInfo> chunks = new ArrayList<>(chunkCount);
    BlockData blockData = new BlockData(blockID);
    // Write chunkCount chunks at consecutive offsets and record each one on the block.
    for (int x = 0; x < chunkCount; x++) {
        ChunkInfo info = getChunk(blockID.getLocalID(), x, x * datalen, datalen);
        ChunkBuffer data = getData(datalen);
        setDataChecksum(info, data);
        chunkManager.writeChunk(container, blockID, info, data, getDispatcherContext());
        chunks.add(info);
        blockData.addChunk(info.getProtoBufMessage());
    }
    blockManager.putBlock(container, blockData);
    KeyValueContainerData cNewData = (KeyValueContainerData) container.getContainerData();
    Assert.assertNotNull(cNewData);
    Assert.assertNotNull(cNewData.getChunksPath());
    // Read each chunk back via the ReadChunk call and verify its checksum.
    Checksum checksum = new Checksum(ChecksumType.CRC32, 1024 * 1024);
    for (int x = 0; x < chunkCount; x++) {
        ChunkInfo info = chunks.get(x);
        ChunkBuffer data = chunkManager.readChunk(container, blockID, info, getDispatcherContext());
        ChecksumData checksumData = checksum.computeChecksum(data);
        Assert.assertEquals(info.getChecksumData(), checksumData);
    }
}
Also used : Path(java.nio.file.Path) ChunkInfo(org.apache.hadoop.ozone.container.common.helpers.ChunkInfo) ChecksumData(org.apache.hadoop.ozone.common.ChecksumData) ArrayList(java.util.ArrayList) KeyValueContainerData(org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData) KeyValueContainer(org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer) Container(org.apache.hadoop.ozone.container.common.interfaces.Container) ContainerTestHelper.setDataChecksum(org.apache.hadoop.ozone.container.ContainerTestHelper.setDataChecksum) Checksum(org.apache.hadoop.ozone.common.Checksum) BlockID(org.apache.hadoop.hdds.client.BlockID) ChunkBuffer(org.apache.hadoop.ozone.common.ChunkBuffer) BlockData(org.apache.hadoop.ozone.container.common.helpers.BlockData) Test(org.junit.Test)

Example 12 with ChunkBuffer

use of org.apache.hadoop.ozone.common.ChunkBuffer in project ozone by apache.

The method concurrentReadOfSameFile from the class TestChunkUtils.

/**
 * Writes a small chunk file once and has several threads read it
 * concurrently, verifying that every reader observes the same bytes.
 */
@Test
public void concurrentReadOfSameFile() throws Exception {
    String s = "Hello World";
    byte[] array = s.getBytes(UTF_8);
    ChunkBuffer data = ChunkBuffer.wrap(ByteBuffer.wrap(array));
    Path tempFile = Files.createTempFile(PREFIX, "concurrent");
    try {
        long len = data.limit();
        long offset = 0;
        File file = tempFile.toFile();
        ChunkUtils.writeData(file, data, offset, len, null, true);
        int threads = 10;
        ExecutorService executor = new ThreadPoolExecutor(threads, threads, 0, TimeUnit.SECONDS, new LinkedBlockingQueue<>());
        AtomicInteger processed = new AtomicInteger();
        AtomicBoolean failed = new AtomicBoolean();
        for (int i = 0; i < threads; i++) {
            final int threadNumber = i;
            executor.execute(() -> {
                try {
                    ByteBuffer[] readBuffers = BufferUtils.assignByteBuffers(len, len);
                    ChunkUtils.readData(file, readBuffers, offset, len, null);
                    // A buffer size equal to len must yield exactly one buffer.
                    // Record failures via the flag instead of throwing AssertionError:
                    // an AssertionError would escape catch (Exception), kill the worker
                    // silently, and make waitFor below time out instead of reporting.
                    if (readBuffers.length != 1) {
                        failed.set(true);
                        return;
                    }
                    ByteBuffer readBuffer = readBuffers[0];
                    LOG.info("Read data ({}): {}", threadNumber, new String(readBuffer.array(), UTF_8));
                    if (!Arrays.equals(array, readBuffer.array()) || readBuffer.remaining() != len) {
                        failed.set(true);
                    }
                } catch (Exception e) {
                    LOG.error("Failed to read data ({})", threadNumber, e);
                    failed.set(true);
                } finally {
                    // Always count this worker so waitFor cannot time out on failure.
                    processed.incrementAndGet();
                }
            });
        }
        try {
            GenericTestUtils.waitFor(() -> processed.get() == threads, 100, (int) TimeUnit.SECONDS.toMillis(5));
        } finally {
            executor.shutdownNow();
        }
        assertFalse(failed.get());
    } finally {
        Files.deleteIfExists(tempFile);
    }
}
Also used : Path(java.nio.file.Path) ByteBuffer(java.nio.ByteBuffer) IOException(java.io.IOException) StorageContainerException(org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) ExecutorService(java.util.concurrent.ExecutorService) ChunkBuffer(org.apache.hadoop.ozone.common.ChunkBuffer) ThreadPoolExecutor(java.util.concurrent.ThreadPoolExecutor) File(java.io.File) Test(org.junit.Test)

Example 13 with ChunkBuffer

use of org.apache.hadoop.ozone.common.ChunkBuffer in project ozone by apache.

The method testDeleteChunk from the class TestContainerPersistence.

/**
 * Writes a chunk, deletes it, and verifies that a subsequent read of the
 * deleted chunk fails with a StorageContainerException.
 *
 * @throws IOException
 * @throws NoSuchAlgorithmException
 */
@Test
public void testDeleteChunk() throws IOException, NoSuchAlgorithmException {
    final int chunkLength = 1024;
    long containerId = getTestContainerID();
    Container container = addContainer(containerSet, containerId);
    BlockID blockId = ContainerTestHelper.getTestBlockID(containerId);
    ChunkInfo chunkInfo = getChunk(blockId.getLocalID(), 1, 0, chunkLength);
    ChunkBuffer chunkData = getData(chunkLength);
    setDataChecksum(chunkInfo, chunkData);
    // Write the chunk, then remove it again.
    chunkManager.writeChunk(container, blockId, chunkInfo, chunkData, getDispatcherContext());
    chunkManager.deleteChunk(container, blockId, chunkInfo);
    // Reading the deleted chunk must now fail.
    exception.expect(StorageContainerException.class);
    chunkManager.readChunk(container, blockId, chunkInfo, getDispatcherContext());
}
Also used : KeyValueContainer(org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer) Container(org.apache.hadoop.ozone.container.common.interfaces.Container) ChunkInfo(org.apache.hadoop.ozone.container.common.helpers.ChunkInfo) BlockID(org.apache.hadoop.hdds.client.BlockID) ChunkBuffer(org.apache.hadoop.ozone.common.ChunkBuffer) Test(org.junit.Test)

Example 14 with ChunkBuffer

use of org.apache.hadoop.ozone.common.ChunkBuffer in project ozone by apache.

The method testOverWrite from the class TestContainerPersistence.

/**
 * Writes a single chunk and tries to overwrite that chunk without over write
 * flag then re-tries with overwrite flag.
 *
 * @throws IOException
 * @throws NoSuchAlgorithmException
 */
@Test
public void testOverWrite() throws IOException, NoSuchAlgorithmException {
    final int chunkLen = 1024;
    long containerId = getTestContainerID();
    KeyValueContainer container = addContainer(containerSet, containerId);
    BlockID blockId = ContainerTestHelper.getTestBlockID(containerId);
    ChunkInfo chunkInfo = getChunk(blockId.getLocalID(), 0, 0, chunkLen);
    ChunkBuffer buffer = getData(chunkLen);
    setDataChecksum(chunkInfo, buffer);
    // Initial write of the chunk.
    chunkManager.writeChunk(container, blockId, chunkInfo, buffer, getDispatcherContext());
    buffer.rewind();
    // Re-write the same chunk without the overwrite flag.
    chunkManager.writeChunk(container, blockId, chunkInfo, buffer, getDispatcherContext());
    buffer.rewind();
    // With the overwrite flag it should work now.
    chunkInfo.addMetadata(OzoneConsts.CHUNK_OVERWRITE, "true");
    chunkManager.writeChunk(container, blockId, chunkInfo, buffer, getDispatcherContext());
    // Only one chunk's worth of space is actually in use...
    long usedBytes = container.getContainerData().getBytesUsed();
    Assert.assertEquals(chunkLen, usedBytes);
    // ...while the write statistics count all three writes.
    long writtenBytes = container.getContainerData().getWriteBytes();
    Assert.assertEquals(chunkLen * 3, writtenBytes);
}
Also used : ChunkInfo(org.apache.hadoop.ozone.container.common.helpers.ChunkInfo) BlockID(org.apache.hadoop.hdds.client.BlockID) ChunkBuffer(org.apache.hadoop.ozone.common.ChunkBuffer) KeyValueContainer(org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer) Test(org.junit.Test)

Example 15 with ChunkBuffer

use of org.apache.hadoop.ozone.common.ChunkBuffer in project ozone by apache.

The method testPartialRead from the class TestFilePerBlockStrategy.

/**
 * Verifies that a sub-range of a written chunk can be read back and that it
 * matches the corresponding slice of the original data.
 */
@Test
public void testPartialRead() throws Exception {
    final int chunkLen = 1024;
    final int readOffset = chunkLen / 4;
    final int readLen = chunkLen / 2;
    KeyValueContainer container = getKeyValueContainer();
    BlockID blockId = getBlockID();
    ChunkInfo fullChunk = getChunk(blockId.getLocalID(), 0, 0, chunkLen);
    ChunkBuffer written = ContainerTestHelper.getData(chunkLen);
    setDataChecksum(fullChunk, written);
    DispatcherContext ctx = getDispatcherContext();
    ChunkManager subject = createTestSubject();
    subject.writeChunk(container, blockId, fullChunk, written, ctx);
    // Full read first. The written data is ChunkBufferImplWithByteBuffer while
    // readChunk returns ChunkBufferImplWithByteBufferList, so convert both to
    // ByteStrings before comparing.
    ChunkBuffer fullRead = subject.readChunk(container, blockId, fullChunk, ctx);
    assertEquals(written.rewind().toByteString(), fullRead.rewind().toByteString());
    // Now read only the middle half of the chunk.
    ChunkInfo partialChunk = getChunk(blockId.getLocalID(), 0, readOffset, readLen);
    ChunkBuffer partialRead = subject.readChunk(container, blockId, partialChunk, ctx);
    assertEquals(readLen, partialChunk.getLen());
    assertEquals(written.rewind().toByteString().substring(readOffset, readOffset + readLen), partialRead.rewind().toByteString());
}
Also used : ChunkInfo(org.apache.hadoop.ozone.container.common.helpers.ChunkInfo) DispatcherContext(org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext) BlockID(org.apache.hadoop.hdds.client.BlockID) ChunkBuffer(org.apache.hadoop.ozone.common.ChunkBuffer) KeyValueContainer(org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer) ChunkManager(org.apache.hadoop.ozone.container.keyvalue.interfaces.ChunkManager) Test(org.junit.Test)

Aggregations

ChunkBuffer (org.apache.hadoop.ozone.common.ChunkBuffer)30 BlockID (org.apache.hadoop.hdds.client.BlockID)14 ChunkInfo (org.apache.hadoop.ozone.container.common.helpers.ChunkInfo)14 Test (org.junit.Test)13 IOException (java.io.IOException)10 ContainerProtos (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos)9 StorageContainerException (org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException)8 KeyValueContainer (org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer)8 ArrayList (java.util.ArrayList)6 BlockData (org.apache.hadoop.ozone.container.common.helpers.BlockData)5 CompletableFuture (java.util.concurrent.CompletableFuture)4 ContainerCommandResponseProto (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandResponseProto)4 XceiverClientReply (org.apache.hadoop.hdds.scm.XceiverClientReply)4 XceiverClientSpi (org.apache.hadoop.hdds.scm.XceiverClientSpi)4 Pipeline (org.apache.hadoop.hdds.scm.pipeline.Pipeline)4 Container (org.apache.hadoop.ozone.container.common.interfaces.Container)4 ChunkManager (org.apache.hadoop.ozone.container.keyvalue.interfaces.ChunkManager)4 ByteBuffer (java.nio.ByteBuffer)3 Path (java.nio.file.Path)3 LinkedList (java.util.LinkedList)3