
Example 1 with KeyValueContainerData

Use of org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData in the Apache Ozone project.

From the class ChunkManagerDiskWrite, method call().

@Override
public Void call() throws Exception {
    try {
        init();
        OzoneConfiguration ozoneConfiguration = createOzoneConfiguration();
        VolumeSet volumeSet = new MutableVolumeSet("dnid", "clusterid", ozoneConfiguration, null, StorageVolume.VolumeType.DATA_VOLUME, null);
        Random random = new Random();
        VolumeChoosingPolicy volumeChoicePolicy = new RoundRobinVolumeChoosingPolicy();
        final int threadCount = getThreadNo();
        // create a dedicated (NEW) container for each thread
        for (int i = 1; i <= threadCount; i++) {
            // use a non-negative container id
            long containerId = random.nextLong() & 0x0F_FF_FF_FF_FF_FF_FF_FFL;
            KeyValueContainerData keyValueContainerData = new KeyValueContainerData(containerId, containerLayout, 1_000_000L, getPrefix(), "nodeid");
            KeyValueContainer keyValueContainer = new KeyValueContainer(keyValueContainerData, ozoneConfiguration);
            keyValueContainer.create(volumeSet, volumeChoicePolicy, "scmid");
            containersPerThread.put(i, keyValueContainer);
        }
        blockSize = chunkSize * chunksPerBlock;
        data = randomAscii(chunkSize).getBytes(UTF_8);
        chunkManager = ChunkManagerFactory.createChunkManager(ozoneConfiguration, null, null);
        timer = getMetrics().timer("chunk-write");
        LOG.info("Running chunk write test: threads={} chunkSize={} " + "chunksPerBlock={} layout={}", threadCount, chunkSize, chunksPerBlock, containerLayout);
        runTests(this::writeChunk);
    } finally {
        if (chunkManager != null) {
            chunkManager.shutdown();
        }
    }
    return null;
}
Also used: RoundRobinVolumeChoosingPolicy (org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy), Random (java.util.Random), VolumeChoosingPolicy (org.apache.hadoop.ozone.container.common.interfaces.VolumeChoosingPolicy), OzoneConfiguration (org.apache.hadoop.hdds.conf.OzoneConfiguration), MutableVolumeSet (org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet), VolumeSet (org.apache.hadoop.ozone.container.common.volume.VolumeSet), KeyValueContainerData (org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData), KeyValueContainer (org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer)
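
The only subtle line in this example is the container-id generation: Random.nextLong() can return negative values, and the bit mask clamps the result into the non-negative range without rejection sampling. A minimal, self-contained sketch of just that trick (the class name is illustrative, not part of Ozone):

import java.util.Random;

public class NonNegativeIdSketch {
    public static void main(String[] args) {
        Random random = new Random();
        // ANDing with 0x0F_FF_FF_FF_FF_FF_FF_FFL clears the sign bit (and the
        // next three bits), so the result is always non-negative.
        long containerId = random.nextLong() & 0x0F_FF_FF_FF_FF_FF_FF_FFL;
        System.out.println(containerId + " is non-negative: " + (containerId >= 0));
    }
}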

Example 2 with KeyValueContainerData

Use of org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData in the Apache Ozone project.

From the class FilePerBlockStrategy, method writeChunk().

@Override
public void writeChunk(Container container, BlockID blockID, ChunkInfo info, ChunkBuffer data, DispatcherContext dispatcherContext) throws StorageContainerException {
    checkLayoutVersion(container);
    Preconditions.checkNotNull(dispatcherContext);
    DispatcherContext.WriteChunkStage stage = dispatcherContext.getStage();
    if (info.getLen() <= 0) {
        if (LOG.isDebugEnabled()) {
            LOG.debug("Skip writing empty chunk {} in stage {}", info, stage);
        }
        return;
    }
    if (stage == COMMIT_DATA) {
        if (LOG.isDebugEnabled()) {
            LOG.debug("Ignore chunk {} in stage {}", info, stage);
        }
        return;
    }
    KeyValueContainerData containerData = (KeyValueContainerData) container.getContainerData();
    File chunkFile = getChunkFile(container, blockID, info);
    boolean overwrite = validateChunkForOverwrite(chunkFile, info);
    long len = info.getLen();
    long offset = info.getOffset();
    if (LOG.isDebugEnabled()) {
        LOG.debug("Writing chunk {} (overwrite: {}) in stage {} to file {}", info, overwrite, stage, chunkFile);
    }
    HddsVolume volume = containerData.getVolume();
    FileChannel channel = null;
    try {
        channel = files.getChannel(chunkFile, doSyncWrite);
    } catch (StorageContainerException e) {
        // Report the volume failure before rethrowing.
        onFailure(volume);
        throw e;
    }
    // if this is not an overwrite, the chunk offset must match the current block file length
    if (!overwrite) {
        ChunkUtils.validateChunkSize(chunkFile, info);
    }
    ChunkUtils.writeData(channel, chunkFile.getName(), data, offset, len, volume);
    containerData.updateWriteStats(len, overwrite);
}
Also used: HddsVolume (org.apache.hadoop.ozone.container.common.volume.HddsVolume), DispatcherContext (org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext), FileChannel (java.nio.channels.FileChannel), IOException (java.io.IOException), UncheckedIOException (java.io.UncheckedIOException), RandomAccessFile (java.io.RandomAccessFile), File (java.io.File), KeyValueContainerData (org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData)
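
The !overwrite branch enforces that non-overwrite writes land exactly at the end of the block file. A hedged approximation of the kind of check ChunkUtils.validateChunkSize performs (an illustrative sketch, not the actual Ozone implementation):

import java.io.File;
import java.io.IOException;

final class ChunkSizeCheckSketch {
    private ChunkSizeCheckSketch() { }

    // For a file-per-block layout, a non-overwrite chunk must start exactly
    // at the current end of the block file, i.e. writes are strictly appends.
    static void validateAppendOffset(File blockFile, long chunkOffset, String chunkName) throws IOException {
        long fileLen = blockFile.exists() ? blockFile.length() : 0;
        if (chunkOffset != fileLen) {
            throw new IOException("Unexpected offset for chunk " + chunkName
                + ": expected " + fileLen + " but got " + chunkOffset);
        }
    }
}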

Example 3 with KeyValueContainerData

Use of org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData in the Apache Ozone project.

From the class FilePerChunkStrategy, method writeChunk().

/**
 * Writes a given chunk.
 *
 * @param container - container for the chunk
 * @param blockID - ID of the block
 * @param info - chunk info
 * @param data - data of the chunk
 * @param dispatcherContext - dispatcher context info
 * @throws StorageContainerException if the write fails
 */
@Override
public void writeChunk(Container container, BlockID blockID, ChunkInfo info, ChunkBuffer data, DispatcherContext dispatcherContext) throws StorageContainerException {
    checkLayoutVersion(container);
    Preconditions.checkNotNull(dispatcherContext);
    DispatcherContext.WriteChunkStage stage = dispatcherContext.getStage();
    try {
        KeyValueContainer kvContainer = (KeyValueContainer) container;
        KeyValueContainerData containerData = kvContainer.getContainerData();
        HddsVolume volume = containerData.getVolume();
        File chunkFile = getChunkFile(kvContainer, blockID, info);
        boolean isOverwrite = ChunkUtils.validateChunkForOverwrite(chunkFile, info);
        File tmpChunkFile = getTmpChunkFile(chunkFile, dispatcherContext);
        if (LOG.isDebugEnabled()) {
            LOG.debug("writing chunk:{} chunk stage:{} chunk file:{} tmp chunk file:{}", info.getChunkName(), stage, chunkFile, tmpChunkFile);
        }
        long len = info.getLen();
        // ignore offset in chunk info
        long offset = 0;
        switch(stage) {
            case WRITE_DATA:
                if (isOverwrite) {
                    // If the actual chunk file already exists while the temp
                    // chunk file is being written, the same Ozone client request
                    // has generated two Raft log entries. This can happen when
                    // the retry cache expired in Ratis, or due to a log index
                    // mismatch/corruption in Ratis. Two approaches are possible:
                    // 1. Read the complete data in the actual chunk file and
                    //    verify its integrity; if it mismatches, fall back to 2.
                    // 2. Delete the chunk file and write the chunk again.
                    // For now, simply rewrite the chunk file.
                    // TODO: once checksum support for write chunks is plugged
                    // in, verify the checksum of the actual chunk file against
                    // the data to be written; if they match, we can safely
                    // return without rewriting.
                    LOG.warn("ChunkFile already exists {}. Deleting it.", chunkFile);
                    FileUtil.fullyDelete(chunkFile);
                }
                if (tmpChunkFile.exists()) {
                    // If the tmp chunk file already exists, the Raft log got
                    // appended but the log entry was later truncated in Ratis,
                    // leaving garbage behind.
                    // TODO: once checksum support for data chunks is plugged in,
                    // compare the checksums instead of rewriting the chunk here.
                    LOG.warn("tmpChunkFile already exists {}. Overwriting it.", tmpChunkFile);
                }
                // Initially write to the temporary chunk file; container stats
                // are not updated here, as the data is not yet committed.
                ChunkUtils.writeData(tmpChunkFile, data, offset, len, volume, doSyncWrite);
                break;
            case COMMIT_DATA:
                // Commit the data by moving it from the temporary chunk file
                // to the actual chunk file.
                if (isOverwrite) {
                    // If the actual chunk file already exists, the write chunk
                    // transaction in the ContainerStateMachine is being
                    // reapplied. This can happen when a node restarts.
                    // TODO: verify the checksums of the existing chunk file and
                    // the chunk info to be committed here
                    LOG.warn("ChunkFile already exists {}", chunkFile);
                    return;
                }
                // Committing a chunk just renames the tmp chunk file, which has
                // the same term and log index appended as the current
                // transaction.
                commitChunk(tmpChunkFile, chunkFile);
                // Increment container stats here, as the data is committed.
                containerData.updateWriteStats(len, isOverwrite);
                break;
            case COMBINED:
                // directly write to the chunk file
                ChunkUtils.writeData(chunkFile, data, offset, len, volume, doSyncWrite);
                containerData.updateWriteStats(len, isOverwrite);
                break;
            default:
                throw new IOException("Cannot identify write operation.");
        }
    } catch (StorageContainerException ex) {
        throw ex;
    } catch (IOException ex) {
        throw new StorageContainerException("Internal error: ", ex, IO_EXCEPTION);
    }
}
Also used: HddsVolume (org.apache.hadoop.ozone.container.common.volume.HddsVolume), DispatcherContext (org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext), IOException (java.io.IOException), StorageContainerException (org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException), File (java.io.File), KeyValueContainer (org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer), KeyValueContainerData (org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData)
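
The three stages above implement a stage-then-rename pattern: WRITE_DATA stages the bytes in a temporary file, COMMIT_DATA renames the temporary file into place, and COMBINED does both in one step. A minimal sketch of the underlying technique, under assumed names (an illustration of the pattern, not the Ozone code):

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;

final class TmpFileCommitSketch {
    private TmpFileCommitSketch() { }

    // WRITE_DATA stage: stage the bytes in a sibling ".tmp" file; a crash
    // before commit leaves only the tmp file behind, never a partial chunk.
    static Path writeStage(Path chunkFile, byte[] data) throws IOException {
        Path tmp = chunkFile.resolveSibling(chunkFile.getFileName() + ".tmp");
        Files.write(tmp, data);
        return tmp;
    }

    // COMMIT_DATA stage: atomically move the tmp file into place, so readers
    // see either the whole chunk or nothing. ATOMIC_MOVE can fail across
    // filesystems, which keeping the tmp file as a sibling avoids.
    static void commitStage(Path tmp, Path chunkFile) throws IOException {
        Files.move(tmp, chunkFile, StandardCopyOption.ATOMIC_MOVE);
    }
}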

Example 4 with KeyValueContainerData

Use of org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData in the Apache Ozone project.

From the class TestStorageContainerManagerHelper, method getContainerMetadata().

private ReferenceCountedDB getContainerMetadata(Long containerID) throws IOException {
    ContainerWithPipeline containerWithPipeline = cluster.getStorageContainerManager().getClientProtocolServer().getContainerWithPipeline(containerID);
    DatanodeDetails dn = containerWithPipeline.getPipeline().getFirstNode();
    OzoneContainer containerServer = getContainerServerByDatanodeUuid(dn.getUuidString());
    KeyValueContainerData containerData = (KeyValueContainerData) containerServer.getContainerSet().getContainer(containerID).getContainerData();
    return BlockUtils.getDB(containerData, conf);
}
Also used: DatanodeDetails (org.apache.hadoop.hdds.protocol.DatanodeDetails), OzoneContainer (org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer), ContainerWithPipeline (org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline), KeyValueContainerData (org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData)
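
BlockUtils.getDB returns a ReferenceCountedDB, a shared handle to the container's metadata store. A hedged sketch of the general reference-counting idea behind such a handle (names are illustrative; this is not the Ozone class):

import java.util.concurrent.atomic.AtomicInteger;

final class RefCountedHandleSketch implements AutoCloseable {
    private final AtomicInteger refCount = new AtomicInteger();

    // Each caller acquiring the handle bumps the count.
    RefCountedHandleSketch acquire() {
        refCount.incrementAndGet();
        return this;
    }

    // close() releases one reference; the underlying resource is only freed
    // when the last reference is released.
    @Override
    public void close() {
        if (refCount.decrementAndGet() == 0) {
            // free the underlying store here
        }
    }
}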

Example 5 with KeyValueContainerData

Use of org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData in the Apache Ozone project.

From the class TestContainerStateMachineFailures, method testApplyTransactionIdempotencyWithClosedContainer().

@Test
public void testApplyTransactionIdempotencyWithClosedContainer() throws Exception {
    OzoneOutputStream key = objectStore.getVolume(volumeName).getBucket(bucketName).createKey("ratis", 1024, ReplicationType.RATIS, ReplicationFactor.ONE, new HashMap<>());
    // First write and flush creates a container in the datanode
    key.write("ratis".getBytes(UTF_8));
    key.flush();
    key.write("ratis".getBytes(UTF_8));
    KeyOutputStream groupOutputStream = (KeyOutputStream) key.getOutputStream();
    List<OmKeyLocationInfo> locationInfoList = groupOutputStream.getLocationInfoList();
    Assert.assertEquals(1, locationInfoList.size());
    OmKeyLocationInfo omKeyLocationInfo = locationInfoList.get(0);
    HddsDatanodeService dn = TestHelper.getDatanodeService(omKeyLocationInfo, cluster);
    ContainerData containerData = dn.getDatanodeStateMachine().getContainer().getContainerSet().getContainer(omKeyLocationInfo.getContainerID()).getContainerData();
    Assert.assertTrue(containerData instanceof KeyValueContainerData);
    key.close();
    ContainerStateMachine stateMachine = (ContainerStateMachine) TestHelper.getStateMachine(dn, omKeyLocationInfo.getPipeline());
    SimpleStateMachineStorage storage = (SimpleStateMachineStorage) stateMachine.getStateMachineStorage();
    Path parentPath = storage.findLatestSnapshot().getFile().getPath();
    stateMachine.takeSnapshot();
    Assert.assertTrue(parentPath.getParent().toFile().listFiles().length > 0);
    FileInfo snapshot = storage.findLatestSnapshot().getFile();
    Assert.assertNotNull(snapshot);
    long containerID = omKeyLocationInfo.getContainerID();
    Pipeline pipeline = cluster.getStorageContainerLocationClient().getContainerWithPipeline(containerID).getPipeline();
    XceiverClientSpi xceiverClient = xceiverClientManager.acquireClient(pipeline);
    ContainerProtos.ContainerCommandRequestProto.Builder request = ContainerProtos.ContainerCommandRequestProto.newBuilder();
    request.setDatanodeUuid(pipeline.getFirstNode().getUuidString());
    request.setCmdType(ContainerProtos.Type.CloseContainer);
    request.setContainerID(containerID);
    request.setCloseContainer(ContainerProtos.CloseContainerRequestProto.getDefaultInstance());
    try {
        xceiverClient.sendCommand(request.build());
    } catch (IOException e) {
        Assert.fail("Exception should not be thrown");
    }
    Assert.assertTrue(TestHelper.getDatanodeService(omKeyLocationInfo, cluster).getDatanodeStateMachine().getContainer().getContainerSet().getContainer(containerID).getContainerState() == ContainerProtos.ContainerDataProto.State.CLOSED);
    Assert.assertTrue(stateMachine.isStateMachineHealthy());
    try {
        stateMachine.takeSnapshot();
    } catch (IOException ioe) {
        Assert.fail("Exception should not be thrown");
    }
    FileInfo latestSnapshot = storage.findLatestSnapshot().getFile();
    Assert.assertFalse(snapshot.getPath().equals(latestSnapshot.getPath()));
}
Also used: Path (java.nio.file.Path), OzoneOutputStream (org.apache.hadoop.ozone.client.io.OzoneOutputStream), HddsDatanodeService (org.apache.hadoop.ozone.HddsDatanodeService), IOException (java.io.IOException), XceiverClientSpi (org.apache.hadoop.hdds.scm.XceiverClientSpi), OmKeyLocationInfo (org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo), KeyValueContainerData (org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData), Pipeline (org.apache.hadoop.hdds.scm.pipeline.Pipeline), ContainerStateMachine (org.apache.hadoop.ozone.container.common.transport.server.ratis.ContainerStateMachine), FileInfo (org.apache.ratis.server.storage.FileInfo), SimpleStateMachineStorage (org.apache.ratis.statemachine.impl.SimpleStateMachineStorage), KeyOutputStream (org.apache.hadoop.ozone.client.io.KeyOutputStream), ContainerData (org.apache.hadoop.ozone.container.common.impl.ContainerData), Test (org.junit.Test)
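
The test resends CloseContainer to a container that is already CLOSED and asserts that nothing fails: applying the same transaction twice must be a no-op, because Ratis can legitimately re-apply log entries (for example after a restart). A toy sketch of the idempotency property being exercised (illustrative names, not Ozone APIs):

final class IdempotentCloseSketch {
    enum State { OPEN, CLOSED }

    private State state = State.OPEN;

    // Re-applying close on an already-CLOSED container succeeds silently
    // instead of throwing, so log re-application is safe.
    synchronized void applyClose() {
        if (state == State.CLOSED) {
            return; // idempotent no-op
        }
        state = State.CLOSED;
    }

    public static void main(String[] args) {
        IdempotentCloseSketch c = new IdempotentCloseSketch();
        c.applyClose();
        c.applyClose(); // second apply must not throw
        System.out.println("close applied twice without error");
    }
}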

Aggregations

KeyValueContainerData (org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData) 61
Test (org.junit.Test) 32
KeyValueContainer (org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer) 29
File (java.io.File) 24
IOException (java.io.IOException) 12
OzoneConfiguration (org.apache.hadoop.hdds.conf.OzoneConfiguration) 12
HddsVolume (org.apache.hadoop.ozone.container.common.volume.HddsVolume) 12
BlockData (org.apache.hadoop.ozone.container.common.helpers.BlockData) 10
Container (org.apache.hadoop.ozone.container.common.interfaces.Container) 10
ReferenceCountedDB (org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB) 10
StorageContainerException (org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException) 9
HddsDatanodeService (org.apache.hadoop.ozone.HddsDatanodeService) 9
OzoneContainer (org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer) 9
OmKeyLocationInfo (org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo) 9
OzoneOutputStream (org.apache.hadoop.ozone.client.io.OzoneOutputStream) 8
ContainerData (org.apache.hadoop.ozone.container.common.impl.ContainerData) 8
ContainerSet (org.apache.hadoop.ozone.container.common.impl.ContainerSet) 8
RoundRobinVolumeChoosingPolicy (org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy) 8
ArrayList (java.util.ArrayList) 7
MutableVolumeSet (org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet) 7