
Example 1 with KeyOutputStream

Use of org.apache.hadoop.ozone.client.io.KeyOutputStream in project ozone by apache.

From class RpcClient, method createMultipartKey.

@Override
public OzoneOutputStream createMultipartKey(String volumeName, String bucketName, String keyName, long size, int partNumber, String uploadID) throws IOException {
    verifyVolumeName(volumeName);
    verifyBucketName(bucketName);
    if (checkKeyNameEnabled) {
        HddsClientUtils.verifyKeyName(keyName);
    }
    HddsClientUtils.checkNotNull(keyName, uploadID);
    Preconditions.checkArgument(partNumber > 0 && partNumber <= 10000, "Part " + "number should be greater than zero and less than or equal to 10000");
    Preconditions.checkArgument(size >= 0, "size should be greater than or " + "equal to zero");
    String requestId = UUID.randomUUID().toString();
    OmKeyArgs keyArgs = new OmKeyArgs.Builder()
        .setVolumeName(volumeName)
        .setBucketName(bucketName)
        .setKeyName(keyName)
        .setDataSize(size)
        .setIsMultipartKey(true)
        .setMultipartUploadID(uploadID)
        .setMultipartUploadPartNumber(partNumber)
        .setAcls(getAclList())
        .build();
    OpenKeySession openKey = ozoneManagerClient.openKey(keyArgs);
    KeyOutputStream keyOutputStream = new KeyOutputStream.Builder()
        .setHandler(openKey)
        .setXceiverClientManager(xceiverClientManager)
        .setOmClient(ozoneManagerClient)
        .setRequestID(requestId)
        .setReplicationConfig(openKey.getKeyInfo().getReplicationConfig())
        .setMultipartNumber(partNumber)
        .setMultipartUploadID(uploadID)
        .setIsMultipartKey(true)
        .enableUnsafeByteBufferConversion(unsafeByteBufferConversion)
        .setConfig(clientConfig)
        .build();
    keyOutputStream.addPreallocateBlocks(openKey.getKeyInfo().getLatestVersionLocations(), openKey.getOpenVersion());
    FileEncryptionInfo feInfo = openKey.getKeyInfo().getFileEncryptionInfo();
    if (feInfo != null) {
        KeyProvider.KeyVersion decrypted = getDEK(feInfo);
        final CryptoOutputStream cryptoOut = new CryptoOutputStream(keyOutputStream, OzoneKMSUtil.getCryptoCodec(conf, feInfo), decrypted.getMaterial(), feInfo.getIV());
        return new OzoneOutputStream(cryptoOut);
    } else {
        return new OzoneOutputStream(keyOutputStream);
    }
}
Also used : KeyProvider(org.apache.hadoop.crypto.key.KeyProvider) CryptoOutputStream(org.apache.hadoop.crypto.CryptoOutputStream) OzoneOutputStream(org.apache.hadoop.ozone.client.io.OzoneOutputStream) OpenKeySession(org.apache.hadoop.ozone.om.helpers.OpenKeySession) KeyOutputStream(org.apache.hadoop.ozone.client.io.KeyOutputStream) FileEncryptionInfo(org.apache.hadoop.fs.FileEncryptionInfo) OmKeyArgs(org.apache.hadoop.ozone.om.helpers.OmKeyArgs)
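
For context, createMultipartKey writes a single part of a multipart upload; the caller is expected to initiate the upload first and complete it after all parts are written. Below is a minimal sketch of that surrounding flow through the OzoneBucket client API (which delegates to RpcClient); the exact signatures of initiateMultipartUpload, getCommitUploadPartInfo, and completeMultipartUpload, plus the bucket/partData setup, are assumptions for illustration.

public void uploadInParts(OzoneBucket bucket, String keyName, byte[] partData) throws IOException {
    // 1. Initiate the upload to obtain an upload ID (assumed API).
    OmMultipartInfo info = bucket.initiateMultipartUpload(keyName, ReplicationType.RATIS, ReplicationFactor.THREE);
    String uploadID = info.getUploadID();
    // 2. Write part 1; on the client side this goes through createMultipartKey.
    OzoneOutputStream part = bucket.createMultipartKey(keyName, partData.length, 1, uploadID);
    part.write(partData);
    part.close();
    // 3. Collect the part name reported on close and complete the upload.
    Map<Integer, String> parts = new HashMap<>();
    parts.put(1, part.getCommitUploadPartInfo().getPartName());
    bucket.completeMultipartUpload(keyName, uploadID, parts);
}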

Example 2 with KeyOutputStream

Use of org.apache.hadoop.ozone.client.io.KeyOutputStream in project ozone by apache.

From class TestObjectStoreWithFSO, method testLookupKey.

@Test
public void testLookupKey() throws Exception {
    String parent = "a/b/c/";
    String fileName = "key" + RandomStringUtils.randomNumeric(5);
    String key = parent + fileName;
    OzoneClient client = cluster.getClient();
    ObjectStore objectStore = client.getObjectStore();
    OzoneVolume ozoneVolume = objectStore.getVolume(volumeName);
    Assert.assertEquals(volumeName, ozoneVolume.getName());
    OzoneBucket ozoneBucket = ozoneVolume.getBucket(bucketName);
    Assert.assertEquals(bucketName, ozoneBucket.getName());
    Table<String, OmKeyInfo> openFileTable = cluster.getOzoneManager().getMetadataManager().getOpenKeyTable(getBucketLayout());
    String data = "random data";
    OzoneOutputStream ozoneOutputStream = ozoneBucket.createKey(key, data.length(), ReplicationType.RATIS, ReplicationFactor.ONE, new HashMap<>());
    KeyOutputStream keyOutputStream = (KeyOutputStream) ozoneOutputStream.getOutputStream();
    long clientID = keyOutputStream.getClientID();
    OmDirectoryInfo dirPathC = getDirInfo(parent);
    Assert.assertNotNull("Failed to find dir path: a/b/c", dirPathC);
    // after file creation
    verifyKeyInOpenFileTable(openFileTable, clientID, fileName, dirPathC.getObjectID(), false);
    ozoneOutputStream.write(data.getBytes(StandardCharsets.UTF_8), 0, data.length());
    // try to read the key while it is still open
    try {
        ozoneBucket.getKey(key);
        fail("Should throw an exception as the file is not visible and is "
            + "still open for writing!");
    } catch (OMException ome) {
        // expected
        assertEquals(OMException.ResultCodes.KEY_NOT_FOUND, ome.getResult());
    }
    ozoneOutputStream.close();
    OzoneKeyDetails keyDetails = ozoneBucket.getKey(key);
    Assert.assertEquals(key, keyDetails.getName());
    Table<String, OmKeyInfo> fileTable = cluster.getOzoneManager().getMetadataManager().getKeyTable(getBucketLayout());
    // When closing the key, entry should be removed from openFileTable
    // and it should be added to fileTable.
    verifyKeyInFileTable(fileTable, fileName, dirPathC.getObjectID(), false);
    verifyKeyInOpenFileTable(openFileTable, clientID, fileName, dirPathC.getObjectID(), true);
    ozoneBucket.deleteKey(key);
    // try to read the deleted key
    try {
        ozoneBucket.getKey(key);
        fail("Should throw an exception as the key no longer exists!");
    } catch (OMException ome) {
        // expected
        assertEquals(OMException.ResultCodes.KEY_NOT_FOUND, ome.getResult());
    }
    // after key delete
    verifyKeyInFileTable(fileTable, fileName, dirPathC.getObjectID(), true);
    verifyKeyInOpenFileTable(openFileTable, clientID, fileName, dirPathC.getObjectID(), true);
}
Also used : ObjectStore(org.apache.hadoop.ozone.client.ObjectStore) OzoneOutputStream(org.apache.hadoop.ozone.client.io.OzoneOutputStream) OzoneClient(org.apache.hadoop.ozone.client.OzoneClient) OzoneVolume(org.apache.hadoop.ozone.client.OzoneVolume) OzoneBucket(org.apache.hadoop.ozone.client.OzoneBucket) OzoneKeyDetails(org.apache.hadoop.ozone.client.OzoneKeyDetails) OmKeyInfo(org.apache.hadoop.ozone.om.helpers.OmKeyInfo) OmDirectoryInfo(org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo) KeyOutputStream(org.apache.hadoop.ozone.client.io.KeyOutputStream) OMException(org.apache.hadoop.ozone.om.exceptions.OMException) Test(org.junit.Test)
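
A side note on the expected-exception pattern used twice above: with JUnit 4.13 or later on the classpath (an assumption), the try/fail/catch blocks collapse into Assert.assertThrows. A minimal sketch, reusing ozoneBucket and key from the test:

    // Equivalent check (import static org.junit.Assert.assertThrows):
    // the key must not be resolvable while it is still open for writing.
    OMException ome = assertThrows(OMException.class, () -> ozoneBucket.getKey(key));
    assertEquals(OMException.ResultCodes.KEY_NOT_FOUND, ome.getResult());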

Example 3 with KeyOutputStream

Use of org.apache.hadoop.ozone.client.io.KeyOutputStream in project ozone by apache.

From class TestContainerReplicationEndToEnd, method testContainerReplication.

/**
 * The test simulates end-to-end container replication.
 */
@Test
public void testContainerReplication() throws Exception {
    String keyName = "testContainerReplication";
    OzoneOutputStream key = objectStore.getVolume(volumeName).getBucket(bucketName).createKey(keyName, 0, ReplicationType.RATIS, ReplicationFactor.THREE, new HashMap<>());
    byte[] testData = "ratis".getBytes(UTF_8);
    // First write and flush creates a container in the datanode
    key.write(testData);
    key.flush();
    KeyOutputStream groupOutputStream = (KeyOutputStream) key.getOutputStream();
    List<OmKeyLocationInfo> locationInfoList = groupOutputStream.getLocationInfoList();
    Assert.assertEquals(1, locationInfoList.size());
    OmKeyLocationInfo omKeyLocationInfo = locationInfoList.get(0);
    long containerID = omKeyLocationInfo.getContainerID();
    PipelineID pipelineID = cluster.getStorageContainerManager().getContainerManager().getContainer(ContainerID.valueOf(containerID)).getPipelineID();
    Pipeline pipeline = cluster.getStorageContainerManager().getPipelineManager().getPipeline(pipelineID);
    key.close();
    HddsProtos.LifeCycleState containerState = cluster.getStorageContainerManager().getContainerManager().getContainer(ContainerID.valueOf(containerID)).getState();
    LoggerFactory.getLogger(TestContainerReplicationEndToEnd.class).info("Current Container State is {}", containerState);
    if ((containerState != HddsProtos.LifeCycleState.CLOSING) && (containerState != HddsProtos.LifeCycleState.CLOSED)) {
        cluster.getStorageContainerManager().getContainerManager().updateContainerState(ContainerID.valueOf(containerID), HddsProtos.LifeCycleEvent.FINALIZE);
    }
    // wait for the container to move out of the OPEN state in SCM
    Thread.sleep(2 * containerReportInterval);
    DatanodeDetails oldReplicaNode = pipeline.getFirstNode();
    // now move the container to the CLOSED state on the datanode.
    XceiverClientSpi xceiverClient = xceiverClientManager.acquireClient(pipeline);
    ContainerProtos.ContainerCommandRequestProto.Builder request = ContainerProtos.ContainerCommandRequestProto.newBuilder();
    request.setDatanodeUuid(pipeline.getFirstNode().getUuidString());
    request.setCmdType(ContainerProtos.Type.CloseContainer);
    request.setContainerID(containerID);
    request.setCloseContainer(ContainerProtos.CloseContainerRequestProto.getDefaultInstance());
    xceiverClient.sendCommand(request.build());
    // wait for container to move to closed state in SCM
    Thread.sleep(2 * containerReportInterval);
    Assert.assertEquals(HddsProtos.LifeCycleState.CLOSED,
        cluster.getStorageContainerManager().getContainerInfo(containerID).getState());
    // shutdown the replica node
    cluster.shutdownHddsDatanode(oldReplicaNode);
    // now the container is under-replicated and will be replicated to a different dn
    HddsDatanodeService dnService = null;
    for (HddsDatanodeService dn : cluster.getHddsDatanodes()) {
        Predicate<DatanodeDetails> sameNode = i -> i.getUuid().equals(dn.getDatanodeDetails().getUuid());
        if (pipeline.getNodes().stream().noneMatch(sameNode)) {
            dnService = dn;
        }
    }
    Assert.assertNotNull(dnService);
    final HddsDatanodeService newReplicaNode = dnService;
    // wait for the container to get replicated
    GenericTestUtils.waitFor(() -> {
        return newReplicaNode.getDatanodeStateMachine().getContainer().getContainerSet().getContainer(containerID) != null;
    }, 500, 100000);
    Assert.assertTrue(newReplicaNode.getDatanodeStateMachine().getContainer().getContainerSet().getContainer(containerID).getContainerData().getBlockCommitSequenceId() > 0);
    // wait for SCM to update the replica Map
    Thread.sleep(5 * containerReportInterval);
    // shut down all the datanodes in the pipeline and then try to read the
    // key again
    for (DatanodeDetails dn : pipeline.getNodes()) {
        cluster.shutdownHddsDatanode(dn);
    }
    // This will try to read the data from the dn to which the container got
    // replicated after the container got closed.
    TestHelper.validateData(keyName, testData, objectStore, volumeName, bucketName);
}
Also used : ScmConfigKeys(org.apache.hadoop.hdds.scm.ScmConfigKeys) OzoneConfiguration(org.apache.hadoop.hdds.conf.OzoneConfiguration) HddsProtos(org.apache.hadoop.hdds.protocol.proto.HddsProtos) DatanodeRatisServerConfig(org.apache.hadoop.hdds.conf.DatanodeRatisServerConfig) BeforeClass(org.junit.BeforeClass) OZONE_SCM_STALENODE_INTERVAL(org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL) HddsDatanodeService(org.apache.hadoop.ozone.HddsDatanodeService) LoggerFactory(org.slf4j.LoggerFactory) MiniOzoneCluster(org.apache.hadoop.ozone.MiniOzoneCluster) HashMap(java.util.HashMap) ContainerProtos(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos) OmKeyLocationInfo(org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo) Duration(java.time.Duration) OZONE_SCM_PIPELINE_DESTROY_TIMEOUT(org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_PIPELINE_DESTROY_TIMEOUT) AfterClass(org.junit.AfterClass) XceiverClientManager(org.apache.hadoop.hdds.scm.XceiverClientManager) HDDS_CONTAINER_REPORT_INTERVAL(org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL) Predicate(java.util.function.Predicate) UTF_8(java.nio.charset.StandardCharsets.UTF_8) DatanodeDetails(org.apache.hadoop.hdds.protocol.DatanodeDetails) ContainerID(org.apache.hadoop.hdds.scm.container.ContainerID) Pipeline(org.apache.hadoop.hdds.scm.pipeline.Pipeline) Test(org.junit.Test) IOException(java.io.IOException) ObjectStore(org.apache.hadoop.ozone.client.ObjectStore) OzoneClientFactory(org.apache.hadoop.ozone.client.OzoneClientFactory) OZONE_DATANODE_PIPELINE_LIMIT(org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT) ReplicationFactor(org.apache.hadoop.hdds.client.ReplicationFactor) File(java.io.File) ReplicationType(org.apache.hadoop.hdds.client.ReplicationType) KeyOutputStream(org.apache.hadoop.ozone.client.io.KeyOutputStream) TimeUnit(java.util.concurrent.TimeUnit) List(java.util.List) XceiverClientSpi(org.apache.hadoop.hdds.scm.XceiverClientSpi) TestHelper(org.apache.hadoop.ozone.container.TestHelper) ReplicationManagerConfiguration(org.apache.hadoop.hdds.scm.container.ReplicationManager.ReplicationManagerConfiguration) PipelineID(org.apache.hadoop.hdds.scm.pipeline.PipelineID) OzoneOutputStream(org.apache.hadoop.ozone.client.io.OzoneOutputStream) Assert(org.junit.Assert) OzoneClient(org.apache.hadoop.ozone.client.OzoneClient) GenericTestUtils(org.apache.ozone.test.GenericTestUtils)
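
A robustness note: the fixed Thread.sleep calls above tie the test to the container report interval. The GenericTestUtils.waitFor helper already used near the end of the test can poll SCM for the target state instead; a sketch under that assumption, reusing cluster and containerID from the test:

    // Poll every 100 ms, up to 100 s, until SCM reports the container CLOSED,
    // instead of sleeping for a fixed multiple of the report interval.
    GenericTestUtils.waitFor(() -> {
        try {
            return cluster.getStorageContainerManager().getContainerInfo(containerID)
                .getState() == HddsProtos.LifeCycleState.CLOSED;
        } catch (Exception e) {
            return false;
        }
    }, 100, 100000);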

Example 4 with KeyOutputStream

Use of org.apache.hadoop.ozone.client.io.KeyOutputStream in project ozone by apache.

From class TestDiscardPreallocatedBlocks, method testDiscardPreallocatedBlocks.

@Test
public void testDiscardPreallocatedBlocks() throws Exception {
    String keyName = getKeyName();
    OzoneOutputStream key = createKey(keyName, ReplicationType.RATIS, 2 * blockSize);
    Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream);
    KeyOutputStream keyOutputStream = (KeyOutputStream) key.getOutputStream();
    // With the initial size provided, it should have preallocated 2 blocks.
    Assert.assertEquals(2, keyOutputStream.getStreamEntries().size());
    long containerID1 = keyOutputStream.getStreamEntries().get(0).getBlockID().getContainerID();
    long containerID2 = keyOutputStream.getStreamEntries().get(1).getBlockID().getContainerID();
    Assert.assertEquals(containerID1, containerID2);
    String dataString = ContainerTestHelper.getFixedLengthString(keyString, (1 * blockSize));
    byte[] data = dataString.getBytes(UTF_8);
    key.write(data);
    List<OmKeyLocationInfo> locationInfos = new ArrayList<>(keyOutputStream.getLocationInfoList());
    List<BlockOutputStreamEntry> locationStreamInfos = new ArrayList<>(keyOutputStream.getStreamEntries());
    long containerID = locationInfos.get(0).getContainerID();
    ContainerInfo container = cluster.getStorageContainerManager().getContainerManager().getContainer(ContainerID.valueOf(containerID));
    Pipeline pipeline = cluster.getStorageContainerManager().getPipelineManager().getPipeline(container.getPipelineID());
    List<DatanodeDetails> datanodes = pipeline.getNodes();
    Assert.assertEquals(3, datanodes.size());
    waitForContainerClose(key);
    dataString = ContainerTestHelper.getFixedLengthString(keyString, (1 * blockSize));
    data = dataString.getBytes(UTF_8);
    key.write(data);
    Assert.assertEquals(3, keyOutputStream.getStreamEntries().size());
    // The 1st block was written and all the containers are now closed, so the
    // 2nd preallocated block is removed from the list and a new block should
    // have been allocated.
    Assert.assertEquals(locationInfos.get(0).getBlockID(),
        keyOutputStream.getLocationInfoList().get(0).getBlockID());
    Assert.assertNotEquals(locationStreamInfos.get(1).getBlockID(),
        keyOutputStream.getLocationInfoList().get(1).getBlockID());
    key.close();
}
Also used : ArrayList(java.util.ArrayList) OzoneOutputStream(org.apache.hadoop.ozone.client.io.OzoneOutputStream) OmKeyLocationInfo(org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo) Pipeline(org.apache.hadoop.hdds.scm.pipeline.Pipeline) BlockOutputStreamEntry(org.apache.hadoop.ozone.client.io.BlockOutputStreamEntry) DatanodeDetails(org.apache.hadoop.hdds.protocol.DatanodeDetails) ContainerInfo(org.apache.hadoop.hdds.scm.container.ContainerInfo) KeyOutputStream(org.apache.hadoop.ozone.client.io.KeyOutputStream) Test(org.junit.Test)
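
The preallocation count asserted at the start follows from the size hint passed to createKey: the client asks OM for enough blocks to cover the hint, which works out to roughly ceil(size / blockSize). A tiny sketch of that arithmetic, as an illustration rather than code from the project:

    // Expected number of preallocated blocks for a given size hint.
    // With size = 2 * blockSize, as in the test above, this yields 2.
    static long expectedPreallocatedBlocks(long size, long blockSize) {
        return (size + blockSize - 1) / blockSize; // ceiling division
    }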

Example 5 with KeyOutputStream

Use of org.apache.hadoop.ozone.client.io.KeyOutputStream in project ozone by apache.

From class TestBlockOutputStreamFlushDelay, method testFlushChunk.

@Test
public void testFlushChunk() throws Exception {
    XceiverClientMetrics metrics = XceiverClientManager.getXceiverClientMetrics();
    long writeChunkCount = metrics.getContainerOpCountMetrics(ContainerProtos.Type.WriteChunk);
    long putBlockCount = metrics.getContainerOpCountMetrics(ContainerProtos.Type.PutBlock);
    long pendingWriteChunkCount = metrics.getPendingContainerOpCountMetrics(ContainerProtos.Type.WriteChunk);
    long pendingPutBlockCount = metrics.getPendingContainerOpCountMetrics(ContainerProtos.Type.PutBlock);
    long totalOpCount = metrics.getTotalOpCount();
    String keyName = getKeyName();
    OzoneOutputStream key = createKey(keyName, ReplicationType.RATIS, 0);
    int dataLength = flushSize;
    // write data equal to 2 chunks
    byte[] data1 = ContainerTestHelper.getFixedLengthString(keyString, dataLength).getBytes(UTF_8);
    key.write(data1);
    Assert.assertEquals(pendingWriteChunkCount + 2, metrics.getPendingContainerOpCountMetrics(ContainerProtos.Type.WriteChunk));
    Assert.assertEquals(pendingPutBlockCount + 1, metrics.getPendingContainerOpCountMetrics(ContainerProtos.Type.PutBlock));
    Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream);
    KeyOutputStream keyOutputStream = (KeyOutputStream) key.getOutputStream();
    Assert.assertEquals(1, keyOutputStream.getStreamEntries().size());
    OutputStream stream = keyOutputStream.getStreamEntries().get(0).getOutputStream();
    Assert.assertTrue(stream instanceof BlockOutputStream);
    RatisBlockOutputStream blockOutputStream = (RatisBlockOutputStream) stream;
    // We have just written data equal to flushSize (2 chunks); at this point
    // the buffer pool should have 2 chunk-sized buffers allocated.
    Assert.assertEquals(2, blockOutputStream.getBufferPool().getSize());
    // both writtenDataLength and totalDataFlushedLength are updated here
    Assert.assertEquals(dataLength, blockOutputStream.getWrittenDataLength());
    Assert.assertEquals(dataLength, blockOutputStream.getTotalDataFlushedLength());
    Assert.assertEquals(0, blockOutputStream.getTotalAckDataLength());
    Assert.assertEquals(0, blockOutputStream.getCommitIndex2flushedDataMap().size());
    // Now do a flush.
    key.flush();
    Assert.assertEquals(1, keyOutputStream.getStreamEntries().size());
    // The previously written data equals flushSize, so the flush triggers
    // no action.
    Assert.assertEquals(pendingWriteChunkCount + 2, metrics.getPendingContainerOpCountMetrics(ContainerProtos.Type.WriteChunk));
    Assert.assertEquals(pendingPutBlockCount + 1, metrics.getPendingContainerOpCountMetrics(ContainerProtos.Type.PutBlock));
    // Since the data in the buffer is already flushed, flush here will have
    // no impact on the counters and data structures
    Assert.assertEquals(2, blockOutputStream.getBufferPool().getSize());
    // Since the flush triggered no action, the BlockOutputStream state is
    // unchanged.
    Assert.assertEquals(dataLength, blockOutputStream.getBufferPool().computeBufferData());
    Assert.assertEquals(dataLength, blockOutputStream.getWrittenDataLength());
    Assert.assertEquals(dataLength, blockOutputStream.getTotalDataFlushedLength());
    Assert.assertEquals(0, blockOutputStream.getCommitIndex2flushedDataMap().size());
    Assert.assertEquals(0, blockOutputStream.getTotalAckDataLength());
    // now close the stream; it will update the ack length after watchForCommit
    key.close();
    Assert.assertEquals(dataLength, blockOutputStream.getTotalAckDataLength());
    // make sure the bufferPool is empty
    Assert.assertEquals(0, blockOutputStream.getBufferPool().computeBufferData());
    Assert.assertEquals(dataLength, blockOutputStream.getTotalAckDataLength());
    Assert.assertNull(blockOutputStream.getCommitIndex2flushedDataMap());
    Assert.assertEquals(pendingWriteChunkCount, metrics.getPendingContainerOpCountMetrics(ContainerProtos.Type.WriteChunk));
    Assert.assertEquals(pendingPutBlockCount, metrics.getPendingContainerOpCountMetrics(ContainerProtos.Type.PutBlock));
    Assert.assertEquals(writeChunkCount + 2, metrics.getContainerOpCountMetrics(ContainerProtos.Type.WriteChunk));
    Assert.assertEquals(putBlockCount + 2, metrics.getContainerOpCountMetrics(ContainerProtos.Type.PutBlock));
    Assert.assertEquals(totalOpCount + 4, metrics.getTotalOpCount());
    Assert.assertEquals(0, keyOutputStream.getStreamEntries().size());
    validateData(keyName, data1);
}
Also used : XceiverClientMetrics(org.apache.hadoop.hdds.scm.XceiverClientMetrics) RatisBlockOutputStream(org.apache.hadoop.hdds.scm.storage.RatisBlockOutputStream) OutputStream(java.io.OutputStream) BlockOutputStream(org.apache.hadoop.hdds.scm.storage.BlockOutputStream) KeyOutputStream(org.apache.hadoop.ozone.client.io.KeyOutputStream) OzoneOutputStream(org.apache.hadoop.ozone.client.io.OzoneOutputStream) Test(org.junit.Test)
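
A pattern note: the test snapshots each counter up front and asserts baseline-plus-delta afterwards. A small helper can make the delta explicit; this is a hypothetical illustration, not a class from the project:

    /** Captures a counter at construction and reports the delta since then. */
    static final class MetricDelta {
        private final java.util.function.LongSupplier reader;
        private final long baseline;

        MetricDelta(java.util.function.LongSupplier reader) {
            this.reader = reader;
            this.baseline = reader.getAsLong();
        }

        long delta() {
            return reader.getAsLong() - baseline;
        }
    }

    // Usage, mirroring the assertions above:
    // MetricDelta writeChunks = new MetricDelta(
    //     () -> metrics.getContainerOpCountMetrics(ContainerProtos.Type.WriteChunk));
    // ...write and flush...
    // Assert.assertEquals(2, writeChunks.delta());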

Aggregations

KeyOutputStream (org.apache.hadoop.ozone.client.io.KeyOutputStream): 69 usages
OzoneOutputStream (org.apache.hadoop.ozone.client.io.OzoneOutputStream): 67 usages
Test (org.junit.Test): 57 usages
OutputStream (java.io.OutputStream): 32 usages
BlockOutputStream (org.apache.hadoop.hdds.scm.storage.BlockOutputStream): 32 usages
RatisBlockOutputStream (org.apache.hadoop.hdds.scm.storage.RatisBlockOutputStream): 29 usages
Pipeline (org.apache.hadoop.hdds.scm.pipeline.Pipeline): 27 usages
OmKeyLocationInfo (org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo): 21 usages
XceiverClientRatis (org.apache.hadoop.hdds.scm.XceiverClientRatis): 19 usages
OmKeyArgs (org.apache.hadoop.ozone.om.helpers.OmKeyArgs): 18 usages
OmKeyInfo (org.apache.hadoop.ozone.om.helpers.OmKeyInfo): 18 usages
ContainerNotOpenException (org.apache.hadoop.hdds.scm.container.common.helpers.ContainerNotOpenException): 14 usages
IOException (java.io.IOException): 12 usages
XceiverClientMetrics (org.apache.hadoop.hdds.scm.XceiverClientMetrics): 12 usages
ContainerInfo (org.apache.hadoop.hdds.scm.container.ContainerInfo): 12 usages
DatanodeDetails (org.apache.hadoop.hdds.protocol.DatanodeDetails): 10 usages
HddsDatanodeService (org.apache.hadoop.ozone.HddsDatanodeService): 10 usages
XceiverClientSpi (org.apache.hadoop.hdds.scm.XceiverClientSpi): 9 usages
BlockOutputStreamEntry (org.apache.hadoop.ozone.client.io.BlockOutputStreamEntry): 9 usages
File (java.io.File): 8 usages