use of org.apache.hadoop.ozone.client.io.KeyOutputStream in project ozone by apache.
the class RpcClient method createMultipartKey.
@Override
public OzoneOutputStream createMultipartKey(String volumeName, String bucketName,
    String keyName, long size, int partNumber, String uploadID) throws IOException {
  verifyVolumeName(volumeName);
  verifyBucketName(bucketName);
  if (checkKeyNameEnabled) {
    HddsClientUtils.verifyKeyName(keyName);
  }
  HddsClientUtils.checkNotNull(keyName, uploadID);
  Preconditions.checkArgument(partNumber > 0 && partNumber <= 10000,
      "Part number should be greater than zero and less than or equal to 10000");
  Preconditions.checkArgument(size >= 0, "size should be greater than or equal to zero");
  String requestId = UUID.randomUUID().toString();
  OmKeyArgs keyArgs = new OmKeyArgs.Builder()
      .setVolumeName(volumeName).setBucketName(bucketName).setKeyName(keyName)
      .setDataSize(size).setIsMultipartKey(true).setMultipartUploadID(uploadID)
      .setMultipartUploadPartNumber(partNumber).setAcls(getAclList())
      .build();
  OpenKeySession openKey = ozoneManagerClient.openKey(keyArgs);
  KeyOutputStream keyOutputStream = new KeyOutputStream.Builder()
      .setHandler(openKey).setXceiverClientManager(xceiverClientManager)
      .setOmClient(ozoneManagerClient).setRequestID(requestId)
      .setReplicationConfig(openKey.getKeyInfo().getReplicationConfig())
      .setMultipartNumber(partNumber).setMultipartUploadID(uploadID)
      .setIsMultipartKey(true)
      .enableUnsafeByteBufferConversion(unsafeByteBufferConversion)
      .setConfig(clientConfig)
      .build();
  keyOutputStream.addPreallocateBlocks(
      openKey.getKeyInfo().getLatestVersionLocations(), openKey.getOpenVersion());
  FileEncryptionInfo feInfo = openKey.getKeyInfo().getFileEncryptionInfo();
  if (feInfo != null) {
    KeyProvider.KeyVersion decrypted = getDEK(feInfo);
    final CryptoOutputStream cryptoOut = new CryptoOutputStream(keyOutputStream,
        OzoneKMSUtil.getCryptoCodec(conf, feInfo), decrypted.getMaterial(), feInfo.getIV());
    return new OzoneOutputStream(cryptoOut);
  } else {
    return new OzoneOutputStream(keyOutputStream);
  }
}
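For context, the sketch below shows one way this code path is typically reached from the client API: OzoneBucket.initiateMultipartUpload obtains the upload ID, createMultipartKey returns the OzoneOutputStream built above, and the part names collected from each stream are passed to completeMultipartUpload. This is an illustrative sketch, not part of the original source; the bucket, key name, and single-part layout are assumptions.
// Illustrative sketch (not from the original source): a single-part multipart upload
// driven through the OzoneBucket API, which reaches RpcClient#createMultipartKey above.
private String uploadSinglePart(OzoneBucket bucket, String keyName, byte[] partContent)
    throws IOException {
  OmMultipartInfo multipartInfo = bucket.initiateMultipartUpload(
      keyName, ReplicationType.RATIS, ReplicationFactor.THREE);
  String uploadID = multipartInfo.getUploadID();
  // Part numbers are 1-based and must not exceed 10000 (see the precondition above).
  OzoneOutputStream partStream =
      bucket.createMultipartKey(keyName, partContent.length, 1, uploadID);
  partStream.write(partContent);
  partStream.close();
  // Part number -> part name, as returned when the part is committed on close.
  Map<Integer, String> parts = new HashMap<>();
  parts.put(1, partStream.getCommitUploadPartInfo().getPartName());
  bucket.completeMultipartUpload(keyName, uploadID, parts);
  return uploadID;
}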
use of org.apache.hadoop.ozone.client.io.KeyOutputStream in project ozone by apache.
the class TestObjectStoreWithFSO method testLookupKey.
@Test
public void testLookupKey() throws Exception {
  String parent = "a/b/c/";
  String fileName = "key" + RandomStringUtils.randomNumeric(5);
  String key = parent + fileName;
  OzoneClient client = cluster.getClient();
  ObjectStore objectStore = client.getObjectStore();
  OzoneVolume ozoneVolume = objectStore.getVolume(volumeName);
  Assert.assertTrue(ozoneVolume.getName().equals(volumeName));
  OzoneBucket ozoneBucket = ozoneVolume.getBucket(bucketName);
  Assert.assertTrue(ozoneBucket.getName().equals(bucketName));
  Table<String, OmKeyInfo> openFileTable =
      cluster.getOzoneManager().getMetadataManager().getOpenKeyTable(getBucketLayout());
  String data = "random data";
  OzoneOutputStream ozoneOutputStream = ozoneBucket.createKey(key, data.length(),
      ReplicationType.RATIS, ReplicationFactor.ONE, new HashMap<>());
  KeyOutputStream keyOutputStream = (KeyOutputStream) ozoneOutputStream.getOutputStream();
  long clientID = keyOutputStream.getClientID();
  OmDirectoryInfo dirPathC = getDirInfo(parent);
  Assert.assertNotNull("Failed to find dir path: a/b/c", dirPathC);
  // after file creation
  verifyKeyInOpenFileTable(openFileTable, clientID, fileName, dirPathC.getObjectID(), false);
  ozoneOutputStream.write(data.getBytes(StandardCharsets.UTF_8), 0, data.length());
  // open key
  try {
    ozoneBucket.getKey(key);
    fail("Should throw exception as fileName is not visible and its still "
        + "open for writing!");
  } catch (OMException ome) {
    // expected
    assertEquals(ome.getResult(), OMException.ResultCodes.KEY_NOT_FOUND);
  }
  ozoneOutputStream.close();
  OzoneKeyDetails keyDetails = ozoneBucket.getKey(key);
  Assert.assertEquals(key, keyDetails.getName());
  Table<String, OmKeyInfo> fileTable =
      cluster.getOzoneManager().getMetadataManager().getKeyTable(getBucketLayout());
  // When closing the key, entry should be removed from openFileTable
  // and it should be added to fileTable.
  verifyKeyInFileTable(fileTable, fileName, dirPathC.getObjectID(), false);
  verifyKeyInOpenFileTable(openFileTable, clientID, fileName, dirPathC.getObjectID(), true);
  ozoneBucket.deleteKey(key);
  // get deleted key
  try {
    ozoneBucket.getKey(key);
    fail("Should throw exception as fileName not exists!");
  } catch (OMException ome) {
    // expected
    assertEquals(ome.getResult(), OMException.ResultCodes.KEY_NOT_FOUND);
  }
  // after key delete
  verifyKeyInFileTable(fileTable, fileName, dirPathC.getObjectID(), true);
  verifyKeyInOpenFileTable(openFileTable, clientID, fileName, dirPathC.getObjectID(), true);
}
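The visibility rules exercised above can be summarized from the client's point of view with a minimal sketch, not part of the test: a key written to an FSO bucket is not returned by getKey until its output stream is closed, and disappears again once deleteKey completes. The bucket and key name used here are illustrative assumptions.
// Illustrative sketch: key visibility in an FSO bucket from the client's perspective,
// mirroring what the table-level assertions above verify.
private void demonstrateKeyVisibility(OzoneBucket bucket) throws IOException {
  byte[] payload = "random data".getBytes(StandardCharsets.UTF_8);
  OzoneOutputStream out = bucket.createKey("a/b/c/demo-key", payload.length,
      ReplicationType.RATIS, ReplicationFactor.ONE, new HashMap<>());
  out.write(payload);
  // While the stream is open the key exists only in the open-file table, so
  // bucket.getKey("a/b/c/demo-key") would fail with KEY_NOT_FOUND here.
  out.close();
  OzoneKeyDetails details = bucket.getKey("a/b/c/demo-key"); // visible after close
  assert details.getName().equals("a/b/c/demo-key");
  bucket.deleteKey("a/b/c/demo-key");
  // After deleteKey, getKey fails with KEY_NOT_FOUND again.
}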
use of org.apache.hadoop.ozone.client.io.KeyOutputStream in project ozone by apache.
the class TestContainerReplicationEndToEnd method testContainerReplication.
/**
* The test simulates end to end container replication.
*/
@Test
public void testContainerReplication() throws Exception {
  String keyName = "testContainerReplication";
  OzoneOutputStream key = objectStore.getVolume(volumeName).getBucket(bucketName)
      .createKey(keyName, 0, ReplicationType.RATIS, ReplicationFactor.THREE, new HashMap<>());
  byte[] testData = "ratis".getBytes(UTF_8);
  // First write and flush creates a container in the datanode
  key.write(testData);
  key.flush();
  KeyOutputStream groupOutputStream = (KeyOutputStream) key.getOutputStream();
  List<OmKeyLocationInfo> locationInfoList = groupOutputStream.getLocationInfoList();
  Assert.assertEquals(1, locationInfoList.size());
  OmKeyLocationInfo omKeyLocationInfo = locationInfoList.get(0);
  long containerID = omKeyLocationInfo.getContainerID();
  PipelineID pipelineID = cluster.getStorageContainerManager().getContainerManager()
      .getContainer(ContainerID.valueOf(containerID)).getPipelineID();
  Pipeline pipeline = cluster.getStorageContainerManager().getPipelineManager()
      .getPipeline(pipelineID);
  key.close();
  HddsProtos.LifeCycleState containerState = cluster.getStorageContainerManager()
      .getContainerManager().getContainer(ContainerID.valueOf(containerID)).getState();
  LoggerFactory.getLogger(TestContainerReplicationEndToEnd.class)
      .info("Current Container State is {}", containerState);
  if ((containerState != HddsProtos.LifeCycleState.CLOSING)
      && (containerState != HddsProtos.LifeCycleState.CLOSED)) {
    cluster.getStorageContainerManager().getContainerManager()
        .updateContainerState(ContainerID.valueOf(containerID),
            HddsProtos.LifeCycleEvent.FINALIZE);
  }
  // wait for the container to move to the CLOSING state in SCM
  Thread.sleep(2 * containerReportInterval);
  DatanodeDetails oldReplicaNode = pipeline.getFirstNode();
  // now move the container to the CLOSED state on the datanode
  XceiverClientSpi xceiverClient = xceiverClientManager.acquireClient(pipeline);
  ContainerProtos.ContainerCommandRequestProto.Builder request =
      ContainerProtos.ContainerCommandRequestProto.newBuilder();
  request.setDatanodeUuid(pipeline.getFirstNode().getUuidString());
  request.setCmdType(ContainerProtos.Type.CloseContainer);
  request.setContainerID(containerID);
  request.setCloseContainer(ContainerProtos.CloseContainerRequestProto.getDefaultInstance());
  xceiverClient.sendCommand(request.build());
  // wait for the container to move to the CLOSED state in SCM
  Thread.sleep(2 * containerReportInterval);
  Assert.assertTrue(cluster.getStorageContainerManager().getContainerInfo(containerID)
      .getState() == HddsProtos.LifeCycleState.CLOSED);
  // shutdown the replica node
  cluster.shutdownHddsDatanode(oldReplicaNode);
  // now the container is under-replicated and will be moved to a different dn
  HddsDatanodeService dnService = null;
  for (HddsDatanodeService dn : cluster.getHddsDatanodes()) {
    Predicate<DatanodeDetails> p =
        i -> i.getUuid().equals(dn.getDatanodeDetails().getUuid());
    if (!pipeline.getNodes().stream().anyMatch(p)) {
      dnService = dn;
    }
  }
  Assert.assertNotNull(dnService);
  final HddsDatanodeService newReplicaNode = dnService;
  // wait for the container to get replicated
  GenericTestUtils.waitFor(() -> {
    return newReplicaNode.getDatanodeStateMachine().getContainer().getContainerSet()
        .getContainer(containerID) != null;
  }, 500, 100000);
  Assert.assertTrue(newReplicaNode.getDatanodeStateMachine().getContainer()
      .getContainerSet().getContainer(containerID).getContainerData()
      .getBlockCommitSequenceId() > 0);
  // wait for SCM to update the replica map
  Thread.sleep(5 * containerReportInterval);
  // shut down the datanodes in the original pipeline so the read below is served
  // by the new replica
  for (DatanodeDetails dn : pipeline.getNodes()) {
    cluster.shutdownHddsDatanode(dn);
  }
  // This will try to read the data from the dn to which the container got
  // replicated after the container got closed.
  TestHelper.validateData(keyName, testData, objectStore, volumeName, bucketName);
}
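The manual close of the container on the datanode (the XceiverClientSpi block above) is easy to factor into a small helper; the sketch below assumes the same xceiverClientManager and pipeline objects. The releaseClient call in the finally block is an extra cleanup step added here for illustration and is not part of the original test.
// Sketch of a helper that asks the leader datanode of a pipeline to close a container.
// Mirrors the inline block in the test above.
private void closeContainerOnDatanode(XceiverClientManager clientManager,
    Pipeline pipeline, long containerID) throws Exception {
  XceiverClientSpi xceiverClient = clientManager.acquireClient(pipeline);
  try {
    ContainerProtos.ContainerCommandRequestProto.Builder request =
        ContainerProtos.ContainerCommandRequestProto.newBuilder();
    request.setDatanodeUuid(pipeline.getFirstNode().getUuidString());
    request.setCmdType(ContainerProtos.Type.CloseContainer);
    request.setContainerID(containerID);
    request.setCloseContainer(ContainerProtos.CloseContainerRequestProto.getDefaultInstance());
    xceiverClient.sendCommand(request.build());
  } finally {
    clientManager.releaseClient(xceiverClient, false);
  }
}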
use of org.apache.hadoop.ozone.client.io.KeyOutputStream in project ozone by apache.
the class TestDiscardPreallocatedBlocks method testDiscardPreallocatedBlocks.
@Test
public void testDiscardPreallocatedBlocks() throws Exception {
  String keyName = getKeyName();
  OzoneOutputStream key = createKey(keyName, ReplicationType.RATIS, 2 * blockSize);
  KeyOutputStream keyOutputStream = (KeyOutputStream) key.getOutputStream();
  Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream);
  // With the initial size provided, it should have pre-allocated 2 blocks
  Assert.assertEquals(2, keyOutputStream.getStreamEntries().size());
  long containerID1 = keyOutputStream.getStreamEntries().get(0).getBlockID().getContainerID();
  long containerID2 = keyOutputStream.getStreamEntries().get(1).getBlockID().getContainerID();
  Assert.assertEquals(containerID1, containerID2);
  String dataString = ContainerTestHelper.getFixedLengthString(keyString, (1 * blockSize));
  byte[] data = dataString.getBytes(UTF_8);
  key.write(data);
  List<OmKeyLocationInfo> locationInfos =
      new ArrayList<>(keyOutputStream.getLocationInfoList());
  List<BlockOutputStreamEntry> locationStreamInfos =
      new ArrayList<>(keyOutputStream.getStreamEntries());
  long containerID = locationInfos.get(0).getContainerID();
  ContainerInfo container = cluster.getStorageContainerManager().getContainerManager()
      .getContainer(ContainerID.valueOf(containerID));
  Pipeline pipeline = cluster.getStorageContainerManager().getPipelineManager()
      .getPipeline(container.getPipelineID());
  List<DatanodeDetails> datanodes = pipeline.getNodes();
  Assert.assertEquals(3, datanodes.size());
  waitForContainerClose(key);
  dataString = ContainerTestHelper.getFixedLengthString(keyString, (1 * blockSize));
  data = dataString.getBytes(UTF_8);
  key.write(data);
  Assert.assertEquals(3, keyOutputStream.getStreamEntries().size());
  // the 1st block got written. Now all the containers are closed, so the 2nd
  // pre-allocated block will be removed from the list and a new block should
  // have been allocated
  Assert.assertTrue(keyOutputStream.getLocationInfoList().get(0).getBlockID()
      .equals(locationInfos.get(0).getBlockID()));
  Assert.assertFalse(locationStreamInfos.get(1).getBlockID()
      .equals(keyOutputStream.getLocationInfoList().get(1).getBlockID()));
  key.close();
}
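When debugging this behaviour it can help to see which blocks a KeyOutputStream currently holds. The snippet below is a small illustrative helper, not part of the test, that prints the container and local IDs of each stream entry using the same getStreamEntries() and getBlockID() accessors the assertions above rely on.
// Illustrative helper: list the blocks currently held by a KeyOutputStream.
private static void dumpStreamEntries(KeyOutputStream keyOutputStream) {
  List<BlockOutputStreamEntry> entries = keyOutputStream.getStreamEntries();
  for (int i = 0; i < entries.size(); i++) {
    BlockID blockID = entries.get(i).getBlockID();
    System.out.println("entry " + i + ": containerID=" + blockID.getContainerID()
        + ", localID=" + blockID.getLocalID());
  }
}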
use of org.apache.hadoop.ozone.client.io.KeyOutputStream in project ozone by apache.
the class TestBlockOutputStreamFlushDelay method testFlushChunk.
@Test
public void testFlushChunk() throws Exception {
  XceiverClientMetrics metrics = XceiverClientManager.getXceiverClientMetrics();
  long writeChunkCount = metrics.getContainerOpCountMetrics(ContainerProtos.Type.WriteChunk);
  long putBlockCount = metrics.getContainerOpCountMetrics(ContainerProtos.Type.PutBlock);
  long pendingWriteChunkCount =
      metrics.getPendingContainerOpCountMetrics(ContainerProtos.Type.WriteChunk);
  long pendingPutBlockCount =
      metrics.getPendingContainerOpCountMetrics(ContainerProtos.Type.PutBlock);
  long totalOpCount = metrics.getTotalOpCount();
  String keyName = getKeyName();
  OzoneOutputStream key = createKey(keyName, ReplicationType.RATIS, 0);
  int dataLength = flushSize;
  // write data equal to 2 chunks
  byte[] data1 =
      ContainerTestHelper.getFixedLengthString(keyString, dataLength).getBytes(UTF_8);
  key.write(data1);
  Assert.assertEquals(pendingWriteChunkCount + 2,
      metrics.getPendingContainerOpCountMetrics(ContainerProtos.Type.WriteChunk));
  Assert.assertEquals(pendingPutBlockCount + 1,
      metrics.getPendingContainerOpCountMetrics(ContainerProtos.Type.PutBlock));
  Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream);
  KeyOutputStream keyOutputStream = (KeyOutputStream) key.getOutputStream();
  Assert.assertTrue(keyOutputStream.getStreamEntries().size() == 1);
  OutputStream stream = keyOutputStream.getStreamEntries().get(0).getOutputStream();
  Assert.assertTrue(stream instanceof BlockOutputStream);
  RatisBlockOutputStream blockOutputStream = (RatisBlockOutputStream) stream;
  // we have just written data equal to the flush size (2 chunks); at this point the
  // buffer pool should have 2 buffers allocated, each worth one chunk size
  Assert.assertEquals(2, blockOutputStream.getBufferPool().getSize());
  // writtenDataLength as well as flushedDataLength will be updated here
  Assert.assertEquals(dataLength, blockOutputStream.getWrittenDataLength());
  Assert.assertEquals(dataLength, blockOutputStream.getTotalDataFlushedLength());
  Assert.assertEquals(0, blockOutputStream.getTotalAckDataLength());
  Assert.assertEquals(0, blockOutputStream.getCommitIndex2flushedDataMap().size());
  // Now do a flush.
  key.flush();
  Assert.assertEquals(1, keyOutputStream.getStreamEntries().size());
  // The previously written data is equal to flushSize, so the flush triggers no
  // further action.
  Assert.assertEquals(pendingWriteChunkCount + 2,
      metrics.getPendingContainerOpCountMetrics(ContainerProtos.Type.WriteChunk));
  Assert.assertEquals(pendingPutBlockCount + 1,
      metrics.getPendingContainerOpCountMetrics(ContainerProtos.Type.PutBlock));
  // Since the data in the buffer is already flushed, flush here will have
  // no impact on the counters and data structures
  Assert.assertEquals(2, blockOutputStream.getBufferPool().getSize());
  // No action is triggered by the flush, so BlockOutputStream state is unchanged.
  Assert.assertEquals(dataLength, blockOutputStream.getBufferPool().computeBufferData());
  Assert.assertEquals(dataLength, blockOutputStream.getWrittenDataLength());
  Assert.assertEquals(dataLength, blockOutputStream.getTotalDataFlushedLength());
  Assert.assertEquals(0, blockOutputStream.getCommitIndex2flushedDataMap().size());
  Assert.assertEquals(0, blockOutputStream.getTotalAckDataLength());
  // now close the stream; it will update the ack length after watchForCommit
  key.close();
  Assert.assertEquals(dataLength, blockOutputStream.getTotalAckDataLength());
  // make sure the bufferPool is empty
  Assert.assertEquals(0, blockOutputStream.getBufferPool().computeBufferData());
  Assert.assertEquals(dataLength, blockOutputStream.getTotalAckDataLength());
  Assert.assertNull(blockOutputStream.getCommitIndex2flushedDataMap());
  Assert.assertEquals(pendingWriteChunkCount,
      metrics.getPendingContainerOpCountMetrics(ContainerProtos.Type.WriteChunk));
  Assert.assertEquals(pendingPutBlockCount,
      metrics.getPendingContainerOpCountMetrics(ContainerProtos.Type.PutBlock));
  Assert.assertEquals(writeChunkCount + 2,
      metrics.getContainerOpCountMetrics(ContainerProtos.Type.WriteChunk));
  Assert.assertEquals(putBlockCount + 2,
      metrics.getContainerOpCountMetrics(ContainerProtos.Type.PutBlock));
  Assert.assertEquals(totalOpCount + 4, metrics.getTotalOpCount());
  Assert.assertEquals(0, keyOutputStream.getStreamEntries().size());
  validateData(keyName, data1);
}
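Tests like this one repeatedly compare before/after values of the XceiverClientMetrics counters. A small illustrative helper that snapshots the counters and reports how far they advanced is sketched below; it is not part of the test and uses only the getter methods already exercised above.
// Illustrative helper: snapshot the client op counters used in the assertions above
// and compute how much they advanced after a write/flush/close sequence.
private static final class OpCountSnapshot {
  private final long writeChunk;
  private final long putBlock;
  private final long totalOps;

  private OpCountSnapshot(XceiverClientMetrics metrics) {
    this.writeChunk = metrics.getContainerOpCountMetrics(ContainerProtos.Type.WriteChunk);
    this.putBlock = metrics.getContainerOpCountMetrics(ContainerProtos.Type.PutBlock);
    this.totalOps = metrics.getTotalOpCount();
  }

  static OpCountSnapshot take(XceiverClientMetrics metrics) {
    return new OpCountSnapshot(metrics);
  }

  long writeChunksSince(OpCountSnapshot earlier) {
    return writeChunk - earlier.writeChunk;
  }

  long putBlocksSince(OpCountSnapshot earlier) {
    return putBlock - earlier.putBlock;
  }

  long totalOpsSince(OpCountSnapshot earlier) {
    return totalOps - earlier.totalOps;
  }
}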