Usage example of org.apache.hadoop.ozone.client.io.OzoneOutputStream in the Apache Ozone project: class RpcClient, method createMultipartKey.
@Override
public OzoneOutputStream createMultipartKey(String volumeName, String bucketName, String keyName, long size, int partNumber, String uploadID) throws IOException {
  // Validate naming rules and required arguments before contacting the OM.
  verifyVolumeName(volumeName);
  verifyBucketName(bucketName);
  if (checkKeyNameEnabled) {
    HddsClientUtils.verifyKeyName(keyName);
  }
  HddsClientUtils.checkNotNull(keyName, uploadID);
  // S3 multipart uploads permit part numbers 1..10000 only.
  Preconditions.checkArgument(partNumber > 0 && partNumber <= 10000,
      "Part number should be greater than zero and less than or equal to 10000");
  Preconditions.checkArgument(size >= 0,
      "size should be greater than or equal to zero");

  final String requestId = UUID.randomUUID().toString();

  // Describe the part to the Ozone Manager and open a key session for it.
  final OmKeyArgs partKeyArgs = new OmKeyArgs.Builder()
      .setVolumeName(volumeName)
      .setBucketName(bucketName)
      .setKeyName(keyName)
      .setDataSize(size)
      .setIsMultipartKey(true)
      .setMultipartUploadID(uploadID)
      .setMultipartUploadPartNumber(partNumber)
      .setAcls(getAclList())
      .build();
  final OpenKeySession session = ozoneManagerClient.openKey(partKeyArgs);

  // Build the output stream that writes this part's data to datanodes,
  // using the replication configuration recorded for the open key.
  final KeyOutputStream partStream = new KeyOutputStream.Builder()
      .setHandler(session)
      .setXceiverClientManager(xceiverClientManager)
      .setOmClient(ozoneManagerClient)
      .setRequestID(requestId)
      .setReplicationConfig(session.getKeyInfo().getReplicationConfig())
      .setMultipartNumber(partNumber)
      .setMultipartUploadID(uploadID)
      .setIsMultipartKey(true)
      .enableUnsafeByteBufferConversion(unsafeByteBufferConversion)
      .setConfig(clientConfig)
      .build();
  partStream.addPreallocateBlocks(session.getKeyInfo().getLatestVersionLocations(), session.getOpenVersion());

  // If the bucket is encrypted, wrap the stream so data is encrypted
  // client-side with the key's DEK before it leaves this process.
  final FileEncryptionInfo encryptionInfo = session.getKeyInfo().getFileEncryptionInfo();
  if (encryptionInfo == null) {
    return new OzoneOutputStream(partStream);
  }
  final KeyProvider.KeyVersion decryptedKey = getDEK(encryptionInfo);
  final CryptoOutputStream encryptedStream = new CryptoOutputStream(partStream,
      OzoneKMSUtil.getCryptoCodec(conf, encryptionInfo),
      decryptedKey.getMaterial(), encryptionInfo.getIV());
  return new OzoneOutputStream(encryptedStream);
}
Usage example of org.apache.hadoop.ozone.client.io.OzoneOutputStream in the Apache Ozone project: class TestOzoneClient, method testPutKeyRatisOneNode.
@Test
public void testPutKeyRatisOneNode() throws IOException {
  Instant testStartTime = Instant.now();
  String value = "sample value";
  OzoneBucket bucket = getOzoneBucket();
  // Write and read back several keys, verifying content and timestamps.
  for (int i = 0; i < 10; i++) {
    String keyName = UUID.randomUUID().toString();
    byte[] data = value.getBytes(UTF_8);
    // try-with-resources ensures streams are closed even if an assertion fails.
    try (OzoneOutputStream out = bucket.createKey(keyName, data.length, ReplicationType.RATIS, ONE, new HashMap<>())) {
      out.write(data);
    }
    OzoneKey key = bucket.getKey(keyName);
    Assert.assertEquals(keyName, key.getName());
    byte[] fileContent = new byte[data.length];
    try (OzoneInputStream is = bucket.readKey(keyName)) {
      // InputStream.read may return fewer bytes than requested in a single
      // call; loop until the buffer is full so the comparison is reliable.
      int off = 0;
      while (off < fileContent.length) {
        int n = is.read(fileContent, off, fileContent.length - off);
        Assert.assertTrue("Unexpected end of stream while reading key", n > 0);
        off += n;
      }
    }
    Assert.assertEquals(value, new String(fileContent, UTF_8));
    // Key timestamps must not predate the start of the test.
    Assert.assertFalse(key.getCreationTime().isBefore(testStartTime));
    Assert.assertFalse(key.getModificationTime().isBefore(testStartTime));
  }
}
Usage example of org.apache.hadoop.ozone.client.io.OzoneOutputStream in the Apache Ozone project: class TestReconWithOzoneManagerHA, method testReconGetsSnapshotFromLeader.
@Test
public void testReconGetsSnapshotFromLeader() throws Exception {
  AtomicReference<OzoneManager> ozoneManager = new AtomicReference<>();
  // Wait for OM leader election to finish
  GenericTestUtils.waitFor(() -> {
    OzoneManager om = cluster.getOMLeader();
    ozoneManager.set(om);
    return om != null;
  }, 100, 120000);
  // BUG FIX: the original asserted on the AtomicReference object itself,
  // which is never null; the leader it holds is what must be non-null.
  Assert.assertNotNull("Timed out waiting OM leader election to finish: " + "no leader or more than one leader.", ozoneManager.get());
  Assert.assertTrue("Should have gotten the leader!", ozoneManager.get().isLeaderReady());
  OzoneManagerServiceProviderImpl impl = (OzoneManagerServiceProviderImpl) cluster.getReconServer().getOzoneManagerServiceProvider();
  // Recon must request the DB checkpoint from the leader OM's HTTP endpoint.
  String hostname = ozoneManager.get().getHttpServer().getHttpAddress().getHostName();
  String expectedUrl = "http://" + (hostname.equals("0.0.0.0") ? "localhost" : hostname) + ":" + ozoneManager.get().getHttpServer().getHttpAddress().getPort() + OZONE_DB_CHECKPOINT_HTTP_ENDPOINT;
  String snapshotUrl = impl.getOzoneManagerSnapshotUrl();
  Assert.assertEquals("OM Snapshot should be requested from the leader.", expectedUrl, snapshotUrl);
  // Write some data; close the stream even if an assertion below fails.
  String keyPrefix = "ratis";
  try (OzoneOutputStream key = objectStore.getVolume(VOL_NAME).getBucket(VOL_NAME).createKey(keyPrefix, 1024, ReplicationType.RATIS, ReplicationFactor.ONE, new HashMap<>())) {
    key.write(keyPrefix.getBytes(UTF_8));
    key.flush();
  }
  // Sync data to Recon
  impl.syncDataFromOM();
  ReconContainerMetadataManager reconContainerMetadataManager = cluster.getReconServer().getReconContainerMetadataManager();
  TableIterator iterator = reconContainerMetadataManager.getContainerTableIterator();
  // Walk to the last entry; its key prefix should match the key we wrote.
  String reconKeyPrefix = null;
  while (iterator.hasNext()) {
    Table.KeyValue<ContainerKeyPrefix, Integer> keyValue = (Table.KeyValue<ContainerKeyPrefix, Integer>) iterator.next();
    reconKeyPrefix = keyValue.getKey().getKeyPrefix();
  }
  Assert.assertEquals("Container data should be synced to recon.", String.format("/%s/%s/%s", VOL_NAME, VOL_NAME, keyPrefix), reconKeyPrefix);
}
Usage example of org.apache.hadoop.ozone.client.io.OzoneOutputStream in the Apache Ozone project: class TestStorageContainerManagerHA, method testPutKey.
public void testPutKey() throws Exception {
  String volumeName = UUID.randomUUID().toString();
  String bucketName = UUID.randomUUID().toString();
  Instant testStartTime = Instant.now();
  ObjectStore store = OzoneClientFactory.getRpcClient(cluster.getConf()).getObjectStore();
  String value = "sample value";
  store.createVolume(volumeName);
  OzoneVolume volume = store.getVolume(volumeName);
  volume.createBucket(bucketName);
  OzoneBucket bucket = volume.getBucket(bucketName);
  String keyName = UUID.randomUUID().toString();
  byte[] data = value.getBytes(UTF_8);
  // try-with-resources ensures the streams are closed even on assertion failure.
  try (OzoneOutputStream out = bucket.createKey(keyName, data.length, RATIS, ONE, new HashMap<>())) {
    out.write(data);
  }
  OzoneKey key = bucket.getKey(keyName);
  Assert.assertEquals(keyName, key.getName());
  byte[] fileContent = new byte[data.length];
  try (OzoneInputStream is = bucket.readKey(keyName)) {
    // BUG FIX: the original ignored the return value of read(); a single
    // call may fill only part of the buffer. Loop until fully read.
    int off = 0;
    while (off < fileContent.length) {
      int n = is.read(fileContent, off, fileContent.length - off);
      Assert.assertTrue("Unexpected end of stream while reading key", n > 0);
      off += n;
    }
  }
  Assert.assertEquals(value, new String(fileContent, UTF_8));
  Assert.assertFalse(key.getCreationTime().isBefore(testStartTime));
  Assert.assertFalse(key.getModificationTime().isBefore(testStartTime));
  final OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(volumeName).setBucketName(bucketName).setReplicationConfig(RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.ONE)).setKeyName(keyName).setRefreshPipeline(true).build();
  final OmKeyInfo keyInfo = cluster.getOzoneManager().lookupKey(keyArgs);
  final List<OmKeyLocationInfo> keyLocationInfos = keyInfo.getKeyLocationVersions().get(0).getBlocksLatestVersionOnly();
  // Find the SCM leader and record its last applied Ratis index.
  long index = -1;
  for (StorageContainerManager scm : cluster.getStorageContainerManagers()) {
    if (scm.checkLeader()) {
      index = getLastAppliedIndex(scm);
    }
  }
  // assertNotEquals reports both values on failure, unlike assertFalse(a == b).
  Assert.assertNotEquals("No SCM leader found", -1, index);
  long finalIndex = index;
  // Ensure all follower scms have caught up with the leader
  GenericTestUtils.waitFor(() -> areAllScmInSync(finalIndex), 100, 10000);
  final long containerID = keyLocationInfos.get(0).getContainerID();
  for (int k = 0; k < numOfSCMs; k++) {
    StorageContainerManager scm = cluster.getStorageContainerManagers().get(k);
    // flush to DB on each SCM
    ((SCMRatisServerImpl) scm.getScmHAManager().getRatisServer()).getStateMachine().takeSnapshot();
    Assert.assertTrue(scm.getContainerManager().containerExist(ContainerID.valueOf(containerID)));
    Assert.assertNotNull(scm.getScmMetadataStore().getContainerTable().get(ContainerID.valueOf(containerID)));
  }
}
Usage example of org.apache.hadoop.ozone.client.io.OzoneOutputStream in the Apache Ozone project: class TestOzoneManagerHA, method createKeyTest.
/**
 * Creates a volume, bucket, and key, then reads the key back and verifies
 * its content. When {@code checkSuccess} is false, an OMNotLeaderException
 * wrapped in a RemoteException is tolerated (expected when a quorum cannot
 * be reached); otherwise any connection/remote failure is rethrown.
 */
protected void createKeyTest(boolean checkSuccess) throws Exception {
  String userName = "user" + RandomStringUtils.randomNumeric(5);
  String adminName = "admin" + RandomStringUtils.randomNumeric(5);
  String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
  VolumeArgs createVolumeArgs = VolumeArgs.newBuilder().setOwner(userName).setAdmin(adminName).build();
  try {
    getObjectStore().createVolume(volumeName, createVolumeArgs);
    OzoneVolume retVolumeinfo = getObjectStore().getVolume(volumeName);
    // assertEquals gives expected/actual diagnostics on failure, unlike
    // the original assertTrue(a.equals(b)) which only reports "false".
    Assert.assertEquals(volumeName, retVolumeinfo.getName());
    Assert.assertEquals(userName, retVolumeinfo.getOwner());
    Assert.assertEquals(adminName, retVolumeinfo.getAdmin());
    String bucketName = UUID.randomUUID().toString();
    String keyName = UUID.randomUUID().toString();
    retVolumeinfo.createBucket(bucketName);
    OzoneBucket ozoneBucket = retVolumeinfo.getBucket(bucketName);
    Assert.assertEquals(bucketName, ozoneBucket.getName());
    Assert.assertEquals(volumeName, ozoneBucket.getVolumeName());
    String value = "random data";
    byte[] data = value.getBytes(UTF_8);
    // try-with-resources closes streams even if an assertion fails; the
    // original never closed the input stream at all.
    try (OzoneOutputStream ozoneOutputStream = ozoneBucket.createKey(keyName, value.length(), ReplicationType.RATIS, ReplicationFactor.ONE, new HashMap<>())) {
      ozoneOutputStream.write(data, 0, value.length());
    }
    byte[] fileContent = new byte[data.length];
    try (OzoneInputStream ozoneInputStream = ozoneBucket.readKey(keyName)) {
      // BUG FIX: the original ignored read()'s return value; a single call
      // may fill only part of the buffer. Loop until fully read.
      int off = 0;
      while (off < fileContent.length) {
        int n = ozoneInputStream.read(fileContent, off, fileContent.length - off);
        Assert.assertTrue("Unexpected end of stream while reading key", n > 0);
        off += n;
      }
    }
    Assert.assertEquals(value, new String(fileContent, UTF_8));
  } catch (ConnectException | RemoteException e) {
    if (!checkSuccess) {
      // last running OM as it would fail to get a quorum.
      if (e instanceof RemoteException) {
        GenericTestUtils.assertExceptionContains("OMNotLeaderException", e);
      }
    } else {
      throw e;
    }
  }
}
End of aggregated usage examples.