use of org.apache.hadoop.hdds.client.ReplicationConfig in project ozone by apache.
the class PutKeyHandler method execute.
@Override
protected void execute(OzoneClient client, OzoneAddress address)
    throws IOException, OzoneClientException {
  String volumeName = address.getVolumeName();
  String bucketName = address.getBucketName();
  String keyName = address.getKeyName();
  File dataFile = new File(fileName);

  if (isVerbose()) {
    try (InputStream stream = new FileInputStream(dataFile)) {
      String hash = DigestUtils.md5Hex(stream);
      out().printf("File Hash : %s%n", hash);
    }
  }

  ReplicationConfig replicationConfig =
      ReplicationConfig.parse(replicationType, replication, getConf());

  OzoneVolume vol = client.getObjectStore().getVolume(volumeName);
  OzoneBucket bucket = vol.getBucket(bucketName);
  Map<String, String> keyMetadata = new HashMap<>();
  String gdprEnabled = bucket.getMetadata().get(OzoneConsts.GDPR_FLAG);
  if (Boolean.parseBoolean(gdprEnabled)) {
    keyMetadata.put(OzoneConsts.GDPR_FLAG, Boolean.TRUE.toString());
  }

  int chunkSize = (int) getConf().getStorageSize(OZONE_SCM_CHUNK_SIZE_KEY,
      OZONE_SCM_CHUNK_SIZE_DEFAULT, StorageUnit.BYTES);
  try (InputStream input = new FileInputStream(dataFile);
      OutputStream output = bucket.createKey(keyName, dataFile.length(),
          replicationConfig, keyMetadata)) {
    IOUtils.copyBytes(input, output, chunkSize);
  }
}
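For orientation, here is a minimal sketch of the same parse-and-createKey flow driven from a standalone client. The volume, bucket and key names, the "THREE" replication string, and the PutKeySketch class are illustrative assumptions, not values taken from PutKeyHandler.
import java.io.OutputStream;
import java.nio.charset.StandardCharsets;
import java.util.HashMap;

import org.apache.hadoop.hdds.client.ReplicationConfig;
import org.apache.hadoop.hdds.client.ReplicationType;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.client.OzoneBucket;
import org.apache.hadoop.ozone.client.OzoneClient;
import org.apache.hadoop.ozone.client.OzoneClientFactory;

public class PutKeySketch {
  public static void main(String[] args) throws Exception {
    OzoneConfiguration conf = new OzoneConfiguration();
    // Resolve the replication config from a type and a replication string,
    // mirroring the ReplicationConfig.parse call in PutKeyHandler above.
    ReplicationConfig replicationConfig =
        ReplicationConfig.parse(ReplicationType.RATIS, "THREE", conf);
    try (OzoneClient client = OzoneClientFactory.getRpcClient(conf)) {
      OzoneBucket bucket = client.getObjectStore()
          .getVolume("vol1").getBucket("bucket1");
      byte[] data = "hello".getBytes(StandardCharsets.UTF_8);
      // createKey takes the key name, expected length, replication config
      // and key metadata, exactly as in the handler above.
      try (OutputStream out = bucket.createKey(
          "key1", data.length, replicationConfig, new HashMap<>())) {
        out.write(data);
      }
    }
  }
}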
use of org.apache.hadoop.hdds.client.ReplicationConfig in project ozone by apache.
the class ContainerInfo method fromProtobuf.
public static ContainerInfo fromProtobuf(HddsProtos.ContainerInfoProto info) {
  ContainerInfo.Builder builder = new ContainerInfo.Builder();
  final ReplicationConfig config = ReplicationConfig.fromProtoTypeAndFactor(
      info.getReplicationType(), info.getReplicationFactor());
  builder.setUsedBytes(info.getUsedBytes())
      .setNumberOfKeys(info.getNumberOfKeys())
      .setState(info.getState())
      .setStateEnterTime(info.getStateEnterTime())
      .setOwner(info.getOwner())
      .setContainerID(info.getContainerID())
      .setDeleteTransactionId(info.getDeleteTransactionId())
      .setReplicationConfig(config)
      .setSequenceId(info.getSequenceId());
  if (info.hasPipelineID()) {
    builder.setPipelineID(PipelineID.getFromProtobuf(info.getPipelineID()));
  }
  return builder.build();
}
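As a minimal hedged sketch of the helper used above, the wrapper below fixes the protobuf pair to RATIS/THREE; the class and method names are assumptions for illustration only.
import org.apache.hadoop.hdds.client.ReplicationConfig;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;

public final class ReplicationConfigSketch {
  // Sketch: map a protobuf (type, factor) pair, as carried in
  // ContainerInfoProto, back to an in-memory ReplicationConfig.
  static ReplicationConfig ratisThree() {
    return ReplicationConfig.fromProtoTypeAndFactor(
        HddsProtos.ReplicationType.RATIS,
        HddsProtos.ReplicationFactor.THREE);
  }
}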
use of org.apache.hadoop.hdds.client.ReplicationConfig in project ozone by apache.
the class SCMUpgradeFinalizer method postFinalizeUpgrade.
public void postFinalizeUpgrade(StorageContainerManager scm) throws IOException {
  // Don't wait for the next heartbeat from datanodes to move them to the
  // Healthy-ReadOnly state. Force them to Healthy-ReadOnly so that pipeline
  // creation can resume right away.
  scm.getScmNodeManager().forceNodesToHealthyReadOnly();
  PipelineManager pipelineManager = scm.getPipelineManager();
  pipelineManager.resumePipelineCreation();
  // Wait for at least one pipeline to be created before finishing
  // finalization, so clients can write.
  boolean hasPipeline = false;
  while (!hasPipeline) {
    ReplicationConfig ratisThree = ReplicationConfig.fromProtoTypeAndFactor(
        HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.THREE);
    int pipelineCount = pipelineManager
        .getPipelines(ratisThree, Pipeline.PipelineState.OPEN).size();
    hasPipeline = (pipelineCount >= 1);
    if (!hasPipeline) {
      LOG.info("Waiting for at least one open pipeline after SCM finalization.");
      try {
        Thread.sleep(5000);
      } catch (InterruptedException e) {
        // Try again on the next loop iteration.
        Thread.currentThread().interrupt();
      }
    } else {
      LOG.info("Open pipeline found after SCM finalization");
    }
  }
  emitFinishedMsg();
}
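The loop above is effectively polling a single predicate. A sketch of that check, assuming it lives alongside the method and using only the PipelineManager calls already shown (the helper name is an assumption):
// Sketch: the condition the loop above polls for, factored into a helper.
// Returns true once at least one OPEN RATIS/THREE pipeline exists.
private static boolean hasOpenRatisThreePipeline(PipelineManager pipelineManager) {
  ReplicationConfig ratisThree = ReplicationConfig.fromProtoTypeAndFactor(
      HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.THREE);
  return !pipelineManager
      .getPipelines(ratisThree, Pipeline.PipelineState.OPEN).isEmpty();
}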
use of org.apache.hadoop.hdds.client.ReplicationConfig in project ozone by apache.
the class TestOzoneRpcClientAbstract method createRequiredForVersioningTest.
private void createRequiredForVersioningTest(String volumeName,
    String bucketName, String keyName, boolean versioning) throws Exception {
  ReplicationConfig replicationConfig = ReplicationConfig.fromProtoTypeAndFactor(
      HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.THREE);
  String value = "sample value";
  store.createVolume(volumeName);
  OzoneVolume volume = store.getVolume(volumeName);
  // Bucket created with the requested versioning setting.
  volume.createBucket(bucketName,
      BucketArgs.newBuilder().setVersioning(versioning).build());
  OzoneBucket bucket = volume.getBucket(bucketName);
  OzoneOutputStream out = bucket.createKey(keyName,
      value.getBytes(UTF_8).length, replicationConfig, new HashMap<>());
  out.write(value.getBytes(UTF_8));
  out.close();
  // Overwrite the key with a second version.
  out = bucket.createKey(keyName,
      value.getBytes(UTF_8).length, replicationConfig, new HashMap<>());
  out.write(value.getBytes(UTF_8));
  out.close();
}
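Since bucket.createKey returns a closeable OzoneOutputStream, the write/close pairs above could also be written with try-with-resources so the stream is closed even if write fails; a minimal sketch of one such pair, using only the names from the test above:
// Sketch: same createKey + write as above, with the stream closed
// automatically even when write() throws.
try (OzoneOutputStream out = bucket.createKey(keyName,
    value.getBytes(UTF_8).length, replicationConfig, new HashMap<>())) {
  out.write(value.getBytes(UTF_8));
}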
use of org.apache.hadoop.hdds.client.ReplicationConfig in project ozone by apache.
the class S3InitiateMultipartUploadRequestWithFSO method validateAndUpdateCache.
@Override
@SuppressWarnings("methodlength")
public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
    long transactionLogIndex,
    OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper) {
  MultipartInfoInitiateRequest multipartInfoInitiateRequest =
      getOmRequest().getInitiateMultiPartUploadRequest();
  KeyArgs keyArgs = multipartInfoInitiateRequest.getKeyArgs();
  Preconditions.checkNotNull(keyArgs.getMultipartUploadID());
  Map<String, String> auditMap = buildKeyArgsAuditMap(keyArgs);
  String volumeName = keyArgs.getVolumeName();
  String bucketName = keyArgs.getBucketName();
  final String requestedVolume = volumeName;
  final String requestedBucket = bucketName;
  String keyName = keyArgs.getKeyName();
  OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
  ozoneManager.getMetrics().incNumInitiateMultipartUploads();
  boolean acquiredBucketLock = false;
  IOException exception = null;
  OmMultipartKeyInfo multipartKeyInfo = null;
  OmKeyInfo omKeyInfo = null;
  List<OmDirectoryInfo> missingParentInfos;
  Result result = null;
  OMResponse.Builder omResponse =
      OmResponseUtil.getOMResponseBuilder(getOmRequest());
  OMClientResponse omClientResponse = null;
  try {
    keyArgs = resolveBucketLink(ozoneManager, keyArgs, auditMap);
    volumeName = keyArgs.getVolumeName();
    bucketName = keyArgs.getBucketName();
    // TODO: support S3 ACLs later.
    acquiredBucketLock = omMetadataManager.getLock()
        .acquireWriteLock(BUCKET_LOCK, volumeName, bucketName);
    validateBucketAndVolume(omMetadataManager, volumeName, bucketName);
    OMFileRequest.OMPathInfoWithFSO pathInfoFSO =
        OMFileRequest.verifyDirectoryKeysInPath(omMetadataManager, volumeName,
            bucketName, keyName, Paths.get(keyName));
    // Check whether the directory already exists in OM.
    checkDirectoryResult(keyName, pathInfoFSO.getDirectoryResult());
    // Add all missing parents to the directory table.
    missingParentInfos = OMDirectoryCreateRequestWithFSO
        .getAllMissingParentDirInfo(ozoneManager, keyArgs, pathInfoFSO,
            transactionLogIndex);
    // The uploadId is appended to the key because multiple users may
    // initiate multipart uploads on the same key concurrently; whoever
    // commits last is the version visible in Ozone. Without the id, parts
    // uploaded by different users against the same /volume/bucket/key would
    // be applied to a single key, and the resulting object could be a mix
    // of parts from several uploads. By including the uploadId, each
    // initiated multipart upload gets its own entry in the openKey table.
    // AWS S3 behaves the same way: every initiate-multipart-upload request
    // returns a new uploadId, even when the key already exists.
    String multipartKey = omMetadataManager.getMultipartKey(volumeName,
        bucketName, keyName, keyArgs.getMultipartUploadID());
    String multipartOpenKey = omMetadataManager.getMultipartKey(
        pathInfoFSO.getLastKnownParentId(), pathInfoFSO.getLeafNodeName(),
        keyArgs.getMultipartUploadID());
    // Even if this key already exists in the KeyTable, it is handled by the
    // final complete-multipart-upload step. AWS S3 behaves the same way:
    // a user can initiate an MPU for a key that already exists in a bucket.
    final ReplicationConfig replicationConfig =
        ReplicationConfig.fromProtoTypeAndFactor(keyArgs.getType(),
            keyArgs.getFactor());
    multipartKeyInfo = new OmMultipartKeyInfo.Builder()
        .setUploadID(keyArgs.getMultipartUploadID())
        .setCreationTime(keyArgs.getModificationTime())
        .setReplicationConfig(replicationConfig)
        .setObjectID(pathInfoFSO.getLeafNodeObjectId())
        .setUpdateID(transactionLogIndex)
        .setParentID(pathInfoFSO.getLastKnownParentId())
        .build();
    omKeyInfo = new OmKeyInfo.Builder()
        .setVolumeName(volumeName)
        .setBucketName(bucketName)
        .setKeyName(keyArgs.getKeyName())
        .setCreationTime(keyArgs.getModificationTime())
        .setModificationTime(keyArgs.getModificationTime())
        .setReplicationConfig(replicationConfig)
        .setOmKeyLocationInfos(Collections.singletonList(
            new OmKeyLocationInfoGroup(0, new ArrayList<>())))
        .setAcls(OzoneAclUtil.fromProtobuf(keyArgs.getAclsList()))
        .setObjectID(pathInfoFSO.getLeafNodeObjectId())
        .setUpdateID(transactionLogIndex)
        .setFileEncryptionInfo(keyArgs.hasFileEncryptionInfo()
            ? OMPBHelper.convert(keyArgs.getFileEncryptionInfo()) : null)
        .setParentObjectID(pathInfoFSO.getLastKnownParentId())
        .build();
    // Add cache entries for the prefix directories.
    // Skip adding the file key itself until key commit.
    OMFileRequest.addDirectoryTableCacheEntries(omMetadataManager,
        Optional.absent(), Optional.of(missingParentInfos),
        transactionLogIndex);
    OMFileRequest.addOpenFileTableCacheEntry(omMetadataManager,
        multipartOpenKey, omKeyInfo, pathInfoFSO.getLeafNodeName(),
        transactionLogIndex);
    // Add to cache.
    omMetadataManager.getMultipartInfoTable().addCacheEntry(
        new CacheKey<>(multipartKey),
        new CacheValue<>(Optional.of(multipartKeyInfo), transactionLogIndex));
    omClientResponse = new S3InitiateMultipartUploadResponseWithFSO(
        omResponse.setInitiateMultiPartUploadResponse(
            MultipartInfoInitiateResponse.newBuilder()
                .setVolumeName(requestedVolume)
                .setBucketName(requestedBucket)
                .setKeyName(keyName)
                .setMultipartUploadID(keyArgs.getMultipartUploadID()))
            .build(),
        multipartKeyInfo, omKeyInfo, multipartKey, missingParentInfos,
        getBucketLayout());
    result = Result.SUCCESS;
  } catch (IOException ex) {
    result = Result.FAILURE;
    exception = ex;
    omClientResponse = new S3InitiateMultipartUploadResponseWithFSO(
        createErrorOMResponse(omResponse, exception), getBucketLayout());
  } finally {
    addResponseToDoubleBuffer(transactionLogIndex, omClientResponse,
        ozoneManagerDoubleBufferHelper);
    if (acquiredBucketLock) {
      omMetadataManager.getLock()
          .releaseWriteLock(BUCKET_LOCK, volumeName, bucketName);
    }
  }
  logResult(ozoneManager, multipartInfoInitiateRequest, auditMap, volumeName,
      bucketName, keyName, exception, result);
  return omClientResponse;
}
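Stripped of the request-specific work, the bucket-lock discipline that brackets the cache updates above reduces to the following skeleton; the elided body is a placeholder, not code from this class.
// Sketch: acquire the bucket write lock, do the metadata work, and only
// release in finally if the acquire actually succeeded.
boolean acquiredBucketLock = false;
try {
  acquiredBucketLock = omMetadataManager.getLock()
      .acquireWriteLock(BUCKET_LOCK, volumeName, bucketName);
  // ... validate volume/bucket and update metadata caches under the lock ...
} finally {
  if (acquiredBucketLock) {
    omMetadataManager.getLock()
        .releaseWriteLock(BUCKET_LOCK, volumeName, bucketName);
  }
}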