Example 1 with ReplicationConfig

Use of org.apache.hadoop.hdds.client.ReplicationConfig in the Apache Ozone project.

From the class PutKeyHandler, the execute method:

@Override
protected void execute(OzoneClient client, OzoneAddress address) throws IOException, OzoneClientException {
    String volumeName = address.getVolumeName();
    String bucketName = address.getBucketName();
    String keyName = address.getKeyName();
    File dataFile = new File(fileName);
    if (isVerbose()) {
        try (InputStream stream = new FileInputStream(dataFile)) {
            String hash = DigestUtils.md5Hex(stream);
            out().printf("File Hash : %s%n", hash);
        }
    }
    ReplicationConfig replicationConfig = ReplicationConfig.parse(replicationType, replication, getConf());
    OzoneVolume vol = client.getObjectStore().getVolume(volumeName);
    OzoneBucket bucket = vol.getBucket(bucketName);
    Map<String, String> keyMetadata = new HashMap<>();
    String gdprEnabled = bucket.getMetadata().get(OzoneConsts.GDPR_FLAG);
    if (Boolean.parseBoolean(gdprEnabled)) {
        keyMetadata.put(OzoneConsts.GDPR_FLAG, Boolean.TRUE.toString());
    }
    int chunkSize = (int) getConf().getStorageSize(OZONE_SCM_CHUNK_SIZE_KEY, OZONE_SCM_CHUNK_SIZE_DEFAULT, StorageUnit.BYTES);
    try (InputStream input = new FileInputStream(dataFile);
        OutputStream output = bucket.createKey(keyName, dataFile.length(), replicationConfig, keyMetadata)) {
        IOUtils.copyBytes(input, output, chunkSize);
    }
}
Also used : OzoneVolume(org.apache.hadoop.ozone.client.OzoneVolume) OzoneBucket(org.apache.hadoop.ozone.client.OzoneBucket) ReplicationConfig(org.apache.hadoop.hdds.client.ReplicationConfig) HashMap(java.util.HashMap) FileInputStream(java.io.FileInputStream) InputStream(java.io.InputStream) OutputStream(java.io.OutputStream) File(java.io.File)
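
For context, a ReplicationConfig can also be built without a CLI context. A minimal sketch of the proto-based factory that the later examples call (the RATIS/THREE values are illustrative):

// Build a RATIS three-way replication config directly from the protobuf
// enums; this mirrors the fromProtoTypeAndFactor calls in Examples 2-5.
ReplicationConfig ratisThree = ReplicationConfig.fromProtoTypeAndFactor(
    HddsProtos.ReplicationType.RATIS,
    HddsProtos.ReplicationFactor.THREE);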

Example 2 with ReplicationConfig

Use of org.apache.hadoop.hdds.client.ReplicationConfig in the Apache Ozone project.

From the class ContainerInfo, the fromProtobuf method:

public static ContainerInfo fromProtobuf(HddsProtos.ContainerInfoProto info) {
    ContainerInfo.Builder builder = new ContainerInfo.Builder();
    final ReplicationConfig config = ReplicationConfig.fromProtoTypeAndFactor(info.getReplicationType(), info.getReplicationFactor());
    builder.setUsedBytes(info.getUsedBytes())
        .setNumberOfKeys(info.getNumberOfKeys())
        .setState(info.getState())
        .setStateEnterTime(info.getStateEnterTime())
        .setOwner(info.getOwner())
        .setContainerID(info.getContainerID())
        .setDeleteTransactionId(info.getDeleteTransactionId())
        .setReplicationConfig(config)
        .setSequenceId(info.getSequenceId());
    if (info.hasPipelineID()) {
        builder.setPipelineID(PipelineID.getFromProtobuf(info.getPipelineID()));
    }
    return builder.build();
}
Also used : ReplicationConfig(org.apache.hadoop.hdds.client.ReplicationConfig) HashCodeBuilder(org.apache.commons.lang3.builder.HashCodeBuilder) EqualsBuilder(org.apache.commons.lang3.builder.EqualsBuilder)
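
For RATIS, the factory returns a type-specific subclass; a short sketch, assuming RatisReplicationConfig (listed under Aggregations below) is the concrete type:

ReplicationConfig config = ReplicationConfig.fromProtoTypeAndFactor(
    HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.THREE);
// Assumption: for RATIS the concrete type is RatisReplicationConfig
// (org.apache.hadoop.hdds.client.RatisReplicationConfig).
boolean isRatis = config instanceof RatisReplicationConfig;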

Example 3 with ReplicationConfig

Use of org.apache.hadoop.hdds.client.ReplicationConfig in the Apache Ozone project.

From the class SCMUpgradeFinalizer, the postFinalizeUpgrade method:

public void postFinalizeUpgrade(StorageContainerManager scm) throws IOException {
    // Don't wait for the next heartbeat from datanodes to move them to
    // the Healthy-ReadOnly state. Force them there so that pipeline
    // creation can resume right away.
    scm.getScmNodeManager().forceNodesToHealthyReadOnly();
    PipelineManager pipelineManager = scm.getPipelineManager();
    pipelineManager.resumePipelineCreation();
    // Wait for at least one pipeline to be created before finishing
    // finalization, so clients can write.
    boolean hasPipeline = false;
    while (!hasPipeline) {
        ReplicationConfig ratisThree = ReplicationConfig.fromProtoTypeAndFactor(HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.THREE);
        int pipelineCount = pipelineManager.getPipelines(ratisThree, Pipeline.PipelineState.OPEN).size();
        hasPipeline = (pipelineCount >= 1);
        if (!hasPipeline) {
            LOG.info("Waiting for at least one open pipeline after SCM " + "finalization.");
            try {
                Thread.sleep(5000);
            } catch (InterruptedException e) {
                // Try again on next loop iteration.
                Thread.currentThread().interrupt();
            }
        } else {
            LOG.info("Open pipeline found after SCM finalization");
        }
    }
    emitFinishedMsg();
}
Also used : ReplicationConfig(org.apache.hadoop.hdds.client.ReplicationConfig) PipelineManager(org.apache.hadoop.hdds.scm.pipeline.PipelineManager)
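
The loop above retries indefinitely. A bounded variant is sketched below against the same PipelineManager API; the maxAttempts bound is a hypothetical addition, not part of the original code:

// Bounded wait: give up after maxAttempts polls instead of looping forever.
ReplicationConfig ratisThree = ReplicationConfig.fromProtoTypeAndFactor(
    HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.THREE);
int maxAttempts = 60;
for (int attempt = 0; attempt < maxAttempts; attempt++) {
    if (!pipelineManager.getPipelines(ratisThree,
            Pipeline.PipelineState.OPEN).isEmpty()) {
        break;
    }
    try {
        Thread.sleep(5000);
    } catch (InterruptedException e) {
        // Preserve the interrupt and stop waiting.
        Thread.currentThread().interrupt();
        break;
    }
}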

Example 4 with ReplicationConfig

Use of org.apache.hadoop.hdds.client.ReplicationConfig in the Apache Ozone project.

From the class TestOzoneRpcClientAbstract, the createRequiredForVersioningTest method:

private void createRequiredForVersioningTest(String volumeName, String bucketName, String keyName, boolean versioning) throws Exception {
    ReplicationConfig replicationConfig = ReplicationConfig.fromProtoTypeAndFactor(HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.THREE);
    String value = "sample value";
    store.createVolume(volumeName);
    OzoneVolume volume = store.getVolume(volumeName);
    // Create the bucket with the requested versioning setting.
    volume.createBucket(bucketName, BucketArgs.newBuilder().setVersioning(versioning).build());
    OzoneBucket bucket = volume.getBucket(bucketName);
    OzoneOutputStream out = bucket.createKey(keyName, value.getBytes(UTF_8).length, replicationConfig, new HashMap<>());
    out.write(value.getBytes(UTF_8));
    out.close();
    // Overwrite the key with a second write.
    out = bucket.createKey(keyName, value.getBytes(UTF_8).length, replicationConfig, new HashMap<>());
    out.write(value.getBytes(UTF_8));
    out.close();
}
Also used : OzoneVolume(org.apache.hadoop.ozone.client.OzoneVolume) OzoneBucket(org.apache.hadoop.ozone.client.OzoneBucket) ReplicationConfig(org.apache.hadoop.hdds.client.ReplicationConfig) LinkedHashMap(java.util.LinkedHashMap) HashMap(java.util.HashMap) OzoneOutputStream(org.apache.hadoop.ozone.client.io.OzoneOutputStream)
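
The two write blocks above close the stream by hand; the same createKey signature also works with try-with-resources, which closes the stream even if write throws. A minimal sketch using only the API already shown in this example:

try (OzoneOutputStream os = bucket.createKey(keyName,
        value.getBytes(UTF_8).length, replicationConfig, new HashMap<>())) {
    os.write(value.getBytes(UTF_8));
}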

Example 5 with ReplicationConfig

Use of org.apache.hadoop.hdds.client.ReplicationConfig in the Apache Ozone project.

From the class S3InitiateMultipartUploadRequestWithFSO, the validateAndUpdateCache method:

@Override
@SuppressWarnings("methodlength")
public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, long transactionLogIndex, OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper) {
    MultipartInfoInitiateRequest multipartInfoInitiateRequest = getOmRequest().getInitiateMultiPartUploadRequest();
    KeyArgs keyArgs = multipartInfoInitiateRequest.getKeyArgs();
    Preconditions.checkNotNull(keyArgs.getMultipartUploadID());
    Map<String, String> auditMap = buildKeyArgsAuditMap(keyArgs);
    String volumeName = keyArgs.getVolumeName();
    String bucketName = keyArgs.getBucketName();
    final String requestedVolume = volumeName;
    final String requestedBucket = bucketName;
    String keyName = keyArgs.getKeyName();
    OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
    ozoneManager.getMetrics().incNumInitiateMultipartUploads();
    boolean acquiredBucketLock = false;
    IOException exception = null;
    OmMultipartKeyInfo multipartKeyInfo = null;
    OmKeyInfo omKeyInfo = null;
    List<OmDirectoryInfo> missingParentInfos;
    Result result = null;
    OMResponse.Builder omResponse = OmResponseUtil.getOMResponseBuilder(getOmRequest());
    OMClientResponse omClientResponse = null;
    try {
        keyArgs = resolveBucketLink(ozoneManager, keyArgs, auditMap);
        volumeName = keyArgs.getVolumeName();
        bucketName = keyArgs.getBucketName();
        // TODO: support S3 ACLs later.
        acquiredBucketLock = omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK, volumeName, bucketName);
        validateBucketAndVolume(omMetadataManager, volumeName, bucketName);
        OMFileRequest.OMPathInfoWithFSO pathInfoFSO = OMFileRequest.verifyDirectoryKeysInPath(omMetadataManager, volumeName, bucketName, keyName, Paths.get(keyName));
        // check if the directory already existed in OM
        checkDirectoryResult(keyName, pathInfoFSO.getDirectoryResult());
        // add all missing parents to dir table
        missingParentInfos = OMDirectoryCreateRequestWithFSO.getAllMissingParentDirInfo(ozoneManager, keyArgs, pathInfoFSO, transactionLogIndex);
        // We append the uploadId to the key because multiple users may
        // initiate multipart uploads on the same key at the same time.
        // Whoever commits last determines the contents visible in Ozone.
        // Without the uploadId, concurrent uploads to the same
        // /volume/bucket/key would update the same entry, and the final
        // key could be a mix of parts from different users. So each
        // initiation on the same key stores its own entry in the openKey
        // table. AWS S3 behaves the same way: each initiate request
        // returns a fresh uploadId, even when the key already exists.
        String multipartKey = omMetadataManager.getMultipartKey(volumeName, bucketName, keyName, keyArgs.getMultipartUploadID());
        String multipartOpenKey = omMetadataManager.getMultipartKey(pathInfoFSO.getLastKnownParentId(), pathInfoFSO.getLeafNodeName(), keyArgs.getMultipartUploadID());
        // Even if this key already exists in the KeyTable, it would be taken
        // care of in the final complete multipart upload. AWS S3 behavior is
        // also like this, even when key exists in a bucket, user can still
        // initiate MPU.
        final ReplicationConfig replicationConfig = ReplicationConfig.fromProtoTypeAndFactor(keyArgs.getType(), keyArgs.getFactor());
        multipartKeyInfo = new OmMultipartKeyInfo.Builder()
            .setUploadID(keyArgs.getMultipartUploadID())
            .setCreationTime(keyArgs.getModificationTime())
            .setReplicationConfig(replicationConfig)
            .setObjectID(pathInfoFSO.getLeafNodeObjectId())
            .setUpdateID(transactionLogIndex)
            .setParentID(pathInfoFSO.getLastKnownParentId())
            .build();
        omKeyInfo = new OmKeyInfo.Builder()
            .setVolumeName(volumeName)
            .setBucketName(bucketName)
            .setKeyName(keyArgs.getKeyName())
            .setCreationTime(keyArgs.getModificationTime())
            .setModificationTime(keyArgs.getModificationTime())
            .setReplicationConfig(replicationConfig)
            .setOmKeyLocationInfos(Collections.singletonList(
                new OmKeyLocationInfoGroup(0, new ArrayList<>())))
            .setAcls(OzoneAclUtil.fromProtobuf(keyArgs.getAclsList()))
            .setObjectID(pathInfoFSO.getLeafNodeObjectId())
            .setUpdateID(transactionLogIndex)
            .setFileEncryptionInfo(keyArgs.hasFileEncryptionInfo()
                ? OMPBHelper.convert(keyArgs.getFileEncryptionInfo()) : null)
            .setParentObjectID(pathInfoFSO.getLastKnownParentId())
            .build();
        // Add cache entries for the prefix directories.
        // Skip adding for the file key itself, until Key Commit.
        OMFileRequest.addDirectoryTableCacheEntries(omMetadataManager, Optional.absent(), Optional.of(missingParentInfos), transactionLogIndex);
        OMFileRequest.addOpenFileTableCacheEntry(omMetadataManager, multipartOpenKey, omKeyInfo, pathInfoFSO.getLeafNodeName(), transactionLogIndex);
        // Add to cache
        omMetadataManager.getMultipartInfoTable().addCacheEntry(
            new CacheKey<>(multipartKey),
            new CacheValue<>(Optional.of(multipartKeyInfo), transactionLogIndex));
        omClientResponse = new S3InitiateMultipartUploadResponseWithFSO(
            omResponse.setInitiateMultiPartUploadResponse(
                MultipartInfoInitiateResponse.newBuilder()
                    .setVolumeName(requestedVolume)
                    .setBucketName(requestedBucket)
                    .setKeyName(keyName)
                    .setMultipartUploadID(keyArgs.getMultipartUploadID()))
                .build(),
            multipartKeyInfo, omKeyInfo, multipartKey, missingParentInfos,
            getBucketLayout());
        result = Result.SUCCESS;
    } catch (IOException ex) {
        result = Result.FAILURE;
        exception = ex;
        omClientResponse = new S3InitiateMultipartUploadResponseWithFSO(createErrorOMResponse(omResponse, exception), getBucketLayout());
    } finally {
        addResponseToDoubleBuffer(transactionLogIndex, omClientResponse, ozoneManagerDoubleBufferHelper);
        if (acquiredBucketLock) {
            omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName, bucketName);
        }
    }
    logResult(ozoneManager, multipartInfoInitiateRequest, auditMap, volumeName, bucketName, keyName, exception, result);
    return omClientResponse;
}
Also used : OMClientResponse(org.apache.hadoop.ozone.om.response.OMClientResponse) ReplicationConfig(org.apache.hadoop.hdds.client.ReplicationConfig) ArrayList(java.util.ArrayList) KeyArgs(org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs) IOException(java.io.IOException) OMResponse(org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse) OmKeyLocationInfoGroup(org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup) OmMultipartKeyInfo(org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo) OMFileRequest(org.apache.hadoop.ozone.om.request.file.OMFileRequest) S3InitiateMultipartUploadResponseWithFSO(org.apache.hadoop.ozone.om.response.s3.multipart.S3InitiateMultipartUploadResponseWithFSO) OMMetadataManager(org.apache.hadoop.ozone.om.OMMetadataManager) OmKeyInfo(org.apache.hadoop.ozone.om.helpers.OmKeyInfo) OmDirectoryInfo(org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo) MultipartInfoInitiateRequest(org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.MultipartInfoInitiateRequest)
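
For orientation, a hedged sketch of the client-side call that leads to this OM request path; it assumes OzoneBucket exposes initiateMultipartUpload(String, ReplicationConfig) and that OmMultipartInfo carries the generated upload ID in this Ozone version:

// Assumption: this client API pairs with the server-side request above.
ReplicationConfig replicationConfig = ReplicationConfig.fromProtoTypeAndFactor(
    HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.THREE);
OmMultipartInfo multipartInfo =
    bucket.initiateMultipartUpload(keyName, replicationConfig);
String uploadId = multipartInfo.getUploadID();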

Aggregations

ReplicationConfig (org.apache.hadoop.hdds.client.ReplicationConfig): 15 usages
HashMap (java.util.HashMap): 6 usages
OzoneBucket (org.apache.hadoop.ozone.client.OzoneBucket): 5 usages
OzoneVolume (org.apache.hadoop.ozone.client.OzoneVolume): 5 usages
IOException (java.io.IOException): 4 usages
ArrayList (java.util.ArrayList): 4 usages
OzoneOutputStream (org.apache.hadoop.ozone.client.io.OzoneOutputStream): 4 usages
RatisReplicationConfig (org.apache.hadoop.hdds.client.RatisReplicationConfig): 3 usages
OMException (org.apache.hadoop.ozone.om.exceptions.OMException): 3 usages
OmKeyInfo (org.apache.hadoop.ozone.om.helpers.OmKeyInfo): 3 usages
OmMultipartKeyInfo (org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo): 3 usages
InputStream (java.io.InputStream): 2 usages
OutputStream (java.io.OutputStream): 2 usages
EqualsBuilder (org.apache.commons.lang3.builder.EqualsBuilder): 2 usages
HashCodeBuilder (org.apache.commons.lang3.builder.HashCodeBuilder): 2 usages
HddsProtos (org.apache.hadoop.hdds.protocol.proto.HddsProtos): 2 usages
OMMetadataManager (org.apache.hadoop.ozone.om.OMMetadataManager): 2 usages
OmBucketInfo (org.apache.hadoop.ozone.om.helpers.OmBucketInfo): 2 usages
OmKeyLocationInfoGroup (org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup): 2 usages
OMClientResponse (org.apache.hadoop.ozone.om.response.OMClientResponse): 2 usages