Search in sources :

Example 16 with OMException

Use of org.apache.hadoop.ozone.om.exceptions.OMException in project Ozone by Apache.

The class TestBucketManagerImpl, method testCreateAlreadyExistingBucket.

@Test
@Ignore("Bucket Manager does not use cache, Disable it for now.")
public void testCreateAlreadyExistingBucket() throws Exception {
    // The duplicate create below must surface this message for the
    // ExpectedException rule to pass the test.
    thrown.expectMessage("Bucket already exist");
    OmMetadataManagerImpl metadataManager = createSampleVol();
    try {
        BucketManager manager = new BucketManagerImpl(metadataManager);
        OmBucketInfo info = OmBucketInfo.newBuilder()
                .setVolumeName("sampleVol")
                .setBucketName("bucketOne")
                .build();
        // First creation succeeds; the second, identical one must fail.
        manager.createBucket(info);
        manager.createBucket(info);
    } catch (OMException omEx) {
        // Check the failure reason, then rethrow so the rule sees the message.
        Assert.assertEquals(ResultCodes.BUCKET_ALREADY_EXISTS, omEx.getResult());
        throw omEx;
    } finally {
        metadataManager.getStore().close();
    }
}
Also used : OmBucketInfo(org.apache.hadoop.ozone.om.helpers.OmBucketInfo) OMException(org.apache.hadoop.ozone.om.exceptions.OMException) Ignore(org.junit.Ignore) Test(org.junit.Test)

Example 17 with OMException

Use of org.apache.hadoop.ozone.om.exceptions.OMException in project Ozone by Apache.

The class OzoneDelegationTokenSecretManager, method renewToken.

/**
 * Renew a delegation token.
 *
 * @param token the token to renew
 * @param renewer the full principal name of the user doing the renewal
 * @return the new expiration time
 * @throws InvalidToken if the token is invalid
 * @throws AccessControlException if the user can't renew token
 */
@Override
public synchronized long renewToken(Token<OzoneTokenIdentifier> token, String renewer) throws IOException {
    // Decode the identifier from the token's serialized bytes; a
    // ByteArrayInputStream holds no OS resources, so no close is needed.
    ByteArrayInputStream buf = new ByteArrayInputStream(token.getIdentifier());
    DataInputStream in = new DataInputStream(buf);
    OzoneTokenIdentifier id = OzoneTokenIdentifier.readProtoBuf(in);
    if (LOG.isDebugEnabled()) {
        LOG.debug("Token renewal for identifier: {}, total currentTokens: {}", formatTokenId(id), currentTokens.size());
    }
    long now = Time.now();
    // A token can never be renewed past its hard max-date.
    if (id.getMaxDate() < now) {
        throw new OMException(renewer + " tried to renew an expired token " + formatTokenId(id) + " max expiration date: " + Time.formatTime(id.getMaxDate()) + " currentTime: " + Time.formatTime(now), TOKEN_EXPIRED);
    }
    validateToken(id);
    // Only the designated renewer may renew; a token issued without a
    // renewer is non-renewable.
    if ((id.getRenewer() == null) || (id.getRenewer().toString().isEmpty())) {
        throw new AccessControlException(renewer + " tried to renew a token " + formatTokenId(id) + " without a renewer");
    }
    if (!id.getRenewer().toString().equals(renewer)) {
        throw new AccessControlException(renewer + " tries to renew a token " + formatTokenId(id) + " with non-matching renewer " + id.getRenewer());
    }
    // New expiry: one renew interval from now, capped by the max-date.
    long renewTime = Math.min(id.getMaxDate(), now + getTokenRenewInterval());
    // This will be removed, when HA/Non-HA code is merged.
    if (!isRatisEnabled) {
        try {
            addToTokenStore(id, token.getPassword(), renewTime);
        } catch (IOException e) {
            // Best-effort persistence: log (parameterized, with the cause as
            // the trailing throwable) and still return the new expiry.
            LOG.error("Unable to update token {}", id.getSequenceNumber(), e);
        }
    }
    return renewTime;
}
Also used : ByteArrayInputStream(java.io.ByteArrayInputStream) AccessControlException(org.apache.hadoop.security.AccessControlException) IOException(java.io.IOException) DataInputStream(java.io.DataInputStream) OMException(org.apache.hadoop.ozone.om.exceptions.OMException)

Example 18 with OMException

Use of org.apache.hadoop.ozone.om.exceptions.OMException in project Ozone by Apache.

The class TestOzoneFSWithObjectStoreCreate, method testCreateDirectoryFirstThenKeyAndFileWithSameName.

@Test
public void testCreateDirectoryFirstThenKeyAndFileWithSameName() throws Exception {
    // Create a directory, then try to create a file at the same path via the
    // filesystem API: the create must be rejected with NOT_A_FILE.
    o3fs.mkdirs(new Path("/t1/t2"));
    try {
        o3fs.create(new Path("/t1/t2"));
        fail("testCreateDirectoryFirstThenKeyAndFileWithSameName failed");
    } catch (FileAlreadyExistsException ex) {
        Assert.assertTrue(ex.getMessage().contains(NOT_A_FILE.name()));
    }
    // Repeat through the object-store API: creating a key over an existing
    // directory must likewise fail with NOT_A_FILE.
    OzoneVolume ozoneVolume = cluster.getRpcClient().getObjectStore().getVolume(volumeName);
    OzoneBucket ozoneBucket = ozoneVolume.getBucket(bucketName);
    ozoneBucket.createDirectory("t1/t2");
    try {
        ozoneBucket.createKey("t1/t2", 0);
        fail("testCreateDirectoryFirstThenKeyAndFileWithSameName failed");
    } catch (OMException ex) {
        // The catch clause already guarantees the type; only the result code
        // needs checking (the old `instanceof` assertion was tautological).
        Assert.assertEquals(NOT_A_FILE, ex.getResult());
    }
}
Also used : Path(org.apache.hadoop.fs.Path) OzoneVolume(org.apache.hadoop.ozone.client.OzoneVolume) OzoneBucket(org.apache.hadoop.ozone.client.OzoneBucket) FileAlreadyExistsException(org.apache.hadoop.fs.FileAlreadyExistsException) OMException(org.apache.hadoop.ozone.om.exceptions.OMException) Test(org.junit.Test)

Example 19 with OMException

Use of org.apache.hadoop.ozone.om.exceptions.OMException in project Ozone by Apache.

The class TestOzoneFSWithObjectStoreCreate, method testMPUFailDuetoDirectoryCreationBeforeComplete.

@Test
public void testMPUFailDuetoDirectoryCreationBeforeComplete() throws Exception {
    OzoneVolume ozoneVolume = cluster.getRpcClient().getObjectStore().getVolume(volumeName);
    OzoneBucket ozoneBucket = ozoneVolume.getBucket(bucketName);
    String keyName = "/dir1/dir2/mpukey";
    // Start a multipart upload and write one 10-byte part.
    OmMultipartInfo omMultipartInfo = ozoneBucket.initiateMultipartUpload(keyName);
    Assert.assertNotNull(omMultipartInfo);
    OzoneOutputStream ozoneOutputStream = ozoneBucket.createMultipartKey(keyName, 10, 1, omMultipartInfo.getUploadID());
    byte[] b = new byte[10];
    Arrays.fill(b, (byte) 96);
    ozoneOutputStream.write(b);
    // Before close, create directory with same name.
    o3fs.mkdirs(new Path(keyName));
    // This should succeed, as we check during creation of part or during
    // complete MPU.
    ozoneOutputStream.close();
    Map<Integer, String> partsMap = new HashMap<>();
    partsMap.put(1, ozoneOutputStream.getCommitUploadPartInfo().getPartName());
    // Should fail, as we have directory with same name.
    try {
        ozoneBucket.completeMultipartUpload(keyName, omMultipartInfo.getUploadID(), partsMap);
        fail("testMPUFailDuetoDirectoryCreationBeforeComplete failed");
    } catch (OMException ex) {
        // The catch clause already guarantees the type; only the result code
        // needs checking (the old `instanceof` assertion was tautological).
        Assert.assertEquals(NOT_A_FILE, ex.getResult());
    }
    // Delete directory
    o3fs.delete(new Path(keyName), true);
    // And try again for complete MPU. This should succeed.
    ozoneBucket.completeMultipartUpload(keyName, omMultipartInfo.getUploadID(), partsMap);
    // Read the key back and verify it holds exactly the bytes written above.
    try (FSDataInputStream ozoneInputStream = o3fs.open(new Path(keyName))) {
        byte[] buffer = new byte[10];
        // This read will not change the offset inside the file
        int readBytes = ozoneInputStream.read(0, buffer, 0, 10);
        String readData = new String(buffer, 0, readBytes, UTF_8);
        Assert.assertEquals(new String(b, 0, b.length, UTF_8), readData);
    }
}
Also used : Path(org.apache.hadoop.fs.Path) HashMap(java.util.HashMap) OzoneOutputStream(org.apache.hadoop.ozone.client.io.OzoneOutputStream) OzoneVolume(org.apache.hadoop.ozone.client.OzoneVolume) OzoneBucket(org.apache.hadoop.ozone.client.OzoneBucket) FSDataInputStream(org.apache.hadoop.fs.FSDataInputStream) OmMultipartInfo(org.apache.hadoop.ozone.om.helpers.OmMultipartInfo) OMException(org.apache.hadoop.ozone.om.exceptions.OMException) Test(org.junit.Test)

Example 20 with OMException

Use of org.apache.hadoop.ozone.om.exceptions.OMException in project Ozone by Apache.

The class OzoneManager, method instantiatePrepareStateOnStartup.

/**
 * Determines if the prepare gate should be enabled on this OM after OM
 * is restarted.
 * This must be done after metadataManager is instantiated
 * and before the RPC server is started.
 *
 * @throws IOException if the transaction info table cannot be read or
 *     updated, or if the on-disk prepare state is found to be corrupt
 *     (marker file present without a matching DB entry).
 */
private void instantiatePrepareStateOnStartup() throws IOException {
    TransactionInfo txnInfo = metadataManager.getTransactionInfoTable().get(TRANSACTION_INFO_KEY);
    if (txnInfo == null) {
        // No prepare request could be received if there are not transactions.
        prepareState = new OzoneManagerPrepareState(configuration);
    } else {
        prepareState = new OzoneManagerPrepareState(configuration, txnInfo.getTransactionIndex());
        TransactionInfo dbPrepareValue = metadataManager.getTransactionInfoTable().get(PREPARE_MARKER_KEY);
        // The prepare state reports PREPARE_COMPLETED when a marker file was
        // found on disk; the DB marker is an independent record of the same.
        boolean hasMarkerFile = (prepareState.getState().getStatus() == PrepareStatus.PREPARE_COMPLETED);
        boolean hasDBMarker = (dbPrepareValue != null);
        if (hasDBMarker) {
            long dbPrepareIndex = dbPrepareValue.getTransactionIndex();
            if (hasMarkerFile) {
                long prepareFileIndex = prepareState.getState().getIndex();
                // On mismatch, trust the DB index — presumably because it is
                // synced through Ratis — and rewrite the marker file from it
                // to avoid divergence.
                if (prepareFileIndex != dbPrepareIndex) {
                    LOG.warn("Prepare marker file index {} does not match DB prepare " + "index {}. Writing DB index to prepare file and maintaining " + "prepared state.", prepareFileIndex, dbPrepareIndex);
                    prepareState.finishPrepare(dbPrepareIndex);
                }
            // Else, marker and DB are present and match, so OM is prepared.
            } else {
                // Prepare cancelled with startup flag to remove marker file.
                // Persist this to the DB.
                // If the startup flag is used it should be used on all OMs to avoid
                // divergence.
                metadataManager.getTransactionInfoTable().delete(PREPARE_MARKER_KEY);
            }
        } else if (hasMarkerFile) {
            // Marker file with no DB entry: a completed prepare should have
            // left both records, so treat this as corrupt prepare state.
            throw new OMException("Prepare marker file found on startup without " + "a corresponding database entry. Corrupt prepare state.", ResultCodes.PREPARE_FAILED);
        }
    // Else, no DB or marker file, OM is not prepared.
    }
}
Also used : TransactionInfo(org.apache.hadoop.hdds.utils.TransactionInfo) OMException(org.apache.hadoop.ozone.om.exceptions.OMException)

Aggregations

OMException (org.apache.hadoop.ozone.om.exceptions.OMException)179 IOException (java.io.IOException)83 OmBucketInfo (org.apache.hadoop.ozone.om.helpers.OmBucketInfo)44 OzoneBucket (org.apache.hadoop.ozone.client.OzoneBucket)43 OmKeyInfo (org.apache.hadoop.ozone.om.helpers.OmKeyInfo)39 Test (org.junit.Test)37 ArrayList (java.util.ArrayList)30 OMMetadataManager (org.apache.hadoop.ozone.om.OMMetadataManager)30 OMClientResponse (org.apache.hadoop.ozone.om.response.OMClientResponse)30 OzoneVolume (org.apache.hadoop.ozone.client.OzoneVolume)28 OMResponse (org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse)28 OMMetrics (org.apache.hadoop.ozone.om.OMMetrics)22 OmVolumeArgs (org.apache.hadoop.ozone.om.helpers.OmVolumeArgs)20 OzoneFileStatus (org.apache.hadoop.ozone.om.helpers.OzoneFileStatus)18 OzoneOutputStream (org.apache.hadoop.ozone.client.io.OzoneOutputStream)17 RepeatedOmKeyInfo (org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo)17 OzoneManagerProtocolProtos (org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos)17 OmKeyLocationInfo (org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo)16 KeyArgs (org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs)15 AuditLogger (org.apache.hadoop.ozone.audit.AuditLogger)14