use of org.apache.hadoop.ozone.om.exceptions.OMException in project ozone by apache.
the class TestBucketManagerImpl method testCreateAlreadyExistingBucket.
@Test
@Ignore("Bucket Manager does not use cache, Disable it for now.")
public void testCreateAlreadyExistingBucket() throws Exception {
  thrown.expectMessage("Bucket already exist");
  OmMetadataManagerImpl metaMgr = createSampleVol();
  try {
    BucketManager bucketManager = new BucketManagerImpl(metaMgr);
    OmBucketInfo bucketInfo = OmBucketInfo.newBuilder()
        .setVolumeName("sampleVol")
        .setBucketName("bucketOne")
        .build();
    bucketManager.createBucket(bucketInfo);
    // Creating the same bucket again must fail.
    bucketManager.createBucket(bucketInfo);
  } catch (OMException omEx) {
    Assert.assertEquals(ResultCodes.BUCKET_ALREADY_EXISTS, omEx.getResult());
    // Rethrow so the ExpectedException rule can verify the message.
    throw omEx;
  } finally {
    metaMgr.getStore().close();
  }
}
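The test rethrows the OMException only after asserting its result code, so the ExpectedException rule can still match the message. For comparison, a minimal caller-side sketch (not from the Ozone sources) that treats the same result code as an idempotent success, assuming a bucketManager and bucketInfo set up as in the test:

  try {
    bucketManager.createBucket(bucketInfo);
  } catch (OMException e) {
    // BUCKET_ALREADY_EXISTS is tolerated; any other OM error is propagated.
    if (e.getResult() != OMException.ResultCodes.BUCKET_ALREADY_EXISTS) {
      throw e;
    }
  }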
use of org.apache.hadoop.ozone.om.exceptions.OMException in project ozone by apache.
the class OzoneDelegationTokenSecretManager method renewToken.
/**
* Renew a delegation token.
*
* @param token the token to renew
* @param renewer the full principal name of the user doing the renewal
* @return the new expiration time
* @throws InvalidToken if the token is invalid
* @throws AccessControlException if the user can't renew token
*/
@Override
public synchronized long renewToken(Token<OzoneTokenIdentifier> token,
    String renewer) throws IOException {
  ByteArrayInputStream buf = new ByteArrayInputStream(token.getIdentifier());
  DataInputStream in = new DataInputStream(buf);
  OzoneTokenIdentifier id = OzoneTokenIdentifier.readProtoBuf(in);
  if (LOG.isDebugEnabled()) {
    LOG.debug("Token renewal for identifier: {}, total currentTokens: {}",
        formatTokenId(id), currentTokens.size());
  }
  long now = Time.now();
  if (id.getMaxDate() < now) {
    throw new OMException(renewer + " tried to renew an expired token "
        + formatTokenId(id) + " max expiration date: "
        + Time.formatTime(id.getMaxDate()) + " currentTime: "
        + Time.formatTime(now), TOKEN_EXPIRED);
  }
  validateToken(id);
  if ((id.getRenewer() == null) || (id.getRenewer().toString().isEmpty())) {
    throw new AccessControlException(renewer + " tried to renew a token "
        + formatTokenId(id) + " without a renewer");
  }
  if (!id.getRenewer().toString().equals(renewer)) {
    throw new AccessControlException(renewer + " tries to renew a token "
        + formatTokenId(id) + " with non-matching renewer "
        + id.getRenewer());
  }
  long renewTime = Math.min(id.getMaxDate(), now + getTokenRenewInterval());
  // This will be removed, when HA/Non-HA code is merged.
  if (!isRatisEnabled) {
    try {
      addToTokenStore(id, token.getPassword(), renewTime);
    } catch (IOException e) {
      LOG.error("Unable to update token " + id.getSequenceNumber(), e);
    }
  }
  return renewTime;
}
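A minimal usage sketch for the method above (illustrative, not from the Ozone sources), assuming an initialized OzoneDelegationTokenSecretManager named secretManager, an issued token, and a renewerPrincipal string; the exception types and result code mirror those thrown above:

  try {
    long newExpiry = secretManager.renewToken(token, renewerPrincipal);
    LOG.info("Token renewed until {}", Time.formatTime(newExpiry));
  } catch (OMException e) {
    if (e.getResult() == OMException.ResultCodes.TOKEN_EXPIRED) {
      // Max lifetime reached; the client must obtain a new token.
    }
    throw e;
  } catch (AccessControlException e) {
    // Caller is not the renewer recorded in the token identifier.
    throw e;
  }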
use of org.apache.hadoop.ozone.om.exceptions.OMException in project ozone by apache.
the class TestOzoneFSWithObjectStoreCreate method testCreateDirectoryFirstThenKeyAndFileWithSameName.
@Test
public void testCreateDirectoryFirstThenKeyAndFileWithSameName() throws Exception {
  o3fs.mkdirs(new Path("/t1/t2"));
  try {
    o3fs.create(new Path("/t1/t2"));
    fail("testCreateDirectoryFirstThenFileWithSameName failed");
  } catch (FileAlreadyExistsException ex) {
    Assert.assertTrue(ex.getMessage().contains(NOT_A_FILE.name()));
  }
  OzoneVolume ozoneVolume =
      cluster.getRpcClient().getObjectStore().getVolume(volumeName);
  OzoneBucket ozoneBucket = ozoneVolume.getBucket(bucketName);
  ozoneBucket.createDirectory("t1/t2");
  try {
    ozoneBucket.createKey("t1/t2", 0);
    fail("testCreateDirectoryFirstThenFileWithSameName failed");
  } catch (OMException ex) {
    Assert.assertTrue(ex instanceof OMException);
    Assert.assertEquals(NOT_A_FILE, ex.getResult());
  }
}
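Both code paths in this test surface the same OM result code, just wrapped differently: o3fs throws FileAlreadyExistsException whose message carries NOT_A_FILE, while the object-store client throws OMException with NOT_A_FILE as its result. A hypothetical helper (not part of Ozone) that normalizes the two, based only on the checks the test performs:

  static boolean isNotAFileError(Exception ex) {
    if (ex instanceof OMException) {
      return ((OMException) ex).getResult() == OMException.ResultCodes.NOT_A_FILE;
    }
    // o3fs wraps the OM error; the result code name appears in the message.
    return ex instanceof FileAlreadyExistsException
        && ex.getMessage() != null
        && ex.getMessage().contains(OMException.ResultCodes.NOT_A_FILE.name());
  }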
use of org.apache.hadoop.ozone.om.exceptions.OMException in project ozone by apache.
the class TestOzoneFSWithObjectStoreCreate method testMPUFailDuetoDirectoryCreationBeforeComplete.
@Test
public void testMPUFailDuetoDirectoryCreationBeforeComplete() throws Exception {
  OzoneVolume ozoneVolume =
      cluster.getRpcClient().getObjectStore().getVolume(volumeName);
  OzoneBucket ozoneBucket = ozoneVolume.getBucket(bucketName);
  String keyName = "/dir1/dir2/mpukey";
  OmMultipartInfo omMultipartInfo = ozoneBucket.initiateMultipartUpload(keyName);
  Assert.assertNotNull(omMultipartInfo);
  OzoneOutputStream ozoneOutputStream = ozoneBucket.createMultipartKey(keyName,
      10, 1, omMultipartInfo.getUploadID());
  byte[] b = new byte[10];
  Arrays.fill(b, (byte) 96);
  ozoneOutputStream.write(b);
  // Before close, create a directory with the same name.
  o3fs.mkdirs(new Path(keyName));
  // The close should succeed, as the directory check happens during part
  // creation or during complete MPU.
  ozoneOutputStream.close();
  Map<Integer, String> partsMap = new HashMap<>();
  partsMap.put(1, ozoneOutputStream.getCommitUploadPartInfo().getPartName());
  // Complete MPU should fail, as a directory with the same name exists.
  try {
    ozoneBucket.completeMultipartUpload(keyName,
        omMultipartInfo.getUploadID(), partsMap);
    fail("testMPUFailDuetoDirectoryCreationBeforeComplete failed");
  } catch (OMException ex) {
    Assert.assertTrue(ex instanceof OMException);
    Assert.assertEquals(NOT_A_FILE, ex.getResult());
  }
  // Delete the directory.
  o3fs.delete(new Path(keyName), true);
  // And try complete MPU again. This should succeed.
  ozoneBucket.completeMultipartUpload(keyName, omMultipartInfo.getUploadID(),
      partsMap);
  try (FSDataInputStream ozoneInputStream = o3fs.open(new Path(keyName))) {
    byte[] buffer = new byte[10];
    // This read will not change the offset inside the file.
    int readBytes = ozoneInputStream.read(0, buffer, 0, 10);
    String readData = new String(buffer, 0, readBytes, UTF_8);
    Assert.assertEquals(new String(b, 0, b.length, UTF_8), readData);
  }
}
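For reference, the same multipart-upload API without the interfering directory reduces to the following happy-path sketch (assuming ozoneBucket and keyName are set up as in the test):

  byte[] data = new byte[10];
  OmMultipartInfo info = ozoneBucket.initiateMultipartUpload(keyName);
  OzoneOutputStream part = ozoneBucket.createMultipartKey(keyName, data.length,
      1, info.getUploadID());
  part.write(data);
  part.close();
  Map<Integer, String> parts = new HashMap<>();
  parts.put(1, part.getCommitUploadPartInfo().getPartName());
  ozoneBucket.completeMultipartUpload(keyName, info.getUploadID(), parts);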
use of org.apache.hadoop.ozone.om.exceptions.OMException in project ozone by apache.
the class OzoneManager method instantiatePrepareStateOnStartup.
/**
* Determines if the prepare gate should be enabled on this OM after OM
* is restarted.
* This must be done after metadataManager is instantiated
* and before the RPC server is started.
*/
private void instantiatePrepareStateOnStartup() throws IOException {
  TransactionInfo txnInfo = metadataManager.getTransactionInfoTable()
      .get(TRANSACTION_INFO_KEY);
  if (txnInfo == null) {
    // No prepare request could have been received if there are no
    // transactions.
    prepareState = new OzoneManagerPrepareState(configuration);
  } else {
    prepareState = new OzoneManagerPrepareState(configuration,
        txnInfo.getTransactionIndex());
    TransactionInfo dbPrepareValue = metadataManager.getTransactionInfoTable()
        .get(PREPARE_MARKER_KEY);
    boolean hasMarkerFile =
        (prepareState.getState().getStatus() == PrepareStatus.PREPARE_COMPLETED);
    boolean hasDBMarker = (dbPrepareValue != null);
    if (hasDBMarker) {
      long dbPrepareIndex = dbPrepareValue.getTransactionIndex();
      if (hasMarkerFile) {
        long prepareFileIndex = prepareState.getState().getIndex();
        // If the marker file and DB disagree, trust the DB index, since it is
        // synced through Ratis, to avoid divergence.
        if (prepareFileIndex != dbPrepareIndex) {
          LOG.warn("Prepare marker file index {} does not match DB prepare "
              + "index {}. Writing DB index to prepare file and maintaining "
              + "prepared state.", prepareFileIndex, dbPrepareIndex);
          prepareState.finishPrepare(dbPrepareIndex);
        }
        // Else, marker and DB are present and match, so OM is prepared.
      } else {
        // Prepare was cancelled with the startup flag that removes the marker
        // file. Persist this to the DB.
        // If the startup flag is used it should be used on all OMs to avoid
        // divergence.
        metadataManager.getTransactionInfoTable().delete(PREPARE_MARKER_KEY);
      }
    } else if (hasMarkerFile) {
      // Marker file present without a DB entry. This should not happen: even
      // if a prepare request failed partway through, OM should replay it so
      // both the DB and marker file exist.
      throw new OMException("Prepare marker file found on startup without "
          + "a corresponding database entry. Corrupt prepare state.",
          ResultCodes.PREPARE_FAILED);
    }
    // Else, no DB or marker file, OM is not prepared.
  }
}
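To make the outcome of the method above concrete, a short illustrative check (inside OzoneManager, after the method has run; the interpretation of the prepared state is an assumption based on the comments above):

  if (prepareState.getState().getStatus() == PrepareStatus.PREPARE_COMPLETED) {
    // OM restarted in the prepared state: the prepare gate stays up and write
    // requests are blocked until prepare is cancelled.
  } else {
    // No DB marker and no marker file (or prepare was cancelled): normal
    // request processing.
  }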