Use of com.github.ambry.store.StoreErrorCodes in project ambry by LinkedIn.
Class AmbryServerRequestsTest, method miscUndeleteFailuresTest.
/**
 * Exercises various failure paths for UNDELETEs: store exceptions, runtime
 * exceptions, unavailable stores and disk errors.
 * @throws Exception
 */
private void miscUndeleteFailuresTest() throws Exception {
  PartitionId partitionId = clusterMap.getWritablePartitionIds(DEFAULT_PARTITION_CLASS).get(0);
  // every StoreErrorCodes value must translate into the mapped ServerErrorCode
  for (StoreErrorCodes storeErrorCode : StoreErrorCodes.values()) {
    if (storeErrorCode == StoreErrorCodes.ID_Undeleted) {
      // ID_Undeleted has a dedicated exception type carrying the life version
      MockStorageManager.storeException = new IdUndeletedStoreException("expected", (short) 1);
    } else {
      MockStorageManager.storeException = new StoreException("expected", storeErrorCode);
    }
    sendAndVerifyOperationRequest(RequestOrResponseType.UndeleteRequest, Collections.singletonList(partitionId),
        ErrorMapping.getStoreErrorMapping(storeErrorCode), true, null);
    MockStorageManager.storeException = null;
  }
  // a runtime exception from the store surfaces as Unknown_Error
  MockStorageManager.runtimeException = new RuntimeException("expected");
  sendAndVerifyOperationRequest(RequestOrResponseType.UndeleteRequest, Collections.singletonList(partitionId),
      ServerErrorCode.Unknown_Error, true, null);
  MockStorageManager.runtimeException = null;
  // store is not started/is stopped/otherwise unavailable - Replica_Unavailable
  storageManager.returnNullStore = true;
  sendAndVerifyOperationRequest(RequestOrResponseType.UndeleteRequest, Collections.singletonList(partitionId),
      ServerErrorCode.Replica_Unavailable, false, null);
  storageManager.returnNullStore = false;
  // PartitionUnknown is hard to simulate without betraying knowledge of the internals of MockClusterMap.
  // a disk error on the replica yields Disk_Unavailable
  ReplicaId replica = findReplica(partitionId);
  clusterMap.onReplicaEvent(replica, ReplicaEventType.Disk_Error);
  sendAndVerifyOperationRequest(RequestOrResponseType.UndeleteRequest, Collections.singletonList(partitionId),
      ServerErrorCode.Disk_Unavailable, false, null);
  clusterMap.onReplicaEvent(replica, ReplicaEventType.Disk_Ok);
  // request disabled is checked in request control tests
}
Use of com.github.ambry.store.StoreErrorCodes in project ambry by LinkedIn.
Class AmbryServerRequestsTest, method miscTtlUpdateFailuresTest.
/**
 * Exercises various failure paths for TTL updates: store exceptions, runtime
 * exceptions, unavailable stores and disk errors.
 * @throws InterruptedException
 * @throws IOException
 */
private void miscTtlUpdateFailuresTest() throws InterruptedException, IOException {
  PartitionId partitionId = clusterMap.getWritablePartitionIds(DEFAULT_PARTITION_CLASS).get(0);
  // every StoreErrorCodes value must translate into the mapped ServerErrorCode
  for (StoreErrorCodes storeErrorCode : StoreErrorCodes.values()) {
    MockStorageManager.storeException = new StoreException("expected", storeErrorCode);
    sendAndVerifyOperationRequest(RequestOrResponseType.TtlUpdateRequest, Collections.singletonList(partitionId),
        ErrorMapping.getStoreErrorMapping(storeErrorCode), true, null);
    MockStorageManager.storeException = null;
  }
  // a runtime exception from the store surfaces as Unknown_Error
  MockStorageManager.runtimeException = new RuntimeException("expected");
  sendAndVerifyOperationRequest(RequestOrResponseType.TtlUpdateRequest, Collections.singletonList(partitionId),
      ServerErrorCode.Unknown_Error, true, null);
  MockStorageManager.runtimeException = null;
  // store is not started/is stopped/otherwise unavailable - Replica_Unavailable
  storageManager.returnNullStore = true;
  sendAndVerifyOperationRequest(RequestOrResponseType.TtlUpdateRequest, Collections.singletonList(partitionId),
      ServerErrorCode.Replica_Unavailable, false, null);
  storageManager.returnNullStore = false;
  // PartitionUnknown is hard to simulate without betraying knowledge of the internals of MockClusterMap.
  // a disk error on the replica yields Disk_Unavailable
  ReplicaId replica = findReplica(partitionId);
  clusterMap.onReplicaEvent(replica, ReplicaEventType.Disk_Error);
  sendAndVerifyOperationRequest(RequestOrResponseType.TtlUpdateRequest, Collections.singletonList(partitionId),
      ServerErrorCode.Disk_Unavailable, false, null);
  clusterMap.onReplicaEvent(replica, ReplicaEventType.Disk_Ok);
  // request disabled is checked in request control tests
}
Use of com.github.ambry.store.StoreErrorCodes in project ambry by LinkedIn.
Class CloudBlobStore, method updateTtl.
/**
 * {@inheritDoc}
 * Currently, the only supported operation is to set the TTL to infinite (i.e. no arbitrary increase or decrease)
 * @param infos The list of messages that need to be updated.
 * @throws StoreException
 */
@Override
public void updateTtl(List<MessageInfo> infos) throws StoreException {
  checkStarted();
  // Note: We skipped uploading the blob on PUT record if the TTL was below threshold (threshold should be 0 for non DR cases).
  try {
    for (MessageInfo info : infos) {
      // only TTL removal (-> infinite) is supported; any other change is rejected
      if (info.getExpirationTimeInMs() != Utils.Infinite_Time) {
        throw new StoreException("CloudBlobStore only supports removing the expiration time",
            StoreErrorCodes.Update_Not_Allowed);
      }
      if (!info.isTtlUpdated()) {
        logger.error("updateTtl() is called but msgInfo.isTtlUpdated is not set. msgInfo: {}", info);
        vcrMetrics.updateTtlNotSetError.inc();
        continue;
      }
      BlobId blobId = (BlobId) info.getStoreKey();
      requestAgent.doWithRetries(() -> updateTtlIfNeeded(blobId), "UpdateTtl", partitionId.toPathString());
    }
  } catch (CloudStorageException cse) {
    // re-throw an underlying StoreException unchanged; otherwise translate the HTTP status
    if (cse.getCause() instanceof StoreException) {
      throw (StoreException) cse.getCause();
    }
    if (cse.getStatusCode() == STATUS_NOT_FOUND) {
      throw new StoreException(cse, StoreErrorCodes.ID_Not_Found);
    }
    throw new StoreException(cse, StoreErrorCodes.IOError);
  }
}
Use of com.github.ambry.store.StoreErrorCodes in project ambry by LinkedIn.
Class LatchBasedInMemoryCloudDestination, method getBlobMetadata.
/**
 * Returns the metadata of the known blobs among {@code blobIds}, keyed by blob id string.
 * Unknown blob ids are silently omitted from the result.
 * @param blobIds the blob ids to look up.
 * @throws CloudStorageException if a simulated error (hard or queued) is configured.
 */
@Override
public Map<String, CloudBlobMetadata> getBlobMetadata(List<BlobId> blobIds) throws CloudStorageException {
  // a hard error always fires; otherwise consume one queued transient error.
  // Queue.poll() already returns null when the queue is empty, so no size() check is needed.
  StoreErrorCodes serverError = hardError != null ? hardError : serverErrors.poll();
  if (serverError != null) {
    throw new CloudStorageException("getBlobMetadata simulated error",
        new StoreException("getBlobMetadata simulated error", serverError));
  }
  Map<String, CloudBlobMetadata> result = new HashMap<>();
  for (BlobId blobId : blobIds) {
    // single lookup instead of containsKey + get
    Pair<CloudBlobMetadata, byte[]> entry = map.get(blobId);
    if (entry != null) {
      result.put(blobId.toString(), entry.getFirst());
    }
  }
  return result;
}
Use of com.github.ambry.store.StoreErrorCodes in project ambry by LinkedIn.
Class LatchBasedInMemoryCloudDestination, method uploadBlob.
/**
 * Uploads the blob content to the in-memory map, recording upload metrics and
 * counting down the latch for tracked blob ids.
 * @param blobId the id of the blob to upload.
 * @param blobSize the size of the blob, or -1 when unknown.
 * @param cloudBlobMetadata the metadata to store alongside the content.
 * @param blobInputStream the stream supplying the blob content.
 * @return {@code true} if the blob was uploaded, {@code false} if it already existed.
 * @throws CloudStorageException if a simulated error (hard or queued) is configured.
 */
@Override
public synchronized boolean uploadBlob(BlobId blobId, long blobSize, CloudBlobMetadata cloudBlobMetadata, InputStream blobInputStream) throws CloudStorageException {
  // a hard error always fires; otherwise consume one queued transient error.
  // Queue.poll() already returns null when the queue is empty, so no size() check is needed.
  StoreErrorCodes serverError = hardError != null ? hardError : serverErrors.poll();
  if (serverError != null) {
    throw new CloudStorageException("uploadBlob simulated error",
        new StoreException("uploadBlob simulated error", serverError));
  }
  if (map.containsKey(blobId)) {
    return false;
  }
  // Note: blobSize can be -1 when we dont know the actual blob size being uploaded.
  // So we have to do buffered reads to handle that case.
  // Clamp to 8 KiB so a blobSize above Integer.MAX_VALUE cannot overflow the
  // (int) cast into a negative array size; the read loop drains the stream regardless.
  int bufferSize = (blobSize == -1) ? 1024 : (int) Math.min(blobSize, 8192);
  byte[] buffer = new byte[bufferSize];
  ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
  try {
    int bytesRead;
    // assign-and-test avoids the duplicated read() call; the guard already
    // ensures bytesRead > 0, so no extra clamping of the counter is needed
    while ((bytesRead = blobInputStream.read(buffer)) > 0) {
      outputStream.write(buffer, 0, bytesRead);
      bytesUploadedCounter.addAndGet(bytesRead);
    }
  } catch (IOException ex) {
    throw new RuntimeException(ex);
  }
  cloudBlobMetadata.setLastUpdateTime(System.currentTimeMillis());
  map.put(blobId, new Pair<>(cloudBlobMetadata, outputStream.toByteArray()));
  changeFeed.add(blobId);
  blobsUploadedCounter.incrementAndGet();
  if (blobIdsToTrack.remove(blobId)) {
    uploadLatch.countDown();
  }
  return true;
}
Aggregations