use of com.github.ambry.store.StoreException in project ambry by linkedin.
the class LatchBasedInMemoryCloudDestination method undeleteBlob.
@Override
public short undeleteBlob(BlobId blobId, short lifeVersion, CloudUpdateValidator cloudUpdateValidator) throws CloudStorageException {
  StoreErrorCodes serverError = hardError != null ? hardError : serverErrors.size() > 0 ? serverErrors.poll() : null;
  if (serverError != null) {
    throw new CloudStorageException("undeleteBlob simulated error", new StoreException("undeleteBlob simulated error", serverError));
  }
  if (map.containsKey(blobId)) {
    if (!MessageInfo.hasLifeVersion(lifeVersion)) {
      lifeVersion = map.get(blobId).getFirst().getLifeVersion();
      lifeVersion++;
    }
    map.get(blobId).getFirst().setLifeVersion(lifeVersion);
    map.get(blobId).getFirst().setDeletionTime(Utils.Infinite_Time);
    map.get(blobId).getFirst().setLastUpdateTime(System.currentTimeMillis());
    changeFeed.add(blobId);
    return map.get(blobId).getFirst().getLifeVersion();
  } else {
    throw new CloudStorageException(String.format("Cannot update lifeversion as blob %s is not found.", blobId.getID()), null, CloudBlobStore.STATUS_NOT_FOUND, false, null);
  }
}
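The wrapped StoreException is what lets tests assert on the simulated failure. A minimal sketch of how a caller might unwrap it, assuming the queued error surfaces as the CloudStorageException's cause and JUnit asserts are statically imported (the destination, blobId and cloudUpdateValidator variables here are illustrative, not from the source above):

try {
  destination.undeleteBlob(blobId, (short) -1, cloudUpdateValidator);
  fail("Expected the queued simulated error to be thrown");
} catch (CloudStorageException cse) {
  // Unwrap the StoreException that the test double attached as the cause.
  StoreException cause = (StoreException) cse.getCause();
  assertEquals(StoreErrorCodes.IOError, cause.getErrorCode());
}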
use of com.github.ambry.store.StoreException in project ambry by linkedin.
the class CloudBlobStoreIntegrationTest method testDeleteFromVcr.
/**
 * Test {@link CloudBlobStore#delete} method for vcr.
 */
@Test
public void testDeleteFromVcr() throws StoreException {
  // First upload a blob with life version 2.
  MockMessageWriteSet messageWriteSet = new MockMessageWriteSet();
  addBlobToMessageSet(messageWriteSet, Utils.Infinite_Time, accountId, containerId, partitionId, operationTime, (short) 2);
  cloudBlobStore.put(messageWriteSet);
  // Verify that the blob was uploaded with the expected metadata.
  StoreInfo storeInfo = cloudBlobStore.get(messageWriteSet.getMessageSetInfo().stream().map(MessageInfo::getStoreKey).collect(Collectors.toList()), EnumSet.allOf(StoreGetOptions.class));
  assertEquals("Unexpected life version", messageWriteSet.getMessageSetInfo().get(0).getLifeVersion(), storeInfo.getMessageReadSetInfo().get(0).getLifeVersion());
  assertEquals("Unexpected delete status", messageWriteSet.getMessageSetInfo().get(0).isDeleted(), storeInfo.getMessageReadSetInfo().get(0).isDeleted());
  // A delete with a smaller life version should fail silently without updating the life version.
  MessageInfo messageInfo = messageWriteSet.getMessageSetInfo().get(0);
  MessageInfo deleteMessageInfo = new MessageInfo(messageInfo.getStoreKey(), messageInfo.getSize(), messageInfo.isDeleted(), messageInfo.isTtlUpdated(), messageInfo.isUndeleted(), messageInfo.getExpirationTimeInMs(), messageInfo.getCrc(), messageInfo.getAccountId(), messageInfo.getContainerId(), messageInfo.getOperationTimeMs(), (short) 1);
  cloudBlobStore.delete(Collections.singletonList(deleteMessageInfo));
  storeInfo = cloudBlobStore.get(messageWriteSet.getMessageSetInfo().stream().map(MessageInfo::getStoreKey).collect(Collectors.toList()), EnumSet.allOf(StoreGetOptions.class));
  assertEquals("Unexpected life version", messageWriteSet.getMessageSetInfo().get(0).getLifeVersion(), storeInfo.getMessageReadSetInfo().get(0).getLifeVersion());
  assertEquals("Unexpected delete status", messageWriteSet.getMessageSetInfo().get(0).isDeleted(), storeInfo.getMessageReadSetInfo().get(0).isDeleted());
  // A delete with the same life version should pass without changing the life version.
  deleteMessageInfo = new MessageInfo(messageInfo.getStoreKey(), messageInfo.getSize(), messageInfo.isDeleted(), messageInfo.isTtlUpdated(), messageInfo.isUndeleted(), messageInfo.getExpirationTimeInMs(), messageInfo.getCrc(), messageInfo.getAccountId(), messageInfo.getContainerId(), messageInfo.getOperationTimeMs(), messageInfo.getLifeVersion());
  cloudBlobStore.delete(Collections.singletonList(deleteMessageInfo));
  storeInfo = cloudBlobStore.get(messageWriteSet.getMessageSetInfo().stream().map(MessageInfo::getStoreKey).collect(Collectors.toList()), EnumSet.allOf(StoreGetOptions.class));
  assertEquals("Unexpected life version", messageWriteSet.getMessageSetInfo().get(0).getLifeVersion(), storeInfo.getMessageReadSetInfo().get(0).getLifeVersion());
  assertTrue("Unexpected delete status", storeInfo.getMessageReadSetInfo().get(0).isDeleted());
  // Deleting a deleted blob with a higher life version should update the life version.
  deleteMessageInfo = new MessageInfo(messageInfo.getStoreKey(), messageInfo.getSize(), messageInfo.isDeleted(), messageInfo.isTtlUpdated(), messageInfo.isUndeleted(), messageInfo.getExpirationTimeInMs(), messageInfo.getCrc(), messageInfo.getAccountId(), messageInfo.getContainerId(), messageInfo.getOperationTimeMs(), (short) 3);
  cloudBlobStore.delete(Collections.singletonList(deleteMessageInfo));
  storeInfo = cloudBlobStore.get(messageWriteSet.getMessageSetInfo().stream().map(MessageInfo::getStoreKey).collect(Collectors.toList()), EnumSet.allOf(StoreGetOptions.class));
  assertEquals("Unexpected life version", 3, storeInfo.getMessageReadSetInfo().get(0).getLifeVersion());
  assertTrue("Unexpected delete status", storeInfo.getMessageReadSetInfo().get(0).isDeleted());
  // Deleting again with a smaller life version should fail with an exception.
  deleteMessageInfo = new MessageInfo(messageInfo.getStoreKey(), messageInfo.getSize(), messageInfo.isDeleted(), messageInfo.isTtlUpdated(), messageInfo.isUndeleted(), messageInfo.getExpirationTimeInMs(), messageInfo.getCrc(), messageInfo.getAccountId(), messageInfo.getContainerId(), messageInfo.getOperationTimeMs(), (short) 1);
  try {
    cloudBlobStore.delete(Collections.singletonList(deleteMessageInfo));
    fail("Delete should fail with an ID_Deleted StoreException");
  } catch (StoreException ex) {
    assertEquals("Unexpected error code", StoreErrorCodes.ID_Deleted, ex.getErrorCode());
  }
  storeInfo = cloudBlobStore.get(messageWriteSet.getMessageSetInfo().stream().map(MessageInfo::getStoreKey).collect(Collectors.toList()), EnumSet.allOf(StoreGetOptions.class));
  assertEquals("Unexpected life version", 3, storeInfo.getMessageReadSetInfo().get(0).getLifeVersion());
  assertTrue("Unexpected delete status", storeInfo.getMessageReadSetInfo().get(0).isDeleted());
  // Restart the cloud blob store to clear its cache. Deleting again with a smaller life version should now fail silently without updating anything.
  cloudBlobStore.shutdown();
  cloudBlobStore.start();
  cloudBlobStore.delete(Collections.singletonList(deleteMessageInfo));
  storeInfo = cloudBlobStore.get(messageWriteSet.getMessageSetInfo().stream().map(MessageInfo::getStoreKey).collect(Collectors.toList()), EnumSet.allOf(StoreGetOptions.class));
  assertEquals("Unexpected life version", 3, storeInfo.getMessageReadSetInfo().get(0).getLifeVersion());
  assertTrue("Unexpected delete status", storeInfo.getMessageReadSetInfo().get(0).isDeleted());
}
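The test rebuilds the same MessageInfo four times, varying only the life version. A hypothetical helper like the one below (not part of the project, shown only for illustration) could make that intent clearer; it uses only the constructor and getters already exercised above:

private static MessageInfo withLifeVersion(MessageInfo info, short lifeVersion) {
  // Copy every field from the original message and override only the life version.
  return new MessageInfo(info.getStoreKey(), info.getSize(), info.isDeleted(), info.isTtlUpdated(), info.isUndeleted(),
      info.getExpirationTimeInMs(), info.getCrc(), info.getAccountId(), info.getContainerId(),
      info.getOperationTimeMs(), lifeVersion);
}

// Possible usage in the test above:
// cloudBlobStore.delete(Collections.singletonList(withLifeVersion(messageInfo, (short) 1)));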
use of com.github.ambry.store.StoreException in project ambry by linkedin.
the class CloudBlobStoreIntegrationTest method testUpdateTtl.
/**
 * Test {@link CloudBlobStore#updateTtl} method.
 */
@Test
public void testUpdateTtl() throws StoreException {
  MockMessageWriteSet messageWriteSet = new MockMessageWriteSet();
  StoreConfig storeConfig = new StoreConfig(verifiableProperties);
  CloudConfig cloudConfig = new CloudConfig(verifiableProperties);
  long now = System.currentTimeMillis();
  long expirationTimeMs = now;
  if (isVcr) {
    // The vcr doesn't upload a blob that is within CloudConfig#vcrMinTtlDays of expiry.
    expirationTimeMs += Math.max(TimeUnit.DAYS.toMillis(cloudConfig.vcrMinTtlDays), TimeUnit.SECONDS.toMillis(storeConfig.storeTtlUpdateBufferTimeSeconds));
  } else {
    expirationTimeMs += TimeUnit.SECONDS.toMillis(storeConfig.storeTtlUpdateBufferTimeSeconds);
  }
  expirationTimeMs += 100000;
  addBlobToMessageSet(messageWriteSet, expirationTimeMs, accountId, containerId, partitionId, operationTime, (short) -1);
  cloudBlobStore.put(messageWriteSet);
  // Verify that the blob was uploaded with the expected metadata.
  StoreInfo storeInfo = cloudBlobStore.get(messageWriteSet.getMessageSetInfo().stream().map(MessageInfo::getStoreKey).collect(Collectors.toList()), EnumSet.allOf(StoreGetOptions.class));
  assertFalse("Unexpected ttl update status", storeInfo.getMessageReadSetInfo().get(0).isTtlUpdated());
  assertEquals("Unexpected expiration time", expirationTimeMs, storeInfo.getMessageReadSetInfo().get(0).getExpirationTimeInMs());
  // Do a ttl update without setting ttl update flag.
  MessageInfo ttlUpdateMessageInfo = new MessageInfo(messageWriteSet.getMessageSetInfo().get(0).getStoreKey(), 100, false, true, -1, accountId, containerId, now);
  cloudBlobStore.updateTtl(Collections.singletonList(ttlUpdateMessageInfo));
  storeInfo = cloudBlobStore.get(messageWriteSet.getMessageSetInfo().stream().map(MessageInfo::getStoreKey).collect(Collectors.toList()), EnumSet.allOf(StoreGetOptions.class));
  assertTrue("Unexpected ttl update status", storeInfo.getMessageReadSetInfo().get(0).isTtlUpdated());
  assertEquals("Unexpected expiration time", -1, storeInfo.getMessageReadSetInfo().get(0).getExpirationTimeInMs());
  // Do a ttl update on an already-updated blob. It should fail silently.
  ttlUpdateMessageInfo = new MessageInfo(messageWriteSet.getMessageSetInfo().get(0).getStoreKey(), 100, false, true, -1, accountId, containerId, now);
  cloudBlobStore.updateTtl(Collections.singletonList(ttlUpdateMessageInfo));
  storeInfo = cloudBlobStore.get(messageWriteSet.getMessageSetInfo().stream().map(MessageInfo::getStoreKey).collect(Collectors.toList()), EnumSet.allOf(StoreGetOptions.class));
  assertTrue("Unexpected ttl update status", storeInfo.getMessageReadSetInfo().get(0).isTtlUpdated());
  assertEquals("Unexpected expiration time", -1, storeInfo.getMessageReadSetInfo().get(0).getExpirationTimeInMs());
  // Clear the cache by restarting the blob store. A ttl update on an already-updated blob should still fail silently.
  cloudBlobStore.shutdown();
  cloudBlobStore.start();
  ttlUpdateMessageInfo = new MessageInfo(messageWriteSet.getMessageSetInfo().get(0).getStoreKey(), 100, false, true, -1, accountId, containerId, now);
  cloudBlobStore.updateTtl(Collections.singletonList(ttlUpdateMessageInfo));
  storeInfo = cloudBlobStore.get(messageWriteSet.getMessageSetInfo().stream().map(MessageInfo::getStoreKey).collect(Collectors.toList()), EnumSet.allOf(StoreGetOptions.class));
  assertTrue("Unexpected ttl update status", storeInfo.getMessageReadSetInfo().get(0).isTtlUpdated());
  assertEquals("Unexpected expiration time", -1, storeInfo.getMessageReadSetInfo().get(0).getExpirationTimeInMs());
  // Delete the blob.
  cloudBlobStore.delete(Collections.singletonList(ttlUpdateMessageInfo));
  // A ttl update of a deleted blob should throw an ID_Deleted StoreException for the frontend and fail silently for the vcr.
  try {
    cloudBlobStore.updateTtl(Collections.singletonList(ttlUpdateMessageInfo));
    if (!isVcr) {
      fail("Update ttl of a deleted blob should fail for the frontend.");
    }
  } catch (StoreException ex) {
    assertEquals("Unexpected error code", StoreErrorCodes.ID_Deleted, ex.getErrorCode());
  }
  // Clear the cache by restarting the blob store. A ttl update of a deleted blob should still throw an ID_Deleted StoreException for the frontend.
  cloudBlobStore.shutdown();
  cloudBlobStore.start();
  try {
    cloudBlobStore.updateTtl(Collections.singletonList(ttlUpdateMessageInfo));
    if (!isVcr) {
      fail("Update ttl of a deleted blob should fail.");
    }
  } catch (StoreException ex) {
    assertEquals("Unexpected error code", StoreErrorCodes.ID_Deleted, ex.getErrorCode());
  }
}
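The expiration-time arithmetic at the top of the test is worth calling out: the blob must expire far enough in the future that neither the vcr's minimum-TTL filter nor the store's ttl-update buffer rejects it. A small sketch of that guard as a standalone helper, assuming only the config fields already read above (the helper itself is not part of the project):

private static long safeExpirationTimeMs(long nowMs, boolean isVcr, CloudConfig cloudConfig, StoreConfig storeConfig) {
  // The store refuses ttl updates too close to expiry; the vcr additionally skips blobs within vcrMinTtlDays of expiry.
  long bufferMs = TimeUnit.SECONDS.toMillis(storeConfig.storeTtlUpdateBufferTimeSeconds);
  if (isVcr) {
    bufferMs = Math.max(TimeUnit.DAYS.toMillis(cloudConfig.vcrMinTtlDays), bufferMs);
  }
  return nowMs + bufferMs + 100000; // same extra margin the test adds
}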
use of com.github.ambry.store.StoreException in project ambry by linkedin.
the class AmbryRequests method handlePutRequest.
@Override
public void handlePutRequest(NetworkRequest request) throws IOException, InterruptedException {
  PutRequest receivedRequest;
  if (request instanceof LocalChannelRequest) {
    // This is a case where handlePutRequest is called when frontends are writing to Azure. In this case, this method
    // is called by request handler threads running within the frontend router itself. So, the request can be directly
    // referenced as java objects without any need for deserialization.
    PutRequest sentRequest = (PutRequest) ((LocalChannelRequest) request).getRequestInfo().getRequest();
    // However, we will create a new PutRequest object to represent the received Put request since the blob content
    // 'buffer' in PutRequest is accessed as 'stream' while writing to Store. Also, crc value for this request
    // would be null since it is only calculated (on the fly) when sending the request to network. It might be okay to
    // use null crc here since the scenario for which we are using crc (i.e. possibility of collisions due to fast
    // replication) as described in this PR https://github.com/linkedin/ambry/pull/549 might not be applicable when
    // frontends are talking to Azure.
    receivedRequest = new PutRequest(sentRequest.getCorrelationId(), sentRequest.getClientId(), sentRequest.getBlobId(), sentRequest.getBlobProperties(), sentRequest.getUsermetadata(), sentRequest.getBlobSize(), sentRequest.getBlobType(), sentRequest.getBlobEncryptionKey(), new ByteBufInputStream(sentRequest.getBlob()), null);
  } else {
    InputStream is = request.getInputStream();
    DataInputStream dis = is instanceof DataInputStream ? (DataInputStream) is : new DataInputStream(is);
    receivedRequest = PutRequest.readFrom(dis, clusterMap);
  }
  long requestQueueTime = SystemTime.getInstance().milliseconds() - request.getStartTimeInMs();
  long totalTimeSpent = requestQueueTime;
  metrics.putBlobRequestQueueTimeInMs.update(requestQueueTime);
  metrics.putBlobRequestRate.mark();
  long startTime = SystemTime.getInstance().milliseconds();
  PutResponse response = null;
  try {
    ServerErrorCode error = validateRequest(receivedRequest.getBlobId().getPartition(), RequestOrResponseType.PutRequest, false);
    if (error != ServerErrorCode.No_Error) {
      logger.error("Validating put request failed with error {} for request {}", error, receivedRequest);
      response = new PutResponse(receivedRequest.getCorrelationId(), receivedRequest.getClientId(), error);
    } else {
      MessageFormatInputStream stream = new PutMessageFormatInputStream(receivedRequest.getBlobId(), receivedRequest.getBlobEncryptionKey(), receivedRequest.getBlobProperties(), receivedRequest.getUsermetadata(), receivedRequest.getBlobStream(), receivedRequest.getBlobSize(), receivedRequest.getBlobType());
      BlobProperties properties = receivedRequest.getBlobProperties();
      long expirationTime = Utils.addSecondsToEpochTime(receivedRequest.getBlobProperties().getCreationTimeInMs(), properties.getTimeToLiveInSeconds());
      MessageInfo info = new MessageInfo.Builder(receivedRequest.getBlobId(), stream.getSize(), properties.getAccountId(), properties.getContainerId(), properties.getCreationTimeInMs()).expirationTimeInMs(expirationTime).crc(receivedRequest.getCrc()).lifeVersion(MessageInfo.LIFE_VERSION_FROM_FRONTEND).build();
      ArrayList<MessageInfo> infoList = new ArrayList<>();
      infoList.add(info);
      MessageFormatWriteSet writeset = new MessageFormatWriteSet(stream, infoList, false);
      Store storeToPut = storeManager.getStore(receivedRequest.getBlobId().getPartition());
      storeToPut.put(writeset);
      response = new PutResponse(receivedRequest.getCorrelationId(), receivedRequest.getClientId(), ServerErrorCode.No_Error);
      metrics.blobSizeInBytes.update(receivedRequest.getBlobSize());
      metrics.blobUserMetadataSizeInBytes.update(receivedRequest.getUsermetadata().limit());
      if (notification != null) {
        notification.onBlobReplicaCreated(currentNode.getHostname(), currentNode.getPort(), receivedRequest.getBlobId().getID(), BlobReplicaSourceType.PRIMARY);
      }
    }
  } catch (StoreException e) {
    logger.error("Store exception on a put with error code {} for request {}", e.getErrorCode(), receivedRequest, e);
    if (e.getErrorCode() == StoreErrorCodes.Already_Exist) {
      metrics.idAlreadyExistError.inc();
    } else if (e.getErrorCode() == StoreErrorCodes.IOError) {
      metrics.storeIOError.inc();
    } else {
      metrics.unExpectedStorePutError.inc();
    }
    response = new PutResponse(receivedRequest.getCorrelationId(), receivedRequest.getClientId(), ErrorMapping.getStoreErrorMapping(e.getErrorCode()));
  } catch (Exception e) {
    logger.error("Unknown exception on a put for request {}", receivedRequest, e);
    response = new PutResponse(receivedRequest.getCorrelationId(), receivedRequest.getClientId(), ServerErrorCode.Unknown_Error);
  } finally {
    long processingTime = SystemTime.getInstance().milliseconds() - startTime;
    totalTimeSpent += processingTime;
    publicAccessLogger.info("{} {} processingTime {}", receivedRequest, response, processingTime);
    metrics.putBlobProcessingTimeInMs.update(processingTime);
    metrics.updatePutBlobProcessingTimeBySize(receivedRequest.getBlobSize(), processingTime);
  }
  sendPutResponse(requestResponseChannel, response, request, metrics.putBlobResponseQueueTimeInMs, metrics.putBlobSendTimeInMs, metrics.putBlobTotalTimeInMs, totalTimeSpent, receivedRequest.getBlobSize(), metrics);
}
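The error handling above follows a pattern shared by the other request handlers: count the store error in a metric, then translate it into a wire-level code with ErrorMapping. A stripped-down sketch of just that translation step, using only the calls that appear above (variable declarations trimmed for brevity):

ServerErrorCode errorCode;
try {
  storeToPut.put(writeset);
  errorCode = ServerErrorCode.No_Error;
} catch (StoreException e) {
  // ErrorMapping converts the store-level code into the code sent back to the client.
  errorCode = ErrorMapping.getStoreErrorMapping(e.getErrorCode());
}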
use of com.github.ambry.store.StoreException in project ambry by linkedin.
the class AmbryRequests method handleTtlUpdateRequest.
@Override
public void handleTtlUpdateRequest(NetworkRequest request) throws IOException, InterruptedException {
  TtlUpdateRequest updateRequest;
  if (request instanceof LocalChannelRequest) {
    // This is a case where handleTtlUpdateRequest is called when frontends are talking to Azure. In this case, this method
    // is called by request handler threads running within the frontend router itself. So, the request can be directly
    // referenced as java objects without any need for deserialization.
    updateRequest = (TtlUpdateRequest) ((LocalChannelRequest) request).getRequestInfo().getRequest();
  } else {
    updateRequest = TtlUpdateRequest.readFrom(new DataInputStream(request.getInputStream()), clusterMap);
  }
  long requestQueueTime = SystemTime.getInstance().milliseconds() - request.getStartTimeInMs();
  long totalTimeSpent = requestQueueTime;
  metrics.updateBlobTtlRequestQueueTimeInMs.update(requestQueueTime);
  metrics.updateBlobTtlRequestRate.mark();
  long startTime = SystemTime.getInstance().milliseconds();
  TtlUpdateResponse response = null;
  try {
    ServerErrorCode error = validateRequest(updateRequest.getBlobId().getPartition(), RequestOrResponseType.TtlUpdateRequest, false);
    if (error != ServerErrorCode.No_Error) {
      logger.error("Validating TtlUpdateRequest failed with error {} for request {}", error, updateRequest);
      response = new TtlUpdateResponse(updateRequest.getCorrelationId(), updateRequest.getClientId(), error);
    } else {
      BlobId convertedStoreKey = (BlobId) getConvertedStoreKeys(Collections.singletonList(updateRequest.getBlobId())).get(0);
      MessageInfo info = new MessageInfo.Builder(convertedStoreKey, -1, convertedStoreKey.getAccountId(), convertedStoreKey.getContainerId(), updateRequest.getOperationTimeInMs()).isTtlUpdated(true).expirationTimeInMs(updateRequest.getExpiresAtMs()).lifeVersion(MessageInfo.LIFE_VERSION_FROM_FRONTEND).build();
      Store store = storeManager.getStore(updateRequest.getBlobId().getPartition());
      store.updateTtl(Collections.singletonList(info));
      response = new TtlUpdateResponse(updateRequest.getCorrelationId(), updateRequest.getClientId(), ServerErrorCode.No_Error);
      if (notification != null) {
        notification.onBlobReplicaUpdated(currentNode.getHostname(), currentNode.getPort(), convertedStoreKey.getID(), BlobReplicaSourceType.PRIMARY, UpdateType.TTL_UPDATE, info);
      }
    }
  } catch (StoreException e) {
    boolean logInErrorLevel = false;
    if (e.getErrorCode() == StoreErrorCodes.ID_Not_Found) {
      metrics.idNotFoundError.inc();
    } else if (e.getErrorCode() == StoreErrorCodes.TTL_Expired) {
      metrics.ttlExpiredError.inc();
    } else if (e.getErrorCode() == StoreErrorCodes.ID_Deleted) {
      metrics.idDeletedError.inc();
    } else if (e.getErrorCode() == StoreErrorCodes.Authorization_Failure) {
      metrics.ttlUpdateAuthorizationFailure.inc();
    } else if (e.getErrorCode() == StoreErrorCodes.Already_Updated) {
      metrics.ttlAlreadyUpdatedError.inc();
    } else if (e.getErrorCode() == StoreErrorCodes.Update_Not_Allowed) {
      metrics.ttlUpdateRejectedError.inc();
    } else {
      logInErrorLevel = true;
      metrics.unExpectedStoreTtlUpdateError.inc();
    }
    if (logInErrorLevel) {
      logger.error("Store exception on a TTL update with error code {} for request {}", e.getErrorCode(), updateRequest, e);
    } else {
      logger.trace("Store exception on a TTL update with error code {} for request {}", e.getErrorCode(), updateRequest, e);
    }
    response = new TtlUpdateResponse(updateRequest.getCorrelationId(), updateRequest.getClientId(), ErrorMapping.getStoreErrorMapping(e.getErrorCode()));
  } catch (Exception e) {
    logger.error("Unknown exception for TTL update request {}", updateRequest, e);
    response = new TtlUpdateResponse(updateRequest.getCorrelationId(), updateRequest.getClientId(), ServerErrorCode.Unknown_Error);
    metrics.unExpectedStoreTtlUpdateError.inc();
  } finally {
    long processingTime = SystemTime.getInstance().milliseconds() - startTime;
    totalTimeSpent += processingTime;
    publicAccessLogger.info("{} {} processingTime {}", updateRequest, response, processingTime);
    metrics.updateBlobTtlProcessingTimeInMs.update(processingTime);
  }
  requestResponseChannel.sendResponse(response, request, new ServerNetworkResponseMetrics(metrics.updateBlobTtlResponseQueueTimeInMs, metrics.updateBlobTtlSendTimeInMs, metrics.updateBlobTtlTotalTimeInMs, null, null, totalTimeSpent));
}
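The long if/else-if chain that buckets StoreErrorCodes into metrics could equally be written as a switch; a sketch of that alternative, assuming StoreErrorCodes is an enum as the constant-style comparisons above suggest (behaviourally the same bookkeeping, not the project's actual code):

boolean logInErrorLevel = false;
switch (e.getErrorCode()) {
  case ID_Not_Found: metrics.idNotFoundError.inc(); break;
  case TTL_Expired: metrics.ttlExpiredError.inc(); break;
  case ID_Deleted: metrics.idDeletedError.inc(); break;
  case Authorization_Failure: metrics.ttlUpdateAuthorizationFailure.inc(); break;
  case Already_Updated: metrics.ttlAlreadyUpdatedError.inc(); break;
  case Update_Not_Allowed: metrics.ttlUpdateRejectedError.inc(); break;
  default:
    // Unrecognized store errors are unexpected, so log them at error level.
    logInErrorLevel = true;
    metrics.unExpectedStoreTtlUpdateError.inc();
}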