Use of com.github.ambry.store.StoreException in project ambry by linkedin.
The class CloudBlobStore, method preUndeleteValidation.
/**
 * Validates existing metadata in the cloud destination against the requested undelete.
 * Note that this method also has a side effect of updating {@code updateFields}.
 * @param metadata existing {@link CloudBlobMetadata} in cloud.
 * @param key {@link StoreKey} being updated.
 * @param updateFields {@link Map} of fields and values being updated.
 * @return {@code false} only for VCR, when the life version in the cloud is at least as recent as the requested
 *         life version; {@code true} if validation is successful.
 * @throws StoreException if validation fails.
 */
private boolean preUndeleteValidation(CloudBlobMetadata metadata, StoreKey key, Map<String, Object> updateFields)
    throws StoreException {
  validateAccountAndContainer(Collections.singletonMap(key.getID(), metadata), Collections.singletonList(key));
  short requestedLifeVersion = (short) updateFields.get(FIELD_LIFE_VERSION);
  if (isVcr) {
    // For VCR, apply the undelete only if the requested life version is more recent than the one in the cloud.
    // The cloud store is not a strongly consistent implementation and can have some inconsistencies.
    return metadata.getLifeVersion() < requestedLifeVersion;
  }
  if (metadata.isExpired()) {
    throw new StoreException("Id " + key + " already expired in cloud", StoreErrorCodes.TTL_Expired);
  } else if (metadata.isUndeleted()) {
    throw new StoreException("Id " + key + " is already undeleted in cloud", StoreErrorCodes.ID_Undeleted);
  } else if (!metadata.isDeleted()) {
    throw new StoreException("Id " + key + " is not deleted yet in cloud", StoreErrorCodes.ID_Not_Deleted);
  } else if (metadata.getDeletionTime() + TimeUnit.HOURS.toMillis(storeConfig.storeDeletedMessageRetentionHours)
      < System.currentTimeMillis()) {
    throw new StoreException("Id " + key + " already permanently deleted in cloud",
        StoreErrorCodes.ID_Deleted_Permanently);
  }
  // Update life version to the appropriate value for frontend requests. Store it as a Short so that the
  // (short) cast on the read above does not fail on an autoboxed Integer.
  updateFields.put(FIELD_LIFE_VERSION, (short) (metadata.getLifeVersion() + 1));
  return true;
}
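To see how these error codes surface to a caller, here is a hypothetical sketch of branching on the StoreErrorCodes thrown by preUndeleteValidation; the surrounding variables and the handling policy are assumptions for illustration, not ambry code.

// Hypothetical caller sketch: translating the error codes thrown by
// preUndeleteValidation into a response. Handling policy is illustrative only.
try {
  if (preUndeleteValidation(metadata, key, updateFields)) {
    // validation passed: apply the undelete in the cloud destination
  }
} catch (StoreException e) {
  switch (e.getErrorCode()) {
    case TTL_Expired:            // blob expired, so an undelete is pointless
    case ID_Deleted_Permanently: // past the deleted-message retention window
      // surface a permanent failure to the client (policy is an assumption)
      break;
    case ID_Undeleted:
      // already undeleted; a caller might treat this as success
      break;
    default:
      throw e;
  }
}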
Use of com.github.ambry.store.StoreException in project ambry by linkedin.
The class CloudBlobStore, method updateTtl.
/**
 * {@inheritDoc}
 * Currently, the only supported operation is to set the TTL to infinite (i.e. no arbitrary increase or decrease).
 * @param infos the list of messages that need to be updated.
 * @throws StoreException if a message requests an unsupported TTL change or the cloud update fails.
 */
@Override
public void updateTtl(List<MessageInfo> infos) throws StoreException {
  checkStarted();
  // Note: we skipped uploading the blob on the PUT record if the TTL was below the threshold
  // (the threshold should be 0 for non-DR cases).
  try {
    for (MessageInfo msgInfo : infos) {
      if (msgInfo.getExpirationTimeInMs() != Utils.Infinite_Time) {
        throw new StoreException("CloudBlobStore only supports removing the expiration time",
            StoreErrorCodes.Update_Not_Allowed);
      }
      if (msgInfo.isTtlUpdated()) {
        BlobId blobId = (BlobId) msgInfo.getStoreKey();
        requestAgent.doWithRetries(() -> updateTtlIfNeeded(blobId), "UpdateTtl", partitionId.toPathString());
      } else {
        logger.error("updateTtl() is called but msgInfo.isTtlUpdated is not set. msgInfo: {}", msgInfo);
        vcrMetrics.updateTtlNotSetError.inc();
      }
    }
  } catch (CloudStorageException ex) {
    if (ex.getCause() instanceof StoreException) {
      throw (StoreException) ex.getCause();
    }
    StoreErrorCodes errorCode =
        (ex.getStatusCode() == STATUS_NOT_FOUND) ? StoreErrorCodes.ID_Not_Found : StoreErrorCodes.IOError;
    throw new StoreException(ex, errorCode);
  }
}
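The catch block above repeats a pattern used throughout CloudBlobStore (see also get below): unwrap a StoreException that the cloud layer wrapped, otherwise map the HTTP status to a StoreErrorCodes. A minimal sketch of that pattern extracted into a helper follows; the method toStoreException is hypothetical, not part of ambry.

// Hypothetical helper capturing the CloudStorageException-to-StoreException
// unwrapping pattern used in updateTtl and get.
private static StoreException toStoreException(CloudStorageException ex) {
  if (ex.getCause() instanceof StoreException) {
    // the cloud layer wrapped a StoreException; rethrow the original
    return (StoreException) ex.getCause();
  }
  StoreErrorCodes errorCode =
      (ex.getStatusCode() == STATUS_NOT_FOUND) ? StoreErrorCodes.ID_Not_Found : StoreErrorCodes.IOError;
  return new StoreException(ex, errorCode);
}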
Use of com.github.ambry.store.StoreException in project ambry by linkedin.
The class CloudBlobStore, method get.
@Override
public StoreInfo get(List<? extends StoreKey> ids, EnumSet<StoreGetOptions> storeGetOptions) throws StoreException {
  checkStarted();
  checkStoreKeyDuplicates(ids);
  List<CloudMessageReadSet.BlobReadInfo> blobReadInfos = new ArrayList<>(ids.size());
  List<MessageInfo> messageInfos = new ArrayList<>(ids.size());
  try {
    List<BlobId> blobIdList = ids.stream().map(key -> (BlobId) key).collect(Collectors.toList());
    Map<String, CloudBlobMetadata> cloudBlobMetadataListMap =
        requestAgent.doWithRetries(() -> cloudDestination.getBlobMetadata(blobIdList), "GetBlobMetadata",
            partitionId.toPathString());
    // Throw StoreException with ID_Not_Found if cloudBlobMetadataListMap is smaller than expected.
    if (cloudBlobMetadataListMap.size() < blobIdList.size()) {
      // The map is keyed by the blob id string, so the lookup must use blobId.getID().
      Set<BlobId> missingBlobs = blobIdList.stream()
          .filter(blobId -> !cloudBlobMetadataListMap.containsKey(blobId.getID()))
          .collect(Collectors.toSet());
      throw new StoreException("Some of the keys were missing in the cloud metadata store: " + missingBlobs,
          StoreErrorCodes.ID_Not_Found);
    }
    long currentTimeStamp = System.currentTimeMillis();
    // Validate cloud metadata; may throw StoreException with ID_Deleted, TTL_Expired or Authorization_Failure.
    validateCloudMetadata(cloudBlobMetadataListMap, storeGetOptions, currentTimeStamp, ids);
    for (BlobId blobId : blobIdList) {
      CloudBlobMetadata blobMetadata = cloudBlobMetadataListMap.get(blobId.getID());
      // TODO: need to add ttlUpdated to CloudBlobMetadata so we can use it here.
      // For now, set ttlUpdated = true for all permanent blobs, so the correct ttl is applied by GetOperation.
      boolean ttlUpdated = blobMetadata.getExpirationTime() == Utils.Infinite_Time;
      boolean deleted = blobMetadata.getDeletionTime() != Utils.Infinite_Time;
      MessageInfo messageInfo =
          new MessageInfo(blobId, blobMetadata.getSize(), deleted, ttlUpdated, blobMetadata.isUndeleted(),
              blobMetadata.getExpirationTime(), null, (short) blobMetadata.getAccountId(),
              (short) blobMetadata.getContainerId(), getOperationTime(blobMetadata), blobMetadata.getLifeVersion());
      messageInfos.add(messageInfo);
      blobReadInfos.add(new CloudMessageReadSet.BlobReadInfo(blobMetadata, blobId));
    }
  } catch (CloudStorageException e) {
    if (e.getCause() instanceof StoreException) {
      throw (StoreException) e.getCause();
    } else {
      throw new StoreException(e, StoreErrorCodes.IOError);
    }
  }
  CloudMessageReadSet messageReadSet = new CloudMessageReadSet(blobReadInfos, this);
  return new StoreInfo(messageReadSet, messageInfos);
}
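As a usage sketch, a hypothetical caller of get could tolerate missing ids by catching the ID_Not_Found case; the store and keys variables are assumptions for illustration.

// Hypothetical usage sketch: fetch blobs and tolerate missing ids.
StoreInfo storeInfo = null;
try {
  storeInfo = store.get(keys, EnumSet.noneOf(StoreGetOptions.class));
} catch (StoreException e) {
  if (e.getErrorCode() != StoreErrorCodes.ID_Not_Found) {
    throw e;
  }
  // at least one key had no metadata in the cloud store; fall through with storeInfo == null
}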
Use of com.github.ambry.store.StoreException in project ambry by linkedin.
The class CloudBlobStore, method findEntriesSince.
@Override
public FindInfo findEntriesSince(FindToken token, long maxTotalSizeOfEntries, String hostname,
    String remoteReplicaPath) throws StoreException {
  try {
    FindResult findResult = requestAgent.doWithRetries(
        () -> cloudDestination.findEntriesSince(partitionId.toPathString(), token, maxTotalSizeOfEntries),
        "FindEntriesSince", partitionId.toPathString());
    if (findResult.getMetadataList().isEmpty()) {
      return new FindInfo(Collections.emptyList(), findResult.getUpdatedFindToken());
    }
    List<MessageInfo> messageEntries = new ArrayList<>();
    for (CloudBlobMetadata metadata : findResult.getMetadataList()) {
      messageEntries.add(getMessageInfoFromMetadata(metadata));
    }
    return new FindInfo(messageEntries, findResult.getUpdatedFindToken());
  } catch (CloudStorageException | IOException ex) {
    throw new StoreException(ex, StoreErrorCodes.IOError);
  }
}
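A hypothetical replication-style loop could page through a partition with findEntriesSince, carrying the updated token forward on each pass; the FindInfo getter names (getMessageEntries, getFindToken) and the store, startToken and batchSizeBytes variables are assumptions here, not confirmed by this snippet.

// Hypothetical paging sketch: drain a partition's entries, token by token.
FindToken token = startToken;
while (true) {
  FindInfo findInfo = store.findEntriesSince(token, batchSizeBytes, null, null);
  if (findInfo.getMessageEntries().isEmpty()) {
    break; // caught up with the cloud store
  }
  for (MessageInfo info : findInfo.getMessageEntries()) {
    // process each entry, e.g. enqueue it for replication
  }
  token = findInfo.getFindToken();
}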
Use of com.github.ambry.store.StoreException in project ambry by linkedin.
The class ReplicaThread, method writeMessagesToLocalStoreAndAdvanceTokens.
/**
 * Writes the messages (if any) to the local stores from the remote stores for the missing keys, and advances tokens.
 * @param exchangeMetadataResponseList the list of metadata responses from the remote node.
 * @param getResponse the {@link GetResponse} that contains the missing messages. This may be null if there are no
 *                    missing messages to write as per the exchange metadata response. In that case this method will
 *                    simply advance the tokens for every store.
 * @param replicasToReplicatePerNode the list of remote replicas for the remote node.
 * @param remoteNode the remote node from which replication needs to happen.
 * @throws IOException if an I/O error occurs while reading the response stream.
 */
private void writeMessagesToLocalStoreAndAdvanceTokens(List<ExchangeMetadataResponse> exchangeMetadataResponseList,
    GetResponse getResponse, List<RemoteReplicaInfo> replicasToReplicatePerNode, DataNodeId remoteNode)
    throws IOException {
  int partitionResponseInfoIndex = 0;
  long totalBytesFixed = 0;
  long totalBlobsFixed = 0;
  long startTime = SystemTime.getInstance().milliseconds();
  for (int i = 0; i < exchangeMetadataResponseList.size(); i++) {
    ExchangeMetadataResponse exchangeMetadataResponse = exchangeMetadataResponseList.get(i);
    RemoteReplicaInfo remoteReplicaInfo = replicasToReplicatePerNode.get(i);
    if (exchangeMetadataResponse.serverErrorCode == ServerErrorCode.No_Error) {
      if (exchangeMetadataResponse.missingStoreKeys.size() > 0) {
        PartitionResponseInfo partitionResponseInfo =
            getResponse.getPartitionResponseInfoList().get(partitionResponseInfoIndex);
        responseHandler.onEvent(remoteReplicaInfo.getReplicaId(), partitionResponseInfo.getErrorCode());
        partitionResponseInfoIndex++;
        if (partitionResponseInfo.getPartition().compareTo(remoteReplicaInfo.getReplicaId().getPartitionId()) != 0) {
          throw new IllegalStateException(
              "The partition id from partitionResponseInfo " + partitionResponseInfo.getPartition()
                  + " and from remoteReplicaInfo " + remoteReplicaInfo.getReplicaId().getPartitionId()
                  + " are not the same");
        }
        if (partitionResponseInfo.getErrorCode() == ServerErrorCode.No_Error) {
          try {
            List<MessageInfo> messageInfoList = partitionResponseInfo.getMessageInfoList();
            logger.trace("Remote node: {} Thread name: {} Remote replica: {} Messages to fix: {} "
                    + "Partition: {} Local mount path: {}", remoteNode, threadName, remoteReplicaInfo.getReplicaId(),
                exchangeMetadataResponse.missingStoreKeys, remoteReplicaInfo.getReplicaId().getPartitionId(),
                remoteReplicaInfo.getLocalReplicaId().getMountPath());
            MessageFormatWriteSet writeset = null;
            if (validateMessageStream) {
              MessageSievingInputStream validMessageDetectionInputStream =
                  new MessageSievingInputStream(getResponse.getInputStream(), messageInfoList, storeKeyFactory,
                      metricRegistry);
              if (validMessageDetectionInputStream.hasInvalidMessages()) {
                replicationMetrics.incrementInvalidMessageError(partitionResponseInfo.getPartition());
                logger.error("Out of " + messageInfoList.size() + " messages, " + (messageInfoList.size()
                    - validMessageDetectionInputStream.getValidMessageInfoList().size())
                    + " invalid messages were found in message stream from " + remoteReplicaInfo.getReplicaId());
              }
              messageInfoList = validMessageDetectionInputStream.getValidMessageInfoList();
              if (messageInfoList.size() == 0) {
                logger.error("MessageInfoList is of size 0 as all messages are invalidated");
              } else {
                writeset = new MessageFormatWriteSet(validMessageDetectionInputStream, messageInfoList, false);
                remoteReplicaInfo.getLocalStore().put(writeset);
              }
            } else {
              writeset = new MessageFormatWriteSet(getResponse.getInputStream(), messageInfoList, true);
              remoteReplicaInfo.getLocalStore().put(writeset);
            }
            for (MessageInfo messageInfo : messageInfoList) {
              totalBytesFixed += messageInfo.getSize();
              logger.trace("Remote node: {} Thread name: {} Remote replica: {} Message replicated: {} Partition: {} "
                      + "Local mount path: {} Message size: {}", remoteNode, threadName,
                  remoteReplicaInfo.getReplicaId(), messageInfo.getStoreKey(),
                  remoteReplicaInfo.getReplicaId().getPartitionId(),
                  remoteReplicaInfo.getLocalReplicaId().getMountPath(), messageInfo.getSize());
              if (notification != null) {
                notification.onBlobReplicaCreated(dataNodeId.getHostname(), dataNodeId.getPort(),
                    messageInfo.getStoreKey().getID(), BlobReplicaSourceType.REPAIRED);
              }
            }
            totalBlobsFixed += messageInfoList.size();
            remoteReplicaInfo.setToken(exchangeMetadataResponse.remoteToken);
            remoteReplicaInfo.setLocalLagFromRemoteInBytes(exchangeMetadataResponse.localLagFromRemoteInBytes);
            logger.trace("Remote node: {} Thread name: {} Remote replica: {} Token after speaking to remote node: {}",
                remoteNode, threadName, remoteReplicaInfo.getReplicaId(), exchangeMetadataResponse.remoteToken);
          } catch (StoreException e) {
            if (e.getErrorCode() != StoreErrorCodes.Already_Exist) {
              replicationMetrics.updateLocalStoreError(remoteReplicaInfo.getReplicaId());
              logger.error("Remote node: " + remoteNode + " Thread name: " + threadName + " Remote replica: "
                  + remoteReplicaInfo.getReplicaId(), e);
            }
          }
        } else if (partitionResponseInfo.getErrorCode() == ServerErrorCode.Blob_Deleted) {
          replicationMetrics.blobDeletedOnGetCount.inc();
          logger.trace("One of the blobs to GET is deleted: Remote node: {} Thread name: {} Remote replica: {}",
              remoteNode, threadName, remoteReplicaInfo.getReplicaId());
        } else {
          replicationMetrics.updateGetRequestError(remoteReplicaInfo.getReplicaId());
          logger.error("Remote node: {} Thread name: {} Remote replica: {} Server error: {}", remoteNode, threadName,
              remoteReplicaInfo.getReplicaId(), partitionResponseInfo.getErrorCode());
        }
      } else {
        // There are no missing keys; just advance the token.
        remoteReplicaInfo.setToken(exchangeMetadataResponse.remoteToken);
        remoteReplicaInfo.setLocalLagFromRemoteInBytes(exchangeMetadataResponse.localLagFromRemoteInBytes);
        logger.trace("Remote node: {} Thread name: {} Remote replica: {} Token after speaking to remote node: {}",
            remoteNode, threadName, remoteReplicaInfo.getReplicaId(), exchangeMetadataResponse.remoteToken);
      }
    }
  }
  long batchStoreWriteTime = SystemTime.getInstance().milliseconds() - startTime;
  replicationMetrics.updateBatchStoreWriteTime(batchStoreWriteTime, totalBytesFixed, totalBlobsFixed,
      replicatingFromRemoteColo, replicatingOverSsl, datacenterName);
}
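The StoreException handling above treats Already_Exist as benign, since a message may already have been written by another replication path. The idiom in isolation, as a minimal sketch (localStore and writeSet are assumptions for illustration):

// Tolerating duplicate puts during replication catch-up.
try {
  localStore.put(writeSet);
} catch (StoreException e) {
  if (e.getErrorCode() != StoreErrorCodes.Already_Exist) {
    throw e; // only Already_Exist is benign here
  }
  // the message was already replicated by another path; safe to ignore
}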