Usage of com.github.ambry.messageformat.MessageFormatInputStream in the LinkedIn Ambry project.
Class ReplicationTestHelper, method createPutMessage.
/**
 * Constructs an entire message with header, blob properties, user metadata and blob content.
 * @param id id for which the message has to be constructed.
 * @param accountId accountId of the blob
 * @param containerId containerId of the blob
 * @param enableEncryption {@code true} if encryption needs to be enabled. {@code false} otherwise
 * @param lifeVersion lifeVersion with which the message has to be constructed.
 * @return a {@link PutMsgInfoAndBuffer} holding a {@link ByteBuffer} with the entire serialized message and the
 *         associated {@link MessageInfo}.
 * @throws MessageFormatException
 * @throws IOException
 */
public static PutMsgInfoAndBuffer createPutMessage(StoreKey id, short accountId, short containerId, boolean enableEncryption, short lifeVersion) throws MessageFormatException, IOException {
  // Seed from the id so the same key deterministically yields the same message content.
  Random random = new Random(id.getID().hashCode());
  int blobSize = 501 + random.nextInt(500);
  int userMetadataSize = random.nextInt(blobSize / 2);
  int encryptionKeySize = random.nextInt(blobSize / 4);
  byte[] blobContent = new byte[blobSize];
  byte[] userMetadata = new byte[userMetadataSize];
  random.nextBytes(blobContent);
  random.nextBytes(userMetadata);
  // The key bytes stay all-zero; only its presence (non-null) drives the "encrypted" flag below.
  byte[] encryptionKey = enableEncryption ? new byte[encryptionKeySize] : null;
  BlobProperties properties = new BlobProperties(blobSize, "test", null, null, false, EXPIRY_TIME_MS - CONSTANT_TIME_MS, CONSTANT_TIME_MS, accountId, containerId, encryptionKey != null, null, null, null);
  MessageFormatInputStream putStream = new PutMessageFormatInputStream(id, encryptionKey == null ? null : ByteBuffer.wrap(encryptionKey), properties, ByteBuffer.wrap(userMetadata), new ByteBufferInputStream(ByteBuffer.wrap(blobContent)), blobSize, BlobType.DataBlob, lifeVersion);
  // Drain the stream into a single byte array so callers get a self-contained buffer.
  byte[] messageBytes = Utils.readBytesFromStream(putStream, (int) putStream.getSize());
  MessageInfo info = new MessageInfo(id, messageBytes.length, false, false, false, EXPIRY_TIME_MS, null, accountId, containerId, CONSTANT_TIME_MS, lifeVersion);
  return new PutMsgInfoAndBuffer(ByteBuffer.wrap(messageBytes), info);
}
Usage of com.github.ambry.messageformat.MessageFormatInputStream in the LinkedIn Ambry project.
Class ReplicationTestHelper, method getUndeleteMessage.
/**
 * Serializes an undelete record for the given {@code id}.
 * @param id the id for which the undelete message must be constructed.
 * @param accountId accountId of the blob
 * @param containerId containerId of the blob
 * @param lifeVersion lifeVersion of the undelete record
 * @param undeleteTimeMs operation time of the undelete in ms
 * @return {@link ByteBuffer} containing the entire serialized undelete message.
 * @throws MessageFormatException
 * @throws IOException
 */
public static ByteBuffer getUndeleteMessage(StoreKey id, short accountId, short containerId, short lifeVersion, long undeleteTimeMs) throws MessageFormatException, IOException {
  MessageFormatInputStream undeleteStream = new UndeleteMessageFormatInputStream(id, accountId, containerId, undeleteTimeMs, lifeVersion);
  int size = (int) undeleteStream.getSize();
  return ByteBuffer.wrap(Utils.readBytesFromStream(undeleteStream, size));
}
Usage of com.github.ambry.messageformat.MessageFormatInputStream in the LinkedIn Ambry project.
Class ReplicationTestHelper, method getDeleteMessage.
/**
 * Serializes a delete record for the given {@code id}.
 * @param id the id for which a delete message must be constructed.
 * @param accountId accountId of the blob
 * @param containerId containerId of the blob
 * @param deletionTimeMs operation time of the delete in ms
 * @param lifeVersion lifeVersion of the delete record
 * @return {@link ByteBuffer} representing the entire message.
 * @throws MessageFormatException
 * @throws IOException
 */
public static ByteBuffer getDeleteMessage(StoreKey id, short accountId, short containerId, long deletionTimeMs, short lifeVersion) throws MessageFormatException, IOException {
  MessageFormatInputStream deleteStream = new DeleteMessageFormatInputStream(id, accountId, containerId, deletionTimeMs, lifeVersion);
  int size = (int) deleteStream.getSize();
  return ByteBuffer.wrap(Utils.readBytesFromStream(deleteStream, size));
}
Usage of com.github.ambry.messageformat.MessageFormatInputStream in the LinkedIn Ambry project.
Class BlobStore, method delete.
/**
 * Marks the given blobs as deleted in this store.
 * <p>
 * For each info: validates that the key exists in the index, checks account/container authorization,
 * and resolves the lifeVersion to use (frontend requests inherit the index's current lifeVersion;
 * replication requests carry their own and must not conflict with a higher one already in the index).
 * Under {@code storeWriteLock}, re-validates against any entries added concurrently since the first
 * check, appends a delete record to the log for each info, then marks each key deleted in the index
 * and updates {@code blobStoreStats}.
 * @param infosToDelete the {@link MessageInfo}s describing the blobs to delete. Must contain no duplicates.
 * @throws StoreException if the store is not started, a key is missing ({@code ID_Not_Found}), already
 *         deleted ({@code ID_Deleted}), fails authorization ({@code Authorization_Failure}), conflicts on
 *         lifeVersion ({@code Life_Version_Conflict}), or on any other error ({@code Unknown_Error}).
 */
@Override
public void delete(List<MessageInfo> infosToDelete) throws StoreException {
checkStarted();
checkDuplicates(infosToDelete);
final Timer.Context context = metrics.deleteResponse.time();
try {
// Per-info bookkeeping, all indexed in lock-step with infosToDelete (position i).
List<IndexValue> indexValuesPriorToDelete = new ArrayList<>();
List<IndexValue> originalPuts = new ArrayList<>();
List<Short> lifeVersions = new ArrayList<>();
// Snapshot the index end offset; used later to detect concurrent writes before we take the lock.
Offset indexEndOffsetBeforeCheck = index.getCurrentEndOffset();
for (MessageInfo info : infosToDelete) {
IndexValue value = index.findKey(info.getStoreKey(), new FileSpan(index.getStartOffset(), indexEndOffsetBeforeCheck));
if (value == null) {
throw new StoreException("Cannot delete id " + info.getStoreKey() + " because it is not present in the index", StoreErrorCodes.ID_Not_Found);
}
if (!info.getStoreKey().isAccountContainerMatch(value.getAccountId(), value.getContainerId())) {
// Mismatched account/container: hard failure only when validation is enabled, otherwise log + count.
if (config.storeValidateAuthorization) {
throw new StoreException("DELETE authorization failure. Key: " + info.getStoreKey() + "Actually accountId: " + value.getAccountId() + "Actually containerId: " + value.getContainerId(), StoreErrorCodes.Authorization_Failure);
} else {
logger.warn("DELETE authorization failure. Key: {} Actually accountId: {} Actually containerId: {}", info.getStoreKey(), value.getAccountId(), value.getContainerId());
metrics.deleteAuthorizationFailureCount.inc();
}
}
short revisedLifeVersion = info.getLifeVersion();
if (info.getLifeVersion() == MessageInfo.LIFE_VERSION_FROM_FRONTEND) {
// This is a delete request from frontend
if (value.isDelete()) {
throw new StoreException("Cannot delete id " + info.getStoreKey() + " since it is already deleted in the index.", StoreErrorCodes.ID_Deleted);
}
// Frontend deletes adopt the lifeVersion currently in the index.
revisedLifeVersion = value.getLifeVersion();
} else {
// This is a delete request from replication
if (value.isDelete() && value.getLifeVersion() == info.getLifeVersion()) {
throw new StoreException("Cannot delete id " + info.getStoreKey() + " since it is already deleted in the index with lifeVersion " + value.getLifeVersion() + ".", StoreErrorCodes.ID_Deleted);
}
if (value.getLifeVersion() > info.getLifeVersion()) {
throw new StoreException("Cannot delete id " + info.getStoreKey() + " since it has a higher lifeVersion than the message info: " + value.getLifeVersion() + ">" + info.getLifeVersion(), StoreErrorCodes.Life_Version_Conflict);
}
}
indexValuesPriorToDelete.add(value);
lifeVersions.add(revisedLifeVersion);
if (!value.isDelete() && !value.isUndelete()) {
// Latest value is the PUT (possibly TTL-updated) itself.
originalPuts.add(value);
} else {
// Latest value is DELETE/UNDELETE; look further back for the original PUT entry.
originalPuts.add(index.findKey(info.getStoreKey(), new FileSpan(index.getStartOffset(), value.getOffset()), EnumSet.of(PersistentIndex.IndexEntryType.PUT)));
}
}
synchronized (storeWriteLock) {
Offset currentIndexEndOffset = index.getCurrentEndOffset();
if (!currentIndexEndOffset.equals(indexEndOffsetBeforeCheck)) {
// The index grew between the unlocked check and acquiring the lock; re-validate the new span.
FileSpan fileSpan = new FileSpan(indexEndOffsetBeforeCheck, currentIndexEndOffset);
int i = 0;
for (MessageInfo info : infosToDelete) {
IndexValue value = index.findKey(info.getStoreKey(), fileSpan, EnumSet.allOf(PersistentIndex.IndexEntryType.class));
if (value != null) {
// From these cases, we can have value being DELETE, TTL_UPDATE AND UNDELETE, we have to deal with them accordingly.
if (value.getLifeVersion() == lifeVersions.get(i)) {
if (value.isDelete()) {
throw new StoreException("Cannot delete id " + info.getStoreKey() + " since it is already deleted in the index.", StoreErrorCodes.ID_Deleted);
}
// value being ttl update is fine, we can just append DELETE to it.
} else {
// For the extreme case, we log it out and throw an exception.
logger.warn("Concurrent operation for id " + info.getStoreKey() + " in store " + dataDir + ". Newly added value " + value);
throw new StoreException("Cannot delete id " + info.getStoreKey() + " since there are concurrent operation while delete", StoreErrorCodes.Life_Version_Conflict);
}
indexValuesPriorToDelete.set(i, value);
}
i++;
}
}
// Build one delete record stream per info; revised lifeVersions go into the serialized records.
List<InputStream> inputStreams = new ArrayList<>(infosToDelete.size());
List<MessageInfo> updatedInfos = new ArrayList<>(infosToDelete.size());
int i = 0;
for (MessageInfo info : infosToDelete) {
MessageFormatInputStream stream = new DeleteMessageFormatInputStream(info.getStoreKey(), info.getAccountId(), info.getContainerId(), info.getOperationTimeMs(), lifeVersions.get(i));
// Don't change the lifeVersion here, there are other logic in markAsDeleted that relies on this lifeVersion.
updatedInfos.add(new MessageInfo(info.getStoreKey(), stream.getSize(), info.getAccountId(), info.getContainerId(), info.getOperationTimeMs(), info.getLifeVersion()));
inputStreams.add(stream);
i++;
}
Offset endOffsetOfLastMessage = log.getEndOffset();
// All delete records are written to the log as a single concatenated write set.
MessageFormatWriteSet writeSet = new MessageFormatWriteSet(new SequenceInputStream(Collections.enumeration(inputStreams)), updatedInfos, false);
writeSet.writeTo(log);
logger.trace("Store : {} delete mark written to log", dataDir);
int correspondingPutIndex = 0;
for (MessageInfo info : updatedInfos) {
// Advance through the log span message by message, marking each key deleted in the index.
FileSpan fileSpan = log.getFileSpanForMessage(endOffsetOfLastMessage, info.getSize());
IndexValue deleteIndexValue = index.markAsDeleted(info.getStoreKey(), fileSpan, null, info.getOperationTimeMs(), info.getLifeVersion());
endOffsetOfLastMessage = fileSpan.getEndOffset();
blobStoreStats.handleNewDeleteEntry(info.getStoreKey(), deleteIndexValue, originalPuts.get(correspondingPutIndex), indexValuesPriorToDelete.get(correspondingPutIndex));
correspondingPutIndex++;
}
logger.trace("Store : {} delete has been marked in the index ", dataDir);
}
onSuccess();
} catch (StoreException e) {
// Only I/O errors mark the store unhealthy; other store errors are propagated as-is.
if (e.getErrorCode() == StoreErrorCodes.IOError) {
onError();
}
throw e;
} catch (Exception e) {
throw new StoreException("Unknown error while trying to delete blobs from store " + dataDir, e, StoreErrorCodes.Unknown_Error);
} finally {
context.stop();
}
}
Usage of com.github.ambry.messageformat.MessageFormatInputStream in the LinkedIn Ambry project.
Class BlobStore, method undelete.
/**
 * Undeletes the blob described by {@code info} in this store.
 * <p>
 * Validates the undelete against all existing index values for the key, resolves the lifeVersion
 * (frontend requests, which carry no lifeVersion, get latest + 1), checks account/container
 * authorization, then under {@code storeWriteLock} re-checks for concurrent DELETE/UNDELETE entries,
 * writes the undelete record to the log, marks the key undeleted in the index and updates
 * {@code blobStoreStats}.
 * @param info the {@link MessageInfo} describing the blob to undelete.
 * @return the lifeVersion actually written for the undelete record.
 * @throws StoreException if the store is not started, the undelete is invalid, authorization fails
 *         ({@code Authorization_Failure}), a concurrent operation conflicts
 *         ({@code Life_Version_Conflict} or {@link IdUndeletedStoreException}), or on any other error
 *         ({@code Unknown_Error}).
 */
@Override
public short undelete(MessageInfo info) throws StoreException {
checkStarted();
final Timer.Context context = metrics.undeleteResponse.time();
// The lifeVersion from message info is -1 when the undelete method is invoked by frontend request, we have to
// get the legit lifeVersion before we can write undelete record to log segment.
short revisedLifeVersion = info.getLifeVersion();
try {
StoreKey id = info.getStoreKey();
// Snapshot the index end offset; used later to detect concurrent writes before we take the lock.
Offset indexEndOffsetBeforeCheck = index.getCurrentEndOffset();
short lifeVersionFromMessageInfo = info.getLifeVersion();
List<IndexValue> values = index.findAllIndexValuesForKey(id, new FileSpan(index.getStartOffset(), indexEndOffsetBeforeCheck));
// Check if the undelete record is valid.
index.validateSanityForUndelete(id, values, lifeVersionFromMessageInfo);
// values is ordered newest-first: head is the latest entry, tail is the original PUT.
IndexValue latestValue = values.get(0);
IndexValue originalPut = values.get(values.size() - 1);
if (!IndexValue.hasLifeVersion(revisedLifeVersion)) {
// Frontend request (no lifeVersion supplied): bump past the latest value's lifeVersion.
revisedLifeVersion = (short) (latestValue.getLifeVersion() + 1);
}
MessageFormatInputStream stream = new UndeleteMessageFormatInputStream(id, info.getAccountId(), info.getContainerId(), info.getOperationTimeMs(), revisedLifeVersion);
// Update info to add stream size;
info = new MessageInfo(id, stream.getSize(), info.getAccountId(), info.getContainerId(), info.getOperationTimeMs(), revisedLifeVersion);
ArrayList<MessageInfo> infoList = new ArrayList<>();
infoList.add(info);
MessageFormatWriteSet writeSet = new MessageFormatWriteSet(stream, infoList, false);
if (!info.getStoreKey().isAccountContainerMatch(latestValue.getAccountId(), latestValue.getContainerId())) {
// Mismatched account/container: hard failure only when validation is enabled, otherwise log + count.
if (config.storeValidateAuthorization) {
throw new StoreException("UNDELETE authorization failure. Key: " + info.getStoreKey() + " Actually accountId: " + latestValue.getAccountId() + "Actually containerId: " + latestValue.getContainerId(), StoreErrorCodes.Authorization_Failure);
} else {
logger.warn("UNDELETE authorization failure. Key: {} Actually accountId: {} Actually containerId: {}", info.getStoreKey(), latestValue.getAccountId(), latestValue.getContainerId());
metrics.undeleteAuthorizationFailureCount.inc();
}
}
synchronized (storeWriteLock) {
Offset currentIndexEndOffset = index.getCurrentEndOffset();
if (!currentIndexEndOffset.equals(indexEndOffsetBeforeCheck)) {
// The index grew between the unlocked check and acquiring the lock; re-validate the new span.
FileSpan fileSpan = new FileSpan(indexEndOffsetBeforeCheck, currentIndexEndOffset);
IndexValue value = index.findKey(info.getStoreKey(), fileSpan, EnumSet.of(PersistentIndex.IndexEntryType.DELETE, PersistentIndex.IndexEntryType.UNDELETE));
if (value != null) {
if (value.isUndelete() && value.getLifeVersion() == revisedLifeVersion) {
// Might get an concurrent undelete from both replication and frontend.
throw new IdUndeletedStoreException("Can't undelete id " + info.getStoreKey() + " in " + dataDir + " since concurrent operations", value.getLifeVersion());
} else {
logger.warn("Revised lifeVersion is " + revisedLifeVersion + " last value is " + value);
throw new StoreException("Cannot undelete id " + info.getStoreKey() + " since concurrent operation occurs", StoreErrorCodes.Life_Version_Conflict);
}
}
}
Offset endOffsetOfLastMessage = log.getEndOffset();
writeSet.writeTo(log);
logger.trace("Store : {} undelete mark written to log", dataDir);
FileSpan fileSpan = log.getFileSpanForMessage(endOffsetOfLastMessage, info.getSize());
// we still use lifeVersion from message info here so that we can re-verify the sanity of undelete request in persistent index.
IndexValue newUndelete = index.markAsUndeleted(info.getStoreKey(), fileSpan, null, info.getOperationTimeMs(), lifeVersionFromMessageInfo);
blobStoreStats.handleNewUndeleteEntry(info.getStoreKey(), newUndelete, originalPut, latestValue);
}
onSuccess();
return revisedLifeVersion;
} catch (StoreException e) {
// Only I/O errors mark the store unhealthy; other store errors are propagated as-is.
if (e.getErrorCode() == StoreErrorCodes.IOError) {
onError();
}
throw e;
} catch (Exception e) {
throw new StoreException("Unknown error while trying to undelete blobs from store " + dataDir, e, StoreErrorCodes.Unknown_Error);
} finally {
context.stop();
}
}
Aggregations