Use of com.github.ambry.utils.CrcInputStream in project ambry by LinkedIn.
The class DeserializedBlob, method deserializeAndGetBlobWithVersion.
static DeserializedBlob deserializeAndGetBlobWithVersion(InputStream stream) throws IOException, MessageFormatException {
  CrcInputStream crcStream = new CrcInputStream(stream);
  DataInputStream inputStream = new DataInputStream(crcStream);
  short version = inputStream.readShort();
  switch (version) {
    case Blob_Version_V1:
      return new DeserializedBlob(Blob_Version_V1, Blob_Format_V1.deserializeBlobRecord(crcStream));
    case Blob_Version_V2:
      return new DeserializedBlob(Blob_Version_V2, Blob_Format_V2.deserializeBlobRecord(crcStream));
    default:
      throw new MessageFormatException("data version not supported", MessageFormatErrorCodes.Unknown_Format_Version);
  }
}
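The pattern above wraps the raw stream in a CrcInputStream once and layers a DataInputStream over it for primitive reads, so every byte consumed, including the version short, feeds the running checksum that the versioned record deserializer is then expected to verify against a trailing CRC. A minimal sketch of that read-and-verify idiom, with illustrative method and field names rather than ambry's actual record layout:

static byte[] readCrcCheckedPayload(InputStream rawStream, int payloadSize) throws IOException {
  CrcInputStream crcStream = new CrcInputStream(rawStream);
  DataInputStream dataStream = new DataInputStream(crcStream);
  byte[] payload = new byte[payloadSize];
  dataStream.readFully(payload);
  // getValue() must be called before consuming the stored CRC: pulling those
  // eight bytes through the CrcInputStream would fold them into the checksum.
  long computedCrc = crcStream.getValue();
  long storedCrc = dataStream.readLong();
  if (computedCrc != storedCrc) {
    throw new IOException("CRC mismatch while reading record");
  }
  return payload;
}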
Use of com.github.ambry.utils.CrcInputStream in project ambry by LinkedIn.
The class DeserializedBlob, method deserializeAndGetBlobPropertiesWithVersion.
static DeserializedBlobProperties deserializeAndGetBlobPropertiesWithVersion(InputStream stream) throws IOException, MessageFormatException {
  CrcInputStream crcStream = new CrcInputStream(stream);
  DataInputStream inputStream = new DataInputStream(crcStream);
  short version = inputStream.readShort();
  switch (version) {
    case BlobProperties_Version_V1:
      return new DeserializedBlobProperties(BlobProperties_Version_V1, BlobProperties_Format_V1.deserializeBlobPropertiesRecord(crcStream));
    default:
      throw new MessageFormatException("blob property version " + version + " not supported", MessageFormatErrorCodes.Unknown_Format_Version);
  }
}
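Both helpers accept any InputStream, so a record held in memory can be deserialized straight from a ByteArrayInputStream. A hedged caller sketch; the file name is illustrative and the accessor names on DeserializedBlobProperties are assumptions, not something the snippet above confirms:

byte[] rawRecord = Files.readAllBytes(Paths.get("blob_properties.bin")); // illustrative source
DeserializedBlobProperties deserialized =
    deserializeAndGetBlobPropertiesWithVersion(new ByteArrayInputStream(rawRecord));
short version = deserialized.getVersion();                     // assumed accessor
BlobProperties properties = deserialized.getBlobProperties();  // assumed accessor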
Use of com.github.ambry.utils.CrcInputStream in project ambry by LinkedIn.
The class ServerHardDeleteTest, method ensureCleanupTokenCatchesUp.
/**
 * Waits and ensures that the hard delete cleanup token catches up to the expected token value.
 * @param path the path to the cleanup token.
 * @param mockClusterMap the {@link MockClusterMap} being used for the cluster.
 * @param expectedTokenValue the expected value that the cleanup token should contain. The method keeps reopening
 *                           the file and re-reading the value until this value is reached or a predefined
 *                           timeout expires.
 * @throws Exception if there were any I/O errors or the sleep gets interrupted.
 */
void ensureCleanupTokenCatchesUp(String path, MockClusterMap mockClusterMap, long expectedTokenValue) throws Exception {
  final int TIMEOUT = 10000;
  File cleanupTokenFile = new File(path, "cleanuptoken");
  StoreFindToken endToken;
  long parsedTokenValue = -1;
  long endTime = SystemTime.getInstance().milliseconds() + TIMEOUT;
  do {
    if (cleanupTokenFile.exists()) {
      /* The cleanup token format is as follows:
         --
         token_version
         startTokenForRecovery
         endTokenForRecovery
         pause flag
         numBlobsInRange
         --
         blob1_blobReadOptions {version, offset, sz, ttl, key}
         blob2_blobReadOptions
         ....
         blobN_blobReadOptions
         --
         length_of_blob1_messageStoreRecoveryInfo
         blob1_messageStoreRecoveryInfo {headerVersion, userMetadataVersion, userMetadataSize, blobRecordVersion,
                                         blobType, blobStreamSize}
         length_of_blob2_messageStoreRecoveryInfo
         blob2_messageStoreRecoveryInfo
         ....
         length_of_blobN_messageStoreRecoveryInfo
         blobN_messageStoreRecoveryInfo
         crc
         --
       */
      CrcInputStream crcStream = new CrcInputStream(new FileInputStream(cleanupTokenFile));
      DataInputStream stream = new DataInputStream(crcStream);
      try {
        short version = stream.readShort();
        Assert.assertEquals(version, HardDeleter.Cleanup_Token_Version_V1);
        StoreKeyFactory storeKeyFactory = Utils.getObj("com.github.ambry.commons.BlobIdFactory", mockClusterMap);
        FindTokenFactory factory = Utils.getObj("com.github.ambry.store.StoreFindTokenFactory", storeKeyFactory);
        // The start token is read only to advance the stream; the end token is what the test tracks.
        factory.getFindToken(stream);
        endToken = (StoreFindToken) factory.getFindToken(stream);
        Offset endTokenOffset = endToken.getOffset();
        parsedTokenValue = endTokenOffset == null ? -1 : endTokenOffset.getOffset();
        boolean pauseFlag = stream.readByte() == (byte) 1;
        int num = stream.readInt();
        List<StoreKey> storeKeyList = new ArrayList<StoreKey>(num);
        for (int i = 0; i < num; i++) {
          // Read BlobReadOptions: version, offset, size, ttl, key.
          short blobReadOptionsVersion = stream.readShort();
          switch (blobReadOptionsVersion) {
            case 1:
              Offset.fromBytes(stream);
              stream.readLong(); // size
              stream.readLong(); // ttl
              StoreKey key = storeKeyFactory.getStoreKey(stream);
              storeKeyList.add(key);
              break;
            default:
              Assert.fail("Unsupported BlobReadOptions version: " + blobReadOptionsVersion);
          }
        }
        for (int i = 0; i < num; i++) {
          // Read each blob's messageStoreRecoveryInfo and check that the keys line up.
          int length = stream.readInt();
          short headerVersion = stream.readShort();
          short userMetadataVersion = stream.readShort();
          int userMetadataSize = stream.readInt();
          short blobRecordVersion = stream.readShort();
          if (blobRecordVersion == MessageFormatRecord.Blob_Version_V2) {
            short blobType = stream.readShort();
          }
          long blobStreamSize = stream.readLong();
          StoreKey key = storeKeyFactory.getStoreKey(stream);
          Assert.assertTrue(storeKeyList.get(i).equals(key));
        }
        // getValue() must be read before the stored CRC, since pulling the trailing
        // long through the CrcInputStream would update the running checksum.
        long crc = crcStream.getValue();
        Assert.assertEquals(crc, stream.readLong());
        Thread.sleep(1000);
      } finally {
        stream.close();
      }
    }
  } while (SystemTime.getInstance().milliseconds() < endTime && parsedTokenValue < expectedTokenValue);
  Assert.assertEquals(expectedTokenValue, parsedTokenValue);
}
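Since closing the outer DataInputStream also closes the CrcInputStream and the FileInputStream it wraps, the try/finally here (and in the HardDeleter method below) can be written as try-with-resources on Java 7+. A sketch, keeping the CrcInputStream reference so the checksum can still be verified:

CrcInputStream crcStream = new CrcInputStream(new FileInputStream(cleanupTokenFile));
try (DataInputStream stream = new DataInputStream(crcStream)) {
  short version = stream.readShort();
  // ... parse the remaining fields as above ...
  // Arguments evaluate left to right, so getValue() runs before readLong().
  Assert.assertEquals(crcStream.getValue(), stream.readLong());
}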
Use of com.github.ambry.utils.CrcInputStream in project ambry by LinkedIn.
The class HardDeleter, method readCleanupTokenAndPopulateRecoveryRange.
/**
 * Reads from the cleanupToken file and adds into hardDeleteRecoveryRange the info for all the messages persisted
 * in the file. If the cleanupToken file is non-existent or fails its CRC check, the tokens are reset.
 * This method calls into the MessageStoreHardDelete interface to let it read the persisted recovery metadata from
 * the stream.
 * @throws StoreException on a version mismatch, a CRC mismatch, or a read failure.
 */
private void readCleanupTokenAndPopulateRecoveryRange() throws IOException, StoreException {
  File cleanupTokenFile = new File(dataDir, Cleanup_Token_Filename);
  StoreFindToken recoveryStartToken = recoveryEndToken = new StoreFindToken();
  startToken = startTokenBeforeLogFlush = startTokenSafeToPersist = endToken = new StoreFindToken();
  if (cleanupTokenFile.exists()) {
    CrcInputStream crcStream = new CrcInputStream(new FileInputStream(cleanupTokenFile));
    DataInputStream stream = new DataInputStream(crcStream);
    try {
      short version = stream.readShort();
      switch (version) {
        case Cleanup_Token_Version_V0:
          recoveryStartToken = StoreFindToken.fromBytes(stream, factory);
          recoveryEndToken = StoreFindToken.fromBytes(stream, factory);
          hardDeleteRecoveryRange = new HardDeletePersistInfo(stream, factory);
          break;
        case Cleanup_Token_Version_V1:
          recoveryStartToken = StoreFindToken.fromBytes(stream, factory);
          recoveryEndToken = StoreFindToken.fromBytes(stream, factory);
          paused.set(stream.readByte() == (byte) 1);
          hardDeleteRecoveryRange = new HardDeletePersistInfo(stream, factory);
          break;
        default:
          hardDeleteRecoveryRange.clear();
          metrics.hardDeleteIncompleteRecoveryCount.inc();
          throw new StoreException("Invalid version in cleanup token " + dataDir, StoreErrorCodes.Index_Version_Error);
      }
      long crc = crcStream.getValue();
      if (crc != stream.readLong()) {
        hardDeleteRecoveryRange.clear();
        metrics.hardDeleteIncompleteRecoveryCount.inc();
        throw new StoreException("Crc check does not match for cleanup token file for dataDir " + dataDir + " aborting. ",
            StoreErrorCodes.Illegal_Index_State);
      }
      if (config.storeSetFilePermissionEnabled) {
        Files.setPosixFilePermissions(cleanupTokenFile.toPath(), config.storeDataFilePermission);
      }
    } catch (IOException e) {
      hardDeleteRecoveryRange.clear();
      metrics.hardDeleteIncompleteRecoveryCount.inc();
      throw new StoreException("Failed to read cleanup token ", e, StoreErrorCodes.Initialization_Error);
    } finally {
      stream.close();
    }
  }
  /* If all the information was successfully read and there are no crc check failures, then the next time hard
     deletes are done, it can start at least at the recoveryStartToken.
   */
  startToken = startTokenBeforeLogFlush = startTokenSafeToPersist = endToken = recoveryStartToken;
}
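For context, the write side that produces this file mirrors the layout read above. A minimal sketch of persisting a V1 token, assuming com.github.ambry.utils.CrcOutputStream as the output-side counterpart of CrcInputStream, and assuming StoreFindToken and HardDeletePersistInfo expose toBytes() serializers (the fromBytes calls above suggest, but do not confirm, such a method); tempTokenFile is an illustrative name:

CrcOutputStream crcOutputStream = new CrcOutputStream(new FileOutputStream(tempTokenFile));
DataOutputStream writer = new DataOutputStream(crcOutputStream);
try {
  writer.writeShort(Cleanup_Token_Version_V1);
  writer.write(startToken.toBytes());               // startTokenForRecovery
  writer.write(endToken.toBytes());                 // endTokenForRecovery
  writer.writeByte(paused.get() ? 1 : 0);           // pause flag (V1 only)
  writer.write(hardDeleteRecoveryRange.toBytes());  // numBlobsInRange + per-blob records
  // Capture the CRC before writing it; the trailing long itself is not part
  // of the checksummed region.
  writer.writeLong(crcOutputStream.getValue());
} finally {
  writer.close();
}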