Use of com.github.ambry.messageformat.MessageFormatException in project ambry by LinkedIn.
The class StoredBlob, method makeGetResponse.
/**
 * Make a {@link GetResponse} for the given {@link GetRequest} for which the given {@link ServerErrorCode} was
 * encountered. The request could be for BlobInfo or for Blob (the only two options that the router would
 * request).
 * @param getRequest the {@link GetRequest} for which the response is being constructed.
 * @param getError the {@link ServerErrorCode} that was encountered.
 * @return the constructed {@link GetResponse}
 * @throws IOException if there was an error constructing the response.
 */
GetResponse makeGetResponse(GetRequest getRequest, ServerErrorCode getError) throws IOException {
  GetResponse getResponse;
  if (getError == ServerErrorCode.No_Error) {
    List<PartitionRequestInfo> infos = getRequest.getPartitionInfoList();
    if (infos.size() != 1 || infos.get(0).getBlobIds().size() != 1) {
      getError = ServerErrorCode.Unknown_Error;
    }
  }
  ServerErrorCode serverError;
  ServerErrorCode partitionError;
  boolean isDataBlob = false;
  try {
    String id = getRequest.getPartitionInfoList().get(0).getBlobIds().get(0).getID();
    isDataBlob = blobs.get(id).type == BlobType.DataBlob;
  } catch (Exception ignored) {
  }
  if (!getErrorOnDataBlobOnly || isDataBlob) {
    // set it in the partitionResponseInfo
    if (getError == ServerErrorCode.No_Error || getError == ServerErrorCode.Blob_Expired
        || getError == ServerErrorCode.Blob_Deleted || getError == ServerErrorCode.Blob_Not_Found
        || getError == ServerErrorCode.Blob_Authorization_Failure || getError == ServerErrorCode.Disk_Unavailable) {
      partitionError = getError;
      serverError = ServerErrorCode.No_Error;
    } else {
      serverError = getError;
      // does not matter - this will not be checked if serverError is not No_Error.
      partitionError = ServerErrorCode.No_Error;
    }
  } else {
    serverError = ServerErrorCode.No_Error;
    partitionError = ServerErrorCode.No_Error;
  }
  if (serverError == ServerErrorCode.No_Error) {
    int byteBufferSize;
    ByteBuffer byteBuffer;
    StoreKey key = getRequest.getPartitionInfoList().get(0).getBlobIds().get(0);
    short accountId = Account.UNKNOWN_ACCOUNT_ID;
    short containerId = Container.UNKNOWN_CONTAINER_ID;
    long operationTimeMs = Utils.Infinite_Time;
    StoredBlob blob = blobs.get(key.getID());
    ServerErrorCode processedError = errorForGet(key.getID(), blob, getRequest);
    MessageMetadata msgMetadata = null;
    if (processedError == ServerErrorCode.No_Error) {
      ByteBuffer buf = blobs.get(key.getID()).serializedSentPutRequest.duplicate();
      // read off the size
      buf.getLong();
      // read off the type.
      buf.getShort();
      PutRequest originalBlobPutReq =
          PutRequest.readFrom(new DataInputStream(new ByteBufferInputStream(buf)), clusterMap);
      switch (getRequest.getMessageFormatFlag()) {
        case BlobInfo:
          BlobProperties blobProperties = originalBlobPutReq.getBlobProperties();
          accountId = blobProperties.getAccountId();
          containerId = blobProperties.getContainerId();
          operationTimeMs = blobProperties.getCreationTimeInMs();
          ByteBuffer userMetadata = originalBlobPutReq.getUsermetadata();
          byteBufferSize = MessageFormatRecord.BlobProperties_Format_V1.getBlobPropertiesRecordSize(blobProperties)
              + MessageFormatRecord.UserMetadata_Format_V1.getUserMetadataSize(userMetadata);
          byteBuffer = ByteBuffer.allocate(byteBufferSize);
          if (originalBlobPutReq.getBlobEncryptionKey() != null) {
            msgMetadata = new MessageMetadata(originalBlobPutReq.getBlobEncryptionKey().duplicate());
          }
          MessageFormatRecord.BlobProperties_Format_V1.serializeBlobPropertiesRecord(byteBuffer, blobProperties);
          MessageFormatRecord.UserMetadata_Format_V1.serializeUserMetadataRecord(byteBuffer, userMetadata);
          break;
        case Blob:
          switch (blobFormatVersion) {
            case MessageFormatRecord.Blob_Version_V2:
              if (originalBlobPutReq.getBlobEncryptionKey() != null) {
                msgMetadata = new MessageMetadata(originalBlobPutReq.getBlobEncryptionKey().duplicate());
              }
              byteBufferSize = (int) MessageFormatRecord.Blob_Format_V2.getBlobRecordSize((int) originalBlobPutReq.getBlobSize());
              byteBuffer = ByteBuffer.allocate(byteBufferSize);
              MessageFormatRecord.Blob_Format_V2.serializePartialBlobRecord(byteBuffer,
                  (int) originalBlobPutReq.getBlobSize(), originalBlobPutReq.getBlobType());
              break;
            case MessageFormatRecord.Blob_Version_V1:
              byteBufferSize = (int) MessageFormatRecord.Blob_Format_V1.getBlobRecordSize((int) originalBlobPutReq.getBlobSize());
              byteBuffer = ByteBuffer.allocate(byteBufferSize);
              MessageFormatRecord.Blob_Format_V1.serializePartialBlobRecord(byteBuffer,
                  (int) originalBlobPutReq.getBlobSize());
              break;
            default:
              throw new IllegalStateException("Blob format version " + blobFormatVersion + " not supported.");
          }
          byteBuffer.put(Utils.readBytesFromStream(originalBlobPutReq.getBlobStream(), (int) originalBlobPutReq.getBlobSize()));
          Crc32 crc = new Crc32();
          crc.update(byteBuffer.array(), 0, byteBuffer.position());
          byteBuffer.putLong(crc.getValue());
          break;
        case All:
          blobProperties = originalBlobPutReq.getBlobProperties();
          accountId = blobProperties.getAccountId();
          containerId = blobProperties.getContainerId();
          userMetadata = originalBlobPutReq.getUsermetadata();
          operationTimeMs = originalBlobPutReq.getBlobProperties().getCreationTimeInMs();
          int blobHeaderSize = MessageFormatRecord.MessageHeader_Format_V2.getHeaderSize();
          int blobEncryptionRecordSize = originalBlobPutReq.getBlobEncryptionKey() != null
              ? MessageFormatRecord.BlobEncryptionKey_Format_V1.getBlobEncryptionKeyRecordSize(
                  originalBlobPutReq.getBlobEncryptionKey().duplicate())
              : 0;
          int blobPropertiesSize = MessageFormatRecord.BlobProperties_Format_V1.getBlobPropertiesRecordSize(blobProperties);
          int userMetadataSize = MessageFormatRecord.UserMetadata_Format_V1.getUserMetadataSize(userMetadata);
          int blobInfoSize = blobPropertiesSize + userMetadataSize;
          int blobRecordSize;
          switch (blobFormatVersion) {
            case MessageFormatRecord.Blob_Version_V2:
              blobRecordSize = (int) MessageFormatRecord.Blob_Format_V2.getBlobRecordSize((int) originalBlobPutReq.getBlobSize());
              break;
            case MessageFormatRecord.Blob_Version_V1:
              blobRecordSize = (int) MessageFormatRecord.Blob_Format_V1.getBlobRecordSize((int) originalBlobPutReq.getBlobSize());
              break;
            default:
              throw new IllegalStateException("Blob format version " + blobFormatVersion + " not supported.");
          }
          byteBufferSize = blobHeaderSize + key.sizeInBytes() + blobEncryptionRecordSize + blobInfoSize + blobRecordSize;
          byteBuffer = ByteBuffer.allocate(byteBufferSize);
          try {
            MessageFormatRecord.MessageHeader_Format_V2.serializeHeader(byteBuffer,
                blobEncryptionRecordSize + blobInfoSize + blobRecordSize,
                originalBlobPutReq.getBlobEncryptionKey() == null ? Message_Header_Invalid_Relative_Offset
                    : blobHeaderSize + key.sizeInBytes(),
                blobHeaderSize + key.sizeInBytes() + blobEncryptionRecordSize,
                Message_Header_Invalid_Relative_Offset,
                blobHeaderSize + key.sizeInBytes() + blobEncryptionRecordSize + blobPropertiesSize,
                blobHeaderSize + key.sizeInBytes() + blobEncryptionRecordSize + blobInfoSize);
          } catch (MessageFormatException e) {
            e.printStackTrace();
          }
          byteBuffer.put(key.toBytes());
          if (originalBlobPutReq.getBlobEncryptionKey() != null) {
            MessageFormatRecord.BlobEncryptionKey_Format_V1.serializeBlobEncryptionKeyRecord(byteBuffer,
                originalBlobPutReq.getBlobEncryptionKey().duplicate());
            msgMetadata = new MessageMetadata(originalBlobPutReq.getBlobEncryptionKey().duplicate());
          }
          MessageFormatRecord.BlobProperties_Format_V1.serializeBlobPropertiesRecord(byteBuffer, blobProperties);
          MessageFormatRecord.UserMetadata_Format_V1.serializeUserMetadataRecord(byteBuffer, userMetadata);
          int blobRecordStart = byteBuffer.position();
          switch (blobFormatVersion) {
            case MessageFormatRecord.Blob_Version_V2:
              MessageFormatRecord.Blob_Format_V2.serializePartialBlobRecord(byteBuffer,
                  (int) originalBlobPutReq.getBlobSize(), originalBlobPutReq.getBlobType());
              break;
            case MessageFormatRecord.Blob_Version_V1:
              MessageFormatRecord.Blob_Format_V1.serializePartialBlobRecord(byteBuffer,
                  (int) originalBlobPutReq.getBlobSize());
              break;
            default:
              throw new IllegalStateException("Blob format version " + blobFormatVersion + " not supported.");
          }
          byteBuffer.put(Utils.readBytesFromStream(originalBlobPutReq.getBlobStream(), (int) originalBlobPutReq.getBlobSize()));
          crc = new Crc32();
          crc.update(byteBuffer.array(), blobRecordStart, blobRecordSize - MessageFormatRecord.Crc_Size);
          byteBuffer.putLong(crc.getValue());
          break;
        default:
          throw new IOException("GetRequest flag is not supported: " + getRequest.getMessageFormatFlag());
      }
    } else if (processedError == ServerErrorCode.Blob_Deleted) {
      if (partitionError == ServerErrorCode.No_Error) {
        partitionError = ServerErrorCode.Blob_Deleted;
      }
      byteBuffer = ByteBuffer.allocate(0);
      byteBufferSize = 0;
    } else if (processedError == ServerErrorCode.Blob_Expired) {
      if (partitionError == ServerErrorCode.No_Error) {
        partitionError = ServerErrorCode.Blob_Expired;
      }
      byteBuffer = ByteBuffer.allocate(0);
      byteBufferSize = 0;
    } else if (processedError == ServerErrorCode.Blob_Authorization_Failure) {
      if (partitionError == ServerErrorCode.No_Error) {
        partitionError = ServerErrorCode.Blob_Authorization_Failure;
      }
      byteBuffer = ByteBuffer.allocate(0);
      byteBufferSize = 0;
    } else {
      if (partitionError == ServerErrorCode.No_Error) {
        partitionError = ServerErrorCode.Blob_Not_Found;
      }
      byteBuffer = ByteBuffer.allocate(0);
      byteBufferSize = 0;
    }
    byteBuffer.flip();
    ByteBufferSend responseSend = new ByteBufferSend(byteBuffer);
    List<MessageInfo> messageInfoList = new ArrayList<>();
    List<MessageMetadata> messageMetadataList = new ArrayList<>();
    List<PartitionResponseInfo> partitionResponseInfoList = new ArrayList<PartitionResponseInfo>();
    if (partitionError == ServerErrorCode.No_Error) {
      messageInfoList.add(new MessageInfo(key, byteBufferSize, false, blob.isTtlUpdated(), blob.isUndeleted(),
          blob.expiresAt, null, accountId, containerId, operationTimeMs, blob.lifeVersion));
      messageMetadataList.add(msgMetadata);
    }
    PartitionResponseInfo partitionResponseInfo = partitionError == ServerErrorCode.No_Error
        ? new PartitionResponseInfo(getRequest.getPartitionInfoList().get(0).getPartition(), messageInfoList,
            messageMetadataList)
        : new PartitionResponseInfo(getRequest.getPartitionInfoList().get(0).getPartition(), partitionError);
    partitionResponseInfoList.add(partitionResponseInfo);
    getResponse = new GetResponse(getRequest.getCorrelationId(), getRequest.getClientId(), partitionResponseInfoList,
        responseSend, serverError);
  } else {
    getResponse = new GetResponse(getRequest.getCorrelationId(), getRequest.getClientId(),
        new ArrayList<PartitionResponseInfo>(), new ByteBufferSend(ByteBuffer.allocate(0)), serverError);
  }
  return getResponse;
}
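The injected error surfaces at one of two levels: server-level errors go on the GetResponse itself, while blob-level errors (deleted, expired, not found, authorization failure, disk unavailable) go on the single PartitionResponseInfo inside an otherwise successful response. A minimal sketch of how a test might exercise this, assuming a mockServer that owns the blobs map and a blobId already stored in it (both names are placeholders, not from the Ambry sources):

// Build the single-partition, single-blob GetRequest this mock expects.
PartitionRequestInfo partitionInfo =
    new PartitionRequestInfo(blobId.getPartition(), Collections.singletonList(blobId));
GetRequest getRequest = new GetRequest(1234, "test-client", MessageFormatFlags.BlobInfo,
    Collections.singletonList(partitionInfo), GetOption.None);
// Inject Blob_Not_Found: the server-level error stays No_Error and the failure
// is reported on the PartitionResponseInfo instead.
GetResponse response = mockServer.makeGetResponse(getRequest, ServerErrorCode.Blob_Not_Found);
assert response.getError() == ServerErrorCode.No_Error;
assert response.getPartitionResponseInfoList().get(0).getErrorCode() == ServerErrorCode.Blob_Not_Found;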
Use of com.github.ambry.messageformat.MessageFormatException in project ambry by LinkedIn.
The class DumpDataHelper, method readSingleRecordFromLog.
/**
 * Fetches one blob record from the log.
 * @param randomAccessFile {@link RandomAccessFile} referring to the log file.
 * @param currentOffset the offset from which to read the record.
 * @param clusterMap the {@link ClusterMap} object to use to generate the BlobId.
 * @param currentTimeInMs the current time in ms, used to determine expiration.
 * @param metrics the {@link StoreToolsMetrics} instance.
 * @return the {@link LogBlobRecordInfo} containing the blob record info.
 * @throws IOException
 * @throws MessageFormatException
 */
static LogBlobRecordInfo readSingleRecordFromLog(RandomAccessFile randomAccessFile, long currentOffset,
    ClusterMap clusterMap, long currentTimeInMs, StoreToolsMetrics metrics) throws IOException, MessageFormatException {
  String messageheader = null;
  BlobId blobId = null;
  String encryptionKey = null;
  String blobProperty = null;
  String usermetadata = null;
  String blobDataOutput = null;
  String deleteMsg = null;
  boolean isDeleted = false;
  boolean isExpired = false;
  long expiresAtMs = -1;
  int totalRecordSize = 0;
  final Timer.Context context = metrics.readSingleBlobRecordFromLogTimeMs.time();
  try {
    randomAccessFile.seek(currentOffset);
    short version = randomAccessFile.readShort();
    MessageFormatRecord.MessageHeader_Format header = null;
    if (version == MessageFormatRecord.Message_Header_Version_V1) {
      ByteBuffer buffer = ByteBuffer.allocate(MessageFormatRecord.MessageHeader_Format_V1.getHeaderSize());
      buffer.putShort(version);
      randomAccessFile.read(buffer.array(), 2, buffer.capacity() - 2);
      buffer.clear();
      header = new MessageFormatRecord.MessageHeader_Format_V1(buffer);
      messageheader = " Header - version " + header.getVersion() + " messagesize " + header.getMessageSize()
          + " currentOffset " + currentOffset
          + " blobPropertiesRelativeOffset " + header.getBlobPropertiesRecordRelativeOffset()
          + " userMetadataRelativeOffset " + header.getUserMetadataRecordRelativeOffset()
          + " dataRelativeOffset " + header.getBlobRecordRelativeOffset()
          + " crc " + header.getCrc();
      totalRecordSize += header.getMessageSize() + buffer.capacity();
    } else if (version == MessageFormatRecord.Message_Header_Version_V2) {
      ByteBuffer buffer = ByteBuffer.allocate(MessageFormatRecord.MessageHeader_Format_V2.getHeaderSize());
      buffer.putShort(version);
      randomAccessFile.read(buffer.array(), 2, buffer.capacity() - 2);
      buffer.clear();
      header = new MessageFormatRecord.MessageHeader_Format_V2(buffer);
      messageheader = " Header - version " + header.getVersion() + " messagesize " + header.getMessageSize()
          + " currentOffset " + currentOffset
          + " blobEncryptionKeyRelativeOffset " + header.getBlobEncryptionKeyRecordRelativeOffset()
          + " blobPropertiesRelativeOffset " + header.getBlobPropertiesRecordRelativeOffset()
          + " userMetadataRelativeOffset " + header.getUserMetadataRecordRelativeOffset()
          + " dataRelativeOffset " + header.getBlobRecordRelativeOffset()
          + " crc " + header.getCrc();
      totalRecordSize += header.getMessageSize() + buffer.capacity();
    } else if (version == MessageFormatRecord.Message_Header_Version_V3) {
      ByteBuffer buffer = ByteBuffer.allocate(MessageFormatRecord.MessageHeader_Format_V3.getHeaderSize());
      buffer.putShort(version);
      randomAccessFile.read(buffer.array(), 2, buffer.capacity() - 2);
      buffer.clear();
      header = new MessageFormatRecord.MessageHeader_Format_V3(buffer);
      messageheader = " Header - version " + header.getVersion() + " messagesize " + header.getMessageSize()
          + " currentOffset " + currentOffset
          + " blobEncryptionKeyRelativeOffset " + header.getBlobEncryptionKeyRecordRelativeOffset()
          + " blobPropertiesRelativeOffset " + header.getBlobPropertiesRecordRelativeOffset()
          + " userMetadataRelativeOffset " + header.getUserMetadataRecordRelativeOffset()
          + " dataRelativeOffset " + header.getBlobRecordRelativeOffset()
          + " crc " + header.getCrc();
      totalRecordSize += header.getMessageSize() + buffer.capacity();
    } else {
      throw new MessageFormatException("Header version not supported " + version, MessageFormatErrorCodes.IO_Error);
    }
    // read blob id
    InputStream streamlog = Channels.newInputStream(randomAccessFile.getChannel());
    blobId = new BlobId(new DataInputStream(streamlog), clusterMap);
    totalRecordSize += blobId.sizeInBytes();
    if (header.getBlobPropertiesRecordRelativeOffset() != MessageFormatRecord.Message_Header_Invalid_Relative_Offset) {
      ByteBuffer blobEncryptionKey = null;
      if (header.hasEncryptionKeyRecord()) {
        blobEncryptionKey = MessageFormatRecord.deserializeBlobEncryptionKey(streamlog);
        encryptionKey = "EncryptionKey found which is of size " + blobEncryptionKey.remaining();
      }
      BlobProperties props = MessageFormatRecord.deserializeBlobProperties(streamlog);
      expiresAtMs = Utils.addSecondsToEpochTime(props.getCreationTimeInMs(), props.getTimeToLiveInSeconds());
      isExpired = isExpired(expiresAtMs, currentTimeInMs);
      blobProperty = " Blob properties - blobSize " + props.getBlobSize() + " serviceId " + props.getServiceId()
          + ", isExpired " + isExpired + " accountId " + props.getAccountId() + " containerId " + props.getContainerId();
      ByteBuffer metadata = MessageFormatRecord.deserializeUserMetadata(streamlog);
      usermetadata = " Metadata - size " + metadata.capacity();
      BlobData blobData = MessageFormatRecord.deserializeBlob(streamlog);
      blobDataOutput = "Blob - size " + blobData.getSize();
    } else {
      UpdateRecord updateRecord = MessageFormatRecord.deserializeUpdateRecord(streamlog);
      switch (updateRecord.getType()) {
        case DELETE:
          isDeleted = true;
          deleteMsg = "delete change : AccountId:" + updateRecord.getAccountId() + ", ContainerId:"
              + updateRecord.getContainerId() + ", DeletionTimeInSecs:" + updateRecord.getUpdateTimeInMs();
          break;
        default:
          // TODO (TTL update): handle TTL update
          throw new IllegalStateException("Unrecognized update record type: " + updateRecord.getType());
      }
    }
    return new LogBlobRecordInfo(messageheader, blobId, encryptionKey, blobProperty, usermetadata, blobDataOutput,
        deleteMsg, isDeleted, isExpired, expiresAtMs, totalRecordSize);
  } finally {
    context.stop();
  }
}
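Because the returned LogBlobRecordInfo carries the total size of the record just read, a caller can walk an entire log sequentially. A rough sketch, assuming public fields named after the constructor arguments above and reusing the randomAccessFile, clusterMap, and metrics already in scope:

long offset = 0;
long fileSize = randomAccessFile.length();
while (offset < fileSize) {
  LogBlobRecordInfo info = DumpDataHelper.readSingleRecordFromLog(randomAccessFile, offset, clusterMap,
      System.currentTimeMillis(), metrics);
  System.out.println(info.blobId + (info.isDeleted ? " [deleted]" : ""));
  // Each record reports its own size, so the next record starts immediately after it.
  offset += info.totalRecordSize;
}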
Use of com.github.ambry.messageformat.MessageFormatException in project ambry by LinkedIn.
The class BlobValidator, method getRecordFromNode.
/**
 * Gets the {@link ServerResponse} from {@code dataNodeId} for {@code blobId}.
 * @param dataNodeId the {@link DataNodeId} to query.
 * @param blobId the {@link BlobId} to operate on.
 * @param getOption the {@link GetOption} to use with the {@link com.github.ambry.protocol.GetRequest}.
 * @param clusterMap the {@link ClusterMap} instance to use.
 * @param storeKeyFactory the {@link StoreKeyFactory} to use.
 * @return the {@link ServerResponse} from {@code dataNodeId} for {@code blobId}.
 * @throws InterruptedException
 */
private ServerResponse getRecordFromNode(DataNodeId dataNodeId, BlobId blobId, GetOption getOption,
    ClusterMap clusterMap, StoreKeyFactory storeKeyFactory) throws InterruptedException {
  LOGGER.debug("Getting {} from {}", blobId, dataNodeId);
  ServerResponse serverResponse;
  try {
    Pair<ServerErrorCode, BlobAll> response =
        serverAdminTool.getAll(dataNodeId, blobId, getOption, clusterMap, storeKeyFactory);
    ServerErrorCode errorCode = response.getFirst();
    if (errorCode == ServerErrorCode.No_Error) {
      BlobAll blobAll = response.getSecond();
      ByteBuf buffer = blobAll.getBlobData().content();
      byte[] blobBytes = new byte[buffer.readableBytes()];
      buffer.readBytes(blobBytes);
      buffer.release();
      serverResponse = new ServerResponse(errorCode, blobAll.getStoreKey(), blobAll.getBlobInfo().getBlobProperties(),
          blobAll.getBlobInfo().getUserMetadata(), blobBytes, blobAll.getBlobEncryptionKey());
    } else {
      serverResponse = new ServerResponse(errorCode, null, null, null, null, null);
    }
  } catch (MessageFormatException e) {
    LOGGER.error("Error while deserializing record for {} from {}", blobId, dataNodeId, e);
    serverResponse = new ServerResponse(ServerErrorCode.Data_Corrupt, null, null, null, null, null);
  } catch (Exception e) {
    LOGGER.error("Error while getting record for {} from {}", blobId, dataNodeId, e);
    serverResponse = new ServerResponse(ServerErrorCode.Unknown_Error, null, null, null, null, null);
  } finally {
    throttler.maybeThrottle(1);
  }
  LOGGER.debug("ServerError code is {} for blob {} from {}", serverResponse.serverErrorCode, blobId, dataNodeId);
  return serverResponse;
}
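BlobValidator's purpose is to compare what each replica returns for the same blob. A hedged sketch of that comparison, where the consistency bookkeeping is an assumption about the surrounding tool rather than its actual code (ServerResponse is assumed to implement a value-based equals):

// Query every replica of the blob's partition and flag any disagreement.
Map<DataNodeId, ServerResponse> responses = new HashMap<>();
for (ReplicaId replica : blobId.getPartition().getReplicaIds()) {
  DataNodeId node = replica.getDataNodeId();
  responses.put(node, getRecordFromNode(node, blobId, GetOption.Include_All, clusterMap, storeKeyFactory));
}
ServerResponse first = responses.values().iterator().next();
boolean consistent = responses.values().stream().allMatch(first::equals);
System.out.println(blobId + (consistent ? " is consistent" : " DIFFERS across replicas " + responses.keySet()));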
Use of com.github.ambry.messageformat.MessageFormatException in project ambry by LinkedIn.
The class HardDeleteVerifier, method verify.
private void verify(String dataDir) throws Exception {
  final String Cleanup_Token_Filename = "cleanuptoken";
  FileWriter fileWriter = null;
  try {
    fileWriter = new FileWriter(new File(outFile));
    long offsetInCleanupToken = getOffsetFromCleanupToken(new File(dataDir, Cleanup_Token_Filename));
    rangeMap = new HashMap<BlobId, IndexValue>();
    offRangeMap = new HashMap<BlobId, IndexValue>();
    long lastEligibleSegmentEndOffset = readAndPopulateIndex(offsetInCleanupToken);
    // 2. Scan the log and check against blobMap
    File logFile = new File(dataDir, "log_current");
    RandomAccessFile randomAccessFile = new RandomAccessFile(logFile, "r");
    InputStream streamlog = Channels.newInputStream(randomAccessFile.getChannel());
    long currentOffset = 0;
    System.out.println("Starting scan from offset " + currentOffset + " to " + offsetInCleanupToken);
    long lastOffsetToLookFor = lastEligibleSegmentEndOffset;
    boolean seeking = false;
    while (currentOffset < lastOffsetToLookFor) {
      try {
        short version = randomAccessFile.readShort();
        if (version == 1) {
          seeking = false;
          ByteBuffer buffer = ByteBuffer.allocate(MessageFormatRecord.MessageHeader_Format_V1.getHeaderSize());
          buffer.putShort(version);
          randomAccessFile.read(buffer.array(), 2, buffer.capacity() - 2);
          buffer.rewind();
          MessageFormatRecord.MessageHeader_Format_V1 header = new MessageFormatRecord.MessageHeader_Format_V1(buffer);
          // read blob id
          BlobId id;
          id = new BlobId(new DataInputStream(streamlog), map);
          IndexValue indexValue = rangeMap.get(id);
          boolean isDeleted = false;
          if (indexValue == null) {
            throw new IllegalStateException("Key in log not found in index " + id);
          } else if (indexValue.isFlagSet(IndexValue.Flags.Delete_Index)) {
            isDeleted = true;
          }
          if (header.getBlobPropertiesRecordRelativeOffset() != MessageFormatRecord.Message_Header_Invalid_Relative_Offset) {
            BlobProperties props;
            ByteBuffer metadata;
            BlobData output;
            try {
              props = MessageFormatRecord.deserializeBlobProperties(streamlog);
              metadata = MessageFormatRecord.deserializeUserMetadata(streamlog);
              output = MessageFormatRecord.deserializeBlob(streamlog);
            } catch (MessageFormatException e) {
              if (!isDeleted) {
                corruptNonDeleted++;
              } else {
                corruptDeleted++;
              }
              throw e;
            }
            if (isDeleted) {
              ByteBuf byteBuf = output.content();
              try {
                if (!verifyZeroed(metadata.array()) || !verifyZeroed(
                    Utils.readBytesFromByteBuf(byteBuf, new byte[(int) output.getSize()], 0, (int) output.getSize()))) {
                  /* If the offset in the index is different from that in the log, hard delete wouldn't have been
                     possible and we just saw a duplicate put for the same key, otherwise we missed a hard delete. */
                  if (currentOffset == indexValue.getOriginalMessageOffset()) {
                    notHardDeletedErrorCount++;
                  } else {
                    // the assumption here is that this put has been lost as far as the index is concerned due to
                    // a duplicate put. Of course, these shouldn't happen anymore, we are accounting for past
                    // bugs.
                    duplicatePuts++;
                  }
                } else {
                  hardDeletedPuts++;
                }
              } finally {
                byteBuf.release();
              }
            } else {
              unDeletedPuts++;
            }
          } else if (MessageFormatRecord.deserializeUpdateRecord(streamlog).getType().equals(SubRecord.Type.DELETE)) {
            deletes++;
          }
          currentOffset += (header.getMessageSize() + buffer.capacity() + id.sizeInBytes());
        } else {
          throw new IllegalStateException("Unknown version for entry");
        }
      } catch (MessageFormatException | IOException | IllegalArgumentException | IllegalStateException e) {
        if (!seeking) {
          invalidEntriesInlog = true;
          e.printStackTrace();
          seeking = true;
        }
        randomAccessFile.seek(++currentOffset);
      } catch (Exception e) {
        e.printStackTrace(System.err);
        invalidEntriesInlog = true;
        randomAccessFile.seek(++currentOffset);
        break;
      }
    }
    String msg = ("\n============");
    msg += "\ninvalidEntriesInlog? " + (invalidEntriesInlog ? "Yes" : "No");
    msg += "\nnotHardDeletedErrorCount: " + notHardDeletedErrorCount;
    msg += "\ncorruptNonDeletedCount:" + corruptNonDeleted;
    msg += "\n========";
    msg += "\ncorruptDeleted:" + corruptDeleted;
    msg += "\nduplicatePuts: " + duplicatePuts;
    msg += "\nundeleted Put Records: " + unDeletedPuts;
    msg += "\nhard deleted Put Records: " + hardDeletedPuts;
    msg += "\nDelete Records: " + deletes;
    msg += "\n============";
    fileWriter.write(msg);
    System.out.println(msg);
  } finally {
    if (fileWriter != null) {
      fileWriter.flush();
      fileWriter.close();
    }
  }
}
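The verifyZeroed helper that verify leans on is not shown in this excerpt; its job is just to confirm that hard delete blanked the user metadata and blob content. A plausible implementation is a scan for any non-zero byte:

// Plausible sketch of the helper used above (the real implementation may differ):
// a hard-deleted record's metadata and content should read back as all zeros.
private boolean verifyZeroed(byte[] bytes) {
  for (byte b : bytes) {
    if (b != 0) {
      return false;
    }
  }
  return true;
}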
Use of com.github.ambry.messageformat.MessageFormatException in project ambry by LinkedIn.
The class HardDeleteVerifier, method deserializeUserMetadataAndBlob.
boolean deserializeUserMetadataAndBlob(InputStream streamlog, InputStream oldStreamlog, boolean isDeleted)
    throws ContinueException {
  boolean caughtException = false;
  boolean caughtExceptionInOld = false;
  ByteBuffer usermetadata = null;
  ByteBuffer oldUsermetadata = null;
  BlobData blobData = null;
  BlobData oldBlobData = null;
  try {
    usermetadata = MessageFormatRecord.deserializeUserMetadata(streamlog);
    blobData = MessageFormatRecord.deserializeBlob(streamlog);
  } catch (MessageFormatException | IOException e) {
    caughtException = true;
  }
  try {
    oldUsermetadata = MessageFormatRecord.deserializeUserMetadata(oldStreamlog);
    oldBlobData = MessageFormatRecord.deserializeBlob(oldStreamlog);
  } catch (MessageFormatException | IOException e) {
    caughtExceptionInOld = true;
  }
  boolean asExpected;
  if (!caughtException) {
    if (isDeleted) {
      ByteBuf byteBuf = blobData.content();
      try {
        asExpected = verifyZeroed(usermetadata.array()) && verifyZeroed(
            Utils.readBytesFromByteBuf(byteBuf, new byte[(int) blobData.getSize()], 0, (int) blobData.getSize()));
      } catch (IOException e) {
        asExpected = false;
      } finally {
        byteBuf.release();
      }
    } else {
      ByteBuf byteBuf = blobData.content();
      ByteBuf oldByteBuf = oldBlobData.content();
      try {
        asExpected = Arrays.equals(usermetadata.array(), oldUsermetadata.array())
            && Arrays.equals(
                Utils.readBytesFromByteBuf(byteBuf, new byte[(int) blobData.getSize()], 0, (int) blobData.getSize()),
                Utils.readBytesFromByteBuf(oldByteBuf, new byte[(int) oldBlobData.getSize()], 0,
                    (int) oldBlobData.getSize()));
      } catch (IOException e) {
        asExpected = false;
      } finally {
        byteBuf.release();
        oldByteBuf.release();
      }
    }
    return asExpected;
  } else if (!caughtExceptionInOld) {
    if (isDeleted) {
      corruptDeleted++;
    } else {
      corruptNonDeleted++;
    }
    throw new ContinueException("records did not deserialize");
  } else {
    throw new ContinueException("records did not deserialize in either.");
  }
}
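ContinueException tells the caller to skip the current record rather than abort the whole scan; the corruption counters were already bumped before the throw. A sketch of the assumed call-site pattern (the idsToCheck work list, isDeletedInIndex lookup, and mismatches counter are hypothetical):

for (BlobId id : idsToCheck) {
  try {
    if (!deserializeUserMetadataAndBlob(streamlog, oldStreamlog, isDeletedInIndex(id))) {
      mismatches++; // hypothetical counter for records that deserialized but did not match
    }
  } catch (ContinueException e) {
    // One or both logs failed to deserialize; bookkeeping is done, move to the next record.
  }
}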