Use of com.github.ambry.messageformat.BlobData in project ambry by linkedin.
The class ServerAdminTool, method getBlob.
/**
* Gets blob data for {@code blobId}.
* @param dataNodeId the {@link DataNodeId} to contact.
* @param blobId the {@link BlobId} to operate on.
* @param getOption the {@link GetOption} to send with the {@link GetRequest}.
* @param clusterMap the {@link ClusterMap} to use.
* @return the {@link ServerErrorCode} and {@link BlobData} for {@code blobId}.
* @throws Exception if sending the {@link GetRequest} or deserializing the response fails.
*/
public Pair<ServerErrorCode, BlobData> getBlob(DataNodeId dataNodeId, BlobId blobId, GetOption getOption,
    ClusterMap clusterMap) throws Exception {
  Pair<ServerErrorCode, InputStream> response =
      getGetResponse(dataNodeId, blobId, MessageFormatFlags.Blob, getOption, clusterMap);
  InputStream stream = response.getSecond();
  BlobData blobData = stream != null ? MessageFormatRecord.deserializeBlob(stream) : null;
  return new Pair<>(response.getFirst(), blobData);
}
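A minimal usage sketch, assuming an initialized ServerAdminTool (here serverAdminTool) and already-resolved dataNodeId, blobId, and clusterMap; the success check against ServerErrorCode.No_Error and the variable names are illustrative, not taken from the snippet above:

// Hypothetical caller: fetch a blob from a specific data node and copy its content out.
Pair<ServerErrorCode, BlobData> result =
    serverAdminTool.getBlob(dataNodeId, blobId, GetOption.Include_All, clusterMap);
if (result.getFirst() == ServerErrorCode.No_Error && result.getSecond() != null) {
  BlobData blobData = result.getSecond();
  // BlobData wraps a reference-counted Netty ByteBuf, so release it once the bytes are copied.
  ByteBuf content = blobData.content();
  try {
    byte[] bytes = new byte[(int) blobData.getSize()];
    content.readBytes(bytes);
  } finally {
    content.release();
  }
}

Note that a null BlobData is possible: getBlob deserializes one only when the server actually returned a response stream.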
Use of com.github.ambry.messageformat.BlobData in project ambry by linkedin.
The class HardDeleteVerifier, method verify.
private void verify(String dataDir) throws Exception {
  final String Cleanup_Token_Filename = "cleanuptoken";
  FileWriter fileWriter = null;
  try {
    fileWriter = new FileWriter(new File(outFile));
    long offsetInCleanupToken = getOffsetFromCleanupToken(new File(dataDir, Cleanup_Token_Filename));
    rangeMap = new HashMap<BlobId, IndexValue>();
    offRangeMap = new HashMap<BlobId, IndexValue>();
    long lastEligibleSegmentEndOffset = readAndPopulateIndex(offsetInCleanupToken);
    // 2. Scan the log and check against blobMap
    File logFile = new File(dataDir, "log_current");
    RandomAccessFile randomAccessFile = new RandomAccessFile(logFile, "r");
    InputStream streamlog = Channels.newInputStream(randomAccessFile.getChannel());
    long currentOffset = 0;
    System.out.println("Starting scan from offset " + currentOffset + " to " + offsetInCleanupToken);
    long lastOffsetToLookFor = lastEligibleSegmentEndOffset;
    boolean seeking = false;
    while (currentOffset < lastOffsetToLookFor) {
      try {
        short version = randomAccessFile.readShort();
        if (version == 1) {
          seeking = false;
          ByteBuffer buffer = ByteBuffer.allocate(MessageFormatRecord.MessageHeader_Format_V1.getHeaderSize());
          buffer.putShort(version);
          randomAccessFile.read(buffer.array(), 2, buffer.capacity() - 2);
          buffer.rewind();
          MessageFormatRecord.MessageHeader_Format_V1 header =
              new MessageFormatRecord.MessageHeader_Format_V1(buffer);
          // read blob id
          BlobId id;
          id = new BlobId(new DataInputStream(streamlog), map);
          IndexValue indexValue = rangeMap.get(id);
          boolean isDeleted = false;
          if (indexValue == null) {
            throw new IllegalStateException("Key in log not found in index " + id);
          } else if (indexValue.isFlagSet(IndexValue.Flags.Delete_Index)) {
            isDeleted = true;
          }
          if (header.getBlobPropertiesRecordRelativeOffset()
              != MessageFormatRecord.Message_Header_Invalid_Relative_Offset) {
            BlobProperties props;
            ByteBuffer metadata;
            BlobData output;
            try {
              props = MessageFormatRecord.deserializeBlobProperties(streamlog);
              metadata = MessageFormatRecord.deserializeUserMetadata(streamlog);
              output = MessageFormatRecord.deserializeBlob(streamlog);
            } catch (MessageFormatException e) {
              if (!isDeleted) {
                corruptNonDeleted++;
              } else {
                corruptDeleted++;
              }
              throw e;
            }
            if (isDeleted) {
              ByteBuf byteBuf = output.content();
              try {
                if (!verifyZeroed(metadata.array()) || !verifyZeroed(
                    Utils.readBytesFromByteBuf(byteBuf, new byte[(int) output.getSize()], 0, (int) output.getSize()))) {
                  /* If the offset in the index is different from that in the log, hard delete wouldn't have been
                     possible and we just saw a duplicate put for the same key, otherwise we missed a hard delete. */
                  if (currentOffset == indexValue.getOriginalMessageOffset()) {
                    notHardDeletedErrorCount++;
                  } else {
                    // the assumption here is that this put has been lost as far as the index is concerned due to
                    // a duplicate put. Of course, these shouldn't happen anymore, we are accounting for past
                    // bugs.
                    duplicatePuts++;
                  }
                } else {
                  hardDeletedPuts++;
                }
              } finally {
                byteBuf.release();
              }
            } else {
              unDeletedPuts++;
            }
          } else if (MessageFormatRecord.deserializeUpdateRecord(streamlog).getType().equals(SubRecord.Type.DELETE)) {
            deletes++;
          }
          currentOffset += (header.getMessageSize() + buffer.capacity() + id.sizeInBytes());
        } else {
          throw new IllegalStateException("Unknown version for entry");
        }
      } catch (MessageFormatException e) {
        if (!seeking) {
          invalidEntriesInlog = true;
          e.printStackTrace();
          seeking = true;
        }
        randomAccessFile.seek(++currentOffset);
      } catch (IOException e) {
        if (!seeking) {
          invalidEntriesInlog = true;
          e.printStackTrace();
          seeking = true;
        }
        randomAccessFile.seek(++currentOffset);
      } catch (IllegalArgumentException e) {
        if (!seeking) {
          invalidEntriesInlog = true;
          e.printStackTrace();
          seeking = true;
        }
        randomAccessFile.seek(++currentOffset);
      } catch (IllegalStateException e) {
        if (!seeking) {
          invalidEntriesInlog = true;
          e.printStackTrace();
          seeking = true;
        }
        randomAccessFile.seek(++currentOffset);
      } catch (Exception e) {
        e.printStackTrace(System.err);
        invalidEntriesInlog = true;
        randomAccessFile.seek(++currentOffset);
        break;
      }
    }
    String msg = ("\n============");
    msg += "\ninvalidEntriesInlog? " + (invalidEntriesInlog ? "Yes" : "No");
    msg += "\nnotHardDeletedErrorCount: " + notHardDeletedErrorCount;
    msg += "\ncorruptNonDeletedCount:" + corruptNonDeleted;
    msg += "\n========";
    msg += "\ncorruptDeleted:" + corruptDeleted;
    msg += "\nduplicatePuts: " + duplicatePuts;
    msg += "\nundeleted Put Records: " + unDeletedPuts;
    msg += "\nhard deleted Put Records: " + hardDeletedPuts;
    msg += "\nDelete Records: " + deletes;
    msg += "\n============";
    fileWriter.write(msg);
    System.out.println(msg);
  } finally {
    if (fileWriter != null) {
      fileWriter.flush();
      fileWriter.close();
    }
  }
}
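The scan above leans on a verifyZeroed helper that is not part of this excerpt. A plausible minimal sketch of such a check (an assumption about its shape, not the project's actual implementation):

// Hypothetical helper: a record counts as hard deleted only if every content byte is zero.
private boolean verifyZeroed(byte[] bytes) {
  for (byte b : bytes) {
    if (b != 0) {
      return false;
    }
  }
  return true;
}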
Use of com.github.ambry.messageformat.BlobData in project ambry by linkedin.
The class HardDeleteVerifier, method deserializeUserMetadataAndBlob.
boolean deserializeUserMetadataAndBlob(InputStream streamlog, InputStream oldStreamlog, boolean isDeleted)
    throws ContinueException {
  boolean caughtException = false;
  boolean caughtExceptionInOld = false;
  ByteBuffer usermetadata = null;
  ByteBuffer oldUsermetadata = null;
  BlobData blobData = null;
  BlobData oldBlobData = null;
  try {
    usermetadata = MessageFormatRecord.deserializeUserMetadata(streamlog);
    blobData = MessageFormatRecord.deserializeBlob(streamlog);
  } catch (MessageFormatException e) {
    caughtException = true;
  } catch (IOException e) {
    caughtException = true;
  }
  try {
    oldUsermetadata = MessageFormatRecord.deserializeUserMetadata(oldStreamlog);
    oldBlobData = MessageFormatRecord.deserializeBlob(oldStreamlog);
  } catch (MessageFormatException e) {
    caughtExceptionInOld = true;
  } catch (IOException e) {
    caughtExceptionInOld = true;
  }
  boolean asExpected;
  if (!caughtException) {
    if (isDeleted) {
      ByteBuf byteBuf = blobData.content();
      try {
        asExpected = verifyZeroed(usermetadata.array()) && verifyZeroed(
            Utils.readBytesFromByteBuf(byteBuf, new byte[(int) blobData.getSize()], 0, (int) blobData.getSize()));
      } catch (IOException e) {
        asExpected = false;
      } finally {
        byteBuf.release();
      }
    } else {
      ByteBuf byteBuf = blobData.content();
      ByteBuf oldByteBuf = oldBlobData.content();
      try {
        asExpected = Arrays.equals(usermetadata.array(), oldUsermetadata.array())
            && Arrays.equals(
                Utils.readBytesFromByteBuf(byteBuf, new byte[(int) blobData.getSize()], 0, (int) blobData.getSize()),
                Utils.readBytesFromByteBuf(oldByteBuf, new byte[(int) oldBlobData.getSize()], 0,
                    (int) oldBlobData.getSize()));
      } catch (IOException e) {
        asExpected = false;
      } finally {
        byteBuf.release();
        oldByteBuf.release();
      }
    }
    return asExpected;
  } else if (!caughtExceptionInOld) {
    if (isDeleted) {
      corruptDeleted++;
    } else {
      corruptNonDeleted++;
    }
    throw new ContinueException("records did not deserialize");
  } else {
    throw new ContinueException("records did not deserialize in either.");
  }
}
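ContinueException is likewise not shown in this excerpt; since the method both declares and throws it with a message, it is presumably a small checked exception local to the verifier, used as a signal to skip the current record and continue the scan. A hypothetical sketch:

// Assumed shape: a checked exception carrying only a message, used purely for control flow.
class ContinueException extends Exception {
  ContinueException(String message) {
    super(message);
  }
}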
Use of com.github.ambry.messageformat.BlobData in project ambry by linkedin.
The class ServerHardDeleteTest, method getAndVerify.
/**
* Fetches the blob (for all {@link MessageFormatFlags}) and verifies the content.
* @param channel the {@link BlockingChannel} to use to send and receive data
* @param blobsCount the total number of blobs to fetch and verify
* @throws Exception
*/
void getAndVerify(BlockingChannel channel, int blobsCount) throws Exception {
  ArrayList<PartitionRequestInfo> partitionRequestInfoList = new ArrayList<>();
  ArrayList<BlobId> ids = new ArrayList<>();
  for (int i = 0; i < blobsCount; i++) {
    ids.add(blobIdList.get(i));
  }
  PartitionRequestInfo partitionRequestInfo = new PartitionRequestInfo(blobIdList.get(0).getPartition(), ids);
  partitionRequestInfoList.add(partitionRequestInfo);
  ArrayList<MessageFormatFlags> flags = new ArrayList<>();
  flags.add(MessageFormatFlags.BlobProperties);
  flags.add(MessageFormatFlags.BlobUserMetadata);
  flags.add(MessageFormatFlags.Blob);
  for (MessageFormatFlags flag : flags) {
    GetRequest getRequest = new GetRequest(1, "clientid2", flag, partitionRequestInfoList, GetOption.Include_All);
    channel.send(getRequest);
    InputStream stream = channel.receive().getInputStream();
    GetResponse resp = GetResponse.readFrom(new DataInputStream(stream), mockClusterMap);
    if (flag == MessageFormatFlags.BlobProperties) {
      for (int i = 0; i < blobsCount; i++) {
        BlobProperties propertyOutput = MessageFormatRecord.deserializeBlobProperties(resp.getInputStream());
        Assert.assertEquals(properties.get(i).getBlobSize(), propertyOutput.getBlobSize());
        Assert.assertEquals("serviceid1", propertyOutput.getServiceId());
        Assert.assertEquals("AccountId mismatch", properties.get(i).getAccountId(), propertyOutput.getAccountId());
        Assert.assertEquals("ContainerId mismatch", properties.get(i).getContainerId(),
            propertyOutput.getContainerId());
      }
    } else if (flag == MessageFormatFlags.BlobUserMetadata) {
      for (int i = 0; i < blobsCount; i++) {
        ByteBuffer userMetadataOutput = MessageFormatRecord.deserializeUserMetadata(resp.getInputStream());
        Assert.assertArrayEquals(userMetadataOutput.array(), usermetadata.get(i));
      }
    } else if (flag == MessageFormatFlags.Blob) {
      for (int i = 0; i < blobsCount; i++) {
        BlobData blobData = MessageFormatRecord.deserializeBlob(resp.getInputStream());
        Assert.assertEquals(properties.get(i).getBlobSize(), blobData.getSize());
        byte[] dataOutput = new byte[(int) blobData.getSize()];
        blobData.getStream().read(dataOutput);
        Assert.assertArrayEquals(dataOutput, data.get(i));
      }
    } else {
      throw new IllegalArgumentException("Unrecognized message format flag " + flag);
    }
  }
}
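One caveat in the Blob branch: InputStream.read(byte[]) may return before filling the whole array, so the assertion could in principle compare a partially filled buffer. A defensive alternative, sketched here as a generic utility (assumes java.io imports; not part of the test above), loops until the buffer is full:

static void readFully(InputStream in, byte[] dest) throws IOException {
  int offset = 0;
  while (offset < dest.length) {
    // read() returns the number of bytes actually read, or -1 at end of stream.
    int read = in.read(dest, offset, dest.length - offset);
    if (read == -1) {
      throw new EOFException("stream ended after " + offset + " bytes");
    }
    offset += read;
  }
}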