Use of com.github.ambry.messageformat.BlobProperties in project ambry by LinkedIn.
The class DumpDataHelper, method readSingleRecordFromLog.
/**
* Fetches one blob record from the log
* @param randomAccessFile {@link RandomAccessFile} referring to the log file
* @param currentOffset the offset from which to read the record
* @param clusterMap the {@link ClusterMap} object to use to generate BlobId
* @param currentTimeInMs current time in ms to determine expiration
* @param metrics {@link StoreToolsMetrics} instance
* @return the {@link LogBlobRecordInfo} containing the blob record info
* @throws IOException if there is an I/O error while reading from the log file
* @throws MessageFormatException if the bytes at the given offset do not form a valid message format record
*/
static LogBlobRecordInfo readSingleRecordFromLog(RandomAccessFile randomAccessFile, long currentOffset, ClusterMap clusterMap, long currentTimeInMs, StoreToolsMetrics metrics) throws IOException, MessageFormatException {
String messageheader = null;
BlobId blobId = null;
String encryptionKey = null;
String blobProperty = null;
String usermetadata = null;
String blobDataOutput = null;
String deleteMsg = null;
boolean isDeleted = false;
boolean isExpired = false;
long expiresAtMs = -1;
int totalRecordSize = 0;
final Timer.Context context = metrics.readSingleBlobRecordFromLogTimeMs.time();
try {
randomAccessFile.seek(currentOffset);
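// The first two bytes of a record hold the header version; read it, then parse the full header in the matching format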
short version = randomAccessFile.readShort();
MessageFormatRecord.MessageHeader_Format header = null;
if (version == MessageFormatRecord.Message_Header_Version_V1) {
ByteBuffer buffer = ByteBuffer.allocate(MessageFormatRecord.MessageHeader_Format_V1.getHeaderSize());
buffer.putShort(version);
randomAccessFile.read(buffer.array(), 2, buffer.capacity() - 2);
buffer.clear();
header = new MessageFormatRecord.MessageHeader_Format_V1(buffer);
messageheader = " Header - version " + header.getVersion() + " messagesize " + header.getMessageSize() + " currentOffset " + currentOffset + " blobPropertiesRelativeOffset " + header.getBlobPropertiesRecordRelativeOffset() + " userMetadataRelativeOffset " + header.getUserMetadataRecordRelativeOffset() + " dataRelativeOffset " + header.getBlobRecordRelativeOffset() + " crc " + header.getCrc();
totalRecordSize += header.getMessageSize() + buffer.capacity();
} else if (version == MessageFormatRecord.Message_Header_Version_V2) {
ByteBuffer buffer = ByteBuffer.allocate(MessageFormatRecord.MessageHeader_Format_V2.getHeaderSize());
buffer.putShort(version);
randomAccessFile.read(buffer.array(), 2, buffer.capacity() - 2);
buffer.clear();
header = new MessageFormatRecord.MessageHeader_Format_V2(buffer);
messageheader = " Header - version " + header.getVersion() + " messagesize " + header.getMessageSize() + " currentOffset " + currentOffset + " blobEncryptionKeyRelativeOffset " + header.getBlobEncryptionKeyRecordRelativeOffset() + " blobPropertiesRelativeOffset " + header.getBlobPropertiesRecordRelativeOffset() + " userMetadataRelativeOffset " + header.getUserMetadataRecordRelativeOffset() + " dataRelativeOffset " + header.getBlobRecordRelativeOffset() + " crc " + header.getCrc();
totalRecordSize += header.getMessageSize() + buffer.capacity();
} else if (version == MessageFormatRecord.Message_Header_Version_V3) {
ByteBuffer buffer = ByteBuffer.allocate(MessageFormatRecord.MessageHeader_Format_V3.getHeaderSize());
buffer.putShort(version);
randomAccessFile.read(buffer.array(), 2, buffer.capacity() - 2);
buffer.clear();
header = new MessageFormatRecord.MessageHeader_Format_V3(buffer);
messageheader = " Header - version " + header.getVersion() + " messagesize " + header.getMessageSize() + " currentOffset " + currentOffset + " blobEncryptionKeyRelativeOffset " + header.getBlobEncryptionKeyRecordRelativeOffset() + " blobPropertiesRelativeOffset " + header.getBlobPropertiesRecordRelativeOffset() + " userMetadataRelativeOffset " + header.getUserMetadataRecordRelativeOffset() + " dataRelativeOffset " + header.getBlobRecordRelativeOffset() + " crc " + header.getCrc();
totalRecordSize += header.getMessageSize() + buffer.capacity();
} else {
throw new MessageFormatException("Header version not supported " + version, MessageFormatErrorCodes.IO_Error);
}
// read blob id
InputStream streamlog = Channels.newInputStream(randomAccessFile.getChannel());
blobId = new BlobId(new DataInputStream(streamlog), clusterMap);
totalRecordSize += blobId.sizeInBytes();
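// A valid blob properties relative offset indicates a put record (blob properties, user metadata and blob data follow); otherwise this is an update record such as a delete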
if (header.getBlobPropertiesRecordRelativeOffset() != MessageFormatRecord.Message_Header_Invalid_Relative_Offset) {
ByteBuffer blobEncryptionKey = null;
if (header.hasEncryptionKeyRecord()) {
blobEncryptionKey = MessageFormatRecord.deserializeBlobEncryptionKey(streamlog);
encryptionKey = "EncryptionKey found which is of size " + blobEncryptionKey.remaining();
}
BlobProperties props = MessageFormatRecord.deserializeBlobProperties(streamlog);
expiresAtMs = Utils.addSecondsToEpochTime(props.getCreationTimeInMs(), props.getTimeToLiveInSeconds());
isExpired = isExpired(expiresAtMs, currentTimeInMs);
blobProperty = " Blob properties - blobSize " + props.getBlobSize() + " serviceId " + props.getServiceId() + ", isExpired " + isExpired + " accountId " + props.getAccountId() + " containerId " + props.getContainerId();
ByteBuffer metadata = MessageFormatRecord.deserializeUserMetadata(streamlog);
usermetadata = " Metadata - size " + metadata.capacity();
BlobData blobData = MessageFormatRecord.deserializeBlob(streamlog);
blobDataOutput = "Blob - size " + blobData.getSize();
} else {
UpdateRecord updateRecord = MessageFormatRecord.deserializeUpdateRecord(streamlog);
switch(updateRecord.getType()) {
case DELETE:
isDeleted = true;
deleteMsg = "delete change : AccountId:" + updateRecord.getAccountId() + ", ContainerId:" + updateRecord.getContainerId() + ", DeletionTimeInSecs:" + updateRecord.getUpdateTimeInMs();
break;
default:
// TODO (TTL update): handle TTL update
throw new IllegalStateException("Unrecognized update record type: " + updateRecord.getType());
}
}
return new LogBlobRecordInfo(messageheader, blobId, encryptionKey, blobProperty, usermetadata, blobDataOutput, deleteMsg, isDeleted, isExpired, expiresAtMs, totalRecordSize);
} finally {
context.stop();
}
}
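A minimal sketch of how this helper might be driven from a log-dumping tool follows. The log file path is a placeholder, the ClusterMap and StoreToolsMetrics instances are assumed to be built elsewhere, the caller is assumed to sit in the same package (the method is package-private), and the LogBlobRecordInfo field names are inferred from the constructor call above.
RandomAccessFile logFile = new RandomAccessFile("/path/to/log_current", "r"); // placeholder path
long offset = 0;
while (offset < logFile.length()) {
  // clusterMap and metrics are assumed to exist; the current time is used only for the expiration check
  LogBlobRecordInfo record = DumpDataHelper.readSingleRecordFromLog(logFile, offset, clusterMap, System.currentTimeMillis(), metrics);
  System.out.println(record.messageHeader); // field name assumed from the constructor arguments above
  offset += record.totalRecordSize; // advance past the header, blob id and payload of this record
}
logFile.close();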
Use of com.github.ambry.messageformat.BlobProperties in project ambry by LinkedIn.
The class ServerAdminTool, method getBlobProperties.
/**
* Gets {@link BlobProperties} for {@code blobId}.
* @param dataNodeId the {@link DataNodeId} to contact.
* @param blobId the {@link BlobId} to operate on.
* @param getOption the {@link GetOption} to send with the {@link GetRequest}.
* @param clusterMap the {@link ClusterMap} to use.
* @return the {@link ServerErrorCode} and {@link BlobProperties} of {@code blobId}.
* @throws Exception if there is an error sending the request or deserializing the response
*/
public Pair<ServerErrorCode, BlobProperties> getBlobProperties(DataNodeId dataNodeId, BlobId blobId, GetOption getOption, ClusterMap clusterMap) throws Exception {
Pair<ServerErrorCode, InputStream> response = getGetResponse(dataNodeId, blobId, MessageFormatFlags.BlobProperties, getOption, clusterMap);
InputStream stream = response.getSecond();
BlobProperties blobProperties = stream != null ? MessageFormatRecord.deserializeBlobProperties(stream) : null;
return new Pair<>(response.getFirst(), blobProperties);
}
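A hypothetical call site, assuming an already-constructed ServerAdminTool, a resolved DataNodeId and BlobId, and the same ClusterMap the tool was created with:
Pair<ServerErrorCode, BlobProperties> result = serverAdminTool.getBlobProperties(dataNodeId, blobId, GetOption.Include_All, clusterMap);
if (result.getFirst() == ServerErrorCode.No_Error && result.getSecond() != null) {
  BlobProperties props = result.getSecond();
  System.out.println("blobSize=" + props.getBlobSize() + " serviceId=" + props.getServiceId() + " accountId=" + props.getAccountId());
} else {
  System.out.println("getBlobProperties failed with " + result.getFirst());
}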
Use of com.github.ambry.messageformat.BlobProperties in project ambry by LinkedIn.
The class DirectoryUploader, method walkDirectoryToCreateBlobs.
public void walkDirectoryToCreateBlobs(String path, FileWriter writer, String datacenter, byte datacenterId, boolean enableVerboseLogging) throws InterruptedException {
File root = new File(path);
File[] list = root.listFiles();
Random random = new Random();
if (list == null) {
return;
}
for (File f : list) {
if (!f.isDirectory()) {
System.out.println("File :" + f.getAbsoluteFile());
if (f.length() > Integer.MAX_VALUE) {
System.out.println("File length is " + f.length());
throw new IllegalArgumentException("File length is " + f.length() + "; files larger than " + Integer.MAX_VALUE + " cannot be put using this tool.");
}
BlobProperties props = new BlobProperties(f.length(), "migration", Account.UNKNOWN_ACCOUNT_ID, Container.UNKNOWN_CONTAINER_ID, false);
byte[] usermetadata = new byte[1];
FileInputStream stream = null;
try {
int replicaCount = 0;
BlobId blobId = new BlobId(blobIdVersion, BlobId.BlobIdType.NATIVE, datacenterId, props.getAccountId(), props.getContainerId(), partitionId, false, BlobId.BlobDataType.DATACHUNK);
List<ReplicaId> successList = new ArrayList<>();
List<ReplicaId> failureList = new ArrayList<>();
for (ReplicaId replicaId : blobId.getPartition().getReplicaIds()) {
if (replicaId.getDataNodeId().getDatacenterName().equalsIgnoreCase(datacenter)) {
// If a node was specified, only write to that node instead of all nodes of a partition
if (dataNodeId != null && !dataNodeId.equals(replicaId.getDataNodeId())) {
continue;
}
replicaCount += 1;
try {
stream = new FileInputStream(f);
AtomicInteger correlationId = new AtomicInteger(random.nextInt(100000));
if (writeToAmbryReplica(props, ByteBuffer.wrap(usermetadata), stream, blobId, replicaId, correlationId, enableVerboseLogging)) {
successList.add(replicaId);
} else {
failureList.add(replicaId);
}
} catch (Exception e) {
System.out.println("Exception thrown on replica " + replicaId);
} finally {
if (stream != null) {
stream.close();
}
}
}
}
System.out.println("Successfuly put blob " + blobId + " to " + successList + ", but failed in " + failureList + "\n");
if (successList.size() == replicaCount) {
writer.write("blobId|" + blobId.getID() + "|source|" + f.getAbsolutePath() + "|fileSize|" + f.length() + "|\n");
}
// failed blobs are not written to the out file
} catch (FileNotFoundException e) {
System.out.println("File not found path : " + f.getAbsolutePath() + " exception : " + e);
} catch (IOException e) {
System.out.println("IOException when writing to migration log " + e);
} finally {
try {
if (stream != null) {
stream.close();
}
} catch (Exception e) {
System.out.println("Error while closing file stream " + e);
}
}
}
}
}
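For reference, the BlobProperties constructor used above takes the blob size, a service id, the owning account and container ids, and a boolean flag (presumably the encryption flag); a standalone construction with placeholder values might look like this:
long blobSizeInBytes = 4096; // placeholder size
BlobProperties properties = new BlobProperties(blobSizeInBytes, "example-service", Account.UNKNOWN_ACCOUNT_ID, Container.UNKNOWN_CONTAINER_ID, false);
System.out.println("created at " + properties.getCreationTimeInMs() + " for account " + properties.getAccountId() + ", container " + properties.getContainerId());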
Use of com.github.ambry.messageformat.BlobProperties in project ambry by LinkedIn.
The class HardDeleteVerifier, method verify.
private void verify(String dataDir) throws Exception {
final String Cleanup_Token_Filename = "cleanuptoken";
FileWriter fileWriter = null;
try {
fileWriter = new FileWriter(new File(outFile));
long offsetInCleanupToken = getOffsetFromCleanupToken(new File(dataDir, Cleanup_Token_Filename));
rangeMap = new HashMap<BlobId, IndexValue>();
offRangeMap = new HashMap<BlobId, IndexValue>();
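// 1. Read the index and populate the range maps up to the offset recorded in the cleanup token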
long lastEligibleSegmentEndOffset = readAndPopulateIndex(offsetInCleanupToken);
// 2. Scan the log and check each entry against the index entries read above (rangeMap)
File logFile = new File(dataDir, "log_current");
RandomAccessFile randomAccessFile = new RandomAccessFile(logFile, "r");
InputStream streamlog = Channels.newInputStream(randomAccessFile.getChannel());
long currentOffset = 0;
System.out.println("Starting scan from offset " + currentOffset + " to " + offsetInCleanupToken);
long lastOffsetToLookFor = lastEligibleSegmentEndOffset;
boolean seeking = false;
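// 'seeking' is true while scanning forward one byte at a time to find the next parsable record after a corrupt entry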
while (currentOffset < lastOffsetToLookFor) {
try {
short version = randomAccessFile.readShort();
if (version == 1) {
seeking = false;
ByteBuffer buffer = ByteBuffer.allocate(MessageFormatRecord.MessageHeader_Format_V1.getHeaderSize());
buffer.putShort(version);
randomAccessFile.read(buffer.array(), 2, buffer.capacity() - 2);
buffer.rewind();
MessageFormatRecord.MessageHeader_Format_V1 header = new MessageFormatRecord.MessageHeader_Format_V1(buffer);
// read blob id
BlobId id;
id = new BlobId(new DataInputStream(streamlog), map);
IndexValue indexValue = rangeMap.get(id);
boolean isDeleted = false;
if (indexValue == null) {
throw new IllegalStateException("Key in log not found in index " + id);
} else if (indexValue.isFlagSet(IndexValue.Flags.Delete_Index)) {
isDeleted = true;
}
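// As above, a valid blob properties offset means this is a put record; otherwise it is an update (e.g. delete) record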
if (header.getBlobPropertiesRecordRelativeOffset() != MessageFormatRecord.Message_Header_Invalid_Relative_Offset) {
BlobProperties props;
ByteBuffer metadata;
BlobData output;
try {
props = MessageFormatRecord.deserializeBlobProperties(streamlog);
metadata = MessageFormatRecord.deserializeUserMetadata(streamlog);
output = MessageFormatRecord.deserializeBlob(streamlog);
} catch (MessageFormatException e) {
if (!isDeleted) {
corruptNonDeleted++;
} else {
corruptDeleted++;
}
throw e;
}
if (isDeleted) {
ByteBuf byteBuf = output.content();
try {
if (!verifyZeroed(metadata.array()) || !verifyZeroed(Utils.readBytesFromByteBuf(byteBuf, new byte[(int) output.getSize()], 0, (int) output.getSize()))) {
/* If the offset in the index is different from that in the log, hard delete wouldn't have been
possible and we just saw a duplicate put for the same key, otherwise we missed a hard delete. */
if (currentOffset == indexValue.getOriginalMessageOffset()) {
notHardDeletedErrorCount++;
} else {
// the assumption here is that this put has been lost as far as the index is concerned due to
// a duplicate put. Of course, these shouldn't happen anymore, we are accounting for past
// bugs.
duplicatePuts++;
}
} else {
hardDeletedPuts++;
}
} finally {
byteBuf.release();
}
} else {
unDeletedPuts++;
}
} else if (MessageFormatRecord.deserializeUpdateRecord(streamlog).getType().equals(SubRecord.Type.DELETE)) {
deletes++;
}
currentOffset += (header.getMessageSize() + buffer.capacity() + id.sizeInBytes());
} else {
throw new IllegalStateException("Unknown version for entry");
}
} catch (MessageFormatException | IOException | IllegalArgumentException | IllegalStateException e) {
if (!seeking) {
invalidEntriesInlog = true;
e.printStackTrace();
seeking = true;
}
randomAccessFile.seek(++currentOffset);
} catch (Exception e) {
e.printStackTrace(System.err);
invalidEntriesInlog = true;
randomAccessFile.seek(++currentOffset);
break;
}
}
String msg = "\n============";
msg += "\ninvalidEntriesInlog? " + (invalidEntriesInlog ? "Yes" : "No");
msg += "\nnotHardDeletedErrorCount: " + notHardDeletedErrorCount;
msg += "\ncorruptNonDeletedCount: " + corruptNonDeleted;
msg += "\n========";
msg += "\ncorruptDeleted: " + corruptDeleted;
msg += "\nduplicatePuts: " + duplicatePuts;
msg += "\nundeleted Put Records: " + unDeletedPuts;
msg += "\nhard deleted Put Records: " + hardDeletedPuts;
msg += "\nDelete Records: " + deletes;
msg += "\n============";
fileWriter.write(msg);
System.out.println(msg);
} finally {
if (fileWriter != null) {
fileWriter.flush();
fileWriter.close();
}
}
}
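The hard-delete check above reduces to asserting that the user metadata and blob content read back from the log are fully zeroed out; a standalone sketch of such a predicate (presumably what the verifyZeroed helper referenced above does) is:
// Returns true if every byte is zero, i.e. the content appears to have been hard deleted.
private static boolean isAllZeros(byte[] bytes) {
  for (byte b : bytes) {
    if (b != 0) {
      return false;
    }
  }
  return true;
}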