Use of com.github.ambry.protocol.GetResponse in project ambry by LinkedIn.
The class ServerReadPerformance, method main: a command-line tool that replays a log of blob ids and, for every replica of each blob's partition, fetches the blob, its properties, and its user metadata over a pooled connection, records the latencies, and finally deletes the blob.
public static void main(String[] args) {
ConnectionPool connectionPool = null;
FileWriter writer = null;
try {
OptionParser parser = new OptionParser();
ArgumentAcceptingOptionSpec<String> logToReadOpt = parser.accepts("logToRead", "The log that needs to be replayed for traffic").withRequiredArg().describedAs("log_to_read").ofType(String.class);
ArgumentAcceptingOptionSpec<String> hardwareLayoutOpt = parser.accepts("hardwareLayout", "The path of the hardware layout file").withRequiredArg().describedAs("hardware_layout").ofType(String.class);
ArgumentAcceptingOptionSpec<String> partitionLayoutOpt = parser.accepts("partitionLayout", "The path of the partition layout file").withRequiredArg().describedAs("partition_layout").ofType(String.class);
ArgumentAcceptingOptionSpec<Integer> readsPerSecondOpt = parser.accepts("readsPerSecond", "The rate at which reads need to be performed").withRequiredArg().describedAs("The number of reads per second").ofType(Integer.class).defaultsTo(1000);
ArgumentAcceptingOptionSpec<Long> measurementIntervalOpt = parser.accepts("measurementInterval", "The interval in second to report performance result").withOptionalArg().describedAs("The CPU time spent for getting blobs, not wall time").ofType(Long.class).defaultsTo(300L);
ArgumentAcceptingOptionSpec<Boolean> verboseLoggingOpt = parser.accepts("enableVerboseLogging", "Enables verbose logging").withOptionalArg().describedAs("Enable verbose logging").ofType(Boolean.class).defaultsTo(false);
ArgumentAcceptingOptionSpec<String> sslEnabledDatacentersOpt = parser.accepts("sslEnabledDatacenters", "Datacenters to which ssl should be enabled").withOptionalArg().describedAs("Comma separated list").ofType(String.class).defaultsTo("");
ArgumentAcceptingOptionSpec<String> sslKeystorePathOpt = parser.accepts("sslKeystorePath", "SSL key store path").withOptionalArg().describedAs("The file path of SSL key store").defaultsTo("").ofType(String.class);
ArgumentAcceptingOptionSpec<String> sslKeystoreTypeOpt = parser.accepts("sslKeystoreType", "SSL key store type").withOptionalArg().describedAs("The type of SSL key store").defaultsTo("").ofType(String.class);
ArgumentAcceptingOptionSpec<String> sslTruststorePathOpt = parser.accepts("sslTruststorePath", "SSL trust store path").withOptionalArg().describedAs("The file path of SSL trust store").defaultsTo("").ofType(String.class);
ArgumentAcceptingOptionSpec<String> sslKeystorePasswordOpt = parser.accepts("sslKeystorePassword", "SSL key store password").withOptionalArg().describedAs("The password of SSL key store").defaultsTo("").ofType(String.class);
ArgumentAcceptingOptionSpec<String> sslKeyPasswordOpt = parser.accepts("sslKeyPassword", "SSL key password").withOptionalArg().describedAs("The password of SSL private key").defaultsTo("").ofType(String.class);
ArgumentAcceptingOptionSpec<String> sslTruststorePasswordOpt = parser.accepts("sslTruststorePassword", "SSL trust store password").withOptionalArg().describedAs("The password of SSL trust store").defaultsTo("").ofType(String.class);
ArgumentAcceptingOptionSpec<String> sslCipherSuitesOpt = parser.accepts("sslCipherSuites", "SSL enabled cipher suites").withOptionalArg().describedAs("Comma separated list").defaultsTo("TLS_RSA_WITH_AES_128_CBC_SHA").ofType(String.class);
OptionSet options = parser.parse(args);
ArrayList<OptionSpec> listOpt = new ArrayList<>();
listOpt.add(logToReadOpt);
listOpt.add(hardwareLayoutOpt);
listOpt.add(partitionLayoutOpt);
ToolUtils.ensureOrExit(listOpt, options, parser);
long measurementIntervalNs = options.valueOf(measurementIntervalOpt) * SystemTime.NsPerSec;
ToolUtils.validateSSLOptions(options, parser, sslEnabledDatacentersOpt, sslKeystorePathOpt, sslKeystoreTypeOpt, sslTruststorePathOpt, sslKeystorePasswordOpt, sslKeyPasswordOpt, sslTruststorePasswordOpt);
String sslEnabledDatacenters = options.valueOf(sslEnabledDatacentersOpt);
Properties sslProperties;
if (sslEnabledDatacenters.length() != 0) {
sslProperties = ToolUtils.createSSLProperties(sslEnabledDatacenters, options.valueOf(sslKeystorePathOpt), options.valueOf(sslKeystoreTypeOpt), options.valueOf(sslKeystorePasswordOpt), options.valueOf(sslKeyPasswordOpt), options.valueOf(sslTruststorePathOpt), options.valueOf(sslTruststorePasswordOpt), options.valueOf(sslCipherSuitesOpt));
} else {
sslProperties = new Properties();
}
ToolUtils.addClusterMapProperties(sslProperties);
String logToRead = options.valueOf(logToReadOpt);
int readsPerSecond = options.valueOf(readsPerSecondOpt);
boolean enableVerboseLogging = options.has(verboseLoggingOpt);
if (enableVerboseLogging) {
System.out.println("Enabled verbose logging");
}
File logFile = new File(System.getProperty("user.dir"), "readperfresult");
writer = new FileWriter(logFile);
String hardwareLayoutPath = options.valueOf(hardwareLayoutOpt);
String partitionLayoutPath = options.valueOf(partitionLayoutOpt);
ClusterMapConfig clusterMapConfig = new ClusterMapConfig(new VerifiableProperties(sslProperties));
ClusterMap map = ((ClusterAgentsFactory) Utils.getObj(clusterMapConfig.clusterMapClusterAgentsFactory, clusterMapConfig, hardwareLayoutPath, partitionLayoutPath)).getClusterMap();
final AtomicLong totalTimeTaken = new AtomicLong(0);
final AtomicLong totalReads = new AtomicLong(0);
final AtomicBoolean shutdown = new AtomicBoolean(false);
// attach shutdown handler to catch control-c
Runtime.getRuntime().addShutdownHook(new Thread() {
public void run() {
try {
System.out.println("Shutdown invoked");
shutdown.set(true);
String message = "Total reads : " + totalReads.get() + " Total time taken : " + totalTimeTaken.get() + " Nano Seconds Average time taken per read " + ((double) totalTimeTaken.get()) / SystemTime.NsPerSec / totalReads.get() + " Seconds";
System.out.println(message);
} catch (Exception e) {
System.out.println("Error while shutting down " + e);
}
}
});
final BufferedReader br = new BufferedReader(new FileReader(logToRead));
Throttler throttler = new Throttler(readsPerSecond, 100, true, SystemTime.getInstance());
String line;
ConnectedChannel channel = null;
ConnectionPoolConfig connectionPoolConfig = new ConnectionPoolConfig(new VerifiableProperties(new Properties()));
VerifiableProperties vProps = new VerifiableProperties(sslProperties);
SSLConfig sslConfig = new SSLConfig(vProps);
clusterMapConfig = new ClusterMapConfig(vProps);
connectionPool = new BlockingChannelConnectionPool(connectionPoolConfig, sslConfig, clusterMapConfig, new MetricRegistry());
long totalNumberOfGetBlobs = 0;
long totalLatencyForGetBlobs = 0;
ArrayList<Long> latenciesForGetBlobs = new ArrayList<Long>();
long maxLatencyForGetBlobs = 0;
long minLatencyForGetBlobs = Long.MAX_VALUE;
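// replay the log line by line; each line is expected to carry a blob id after a '-' separator (see the split below)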
while ((line = br.readLine()) != null) {
String[] id = line.split("-");
BlobData blobData = null;
BlobId blobId = new BlobId(id[1], map);
ArrayList<BlobId> blobIds = new ArrayList<BlobId>();
blobIds.add(blobId);
for (ReplicaId replicaId : blobId.getPartition().getReplicaIds()) {
long startTimeGetBlob = 0;
ArrayList<PartitionRequestInfo> partitionRequestInfoList = new ArrayList<PartitionRequestInfo>();
try {
partitionRequestInfoList.clear();
PartitionRequestInfo partitionRequestInfo = new PartitionRequestInfo(blobId.getPartition(), blobIds);
partitionRequestInfoList.add(partitionRequestInfo);
GetRequest getRequest = new GetRequest(1, "getperf", MessageFormatFlags.Blob, partitionRequestInfoList, GetOption.None);
Port port = replicaId.getDataNodeId().getPortToConnectTo();
channel = connectionPool.checkOutConnection(replicaId.getDataNodeId().getHostname(), port, 10000);
startTimeGetBlob = SystemTime.getInstance().nanoseconds();
channel.send(getRequest);
InputStream receiveStream = channel.receive().getInputStream();
GetResponse getResponse = GetResponse.readFrom(new DataInputStream(receiveStream), map);
blobData = MessageFormatRecord.deserializeBlob(getResponse.getInputStream());
long sizeRead = 0;
byte[] outputBuffer = new byte[(int) blobData.getSize()];
ByteBufferOutputStream streamOut = new ByteBufferOutputStream(ByteBuffer.wrap(outputBuffer));
while (sizeRead < blobData.getSize()) {
streamOut.write(blobData.getStream().read());
sizeRead++;
}
long latencyPerBlob = SystemTime.getInstance().nanoseconds() - startTimeGetBlob;
totalTimeTaken.addAndGet(latencyPerBlob);
latenciesForGetBlobs.add(latencyPerBlob);
totalReads.incrementAndGet();
totalNumberOfGetBlobs++;
totalLatencyForGetBlobs += latencyPerBlob;
if (enableVerboseLogging) {
System.out.println("Time taken to get blob id " + blobId + " in ms " + latencyPerBlob / SystemTime.NsPerMs);
}
if (latencyPerBlob > maxLatencyForGetBlobs) {
maxLatencyForGetBlobs = latencyPerBlob;
}
if (latencyPerBlob < minLatencyForGetBlobs) {
minLatencyForGetBlobs = latencyPerBlob;
}
if (totalLatencyForGetBlobs >= measurementIntervalNs) {
Collections.sort(latenciesForGetBlobs);
int index99 = (int) (latenciesForGetBlobs.size() * 0.99) - 1;
int index95 = (int) (latenciesForGetBlobs.size() * 0.95) - 1;
String message = totalNumberOfGetBlobs + "," + (double) latenciesForGetBlobs.get(index99) / SystemTime.NsPerSec + "," + (double) latenciesForGetBlobs.get(index95) / SystemTime.NsPerSec + "," + ((double) totalLatencyForGetBlobs / SystemTime.NsPerSec / totalNumberOfGetBlobs);
System.out.println(message);
writer.write(message + "\n");
totalLatencyForGetBlobs = 0;
latenciesForGetBlobs.clear();
totalNumberOfGetBlobs = 0;
maxLatencyForGetBlobs = 0;
minLatencyForGetBlobs = Long.MAX_VALUE;
}
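// fetch the blob properties for the same blob id and measure that latency separately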
partitionRequestInfoList.clear();
partitionRequestInfo = new PartitionRequestInfo(blobId.getPartition(), blobIds);
partitionRequestInfoList.add(partitionRequestInfo);
GetRequest getRequestProperties = new GetRequest(1, "getperf", MessageFormatFlags.BlobProperties, partitionRequestInfoList, GetOption.None);
long startTimeGetBlobProperties = SystemTime.getInstance().nanoseconds();
channel.send(getRequestProperties);
InputStream receivePropertyStream = channel.receive().getInputStream();
GetResponse getResponseProperty = GetResponse.readFrom(new DataInputStream(receivePropertyStream), map);
BlobProperties blobProperties = MessageFormatRecord.deserializeBlobProperties(getResponseProperty.getInputStream());
long endTimeGetBlobProperties = SystemTime.getInstance().nanoseconds() - startTimeGetBlobProperties;
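// fetch the user metadata for the same blob id and measure that latency separately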
partitionRequestInfoList.clear();
partitionRequestInfo = new PartitionRequestInfo(blobId.getPartition(), blobIds);
partitionRequestInfoList.add(partitionRequestInfo);
GetRequest getRequestUserMetadata = new GetRequest(1, "getperf", MessageFormatFlags.BlobUserMetadata, partitionRequestInfoList, GetOption.None);
long startTimeGetBlobUserMetadata = SystemTime.getInstance().nanoseconds();
channel.send(getRequestUserMetadata);
InputStream receiveUserMetadataStream = channel.receive().getInputStream();
GetResponse getResponseUserMetadata = GetResponse.readFrom(new DataInputStream(receiveUserMetadataStream), map);
ByteBuffer userMetadata = MessageFormatRecord.deserializeUserMetadata(getResponseUserMetadata.getInputStream());
long endTimeGetBlobUserMetadata = SystemTime.getInstance().nanoseconds() - startTimeGetBlobUserMetadata;
// delete the blob
DeleteRequest deleteRequest = new DeleteRequest(0, "perf", blobId, System.currentTimeMillis());
channel.send(deleteRequest);
DeleteResponse deleteResponse = DeleteResponse.readFrom(new DataInputStream(channel.receive().getInputStream()));
if (deleteResponse.getError() != ServerErrorCode.No_Error) {
throw new UnexpectedException("error " + deleteResponse.getError());
}
throttler.maybeThrottle(1);
} finally {
if (channel != null) {
connectionPool.checkInConnection(channel);
channel = null;
}
}
}
}
} catch (Exception e) {
e.printStackTrace();
System.out.println("Error in server read performance " + e);
} finally {
if (writer != null) {
try {
writer.close();
} catch (Exception e) {
System.out.println("Error when closing writer");
}
}
if (connectionPool != null) {
connectionPool.shutdown();
}
}
}
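As a rough usage sketch, the tool would be launched from the command line with the three required options plus any optional throttle and SSL settings; the jar name, main-class path, and file names below are placeholders, and the log file must contain one entry per line with the blob id after a '-' separator (that is what the line.split("-") above expects):
java -cp ambry-tools.jar com.github.ambry.tools.perf.ServerReadPerformance --logToRead blob-ids.log --hardwareLayout HardwareLayout.json --partitionLayout PartitionLayout.json --readsPerSecond 500 --measurementInterval 300
The per-interval results (number of gets, 99th and 95th percentile latency, and average latency in seconds) are printed to stdout and appended to a readperfresult file in the working directory.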
Use of com.github.ambry.protocol.GetResponse in project ambry by LinkedIn.
The class ReplicaThread, method fixMissingStoreKeys.
/**
* Gets all the messages from the remote node for the missing keys and writes them to the local store
* @param connectedChannel The connected channel that represents a connection to the remote replica
* @param replicasToReplicatePerNode The information about the replicas that are being replicated
* @param exchangeMetadataResponseList The missing keys in the local stores whose messages need to be retrieved
* from the remote stores
* @throws IOException
* @throws StoreException
* @throws MessageFormatException
* @throws ReplicationException
*/
void fixMissingStoreKeys(ConnectedChannel connectedChannel, List<RemoteReplicaInfo> replicasToReplicatePerNode, List<ExchangeMetadataResponse> exchangeMetadataResponseList) throws IOException, StoreException, MessageFormatException, ReplicationException {
long fixMissingStoreKeysStartTimeInMs = SystemTime.getInstance().milliseconds();
try {
if (exchangeMetadataResponseList.size() != replicasToReplicatePerNode.size() || replicasToReplicatePerNode.size() == 0) {
throw new IllegalArgumentException("ExchangeMetadataResponseList size " + exchangeMetadataResponseList.size() + " and replicasToReplicatePerNode size " + replicasToReplicatePerNode.size() + " should be the same and greater than zero");
}
DataNodeId remoteNode = replicasToReplicatePerNode.get(0).getReplicaId().getDataNodeId();
GetResponse getResponse = getMessagesForMissingKeys(connectedChannel, exchangeMetadataResponseList, replicasToReplicatePerNode, remoteNode);
writeMessagesToLocalStoreAndAdvanceTokens(exchangeMetadataResponseList, getResponse, replicasToReplicatePerNode, remoteNode);
} finally {
long fixMissingStoreKeysTime = SystemTime.getInstance().milliseconds() - fixMissingStoreKeysStartTimeInMs;
replicationMetrics.updateFixMissingStoreKeysTime(fixMissingStoreKeysTime, replicatingFromRemoteColo, replicatingOverSsl, datacenterName);
}
}
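The helpers getMessagesForMissingKeys and writeMessagesToLocalStoreAndAdvanceTokens are not shown on this page; conceptually the fetch follows the same request/response pattern used in the other snippets here. A minimal sketch, assuming the missing keys have already been collected into partitionRequestInfoList (the flags, option, and variable names are illustrative, not the actual ReplicaThread code):
GetRequest fetchRequest = new GetRequest(correlationId, "replication-fetch", MessageFormatFlags.All, partitionRequestInfoList, GetOption.None);
connectedChannel.send(fetchRequest);
GetResponse fetchResponse = GetResponse.readFrom(new DataInputStream(connectedChannel.receive().getInputStream()), clusterMap);
// the messages for the missing keys are then written to the local store and the replica tokens are advanced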
Use of com.github.ambry.protocol.GetResponse in project ambry by LinkedIn.
The class ServerHardDeleteTest, method getAndVerify.
/**
* Fetches the blob (for all MessageFormatFlags) and verifies the content
* @param channel the {@link BlockingChannel} to use to send and receive data
* @param blobsCount the total number of blobs that need to be verified
* @throws Exception
*/
void getAndVerify(BlockingChannel channel, int blobsCount) throws Exception {
ArrayList<PartitionRequestInfo> partitionRequestInfoList = new ArrayList<>();
ArrayList<BlobId> ids = new ArrayList<>();
for (int i = 0; i < blobsCount; i++) {
ids.add(blobIdList.get(i));
}
PartitionRequestInfo partitionRequestInfo = new PartitionRequestInfo(blobIdList.get(0).getPartition(), ids);
partitionRequestInfoList.add(partitionRequestInfo);
ArrayList<MessageFormatFlags> flags = new ArrayList<>();
flags.add(MessageFormatFlags.BlobProperties);
flags.add(MessageFormatFlags.BlobUserMetadata);
flags.add(MessageFormatFlags.Blob);
for (MessageFormatFlags flag : flags) {
GetRequest getRequest = new GetRequest(1, "clientid2", flag, partitionRequestInfoList, GetOption.Include_All);
channel.send(getRequest);
InputStream stream = channel.receive().getInputStream();
GetResponse resp = GetResponse.readFrom(new DataInputStream(stream), mockClusterMap);
if (flag == MessageFormatFlags.BlobProperties) {
for (int i = 0; i < blobsCount; i++) {
BlobProperties propertyOutput = MessageFormatRecord.deserializeBlobProperties(resp.getInputStream());
Assert.assertEquals(properties.get(i).getBlobSize(), propertyOutput.getBlobSize());
Assert.assertEquals("serviceid1", propertyOutput.getServiceId());
Assert.assertEquals("AccountId mismatch", properties.get(i).getAccountId(), propertyOutput.getAccountId());
Assert.assertEquals("ContainerId mismatch", properties.get(i).getContainerId(), propertyOutput.getContainerId());
}
} else if (flag == MessageFormatFlags.BlobUserMetadata) {
for (int i = 0; i < blobsCount; i++) {
ByteBuffer userMetadataOutput = MessageFormatRecord.deserializeUserMetadata(resp.getInputStream());
Assert.assertArrayEquals(userMetadataOutput.array(), usermetadata.get(i));
}
} else if (flag == MessageFormatFlags.Blob) {
for (int i = 0; i < blobsCount; i++) {
BlobData blobData = MessageFormatRecord.deserializeBlob(resp.getInputStream());
Assert.assertEquals(properties.get(i).getBlobSize(), blobData.getSize());
byte[] dataOutput = new byte[(int) blobData.getSize()];
blobData.getStream().read(dataOutput);
Assert.assertArrayEquals(dataOutput, data.get(i));
}
} else {
throw new IllegalArgumentException("Unrecognized message format flags " + flags);
}
}
}
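Callers of this helper first connect a BlockingChannel to the server under test. A minimal sketch, assuming a constructor of the form (host, port, read buffer size, write buffer size, read timeout, connect timeout); the host, port, and sizes below are placeholders:
BlockingChannel channel = new BlockingChannel("localhost", 6667, 10000, 10000, 10000, 2000);
channel.connect();
getAndVerify(channel, blobIdList.size());
channel.disconnect();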
Use of com.github.ambry.protocol.GetResponse in project ambry by LinkedIn.
The class MockRouterCallback, method testErrorPrecedence.
/**
* Help test error precedence.
* @param serverErrorCodesInOrder the list of error codes to set the mock servers with.
* @param expectedErrorCode the expected router error code for the operation.
* @throws Exception
*/
private void testErrorPrecedence(ServerErrorCode[] serverErrorCodesInOrder, RouterErrorCode expectedErrorCode) throws Exception {
NonBlockingRouter.currentOperationsCount.incrementAndGet();
GetBlobInfoOperation op = new GetBlobInfoOperation(routerConfig, routerMetrics, mockClusterMap, responseHandler, blobId, options, null, routerCallback, kms, cryptoService, cryptoJobHandler, time);
ArrayList<RequestInfo> requestListToFill = new ArrayList<>();
requestRegistrationCallback.requestListToFill = requestListToFill;
int i = 0;
for (MockServer server : mockServerLayout.getMockServers()) {
server.setServerErrorForAllRequests(serverErrorCodesInOrder[i++]);
}
while (!op.isOperationComplete()) {
op.poll(requestRegistrationCallback);
List<ResponseInfo> responses = sendAndWaitForResponses(requestListToFill);
for (ResponseInfo responseInfo : responses) {
GetResponse getResponse = responseInfo.getError() == null ? GetResponse.readFrom(new DataInputStream(new ByteBufferInputStream(responseInfo.getResponse())), mockClusterMap) : null;
op.handleResponse(responseInfo, getResponse);
if (op.isOperationComplete()) {
break;
}
}
}
RouterException routerException = (RouterException) op.getOperationException();
Assert.assertEquals(expectedErrorCode, routerException.getErrorCode());
}
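A caller might exercise the precedence check as follows; the particular error codes are illustrative and not the actual cases from the original test class:
ServerErrorCode[] codes = new ServerErrorCode[mockServerLayout.getMockServers().size()];
Arrays.fill(codes, ServerErrorCode.Blob_Not_Found);
codes[0] = ServerErrorCode.Blob_Deleted;
testErrorPrecedence(codes, RouterErrorCode.BlobDeleted);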
Use of com.github.ambry.protocol.GetResponse in project ambry by LinkedIn.
The class StoredBlob, method makeGetResponse.
/**
* Make a {@link GetResponse} for the given {@link GetRequest} for which the given {@link ServerErrorCode} was
* encountered. The request could be for BlobInfo or for Blob (the only two options that the router requests).
* @param getRequest the {@link GetRequest} for which the response is being constructed.
* @param getError the {@link ServerErrorCode} that was encountered.
* @return the constructed {@link GetResponse}
* @throws IOException if there was an error constructing the response.
*/
GetResponse makeGetResponse(GetRequest getRequest, ServerErrorCode getError) throws IOException {
GetResponse getResponse;
if (getError == ServerErrorCode.No_Error) {
List<PartitionRequestInfo> infos = getRequest.getPartitionInfoList();
if (infos.size() != 1 || infos.get(0).getBlobIds().size() != 1) {
getError = ServerErrorCode.Unknown_Error;
}
}
ServerErrorCode serverError;
ServerErrorCode partitionError;
boolean isDataBlob = false;
try {
String id = getRequest.getPartitionInfoList().get(0).getBlobIds().get(0).getID();
isDataBlob = blobs.get(id).type == BlobType.DataBlob;
} catch (Exception ignored) {
}
if (!getErrorOnDataBlobOnly || isDataBlob) {
// set it in the partitionResponseInfo
if (getError == ServerErrorCode.No_Error || getError == ServerErrorCode.Blob_Expired || getError == ServerErrorCode.Blob_Deleted || getError == ServerErrorCode.Blob_Not_Found) {
partitionError = getError;
serverError = ServerErrorCode.No_Error;
} else {
serverError = getError;
// does not matter - this will not be checked if serverError is not No_Error.
partitionError = ServerErrorCode.No_Error;
}
} else {
serverError = ServerErrorCode.No_Error;
partitionError = ServerErrorCode.No_Error;
}
if (serverError == ServerErrorCode.No_Error) {
int byteBufferSize;
ByteBuffer byteBuffer;
StoreKey key = getRequest.getPartitionInfoList().get(0).getBlobIds().get(0);
short accountId = Account.UNKNOWN_ACCOUNT_ID;
short containerId = Container.UNKNOWN_CONTAINER_ID;
long operationTimeMs = Utils.Infinite_Time;
StoredBlob blob = blobs.get(key.getID());
ServerErrorCode processedError = errorForGet(blob, getRequest);
MessageMetadata msgMetadata = null;
if (processedError == ServerErrorCode.No_Error) {
ByteBuffer buf = blobs.get(key.getID()).serializedSentPutRequest.duplicate();
// read off the size
buf.getLong();
// read off the type.
buf.getShort();
PutRequest.ReceivedPutRequest originalBlobPutReq = PutRequest.readFrom(new DataInputStream(new ByteBufferInputStream(buf)), clusterMap);
switch(getRequest.getMessageFormatFlag()) {
case BlobInfo:
BlobProperties blobProperties = originalBlobPutReq.getBlobProperties();
accountId = blobProperties.getAccountId();
containerId = blobProperties.getContainerId();
operationTimeMs = blobProperties.getCreationTimeInMs();
ByteBuffer userMetadata = originalBlobPutReq.getUsermetadata();
byteBufferSize = MessageFormatRecord.BlobProperties_Format_V1.getBlobPropertiesRecordSize(blobProperties) + MessageFormatRecord.UserMetadata_Format_V1.getUserMetadataSize(userMetadata);
byteBuffer = ByteBuffer.allocate(byteBufferSize);
if (originalBlobPutReq.getBlobEncryptionKey() != null) {
msgMetadata = new MessageMetadata(originalBlobPutReq.getBlobEncryptionKey().duplicate());
}
MessageFormatRecord.BlobProperties_Format_V1.serializeBlobPropertiesRecord(byteBuffer, blobProperties);
MessageFormatRecord.UserMetadata_Format_V1.serializeUserMetadataRecord(byteBuffer, userMetadata);
break;
case Blob:
switch(blobFormatVersion) {
case MessageFormatRecord.Blob_Version_V2:
if (originalBlobPutReq.getBlobEncryptionKey() != null) {
msgMetadata = new MessageMetadata(originalBlobPutReq.getBlobEncryptionKey().duplicate());
}
byteBufferSize = (int) MessageFormatRecord.Blob_Format_V2.getBlobRecordSize((int) originalBlobPutReq.getBlobSize());
byteBuffer = ByteBuffer.allocate(byteBufferSize);
MessageFormatRecord.Blob_Format_V2.serializePartialBlobRecord(byteBuffer, (int) originalBlobPutReq.getBlobSize(), originalBlobPutReq.getBlobType());
break;
case MessageFormatRecord.Blob_Version_V1:
byteBufferSize = (int) MessageFormatRecord.Blob_Format_V1.getBlobRecordSize((int) originalBlobPutReq.getBlobSize());
byteBuffer = ByteBuffer.allocate(byteBufferSize);
MessageFormatRecord.Blob_Format_V1.serializePartialBlobRecord(byteBuffer, (int) originalBlobPutReq.getBlobSize());
break;
default:
throw new IllegalStateException("Blob format version " + blobFormatVersion + " not supported.");
}
byteBuffer.put(Utils.readBytesFromStream(originalBlobPutReq.getBlobStream(), (int) originalBlobPutReq.getBlobSize()));
Crc32 crc = new Crc32();
crc.update(byteBuffer.array(), 0, byteBuffer.position());
byteBuffer.putLong(crc.getValue());
break;
case All:
blobProperties = originalBlobPutReq.getBlobProperties();
accountId = blobProperties.getAccountId();
containerId = blobProperties.getContainerId();
userMetadata = originalBlobPutReq.getUsermetadata();
operationTimeMs = originalBlobPutReq.getBlobProperties().getCreationTimeInMs();
int blobHeaderSize = MessageFormatRecord.MessageHeader_Format_V2.getHeaderSize();
int blobEncryptionRecordSize = originalBlobPutReq.getBlobEncryptionKey() != null ? MessageFormatRecord.BlobEncryptionKey_Format_V1.getBlobEncryptionKeyRecordSize(originalBlobPutReq.getBlobEncryptionKey().duplicate()) : 0;
int blobPropertiesSize = MessageFormatRecord.BlobProperties_Format_V1.getBlobPropertiesRecordSize(blobProperties);
int userMetadataSize = MessageFormatRecord.UserMetadata_Format_V1.getUserMetadataSize(userMetadata);
int blobInfoSize = blobPropertiesSize + userMetadataSize;
int blobRecordSize;
switch(blobFormatVersion) {
case MessageFormatRecord.Blob_Version_V2:
blobRecordSize = (int) MessageFormatRecord.Blob_Format_V2.getBlobRecordSize((int) originalBlobPutReq.getBlobSize());
break;
case MessageFormatRecord.Blob_Version_V1:
blobRecordSize = (int) MessageFormatRecord.Blob_Format_V1.getBlobRecordSize((int) originalBlobPutReq.getBlobSize());
break;
default:
throw new IllegalStateException("Blob format version " + blobFormatVersion + " not supported.");
}
byteBufferSize = blobHeaderSize + key.sizeInBytes() + blobEncryptionRecordSize + blobInfoSize + blobRecordSize;
byteBuffer = ByteBuffer.allocate(byteBufferSize);
try {
MessageFormatRecord.MessageHeader_Format_V2.serializeHeader(byteBuffer, blobEncryptionRecordSize + blobInfoSize + blobRecordSize, originalBlobPutReq.getBlobEncryptionKey() == null ? Message_Header_Invalid_Relative_Offset : blobHeaderSize + key.sizeInBytes(), blobHeaderSize + key.sizeInBytes() + blobEncryptionRecordSize, Message_Header_Invalid_Relative_Offset, blobHeaderSize + key.sizeInBytes() + blobEncryptionRecordSize + blobPropertiesSize, blobHeaderSize + key.sizeInBytes() + blobEncryptionRecordSize + blobInfoSize);
} catch (MessageFormatException e) {
e.printStackTrace();
}
byteBuffer.put(key.toBytes());
if (originalBlobPutReq.getBlobEncryptionKey() != null) {
MessageFormatRecord.BlobEncryptionKey_Format_V1.serializeBlobEncryptionKeyRecord(byteBuffer, originalBlobPutReq.getBlobEncryptionKey().duplicate());
msgMetadata = new MessageMetadata(originalBlobPutReq.getBlobEncryptionKey().duplicate());
}
MessageFormatRecord.BlobProperties_Format_V1.serializeBlobPropertiesRecord(byteBuffer, blobProperties);
MessageFormatRecord.UserMetadata_Format_V1.serializeUserMetadataRecord(byteBuffer, userMetadata);
int blobRecordStart = byteBuffer.position();
switch(blobFormatVersion) {
case MessageFormatRecord.Blob_Version_V2:
MessageFormatRecord.Blob_Format_V2.serializePartialBlobRecord(byteBuffer, (int) originalBlobPutReq.getBlobSize(), originalBlobPutReq.getBlobType());
break;
case MessageFormatRecord.Blob_Version_V1:
MessageFormatRecord.Blob_Format_V1.serializePartialBlobRecord(byteBuffer, (int) originalBlobPutReq.getBlobSize());
break;
default:
throw new IllegalStateException("Blob format version " + blobFormatVersion + " not supported.");
}
byteBuffer.put(Utils.readBytesFromStream(originalBlobPutReq.getBlobStream(), (int) originalBlobPutReq.getBlobSize()));
crc = new Crc32();
crc.update(byteBuffer.array(), blobRecordStart, blobRecordSize - MessageFormatRecord.Crc_Size);
byteBuffer.putLong(crc.getValue());
break;
default:
throw new IOException("GetRequest flag is not supported: " + getRequest.getMessageFormatFlag());
}
} else if (processedError == ServerErrorCode.Blob_Deleted) {
if (partitionError == ServerErrorCode.No_Error) {
partitionError = ServerErrorCode.Blob_Deleted;
}
byteBuffer = ByteBuffer.allocate(0);
byteBufferSize = 0;
} else if (processedError == ServerErrorCode.Blob_Expired) {
if (partitionError == ServerErrorCode.No_Error) {
partitionError = ServerErrorCode.Blob_Expired;
}
byteBuffer = ByteBuffer.allocate(0);
byteBufferSize = 0;
} else {
if (partitionError == ServerErrorCode.No_Error) {
partitionError = ServerErrorCode.Blob_Not_Found;
}
byteBuffer = ByteBuffer.allocate(0);
byteBufferSize = 0;
}
byteBuffer.flip();
ByteBufferSend responseSend = new ByteBufferSend(byteBuffer);
List<MessageInfo> messageInfoList = new ArrayList<>(1);
List<MessageMetadata> messageMetadataList = new ArrayList<>(1);
List<PartitionResponseInfo> partitionResponseInfoList = new ArrayList<PartitionResponseInfo>();
messageInfoList.add(new MessageInfo(key, byteBufferSize, accountId, containerId, operationTimeMs));
messageMetadataList.add(msgMetadata);
PartitionResponseInfo partitionResponseInfo = partitionError == ServerErrorCode.No_Error ? new PartitionResponseInfo(getRequest.getPartitionInfoList().get(0).getPartition(), messageInfoList, messageMetadataList) : new PartitionResponseInfo(getRequest.getPartitionInfoList().get(0).getPartition(), partitionError);
partitionResponseInfoList.add(partitionResponseInfo);
getResponse = new GetResponse(getRequest.getCorrelationId(), getRequest.getClientId(), partitionResponseInfoList, responseSend, serverError);
} else {
getResponse = new GetResponse(getRequest.getCorrelationId(), getRequest.getClientId(), new ArrayList<PartitionResponseInfo>(), new ByteBufferSend(ByteBuffer.allocate(0)), serverError);
}
return getResponse;
}
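On the consuming side, the router test serializes this response onto the wire and reads it back with GetResponse.readFrom (as in testErrorPrecedence above); the server-level error is checked before the per-partition error, and only then is the payload deserialized. A minimal sketch of that consumption (the control flow is illustrative):
GetResponse getResponse = GetResponse.readFrom(new DataInputStream(new ByteBufferInputStream(responseInfo.getResponse())), mockClusterMap);
if (getResponse.getError() == ServerErrorCode.No_Error && getResponse.getPartitionResponseInfoList().get(0).getErrorCode() == ServerErrorCode.No_Error) {
// the blob properties / user metadata / blob record can then be deserialized via MessageFormatRecord
BlobProperties propertiesOut = MessageFormatRecord.deserializeBlobProperties(getResponse.getInputStream());
}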