Use of com.github.ambry.utils.ByteBufferOutputStream in project ambry by linkedin.
The class HardDeleteRecoveryMetadata, method getBlobPropertiesRecord.
private BlobProperties getBlobPropertiesRecord(MessageReadSet readSet, int readSetIndex, long relativeOffset,
    long blobPropertiesSize) throws MessageFormatException, IOException {
  /* Read the field from the channel */
  ByteBuffer blobProperties = ByteBuffer.allocate((int) blobPropertiesSize);
  readSet.writeTo(readSetIndex, Channels.newChannel(new ByteBufferOutputStream(blobProperties)), relativeOffset,
      blobPropertiesSize);
  blobProperties.flip();
  return deserializeBlobProperties(new ByteBufferInputStream(blobProperties));
}
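Every snippet on this page follows the same round trip: allocate a ByteBuffer, wrap it in a ByteBufferOutputStream behind a WritableByteChannel, write into it, then flip() before reading. A minimal self-contained sketch of that pattern (the payload and class name are illustrative; it assumes ambry-utils is on the classpath):

import java.nio.ByteBuffer;
import java.nio.channels.Channels;
import java.nio.channels.WritableByteChannel;
import java.nio.charset.StandardCharsets;
import com.github.ambry.utils.ByteBufferOutputStream;

public class ByteBufferOutputStreamDemo {
  public static void main(String[] args) throws Exception {
    byte[] payload = "sample-record".getBytes(StandardCharsets.UTF_8);
    ByteBuffer buf = ByteBuffer.allocate(payload.length);
    // ByteBufferOutputStream writes straight into the backing ByteBuffer.
    WritableByteChannel channel = Channels.newChannel(new ByteBufferOutputStream(buf));
    channel.write(ByteBuffer.wrap(payload));
    // flip() switches the buffer from writing to reading before it is consumed.
    buf.flip();
    System.out.println(new String(buf.array(), 0, buf.limit(), StandardCharsets.UTF_8));
  }
}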
Use of com.github.ambry.utils.ByteBufferOutputStream in project ambry by linkedin.
The class BlobStoreTest, method checkStoreInfo.
/**
* Verifies the provided {@code storeInfo} for correctness of the {@link MessageInfo}. Also reads the blob as
* described by the {@link MessageReadSet} inside {@code storeInfo} to verify that it matches the reference.
* @param storeInfo the {@link StoreInfo} to verify.
* @param expectedKeys all the {@link MockId}s that are expected to be found in {@code storeInfo}.
* @throws IOException
*/
private void checkStoreInfo(StoreInfo storeInfo, Set<MockId> expectedKeys) throws IOException {
  List<MessageInfo> messageInfos = storeInfo.getMessageReadSetInfo();
  MessageReadSet readSet = storeInfo.getMessageReadSet();
  assertEquals("ReadSet contains an unexpected number of messages", expectedKeys.size(), readSet.count());
  Set<MockId> examinedKeys = new HashSet<>();
  for (int i = 0; i < messageInfos.size(); i++) {
    MessageInfo messageInfo = messageInfos.get(i);
    MockId id = (MockId) messageInfo.getStoreKey();
    MessageInfo expectedInfo = allKeys.get(id).getFirst();
    assertEquals("Unexpected size in MessageInfo", expectedInfo.getSize(), messageInfo.getSize());
    assertEquals("AccountId mismatch", expectedInfo.getAccountId(), messageInfo.getAccountId());
    assertEquals("ContainerId mismatch", expectedInfo.getContainerId(), messageInfo.getContainerId());
    assertEquals("OperationTime mismatch", expectedInfo.getOperationTimeMs(), messageInfo.getOperationTimeMs());
    assertEquals("Unexpected expiresAtMs in MessageInfo",
        (expectedInfo.getExpirationTimeInMs() != Utils.Infinite_Time
            ? (expectedInfo.getExpirationTimeInMs() / Time.MsPerSec) * Time.MsPerSec : Utils.Infinite_Time),
        messageInfo.getExpirationTimeInMs());
    assertEquals("Unexpected key in readSet", id, readSet.getKeyAt(i));
    assertEquals("Unexpected size in ReadSet", expectedInfo.getSize(), readSet.sizeInBytes(i));
    ByteBuffer readBuf = ByteBuffer.allocate((int) expectedInfo.getSize());
    ByteBufferOutputStream stream = new ByteBufferOutputStream(readBuf);
    WritableByteChannel channel = Channels.newChannel(stream);
    readSet.writeTo(i, channel, 0, expectedInfo.getSize());
    ByteBuffer expectedData = allKeys.get(id).getSecond();
    assertArrayEquals("Data obtained from readSet does not match original", expectedData.array(), readBuf.array());
    examinedKeys.add(id);
  }
  assertEquals("Expected and examined keys do not match", expectedKeys, examinedKeys);
}
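The allocate/wrap/writeTo sequence in the loop above is repeated almost verbatim across the snippets on this page. If one were factoring it out, a small helper could look like the sketch below (the ReadSetUtils name is hypothetical, not part of Ambry; MessageReadSet.writeTo takes the entry index, a writable channel, a relative offset, and a max size):

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.Channels;
import com.github.ambry.store.MessageReadSet;
import com.github.ambry.utils.ByteBufferOutputStream;

final class ReadSetUtils {
  // Reads entry 'index' of the read set fully into a ByteBuffer, flipped and ready for reading.
  static ByteBuffer readAll(MessageReadSet readSet, int index, long size) throws IOException {
    ByteBuffer buf = ByteBuffer.allocate((int) size);
    readSet.writeTo(index, Channels.newChannel(new ByteBufferOutputStream(buf)), 0, size);
    buf.flip();
    return buf;
  }
}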
Use of com.github.ambry.utils.ByteBufferOutputStream in project ambry by linkedin.
The class ServerReadPerformance, method main.
public static void main(String[] args) {
  ConnectionPool connectionPool = null;
  FileWriter writer = null;
  try {
    OptionParser parser = new OptionParser();
    ArgumentAcceptingOptionSpec<String> logToReadOpt = parser.accepts("logToRead", "The log that needs to be replayed for traffic").withRequiredArg().describedAs("log_to_read").ofType(String.class);
    ArgumentAcceptingOptionSpec<String> hardwareLayoutOpt = parser.accepts("hardwareLayout", "The path of the hardware layout file").withRequiredArg().describedAs("hardware_layout").ofType(String.class);
    ArgumentAcceptingOptionSpec<String> partitionLayoutOpt = parser.accepts("partitionLayout", "The path of the partition layout file").withRequiredArg().describedAs("partition_layout").ofType(String.class);
    ArgumentAcceptingOptionSpec<Integer> readsPerSecondOpt = parser.accepts("readsPerSecond", "The rate at which reads need to be performed").withRequiredArg().describedAs("The number of reads per second").ofType(Integer.class).defaultsTo(1000);
    ArgumentAcceptingOptionSpec<Long> measurementIntervalOpt = parser.accepts("measurementInterval", "The interval in seconds at which to report performance results").withOptionalArg().describedAs("The CPU time spent for getting blobs, not wall time").ofType(Long.class).defaultsTo(300L);
    ArgumentAcceptingOptionSpec<Boolean> verboseLoggingOpt = parser.accepts("enableVerboseLogging", "Enables verbose logging").withOptionalArg().describedAs("Enable verbose logging").ofType(Boolean.class).defaultsTo(false);
    ArgumentAcceptingOptionSpec<String> sslEnabledDatacentersOpt = parser.accepts("sslEnabledDatacenters", "Datacenters to which ssl should be enabled").withOptionalArg().describedAs("Comma separated list").ofType(String.class).defaultsTo("");
    ArgumentAcceptingOptionSpec<String> sslKeystorePathOpt = parser.accepts("sslKeystorePath", "SSL key store path").withOptionalArg().describedAs("The file path of SSL key store").defaultsTo("").ofType(String.class);
    ArgumentAcceptingOptionSpec<String> sslKeystoreTypeOpt = parser.accepts("sslKeystoreType", "SSL key store type").withOptionalArg().describedAs("The type of SSL key store").defaultsTo("").ofType(String.class);
    ArgumentAcceptingOptionSpec<String> sslTruststorePathOpt = parser.accepts("sslTruststorePath", "SSL trust store path").withOptionalArg().describedAs("The file path of SSL trust store").defaultsTo("").ofType(String.class);
    ArgumentAcceptingOptionSpec<String> sslKeystorePasswordOpt = parser.accepts("sslKeystorePassword", "SSL key store password").withOptionalArg().describedAs("The password of SSL key store").defaultsTo("").ofType(String.class);
    ArgumentAcceptingOptionSpec<String> sslKeyPasswordOpt = parser.accepts("sslKeyPassword", "SSL key password").withOptionalArg().describedAs("The password of SSL private key").defaultsTo("").ofType(String.class);
    ArgumentAcceptingOptionSpec<String> sslTruststorePasswordOpt = parser.accepts("sslTruststorePassword", "SSL trust store password").withOptionalArg().describedAs("The password of SSL trust store").defaultsTo("").ofType(String.class);
    ArgumentAcceptingOptionSpec<String> sslCipherSuitesOpt = parser.accepts("sslCipherSuites", "SSL enabled cipher suites").withOptionalArg().describedAs("Comma separated list").defaultsTo("TLS_RSA_WITH_AES_128_CBC_SHA").ofType(String.class);
    OptionSet options = parser.parse(args);
    ArrayList<OptionSpec> listOpt = new ArrayList<>();
    listOpt.add(logToReadOpt);
    listOpt.add(hardwareLayoutOpt);
    listOpt.add(partitionLayoutOpt);
    ToolUtils.ensureOrExit(listOpt, options, parser);
    long measurementIntervalNs = options.valueOf(measurementIntervalOpt) * SystemTime.NsPerSec;
    ToolUtils.validateSSLOptions(options, parser, sslEnabledDatacentersOpt, sslKeystorePathOpt, sslKeystoreTypeOpt, sslTruststorePathOpt, sslKeystorePasswordOpt, sslKeyPasswordOpt, sslTruststorePasswordOpt);
    String sslEnabledDatacenters = options.valueOf(sslEnabledDatacentersOpt);
    Properties sslProperties;
    if (sslEnabledDatacenters.length() != 0) {
      sslProperties = ToolUtils.createSSLProperties(sslEnabledDatacenters, options.valueOf(sslKeystorePathOpt), options.valueOf(sslKeystoreTypeOpt), options.valueOf(sslKeystorePasswordOpt), options.valueOf(sslKeyPasswordOpt), options.valueOf(sslTruststorePathOpt), options.valueOf(sslTruststorePasswordOpt), options.valueOf(sslCipherSuitesOpt));
    } else {
      sslProperties = new Properties();
    }
    ToolUtils.addClusterMapProperties(sslProperties);
    String logToRead = options.valueOf(logToReadOpt);
    int readsPerSecond = options.valueOf(readsPerSecondOpt);
    boolean enableVerboseLogging = options.has(verboseLoggingOpt);
    if (enableVerboseLogging) {
      System.out.println("Enabled verbose logging");
    }
    File logFile = new File(System.getProperty("user.dir"), "readperfresult");
    writer = new FileWriter(logFile);
    String hardwareLayoutPath = options.valueOf(hardwareLayoutOpt);
    String partitionLayoutPath = options.valueOf(partitionLayoutOpt);
    ClusterMapConfig clusterMapConfig = new ClusterMapConfig(new VerifiableProperties(sslProperties));
    ClusterMap map = ((ClusterAgentsFactory) Utils.getObj(clusterMapConfig.clusterMapClusterAgentsFactory, clusterMapConfig, hardwareLayoutPath, partitionLayoutPath)).getClusterMap();
    final AtomicLong totalTimeTaken = new AtomicLong(0);
    final AtomicLong totalReads = new AtomicLong(0);
    final AtomicBoolean shutdown = new AtomicBoolean(false);
    // attach shutdown handler to catch control-c
    Runtime.getRuntime().addShutdownHook(new Thread() {
      public void run() {
        try {
          System.out.println("Shutdown invoked");
          shutdown.set(true);
String message = "Total reads : " + totalReads.get() + " Total time taken : " + totalTimeTaken.get() + " Nano Seconds Average time taken per read " + ((double) totalTimeTaken.get()) / SystemTime.NsPerSec / totalReads.get() + " Seconds";
          System.out.println(message);
        } catch (Exception e) {
          System.out.println("Error while shutting down " + e);
        }
      }
    });
    final BufferedReader br = new BufferedReader(new FileReader(logToRead));
    Throttler throttler = new Throttler(readsPerSecond, 100, true, SystemTime.getInstance());
    String line;
    ConnectedChannel channel = null;
    ConnectionPoolConfig connectionPoolConfig = new ConnectionPoolConfig(new VerifiableProperties(new Properties()));
    VerifiableProperties vProps = new VerifiableProperties(sslProperties);
    SSLConfig sslConfig = new SSLConfig(vProps);
    clusterMapConfig = new ClusterMapConfig(vProps);
    connectionPool = new BlockingChannelConnectionPool(connectionPoolConfig, sslConfig, clusterMapConfig, new MetricRegistry());
    long totalNumberOfGetBlobs = 0;
    long totalLatencyForGetBlobs = 0;
    ArrayList<Long> latenciesForGetBlobs = new ArrayList<Long>();
    long maxLatencyForGetBlobs = 0;
    long minLatencyForGetBlobs = Long.MAX_VALUE;
    while ((line = br.readLine()) != null) {
      String[] id = line.split("-");
      BlobData blobData = null;
      BlobId blobId = new BlobId(id[1], map);
      ArrayList<BlobId> blobIds = new ArrayList<BlobId>();
      blobIds.add(blobId);
      for (ReplicaId replicaId : blobId.getPartition().getReplicaIds()) {
        long startTimeGetBlob = 0;
        ArrayList<PartitionRequestInfo> partitionRequestInfoList = new ArrayList<PartitionRequestInfo>();
        try {
          partitionRequestInfoList.clear();
          PartitionRequestInfo partitionRequestInfo = new PartitionRequestInfo(blobId.getPartition(), blobIds);
          partitionRequestInfoList.add(partitionRequestInfo);
          GetRequest getRequest = new GetRequest(1, "getperf", MessageFormatFlags.Blob, partitionRequestInfoList, GetOption.None);
          Port port = replicaId.getDataNodeId().getPortToConnectTo();
          channel = connectionPool.checkOutConnection(replicaId.getDataNodeId().getHostname(), port, 10000);
          startTimeGetBlob = SystemTime.getInstance().nanoseconds();
          channel.send(getRequest);
          DataInputStream receiveStream = channel.receive().getInputStream();
          GetResponse getResponse = GetResponse.readFrom(receiveStream, map);
          blobData = MessageFormatRecord.deserializeBlob(getResponse.getInputStream());
          long sizeRead = 0;
          byte[] outputBuffer = new byte[(int) blobData.getSize()];
          ByteBufferOutputStream streamOut = new ByteBufferOutputStream(ByteBuffer.wrap(outputBuffer));
          ByteBuf buffer = blobData.content();
          try {
            buffer.readBytes(streamOut, (int) blobData.getSize());
          } finally {
            buffer.release();
          }
          long latencyPerBlob = SystemTime.getInstance().nanoseconds() - startTimeGetBlob;
          totalTimeTaken.addAndGet(latencyPerBlob);
          latenciesForGetBlobs.add(latencyPerBlob);
          totalReads.incrementAndGet();
          totalNumberOfGetBlobs++;
          totalLatencyForGetBlobs += latencyPerBlob;
          if (enableVerboseLogging) {
            System.out.println("Time taken to get blob id " + blobId + " in ms " + latencyPerBlob / SystemTime.NsPerMs);
          }
          if (latencyPerBlob > maxLatencyForGetBlobs) {
            maxLatencyForGetBlobs = latencyPerBlob;
          }
          if (latencyPerBlob < minLatencyForGetBlobs) {
            minLatencyForGetBlobs = latencyPerBlob;
          }
          if (totalLatencyForGetBlobs >= measurementIntervalNs) {
            Collections.sort(latenciesForGetBlobs);
            int index99 = (int) (latenciesForGetBlobs.size() * 0.99) - 1;
            int index95 = (int) (latenciesForGetBlobs.size() * 0.95) - 1;
            String message = totalNumberOfGetBlobs + "," + (double) latenciesForGetBlobs.get(index99) / SystemTime.NsPerSec + "," + (double) latenciesForGetBlobs.get(index95) / SystemTime.NsPerSec + "," + ((double) totalLatencyForGetBlobs / SystemTime.NsPerSec / totalNumberOfGetBlobs);
            System.out.println(message);
            writer.write(message + "\n");
            totalLatencyForGetBlobs = 0;
            latenciesForGetBlobs.clear();
            totalNumberOfGetBlobs = 0;
            maxLatencyForGetBlobs = 0;
            minLatencyForGetBlobs = Long.MAX_VALUE;
          }
          partitionRequestInfoList.clear();
          partitionRequestInfo = new PartitionRequestInfo(blobId.getPartition(), blobIds);
          partitionRequestInfoList.add(partitionRequestInfo);
          GetRequest getRequestProperties = new GetRequest(1, "getperf", MessageFormatFlags.BlobProperties, partitionRequestInfoList, GetOption.None);
          long startTimeGetBlobProperties = SystemTime.getInstance().nanoseconds();
          channel.send(getRequestProperties);
          DataInputStream receivePropertyStream = channel.receive().getInputStream();
          GetResponse getResponseProperty = GetResponse.readFrom(receivePropertyStream, map);
          BlobProperties blobProperties = MessageFormatRecord.deserializeBlobProperties(getResponseProperty.getInputStream());
          long endTimeGetBlobProperties = SystemTime.getInstance().nanoseconds() - startTimeGetBlobProperties;
          partitionRequestInfoList.clear();
          partitionRequestInfo = new PartitionRequestInfo(blobId.getPartition(), blobIds);
          partitionRequestInfoList.add(partitionRequestInfo);
          GetRequest getRequestUserMetadata = new GetRequest(1, "getperf", MessageFormatFlags.BlobUserMetadata, partitionRequestInfoList, GetOption.None);
          long startTimeGetBlobUserMetadata = SystemTime.getInstance().nanoseconds();
          channel.send(getRequestUserMetadata);
          DataInputStream receiveUserMetadataStream = channel.receive().getInputStream();
          GetResponse getResponseUserMetadata = GetResponse.readFrom(receiveUserMetadataStream, map);
          ByteBuffer userMetadata = MessageFormatRecord.deserializeUserMetadata(getResponseUserMetadata.getInputStream());
          long endTimeGetBlobUserMetadata = SystemTime.getInstance().nanoseconds() - startTimeGetBlobUserMetadata;
          // delete the blob
          DeleteRequest deleteRequest = new DeleteRequest(0, "perf", blobId, System.currentTimeMillis());
          channel.send(deleteRequest);
          DeleteResponse deleteResponse = DeleteResponse.readFrom(channel.receive().getInputStream());
          if (deleteResponse.getError() != ServerErrorCode.No_Error) {
            throw new UnexpectedException("error " + deleteResponse.getError());
          }
          throttler.maybeThrottle(1);
        } finally {
          if (channel != null) {
            connectionPool.checkInConnection(channel);
            channel = null;
          }
        }
      }
    }
  } catch (Exception e) {
    e.printStackTrace();
    System.out.println("Error in server read performance " + e);
  } finally {
    if (writer != null) {
      try {
        writer.close();
      } catch (Exception e) {
        System.out.println("Error when closing writer");
      }
    }
    if (connectionPool != null) {
      connectionPool.shutdown();
    }
  }
}
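The measurement block inside the loop derives p99 and p95 by sorting the accumulated latencies and indexing at size * 0.99 - 1 and size * 0.95 - 1. The same calculation, extracted as a standalone sketch (the helper name is illustrative, not part of the tool):

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

final class LatencyPercentiles {
  // Same indexing as the tool: for 1000 samples, p99 is element 989 (0-based).
  // Assumes a non-empty sample list; Math.max guards the tiny-list case where
  // the original arithmetic would yield index -1.
  static long percentile(List<Long> latenciesNs, double fraction) {
    List<Long> sorted = new ArrayList<>(latenciesNs);
    Collections.sort(sorted);
    int index = (int) (sorted.size() * fraction) - 1;
    return sorted.get(Math.max(index, 0));
  }
}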
Use of com.github.ambry.utils.ByteBufferOutputStream in project ambry by linkedin.
The class CloudBlobStoreTest, method testGetForExistingBlobs.
/**
 * Tests cloud store get with a list of blobs that are all valid and were previously uploaded to the store.
 * @param blobIds list of blob ids to get
 * @param blobIdToUploadedDataMap map from blob id to the expected data buffer
 * @throws Exception
 */
private void testGetForExistingBlobs(List<BlobId> blobIds, Map<BlobId, ByteBuffer> blobIdToUploadedDataMap) throws Exception {
  StoreInfo storeInfo = store.get(blobIds, EnumSet.noneOf(StoreGetOptions.class));
assertEquals("Number of records returned by get should be same as uploaded", storeInfo.getMessageReadSetInfo().size(), blobIds.size());
  for (int i = 0; i < storeInfo.getMessageReadSetInfo().size(); i++) {
    MessageInfo messageInfo = storeInfo.getMessageReadSetInfo().get(i);
    if (blobIdToUploadedDataMap.containsKey(messageInfo.getStoreKey())) {
      ByteBuffer uploadedData = blobIdToUploadedDataMap.get(messageInfo.getStoreKey());
      ByteBuffer downloadedData = ByteBuffer.allocate((int) messageInfo.getSize());
      WritableByteChannel writableByteChannel = Channels.newChannel(new ByteBufferOutputStream(downloadedData));
      storeInfo.getMessageReadSet().writeTo(i, writableByteChannel, 0, messageInfo.getSize());
      downloadedData.flip();
      assertEquals(uploadedData, downloadedData);
      break;
    }
  }
}
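The flip() before the assertEquals matters: ByteBuffer.equals compares only the bytes between position and limit, so a freshly written buffer (position at the end, nothing remaining) never equals its source. A tiny demonstration:

import java.nio.ByteBuffer;

public class ByteBufferEqualsDemo {
  public static void main(String[] args) {
    ByteBuffer written = ByteBuffer.allocate(4);
    written.put(new byte[] { 1, 2, 3, 4 });
    ByteBuffer expected = ByteBuffer.wrap(new byte[] { 1, 2, 3, 4 });
    System.out.println(written.equals(expected)); // false: nothing remaining in 'written'
    written.flip();
    System.out.println(written.equals(expected)); // true: remaining bytes now match
  }
}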
Use of com.github.ambry.utils.ByteBufferOutputStream in project ambry by linkedin.
The class HardDeleteRecoveryMetadata, method getHardDeleteInfo.
/**
* For the message at readSetIndex, does the following:
* 1. Reads the whole blob and does a crc check. If the crc check fails, returns null - this means that the record
* is not retrievable anyway.
* 2. Adds to a hard delete replacement write set.
* 3. Returns the hard delete info.
*/
private HardDeleteInfo getHardDeleteInfo(int readSetIndex) {
  HardDeleteInfo hardDeleteInfo = null;
  try {
    /* Read the version field in the header */
    ByteBuffer headerVersionBuf = ByteBuffer.allocate(Version_Field_Size_In_Bytes);
    readSet.writeTo(readSetIndex, Channels.newChannel(new ByteBufferOutputStream(headerVersionBuf)), 0, Version_Field_Size_In_Bytes);
    headerVersionBuf.flip();
    short headerVersion = headerVersionBuf.getShort();
    if (!isValidHeaderVersion(headerVersion)) {
      throw new MessageFormatException("Unknown header version during hard delete " + headerVersion + " storeKey " + readSet.getKeyAt(readSetIndex), MessageFormatErrorCodes.Unknown_Format_Version);
    }
    ByteBuffer header = ByteBuffer.allocate(getHeaderSizeForVersion(headerVersion));
    /* Read the rest of the header */
    header.putShort(headerVersion);
    readSet.writeTo(readSetIndex, Channels.newChannel(new ByteBufferOutputStream(header)), Version_Field_Size_In_Bytes, header.capacity() - Version_Field_Size_In_Bytes);
    header.flip();
    MessageHeader_Format headerFormat = getMessageHeader(headerVersion, header);
    headerFormat.verifyHeader();
    StoreKey storeKey = storeKeyFactory.getStoreKey(new DataInputStream(new MessageReadSetIndexInputStream(readSet, readSetIndex, header.capacity())));
    if (storeKey.compareTo(readSet.getKeyAt(readSetIndex)) != 0) {
      throw new MessageFormatException("Id mismatch between metadata and store - metadataId " + readSet.getKeyAt(readSetIndex) + " storeId " + storeKey, MessageFormatErrorCodes.Store_Key_Id_MisMatch);
    }
    if (!headerFormat.isPutRecord()) {
      throw new MessageFormatException("Cleanup operation for a non-PUT record is unsupported", MessageFormatErrorCodes.IO_Error);
    } else {
      HardDeleteRecoveryMetadata hardDeleteRecoveryMetadata = recoveryInfoMap.get(storeKey);
      int userMetadataRelativeOffset = headerFormat.getUserMetadataRecordRelativeOffset();
      short userMetadataVersion;
      int userMetadataSize;
      short blobRecordVersion;
      BlobType blobType;
      long blobStreamSize;
      DeserializedUserMetadata userMetadataInfo;
      DeserializedBlob blobRecordInfo;
      if (hardDeleteRecoveryMetadata == null) {
        userMetadataInfo = getUserMetadataInfo(readSet, readSetIndex, headerFormat.getUserMetadataRecordRelativeOffset(), headerFormat.getUserMetadataRecordSize());
        userMetadataSize = userMetadataInfo.getUserMetadata().capacity();
        userMetadataVersion = userMetadataInfo.getVersion();
        blobRecordInfo = getBlobRecordInfo(readSet, readSetIndex, headerFormat.getBlobRecordRelativeOffset(), headerFormat.getBlobRecordSize());
        blobStreamSize = blobRecordInfo.getBlobData().getSize();
        blobRecordVersion = blobRecordInfo.getVersion();
        blobType = blobRecordInfo.getBlobData().getBlobType();
        hardDeleteRecoveryMetadata = new HardDeleteRecoveryMetadata(headerVersion, userMetadataVersion, userMetadataSize, blobRecordVersion, blobType, blobStreamSize, storeKey);
      } else {
        logger.trace("Skipping crc check for user metadata and blob stream fields for key {}", storeKey);
        userMetadataVersion = hardDeleteRecoveryMetadata.getUserMetadataVersion();
        blobRecordVersion = hardDeleteRecoveryMetadata.getBlobRecordVersion();
        blobType = hardDeleteRecoveryMetadata.getBlobType();
        userMetadataSize = hardDeleteRecoveryMetadata.getUserMetadataSize();
        blobStreamSize = hardDeleteRecoveryMetadata.getBlobStreamSize();
      }
      HardDeleteMessageFormatInputStream hardDeleteStream = new HardDeleteMessageFormatInputStream(userMetadataRelativeOffset, userMetadataVersion, userMetadataSize, blobRecordVersion, blobType, blobStreamSize);
      hardDeleteInfo = new HardDeleteInfo(Channels.newChannel(hardDeleteStream), hardDeleteStream.getSize(), hardDeleteStream.getHardDeleteStreamRelativeOffset(), hardDeleteRecoveryMetadata.toBytes());
    }
  } catch (Exception e) {
    logger.error("Exception when reading blob: ", e);
  }
  return hardDeleteInfo;
}
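getHardDeleteInfo reads the header in two passes: the first pass reads only the two-byte version field, which determines the header size; the second pass allocates the full header, replays the version it already consumed via putShort, and reads the remainder starting past the version field. A reduced sketch of that shape (the class name and the functional parameter are illustrative; the real code resolves the size with getHeaderSizeForVersion and validates the version first):

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.Channels;
import java.util.function.IntUnaryOperator;
import com.github.ambry.store.MessageReadSet;
import com.github.ambry.utils.ByteBufferOutputStream;

final class VersionedHeaderReader {
  static final int VERSION_FIELD_SIZE = Short.BYTES;

  static ByteBuffer readHeader(MessageReadSet readSet, int index, IntUnaryOperator headerSizeForVersion)
      throws IOException {
    // Pass 1: read only the version field to learn which header layout follows.
    ByteBuffer versionBuf = ByteBuffer.allocate(VERSION_FIELD_SIZE);
    readSet.writeTo(index, Channels.newChannel(new ByteBufferOutputStream(versionBuf)), 0, VERSION_FIELD_SIZE);
    versionBuf.flip();
    short version = versionBuf.getShort();
    // Pass 2: allocate the version-specific size, replay the consumed version bytes,
    // then read the rest of the header starting past the version field.
    int headerSize = headerSizeForVersion.applyAsInt(version);
    ByteBuffer header = ByteBuffer.allocate(headerSize);
    header.putShort(version);
    readSet.writeTo(index, Channels.newChannel(new ByteBufferOutputStream(header)), VERSION_FIELD_SIZE,
        headerSize - VERSION_FIELD_SIZE);
    header.flip();
    return header;
  }
}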