Use of com.github.ambry.network.ConnectedChannel in project ambry by LinkedIn.
The class Verifier, method run.
@Override
public void run() {
  try {
    List<PartitionRequestInfo> partitionRequestInfoList = new ArrayList<PartitionRequestInfo>();
    while (requestsVerified.get() != totalRequests.get() && !cancelTest.get()) {
      Payload payload = payloadQueue.poll(1000, TimeUnit.MILLISECONDS);
      if (payload != null) {
        notificationSystem.awaitBlobCreations(payload.blobId);
        for (MockDataNodeId dataNodeId : clusterMap.getDataNodes()) {
          ConnectedChannel channel1 = null;
          try {
            BlobId blobId = new BlobId(payload.blobId, clusterMap);
            Port port = new Port(portType == PortType.PLAINTEXT ? dataNodeId.getPort() : dataNodeId.getSSLPort(), portType);
            channel1 = connectionPool.checkOutConnection("localhost", port, 10000);
            ArrayList<BlobId> ids = new ArrayList<BlobId>();
            ids.add(blobId);
            partitionRequestInfoList.clear();
            PartitionRequestInfo partitionRequestInfo = new PartitionRequestInfo(ids.get(0).getPartition(), ids);
            partitionRequestInfoList.add(partitionRequestInfo);
            // get blob properties and verify them against the payload
            GetRequest getRequest = new GetRequest(1, "clientid2", MessageFormatFlags.BlobProperties, partitionRequestInfoList, GetOption.None);
            channel1.send(getRequest);
            DataInputStream stream = channel1.receive().getInputStream();
            GetResponse resp = GetResponse.readFrom(stream, clusterMap);
            if (resp.getError() != ServerErrorCode.No_Error) {
              System.out.println(dataNodeId.getHostname() + " " + dataNodeId.getPort() + " " + resp.getError());
              throw new IllegalStateException();
            } else {
              try {
                BlobProperties propertyOutput = MessageFormatRecord.deserializeBlobProperties(resp.getInputStream());
                if (propertyOutput.getBlobSize() != payload.blobProperties.getBlobSize()) {
                  String exceptionMsg = "blob size not matching: expected " + payload.blobProperties.getBlobSize() + " actual " + propertyOutput.getBlobSize();
                  System.out.println(exceptionMsg);
                  throw new IllegalStateException(exceptionMsg);
                }
                if (!propertyOutput.getServiceId().equals(payload.blobProperties.getServiceId())) {
                  String exceptionMsg = "service id not matching: expected " + payload.blobProperties.getServiceId() + " actual " + propertyOutput.getServiceId();
                  System.out.println(exceptionMsg);
                  throw new IllegalStateException(exceptionMsg);
                }
                if (propertyOutput.getAccountId() != payload.blobProperties.getAccountId()) {
                  String exceptionMsg = "accountId not matching: expected " + payload.blobProperties.getAccountId() + " actual " + propertyOutput.getAccountId();
                  System.out.println(exceptionMsg);
                  throw new IllegalStateException(exceptionMsg);
                }
                if (propertyOutput.getContainerId() != payload.blobProperties.getContainerId()) {
                  String exceptionMsg = "containerId not matching: expected " + payload.blobProperties.getContainerId() + " actual " + propertyOutput.getContainerId();
                  System.out.println(exceptionMsg);
                  throw new IllegalStateException(exceptionMsg);
                }
                if (propertyOutput.isEncrypted() != payload.blobProperties.isEncrypted()) {
                  String exceptionMsg = "isEncrypted not matching: expected " + payload.blobProperties.isEncrypted() + " actual " + propertyOutput.isEncrypted();
                  System.out.println(exceptionMsg);
                  throw new IllegalStateException(exceptionMsg);
                }
                long actualExpiryTimeMs = resp.getPartitionResponseInfoList().get(0).getMessageInfoList().get(0).getExpirationTimeInMs();
                checkExpiryTimeMatch(payload, actualExpiryTimeMs, "messageinfo in blobproperty");
              } catch (MessageFormatException e) {
                e.printStackTrace();
                throw new IllegalStateException(e);
              }
            }
            // get user metadata
            ids.clear();
            ids.add(blobId);
            partitionRequestInfoList.clear();
            partitionRequestInfo = new PartitionRequestInfo(ids.get(0).getPartition(), ids);
            partitionRequestInfoList.add(partitionRequestInfo);
            getRequest = new GetRequest(1, "clientid2", MessageFormatFlags.BlobUserMetadata, partitionRequestInfoList, GetOption.None);
            channel1.send(getRequest);
            stream = channel1.receive().getInputStream();
            resp = GetResponse.readFrom(stream, clusterMap);
            if (resp.getError() != ServerErrorCode.No_Error) {
              System.out.println("Error after get user metadata " + resp.getError());
              throw new IllegalStateException();
            } else {
              try {
                ByteBuffer userMetadataOutput = MessageFormatRecord.deserializeUserMetadata(resp.getInputStream());
                if (userMetadataOutput.compareTo(ByteBuffer.wrap(payload.metadata)) != 0) {
                  throw new IllegalStateException();
                }
                long actualExpiryTimeMs = resp.getPartitionResponseInfoList().get(0).getMessageInfoList().get(0).getExpirationTimeInMs();
                checkExpiryTimeMatch(payload, actualExpiryTimeMs, "messageinfo in usermetadata");
              } catch (MessageFormatException e) {
                e.printStackTrace();
                throw new IllegalStateException();
              }
            }
            // get blob
            ids.clear();
            ids.add(blobId);
            partitionRequestInfoList.clear();
            partitionRequestInfo = new PartitionRequestInfo(ids.get(0).getPartition(), ids);
            partitionRequestInfoList.add(partitionRequestInfo);
            getRequest = new GetRequest(1, "clientid2", MessageFormatFlags.Blob, partitionRequestInfoList, GetOption.None);
            channel1.send(getRequest);
            stream = channel1.receive().getInputStream();
            resp = GetResponse.readFrom(stream, clusterMap);
            if (resp.getError() != ServerErrorCode.No_Error) {
              System.out.println("Error after get blob " + resp.getError());
              throw new IllegalStateException();
            } else {
              try {
                BlobData blobData = MessageFormatRecord.deserializeBlob(resp.getInputStream());
                byte[] blobout = new byte[(int) blobData.getSize()];
                ByteBuf buffer = blobData.content();
                try {
                  buffer.readBytes(blobout);
                } finally {
                  buffer.release();
                }
                if (ByteBuffer.wrap(blobout).compareTo(ByteBuffer.wrap(payload.blob)) != 0) {
                  throw new IllegalStateException();
                }
                long actualExpiryTimeMs = resp.getPartitionResponseInfoList().get(0).getMessageInfoList().get(0).getExpirationTimeInMs();
                checkExpiryTimeMatch(payload, actualExpiryTimeMs, "messageinfo in blobdata");
              } catch (MessageFormatException e) {
                e.printStackTrace();
                throw new IllegalStateException();
              }
            }
            // get blob all
            getRequest = new GetRequest(1, "clientid2", MessageFormatFlags.All, partitionRequestInfoList, GetOption.None);
            channel1.send(getRequest);
            stream = channel1.receive().getInputStream();
            resp = GetResponse.readFrom(stream, clusterMap);
            if (resp.getError() != ServerErrorCode.No_Error) {
              System.out.println("Error after get blob all " + resp.getError());
              throw new IllegalStateException();
            } else {
              try {
                BlobAll blobAll = MessageFormatRecord.deserializeBlobAll(resp.getInputStream(), new BlobIdFactory(clusterMap));
                byte[] blobout = new byte[(int) blobAll.getBlobData().getSize()];
                ByteBuf buffer = blobAll.getBlobData().content();
                try {
                  buffer.readBytes(blobout);
                } finally {
                  buffer.release();
                }
                if (ByteBuffer.wrap(blobout).compareTo(ByteBuffer.wrap(payload.blob)) != 0) {
                  throw new IllegalStateException();
                }
                long actualExpiryTimeMs = resp.getPartitionResponseInfoList().get(0).getMessageInfoList().get(0).getExpirationTimeInMs();
                checkExpiryTimeMatch(payload, actualExpiryTimeMs, "messageinfo in bloball");
              } catch (MessageFormatException e) {
                e.printStackTrace();
                throw new IllegalStateException();
              }
            }
            if (payload.blobProperties.getTimeToLiveInSeconds() != Utils.Infinite_Time) {
              // ttl update, check and wait for replication
              ServerTestUtil.updateBlobTtl(channel1, new BlobId(payload.blobId, clusterMap), time.milliseconds());
              ServerTestUtil.checkTtlUpdateStatus(channel1, clusterMap, new BlobIdFactory(clusterMap), blobId, payload.blob, true, Utils.Infinite_Time);
              notificationSystem.awaitBlobUpdates(payload.blobId, UpdateType.TTL_UPDATE);
              BlobProperties old = payload.blobProperties;
              payload.blobProperties = new BlobProperties(old.getBlobSize(), old.getServiceId(), old.getOwnerId(), old.getContentType(), old.isEncrypted(), Utils.Infinite_Time, old.getCreationTimeInMs(), old.getAccountId(), old.getContainerId(), old.isEncrypted(), old.getExternalAssetTag(), old.getContentEncoding(), old.getFilename());
            }
          } catch (Exception e) {
            // On any failure, destroy the channel rather than returning it to the pool.
            if (channel1 != null) {
              connectionPool.destroyConnection(channel1);
              channel1 = null;
            }
          } finally {
            if (channel1 != null) {
              connectionPool.checkInConnection(channel1);
              channel1 = null;
            }
          }
        }
        requestsVerified.incrementAndGet();
      }
    }
  } catch (Exception e) {
    e.printStackTrace();
    cancelTest.set(true);
  } finally {
    completedLatch.countDown();
  }
}
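Every usage on this page follows the same ConnectionPool lifecycle around a blocking request/response exchange: check a channel out, send a request, read the response, then check the channel back in, or destroy it on failure. Below is a minimal sketch of that lifecycle, distilled from the run method above; the host, port, timeout, and request objects are illustrative placeholders.

ConnectedChannel channel = null;
try {
  // Check a channel out of the pool (host, port, and the 10s timeout are placeholders).
  channel = connectionPool.checkOutConnection("localhost", port, 10000);
  channel.send(getRequest);
  DataInputStream stream = channel.receive().getInputStream();
  GetResponse response = GetResponse.readFrom(stream, clusterMap);
} catch (Exception e) {
  // A channel that failed mid-exchange may hold partial data, so destroy it instead of reusing it.
  if (channel != null) {
    connectionPool.destroyConnection(channel);
    channel = null;
  }
} finally {
  // Only a healthy channel is returned to the pool for reuse.
  if (channel != null) {
    connectionPool.checkInConnection(channel);
    channel = null;
  }
}

Destroying on failure and checking in only on success is what keeps a broken socket from being handed to the next caller.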
Use of com.github.ambry.network.ConnectedChannel in project ambry by LinkedIn.
The class VcrRecoveryTest, method testGetOnRecoveryNode.
/**
 * Do a get on the recovery node to verify that all the blob ids that were uploaded to the VCR node have been
 * recovered on the recovery node.
 * @param blobIdToSizeMap {@link Map} of blob id to the size of the blob uploaded to the VCR node.
 * @throws IOException on network errors while sending the request or reading the response.
 */
private void testGetOnRecoveryNode(Map<BlobId, Integer> blobIdToSizeMap) throws IOException {
  ConnectedChannel channel = ServerTestUtil.getBlockingChannelBasedOnPortType(recoveryNodePort, "localhost", null, null);
  channel.connect();
  AtomicInteger correlationIdGenerator = new AtomicInteger(0);
  List<PartitionRequestInfo> partitionRequestInfoList = Collections.singletonList(new PartitionRequestInfo(partitionId, blobIds));
  GetRequest getRequest = new GetRequest(correlationIdGenerator.incrementAndGet(), GetRequest.Replication_Client_Id_Prefix + recoveryNode.getHostname(), MessageFormatFlags.All, partitionRequestInfoList, new ReplicationConfig(new VerifiableProperties(recoveryProperties)).replicationIncludeAll ? GetOption.Include_All : GetOption.None);
  channel.send(getRequest);
  GetResponse getResponse = GetResponse.readFrom(channel.receive().getInputStream(), recoveryCluster.getClusterMap());
  for (PartitionResponseInfo partitionResponseInfo : getResponse.getPartitionResponseInfoList()) {
    assertEquals("Error in getting the recovered blobs", ServerErrorCode.No_Error, partitionResponseInfo.getErrorCode());
    // The fixed per-message overhead was previously 272 bytes; the two fields added to blob properties add 8 bytes (4 + 4), giving 280.
    for (MessageInfo messageInfo : partitionResponseInfo.getMessageInfoList()) {
      assertEquals(blobIdToSizeMap.get(messageInfo.getStoreKey()) + 280, messageInfo.getSize());
    }
  }
}
Use of com.github.ambry.network.ConnectedChannel in project ambry by LinkedIn.
The class ServerHardDeleteTest, method endToEndTestHardDeletes.
/**
* Tests the hard delete functionality.
* <p>
* This test does the following:
* 1. Makes 6 puts, waits for notification.
* 2. Makes 2 deletes, waits for notification.
* 3. Waits for hard deletes to catch up to the expected token value.
* 4. Verifies that the two records that are deleted are zeroed out by hard deletes.
* 5. Makes 3 more puts, waits for notification.
* 6. Makes 3 deletes - 2 of records from the initial set of puts, and 1 from the new set.
* 7. Waits for hard deletes to catch up again to the expected token value.
* 8. Verifies that the three records that are deleted are zeroed out by hard deletes.
*
* @throws Exception
*/
@Test
public void endToEndTestHardDeletes() throws Exception {
  DataNodeId dataNodeId = mockClusterMap.getDataNodeIds().get(0);
  encryptionKey = new ArrayList<>(9);
  usermetadata = new ArrayList<>(9);
  data = new ArrayList<>(9);
  Random random = new Random();
  for (int i = 0; i < 9; i++) {
    if (i % 2 == 0) {
      encryptionKey.add(new byte[100]);
      random.nextBytes(encryptionKey.get(i));
    } else {
      encryptionKey.add(null);
    }
    usermetadata.add(new byte[1000 + i]);
    data.add(new byte[31870 + i]);
    random.nextBytes(usermetadata.get(i));
    random.nextBytes(data.get(i));
  }
  properties = new ArrayList<>(9);
  properties.add(new BlobProperties(31870, "serviceid1", Utils.getRandomShort(TestUtils.RANDOM), Utils.getRandomShort(TestUtils.RANDOM), true));
  properties.add(new BlobProperties(31871, "serviceid1", Utils.getRandomShort(TestUtils.RANDOM), Utils.getRandomShort(TestUtils.RANDOM), false));
  properties.add(new BlobProperties(31872, "serviceid1", Utils.getRandomShort(TestUtils.RANDOM), Utils.getRandomShort(TestUtils.RANDOM), true));
  properties.add(new BlobProperties(31873, "serviceid1", "ownerid", "jpeg", false, 0, Utils.getRandomShort(TestUtils.RANDOM), Utils.getRandomShort(TestUtils.RANDOM), false, null, null, null));
  properties.add(new BlobProperties(31874, "serviceid1", Utils.getRandomShort(TestUtils.RANDOM), Utils.getRandomShort(TestUtils.RANDOM), true));
  properties.add(new BlobProperties(31875, "serviceid1", "ownerid", "jpeg", false, 0, Utils.getRandomShort(TestUtils.RANDOM), Utils.getRandomShort(TestUtils.RANDOM), false, null, null, null));
  properties.add(new BlobProperties(31876, "serviceid1", Utils.getRandomShort(TestUtils.RANDOM), Utils.getRandomShort(TestUtils.RANDOM), true));
  properties.add(new BlobProperties(31877, "serviceid1", Utils.getRandomShort(TestUtils.RANDOM), Utils.getRandomShort(TestUtils.RANDOM), false));
  properties.add(new BlobProperties(31878, "serviceid1", Utils.getRandomShort(TestUtils.RANDOM), Utils.getRandomShort(TestUtils.RANDOM), true));
  List<PartitionId> partitionIds = mockClusterMap.getWritablePartitionIds(MockClusterMap.DEFAULT_PARTITION_CLASS);
  PartitionId chosenPartition = partitionIds.get(0);
  blobIdList = new ArrayList<>(9);
  for (int i = 0; i < 9; i++) {
    blobIdList.add(new BlobId(CommonTestUtils.getCurrentBlobIdVersion(), BlobId.BlobIdType.NATIVE, mockClusterMap.getLocalDatacenterId(), properties.get(i).getAccountId(), properties.get(i).getContainerId(), chosenPartition, false, BlobId.BlobDataType.DATACHUNK));
  }
  ConnectedChannel channel = ServerTestUtil.getBlockingChannelBasedOnPortType(new Port(dataNodeId.getPort(), PortType.PLAINTEXT), "localhost", null, null);
  channel.connect();
  for (int i = 0; i < 6; i++) {
    // blobs 3 and 5 among these are expired (ttl of 0)
    putBlob(blobIdList.get(i), properties.get(i), encryptionKey.get(i), usermetadata.get(i), data.get(i), channel);
  }
  notificationSystem.awaitBlobCreations(blobIdList.get(0).getID());
  notificationSystem.awaitBlobCreations(blobIdList.get(1).getID());
  notificationSystem.awaitBlobCreations(blobIdList.get(2).getID());
  notificationSystem.awaitBlobCreations(blobIdList.get(4).getID());
  // delete blob 1
  deleteBlob(blobIdList.get(1), channel);
  zeroOutBlobContent(1);
  // delete blob 4
  deleteBlob(blobIdList.get(4), channel);
  zeroOutBlobContent(4);
  notificationSystem.awaitBlobDeletions(blobIdList.get(1).getID());
  notificationSystem.awaitBlobDeletions(blobIdList.get(4).getID());
  time.sleep(TimeUnit.DAYS.toMillis(7));
  // For each future change to this offset, add to this variable and write an explanation of why the number changed.
  // Old value: 198728. Increased by 4 to 198732 because the delete record format went from version 2 to 3, which
  // adds 4 extra bytes (two shorts). The last record is a delete record, so its extra 4 bytes are not (yet) counted.
  //
  // Add 14 when changing the message header version to 3: the header version went from 2 to 3 and adds a short to
  // every record, which covers 6 puts and 1 delete (the last delete is not counted).
  //
  // Old value: 198732 + 14. Increased by 48 when two fields (a 4-byte CRC for each) were added to the blob
  // properties written by putBlob: 6 puts * (4 + 4) bytes.
  int expectedTokenValueT1 = 198732 + 14 + 48;
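  // A worked restatement of the arithmetic above (values taken from the comments; illustrative only):
  // 198728 (original) + 4 (delete format v2 -> v3) + 14 (message header v2 -> v3: 7 records * 2 bytes)
  //   + 48 (blob property CRC fields: 6 puts * 8 bytes) = 198794 = 198732 + 14 + 48.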
  ensureCleanupTokenCatchesUp(chosenPartition.getReplicaIds().get(0).getReplicaPath(), mockClusterMap, expectedTokenValueT1);
  getAndVerify(channel, 6);
  // put blob 6
  putBlob(blobIdList.get(6), properties.get(6), encryptionKey.get(6), usermetadata.get(6), data.get(6), channel);
  // put blob 7
  putBlob(blobIdList.get(7), properties.get(7), encryptionKey.get(7), usermetadata.get(7), data.get(7), channel);
  // put blob 8
  putBlob(blobIdList.get(8), properties.get(8), encryptionKey.get(8), usermetadata.get(8), data.get(8), channel);
  notificationSystem.awaitBlobCreations(blobIdList.get(6).getID());
  notificationSystem.awaitBlobCreations(blobIdList.get(7).getID());
  notificationSystem.awaitBlobCreations(blobIdList.get(8).getID());
  // Do more deletes
  // delete blob 3, which is expired.
  deleteBlob(blobIdList.get(3), channel);
  zeroOutBlobContent(3);
  // delete blob 0; it will be undeleted later, so don't zero out its content
  deleteBlob(blobIdList.get(0), channel);
  // delete blob 6.
  deleteBlob(blobIdList.get(6), channel);
  zeroOutBlobContent(6);
  notificationSystem.awaitBlobDeletions(blobIdList.get(0).getID());
  notificationSystem.awaitBlobDeletions(blobIdList.get(6).getID());
  undeleteBlob(blobIdList.get(0), channel);
  notificationSystem.awaitBlobUndeletes(blobIdList.get(0).getID());
  time.sleep(TimeUnit.DAYS.toMillis(1));
  // For each future change to this offset, add to this variable and write an explanation of why the number changed.
  int expectedTokenValueT2 = 298416 + 98 + 28 + 72;
  // Old value: 298400. Increased by 16 (4 * 4) to 298416 because the delete record format went from version 2 to 3,
  // which adds 4 extra bytes (two shorts) per delete record. The last record is a delete record, so its extra
  // 4 bytes are not counted.
  //
  // Old value: 298416. Increased by 98. The end offset is now a journal-based offset, so the offset is not
  // inclusive: it points to the last record in the journal. Before the undelete record was added, the last record
  // in the journal was the delete record for blob 6; now it is the undelete for blob 0. Since a delete record is
  // 98 bytes, the offset grows by 98 bytes.
  //
  // Old value: 298416 + 98. Increased by 28 when changing the message header version from 2 to 3, which adds a
  // short to all records: 9 puts and 5 deletes (the undelete is not counted since it is the last record).
  //
  // Old value: 298416 + 98 + 28. Increased by 72 when two fields (a 4-byte CRC for each) were added to the blob
  // properties written by putBlob: 9 puts * (4 + 4) bytes.
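  // A worked restatement of the arithmetic above (values taken from the comments; illustrative only):
  // 298400 (original) + 16 (delete format v2 -> v3: 4 deletes * 4 bytes) + 98 (journal end offset moved past one
  //   98-byte delete record) + 28 (message header v2 -> v3: 14 records * 2 bytes) + 72 (blob property CRC fields:
  //   9 puts * 8 bytes) = 298614 = 298416 + 98 + 28 + 72.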
  ensureCleanupTokenCatchesUp(chosenPartition.getReplicaIds().get(0).getReplicaPath(), mockClusterMap, expectedTokenValueT2);
  getAndVerify(channel, 9);
}
Use of com.github.ambry.network.ConnectedChannel in project ambry by LinkedIn.
The class ServerReadPerformance, method main.
public static void main(String[] args) {
  ConnectionPool connectionPool = null;
  FileWriter writer = null;
  try {
    OptionParser parser = new OptionParser();
    ArgumentAcceptingOptionSpec<String> logToReadOpt = parser.accepts("logToRead", "The log that needs to be replayed for traffic").withRequiredArg().describedAs("log_to_read").ofType(String.class);
    ArgumentAcceptingOptionSpec<String> hardwareLayoutOpt = parser.accepts("hardwareLayout", "The path of the hardware layout file").withRequiredArg().describedAs("hardware_layout").ofType(String.class);
    ArgumentAcceptingOptionSpec<String> partitionLayoutOpt = parser.accepts("partitionLayout", "The path of the partition layout file").withRequiredArg().describedAs("partition_layout").ofType(String.class);
    ArgumentAcceptingOptionSpec<Integer> readsPerSecondOpt = parser.accepts("readsPerSecond", "The rate at which reads need to be performed").withRequiredArg().describedAs("The number of reads per second").ofType(Integer.class).defaultsTo(1000);
    ArgumentAcceptingOptionSpec<Long> measurementIntervalOpt = parser.accepts("measurementInterval", "The interval in seconds over which to report performance results").withOptionalArg().describedAs("The CPU time spent for getting blobs, not wall time").ofType(Long.class).defaultsTo(300L);
    ArgumentAcceptingOptionSpec<Boolean> verboseLoggingOpt = parser.accepts("enableVerboseLogging", "Enables verbose logging").withOptionalArg().describedAs("Enable verbose logging").ofType(Boolean.class).defaultsTo(false);
    ArgumentAcceptingOptionSpec<String> sslEnabledDatacentersOpt = parser.accepts("sslEnabledDatacenters", "Datacenters for which SSL should be enabled").withOptionalArg().describedAs("Comma separated list").ofType(String.class).defaultsTo("");
    ArgumentAcceptingOptionSpec<String> sslKeystorePathOpt = parser.accepts("sslKeystorePath", "SSL key store path").withOptionalArg().describedAs("The file path of SSL key store").defaultsTo("").ofType(String.class);
    ArgumentAcceptingOptionSpec<String> sslKeystoreTypeOpt = parser.accepts("sslKeystoreType", "SSL key store type").withOptionalArg().describedAs("The type of SSL key store").defaultsTo("").ofType(String.class);
    ArgumentAcceptingOptionSpec<String> sslTruststorePathOpt = parser.accepts("sslTruststorePath", "SSL trust store path").withOptionalArg().describedAs("The file path of SSL trust store").defaultsTo("").ofType(String.class);
    ArgumentAcceptingOptionSpec<String> sslKeystorePasswordOpt = parser.accepts("sslKeystorePassword", "SSL key store password").withOptionalArg().describedAs("The password of SSL key store").defaultsTo("").ofType(String.class);
    ArgumentAcceptingOptionSpec<String> sslKeyPasswordOpt = parser.accepts("sslKeyPassword", "SSL key password").withOptionalArg().describedAs("The password of SSL private key").defaultsTo("").ofType(String.class);
    ArgumentAcceptingOptionSpec<String> sslTruststorePasswordOpt = parser.accepts("sslTruststorePassword", "SSL trust store password").withOptionalArg().describedAs("The password of SSL trust store").defaultsTo("").ofType(String.class);
    ArgumentAcceptingOptionSpec<String> sslCipherSuitesOpt = parser.accepts("sslCipherSuites", "SSL enabled cipher suites").withOptionalArg().describedAs("Comma separated list").defaultsTo("TLS_RSA_WITH_AES_128_CBC_SHA").ofType(String.class);
    OptionSet options = parser.parse(args);
    ArrayList<OptionSpec> listOpt = new ArrayList<>();
    listOpt.add(logToReadOpt);
    listOpt.add(hardwareLayoutOpt);
    listOpt.add(partitionLayoutOpt);
    ToolUtils.ensureOrExit(listOpt, options, parser);
    long measurementIntervalNs = options.valueOf(measurementIntervalOpt) * SystemTime.NsPerSec;
    ToolUtils.validateSSLOptions(options, parser, sslEnabledDatacentersOpt, sslKeystorePathOpt, sslKeystoreTypeOpt, sslTruststorePathOpt, sslKeystorePasswordOpt, sslKeyPasswordOpt, sslTruststorePasswordOpt);
    String sslEnabledDatacenters = options.valueOf(sslEnabledDatacentersOpt);
    Properties sslProperties;
    if (sslEnabledDatacenters.length() != 0) {
      sslProperties = ToolUtils.createSSLProperties(sslEnabledDatacenters, options.valueOf(sslKeystorePathOpt), options.valueOf(sslKeystoreTypeOpt), options.valueOf(sslKeystorePasswordOpt), options.valueOf(sslKeyPasswordOpt), options.valueOf(sslTruststorePathOpt), options.valueOf(sslTruststorePasswordOpt), options.valueOf(sslCipherSuitesOpt));
    } else {
      sslProperties = new Properties();
    }
    ToolUtils.addClusterMapProperties(sslProperties);
    String logToRead = options.valueOf(logToReadOpt);
    int readsPerSecond = options.valueOf(readsPerSecondOpt);
    boolean enableVerboseLogging = options.has(verboseLoggingOpt);
    if (enableVerboseLogging) {
      System.out.println("Enabled verbose logging");
    }
    File logFile = new File(System.getProperty("user.dir"), "readperfresult");
    writer = new FileWriter(logFile);
    String hardwareLayoutPath = options.valueOf(hardwareLayoutOpt);
    String partitionLayoutPath = options.valueOf(partitionLayoutOpt);
    ClusterMapConfig clusterMapConfig = new ClusterMapConfig(new VerifiableProperties(sslProperties));
    ClusterMap map = ((ClusterAgentsFactory) Utils.getObj(clusterMapConfig.clusterMapClusterAgentsFactory, clusterMapConfig, hardwareLayoutPath, partitionLayoutPath)).getClusterMap();
    final AtomicLong totalTimeTaken = new AtomicLong(0);
    final AtomicLong totalReads = new AtomicLong(0);
    final AtomicBoolean shutdown = new AtomicBoolean(false);
    // attach shutdown handler to catch control-c
    Runtime.getRuntime().addShutdownHook(new Thread() {
      public void run() {
        try {
          System.out.println("Shutdown invoked");
          shutdown.set(true);
          String message = "Total reads : " + totalReads.get() + ". Total time taken : " + totalTimeTaken.get() + " ns. Average time taken per read : " + ((double) totalTimeTaken.get()) / SystemTime.NsPerSec / totalReads.get() + " s.";
          System.out.println(message);
        } catch (Exception e) {
          System.out.println("Error while shutting down " + e);
        }
      }
    });
    final BufferedReader br = new BufferedReader(new FileReader(logToRead));
    Throttler throttler = new Throttler(readsPerSecond, 100, true, SystemTime.getInstance());
    String line;
    ConnectedChannel channel = null;
    ConnectionPoolConfig connectionPoolConfig = new ConnectionPoolConfig(new VerifiableProperties(new Properties()));
    VerifiableProperties vProps = new VerifiableProperties(sslProperties);
    SSLConfig sslConfig = new SSLConfig(vProps);
    clusterMapConfig = new ClusterMapConfig(vProps);
    connectionPool = new BlockingChannelConnectionPool(connectionPoolConfig, sslConfig, clusterMapConfig, new MetricRegistry());
    long totalNumberOfGetBlobs = 0;
    long totalLatencyForGetBlobs = 0;
    ArrayList<Long> latenciesForGetBlobs = new ArrayList<Long>();
    long maxLatencyForGetBlobs = 0;
    long minLatencyForGetBlobs = Long.MAX_VALUE;
    while ((line = br.readLine()) != null) {
      String[] id = line.split("-");
      BlobData blobData = null;
      BlobId blobId = new BlobId(id[1], map);
      ArrayList<BlobId> blobIds = new ArrayList<BlobId>();
      blobIds.add(blobId);
      for (ReplicaId replicaId : blobId.getPartition().getReplicaIds()) {
        long startTimeGetBlob = 0;
        ArrayList<PartitionRequestInfo> partitionRequestInfoList = new ArrayList<PartitionRequestInfo>();
        try {
          partitionRequestInfoList.clear();
          PartitionRequestInfo partitionRequestInfo = new PartitionRequestInfo(blobId.getPartition(), blobIds);
          partitionRequestInfoList.add(partitionRequestInfo);
          GetRequest getRequest = new GetRequest(1, "getperf", MessageFormatFlags.Blob, partitionRequestInfoList, GetOption.None);
          Port port = replicaId.getDataNodeId().getPortToConnectTo();
          channel = connectionPool.checkOutConnection(replicaId.getDataNodeId().getHostname(), port, 10000);
          startTimeGetBlob = SystemTime.getInstance().nanoseconds();
          channel.send(getRequest);
          DataInputStream receiveStream = channel.receive().getInputStream();
          GetResponse getResponse = GetResponse.readFrom(receiveStream, map);
          blobData = MessageFormatRecord.deserializeBlob(getResponse.getInputStream());
          byte[] outputBuffer = new byte[(int) blobData.getSize()];
          ByteBufferOutputStream streamOut = new ByteBufferOutputStream(ByteBuffer.wrap(outputBuffer));
          ByteBuf buffer = blobData.content();
          try {
            buffer.readBytes(streamOut, (int) blobData.getSize());
          } finally {
            buffer.release();
          }
          long latencyPerBlob = SystemTime.getInstance().nanoseconds() - startTimeGetBlob;
          totalTimeTaken.addAndGet(latencyPerBlob);
          latenciesForGetBlobs.add(latencyPerBlob);
          totalReads.incrementAndGet();
          totalNumberOfGetBlobs++;
          totalLatencyForGetBlobs += latencyPerBlob;
          if (enableVerboseLogging) {
            System.out.println("Time taken to get blob id " + blobId + " in ms " + latencyPerBlob / SystemTime.NsPerMs);
          }
          if (latencyPerBlob > maxLatencyForGetBlobs) {
            maxLatencyForGetBlobs = latencyPerBlob;
          }
          if (latencyPerBlob < minLatencyForGetBlobs) {
            minLatencyForGetBlobs = latencyPerBlob;
          }
          if (totalLatencyForGetBlobs >= measurementIntervalNs) {
            Collections.sort(latenciesForGetBlobs);
            int index99 = (int) (latenciesForGetBlobs.size() * 0.99) - 1;
            int index95 = (int) (latenciesForGetBlobs.size() * 0.95) - 1;
            String message = totalNumberOfGetBlobs + "," + (double) latenciesForGetBlobs.get(index99) / SystemTime.NsPerSec + "," + (double) latenciesForGetBlobs.get(index95) / SystemTime.NsPerSec + "," + ((double) totalLatencyForGetBlobs / SystemTime.NsPerSec / totalNumberOfGetBlobs);
            System.out.println(message);
            writer.write(message + "\n");
            totalLatencyForGetBlobs = 0;
            latenciesForGetBlobs.clear();
            totalNumberOfGetBlobs = 0;
            maxLatencyForGetBlobs = 0;
            minLatencyForGetBlobs = Long.MAX_VALUE;
          }
          partitionRequestInfoList.clear();
          partitionRequestInfo = new PartitionRequestInfo(blobId.getPartition(), blobIds);
          partitionRequestInfoList.add(partitionRequestInfo);
          GetRequest getRequestProperties = new GetRequest(1, "getperf", MessageFormatFlags.BlobProperties, partitionRequestInfoList, GetOption.None);
          long startTimeGetBlobProperties = SystemTime.getInstance().nanoseconds();
          channel.send(getRequestProperties);
          DataInputStream receivePropertyStream = channel.receive().getInputStream();
          GetResponse getResponseProperty = GetResponse.readFrom(receivePropertyStream, map);
          BlobProperties blobProperties = MessageFormatRecord.deserializeBlobProperties(getResponseProperty.getInputStream());
          long endTimeGetBlobProperties = SystemTime.getInstance().nanoseconds() - startTimeGetBlobProperties;
          partitionRequestInfoList.clear();
          partitionRequestInfo = new PartitionRequestInfo(blobId.getPartition(), blobIds);
          partitionRequestInfoList.add(partitionRequestInfo);
          GetRequest getRequestUserMetadata = new GetRequest(1, "getperf", MessageFormatFlags.BlobUserMetadata, partitionRequestInfoList, GetOption.None);
          long startTimeGetBlobUserMetadata = SystemTime.getInstance().nanoseconds();
          channel.send(getRequestUserMetadata);
          DataInputStream receiveUserMetadataStream = channel.receive().getInputStream();
          GetResponse getResponseUserMetadata = GetResponse.readFrom(receiveUserMetadataStream, map);
          ByteBuffer userMetadata = MessageFormatRecord.deserializeUserMetadata(getResponseUserMetadata.getInputStream());
          long endTimeGetBlobUserMetadata = SystemTime.getInstance().nanoseconds() - startTimeGetBlobUserMetadata;
          // delete the blob
          DeleteRequest deleteRequest = new DeleteRequest(0, "perf", blobId, System.currentTimeMillis());
          channel.send(deleteRequest);
          DeleteResponse deleteResponse = DeleteResponse.readFrom(channel.receive().getInputStream());
          if (deleteResponse.getError() != ServerErrorCode.No_Error) {
            throw new UnexpectedException("error " + deleteResponse.getError());
          }
          throttler.maybeThrottle(1);
        } finally {
          if (channel != null) {
            connectionPool.checkInConnection(channel);
            channel = null;
          }
        }
      }
    }
  } catch (Exception e) {
    e.printStackTrace();
    System.out.println("Error in server read performance " + e);
  } finally {
    if (writer != null) {
      try {
        writer.close();
      } catch (Exception e) {
        System.out.println("Error when closing writer");
      }
    }
    if (connectionPool != null) {
      connectionPool.shutdown();
    }
  }
}
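For reference, a typical invocation of this tool might look like the following; the jar and fully qualified class names are illustrative assumptions, while the option names come from the parser definitions above.

java -cp ambry-tools.jar com.github.ambry.tools.perf.ServerReadPerformance \
  --logToRead put-blob-ids.log --hardwareLayout HardwareLayout.json \
  --partitionLayout PartitionLayout.json --readsPerSecond 1000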
Use of com.github.ambry.network.ConnectedChannel in project ambry by LinkedIn.
The class CloudAndStoreReplicationTest, method testGetOnServerNode.
/**
 * Do a get on a recovery server node to verify that all the blob ids that were uploaded to the VCR node have been
 * recovered on the recovery node.
 * @param blobIdToSizeMap {@link Map} of blob id to the size of the blob uploaded to the VCR node.
 * @param node the recovery server node.
 * @throws IOException on network errors while sending the request or reading the response.
 */
private void testGetOnServerNode(Map<BlobId, Integer> blobIdToSizeMap, DataNodeId node) throws IOException {
  ConnectedChannel channel = ServerTestUtil.getBlockingChannelBasedOnPortType(node.getPortToConnectTo(), node.getHostname(), null, null);
  channel.connect();
  AtomicInteger correlationIdGenerator = new AtomicInteger(0);
  List<BlobId> allBlobIds = Stream.concat(cloudBlobIds.stream(), serverBlobIds.stream()).collect(Collectors.toList());
  List<PartitionRequestInfo> partitionRequestInfoList = Collections.singletonList(new PartitionRequestInfo(partitionId, allBlobIds));
  GetRequest getRequest = new GetRequest(correlationIdGenerator.incrementAndGet(), GetRequest.Replication_Client_Id_Prefix + node.getHostname(), MessageFormatFlags.All, partitionRequestInfoList, new ReplicationConfig(new VerifiableProperties(recoveryProperties)).replicationIncludeAll ? GetOption.Include_All : GetOption.None);
  channel.send(getRequest);
  GetResponse getResponse = GetResponse.readFrom(channel.receive().getInputStream(), recoveryCluster.getClusterMap());
  for (PartitionResponseInfo partitionResponseInfo : getResponse.getPartitionResponseInfoList()) {
    assertEquals("Error in getting the recovered blobs", ServerErrorCode.No_Error, partitionResponseInfo.getErrorCode());
    // The fixed per-message overhead was previously 272 bytes; the two fields added to blob properties add 8 bytes (4 + 4), giving 280.
    for (MessageInfo messageInfo : partitionResponseInfo.getMessageInfoList()) {
      assertEquals(blobIdToSizeMap.get(messageInfo.getStoreKey()) + 280, messageInfo.getSize());
    }
  }
}