Use of com.github.ambry.commons.BlobIdFactory in project ambry by LinkedIn.
The class DiskTokenPersistorTest, method setup.
/**
 * Create the one-time setup for the tests.
 * @throws Exception if an Exception happens during setup.
 */
@BeforeClass
public static void setup() throws Exception {
  clusterMap = new MockClusterMap();
  mountPathToPartitionInfoList = new HashMap<>();
  mountPathToReplicaTokenInfos = new HashMap<>();
  BlobIdFactory blobIdFactory = new BlobIdFactory(clusterMap);
  StoreFindTokenFactory factory = new StoreFindTokenFactory(blobIdFactory);
  DataNodeId dataNodeId = clusterMap.getDataNodeIds().get(0);
  List<? extends ReplicaId> localReplicas = clusterMap.getReplicaIds(dataNodeId);
  replicaId = localReplicas.get(0);
  for (ReplicaId replicaId : localReplicas) {
    List<? extends ReplicaId> peerReplicas = replicaId.getPeerReplicaIds();
    List<RemoteReplicaInfo> remoteReplicas = new ArrayList<>();
    for (ReplicaId remoteReplica : peerReplicas) {
      RemoteReplicaInfo remoteReplicaInfo = new RemoteReplicaInfo(remoteReplica, replicaId, null, factory.getNewFindToken(), 10, SystemTime.getInstance(), remoteReplica.getDataNodeId().getPortToConnectTo());
      remoteReplicas.add(remoteReplicaInfo);
      mountPathToReplicaTokenInfos.computeIfAbsent(replicaId.getMountPath(), k -> new ArrayList<>()).add(new RemoteReplicaInfo.ReplicaTokenInfo(remoteReplicaInfo));
    }
    PartitionInfo partitionInfo = new PartitionInfo(remoteReplicas, replicaId.getPartitionId(), null, replicaId);
    mountPathToPartitionInfoList.computeIfAbsent(replicaId.getMountPath(), key -> ConcurrentHashMap.newKeySet()).add(partitionInfo);
  }
  Properties replicationProperties = new Properties();
  replicationProperties.setProperty("replication.cloud.token.factory", MockFindTokenFactory.class.getName());
  ReplicationConfig replicationConfig = new ReplicationConfig(new VerifiableProperties(replicationProperties));
  findTokenHelper = new FindTokenHelper(blobIdFactory, replicationConfig);
  mockStoreManager = Mockito.mock(StoreManager.class);
  Mockito.when(mockStoreManager.checkLocalPartitionStatus(any(), any())).thenReturn(ServerErrorCode.No_Error);
}
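The setup above reduces to a small pattern: a BlobIdFactory built from the cluster map feeds a StoreFindTokenFactory, and a fresh token from that factory seeds each RemoteReplicaInfo so replication starts from the beginning of the remote log. A minimal sketch of just that pattern, assuming only a MockClusterMap from Ambry's test utilities:

ClusterMap clusterMap = new MockClusterMap();
// The factory needs the cluster map to resolve partition ids inside blob ids.
BlobIdFactory blobIdFactory = new BlobIdFactory(clusterMap);
StoreFindTokenFactory tokenFactory = new StoreFindTokenFactory(blobIdFactory);
// A brand-new token marks the very beginning of the remote replica's log.
FindToken startToken = tokenFactory.getNewFindToken();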
Use of com.github.ambry.commons.BlobIdFactory in project ambry by LinkedIn.
The class CloudRouterFactory, method getRequestHandlerPool.
/**
 * Utility method to build a {@link RequestHandlerPool}.
 * @param verifiableProperties the properties to use.
 * @param clusterMap the {@link ClusterMap} to use.
 * @param cloudDestination the {@link CloudDestination} to use.
 * @param cloudConfig the {@link CloudConfig} to use.
 * @return the constructed {@link RequestHandlerPool}.
 * @throws Exception if the construction fails.
 */
public RequestHandlerPool getRequestHandlerPool(VerifiableProperties verifiableProperties, ClusterMap clusterMap, CloudDestination cloudDestination, CloudConfig cloudConfig) throws Exception {
  ClusterMapConfig clusterMapConfig = new ClusterMapConfig(verifiableProperties);
  MetricRegistry registry = clusterMap.getMetricRegistry();
  DataNodeId nodeId = new CloudDataNode(cloudConfig, clusterMapConfig);
  VcrMetrics vcrMetrics = new VcrMetrics(registry);
  StoreManager cloudStorageManager = new CloudStorageManager(verifiableProperties, vcrMetrics, cloudDestination, clusterMap);
  LocalRequestResponseChannel channel = new LocalRequestResponseChannel();
  ServerMetrics serverMetrics = new ServerMetrics(registry, AmbryRequests.class);
  StoreKeyFactory storeKeyFactory = new BlobIdFactory(clusterMap);
  StoreKeyConverterFactory storeKeyConverterFactory = Utils.getObj(routerConfig.routerStoreKeyConverterFactory, verifiableProperties, registry);
  // A null notification system is passed into AmbryRequests so that replication events are not emitted from a
  // frontend.
  AmbryRequests requests = new AmbryRequests(cloudStorageManager, channel, clusterMap, nodeId, registry, serverMetrics, null, null, null, storeKeyFactory, storeKeyConverterFactory);
  return new RequestHandlerPool(routerConfig.routerRequestHandlerNumOfThreads, channel, requests);
}
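A hypothetical call site for the factory method above. The factory instance, destination, and config objects are placeholders built elsewhere, and the shutdown() lifecycle call is an assumption about RequestHandlerPool rather than something shown in this snippet:

RequestHandlerPool pool = cloudRouterFactory.getRequestHandlerPool(verifiableProperties, clusterMap, cloudDestination, cloudConfig);
// ... serve requests through the pool's LocalRequestResponseChannel ...
pool.shutdown(); // assumed lifecycle method; stops the handler threads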
Use of com.github.ambry.commons.BlobIdFactory in project ambry by LinkedIn.
The class IndexWritePerformance, method main.
public static void main(String[] args) {
  FileWriter writer = null;
  try {
    OptionParser parser = new OptionParser();
    ArgumentAcceptingOptionSpec<Integer> numberOfIndexesOpt = parser.accepts("numberOfIndexes", "The number of indexes to create").withRequiredArg().describedAs("number_of_indexes").ofType(Integer.class);
    ArgumentAcceptingOptionSpec<String> hardwareLayoutOpt = parser.accepts("hardwareLayout", "The path of the hardware layout file").withRequiredArg().describedAs("hardware_layout").ofType(String.class);
    ArgumentAcceptingOptionSpec<String> partitionLayoutOpt = parser.accepts("partitionLayout", "The path of the partition layout file").withRequiredArg().describedAs("partition_layout").ofType(String.class);
    ArgumentAcceptingOptionSpec<Integer> numberOfWritersOpt = parser.accepts("numberOfWriters", "The number of writers that write to a random index concurrently").withRequiredArg().describedAs("The number of writers").ofType(Integer.class).defaultsTo(4);
    ArgumentAcceptingOptionSpec<Integer> writesPerSecondOpt = parser.accepts("writesPerSecond", "The rate at which writes need to be performed").withRequiredArg().describedAs("The number of writes per second").ofType(Integer.class).defaultsTo(1000);
    ArgumentAcceptingOptionSpec<Boolean> verboseLoggingOpt = parser.accepts("enableVerboseLogging", "Enables verbose logging").withOptionalArg().describedAs("Enable verbose logging").ofType(Boolean.class).defaultsTo(false);
    OptionSet options = parser.parse(args);
    ArrayList<OptionSpec> listOpt = new ArrayList<>();
    listOpt.add(numberOfIndexesOpt);
    listOpt.add(hardwareLayoutOpt);
    listOpt.add(partitionLayoutOpt);
    ToolUtils.ensureOrExit(listOpt, options, parser);
    int numberOfIndexes = options.valueOf(numberOfIndexesOpt);
    int numberOfWriters = options.valueOf(numberOfWritersOpt);
    int writesPerSecond = options.valueOf(writesPerSecondOpt);
    boolean enableVerboseLogging = options.has(verboseLoggingOpt);
    if (enableVerboseLogging) {
      System.out.println("Enabled verbose logging");
    }
    final AtomicLong totalTimeTakenInNs = new AtomicLong(0);
    final AtomicLong totalWrites = new AtomicLong(0);
    String hardwareLayoutPath = options.valueOf(hardwareLayoutOpt);
    String partitionLayoutPath = options.valueOf(partitionLayoutOpt);
    ClusterMapConfig clusterMapConfig = new ClusterMapConfig(new VerifiableProperties(new Properties()));
    ClusterMap map = ((ClusterAgentsFactory) Utils.getObj(clusterMapConfig.clusterMapClusterAgentsFactory, clusterMapConfig, hardwareLayoutPath, partitionLayoutPath)).getClusterMap();
    StoreKeyFactory factory = new BlobIdFactory(map);
    File logFile = new File(System.getProperty("user.dir"), "writeperflog");
    writer = new FileWriter(logFile);
    MetricRegistry metricRegistry = new MetricRegistry();
    StoreMetrics metrics = new StoreMetrics(metricRegistry);
    DiskSpaceAllocator diskSpaceAllocator = new DiskSpaceAllocator(false, null, 0, new StorageManagerMetrics(metricRegistry));
    Properties props = new Properties();
    props.setProperty("store.index.memory.size.bytes", "2097152");
    props.setProperty("store.segment.size.in.bytes", "10");
    StoreConfig config = new StoreConfig(new VerifiableProperties(props));
    Log log = new Log(System.getProperty("user.dir"), 10, diskSpaceAllocator, config, metrics, null);
    ScheduledExecutorService s = Utils.newScheduler(numberOfWriters, "index", false);
    ArrayList<BlobIndexMetrics> indexWithMetrics = new ArrayList<BlobIndexMetrics>(numberOfIndexes);
    for (int i = 0; i < numberOfIndexes; i++) {
      File indexFile = new File(System.getProperty("user.dir"), Integer.toString(i));
      if (indexFile.exists()) {
        for (File c : indexFile.listFiles()) {
          c.delete();
        }
      } else {
        indexFile.mkdir();
      }
      System.out.println("Creating index folder " + indexFile.getAbsolutePath());
      writer.write("logdir-" + indexFile.getAbsolutePath() + "\n");
      indexWithMetrics.add(new BlobIndexMetrics(indexFile.getAbsolutePath(), s, log, enableVerboseLogging, totalWrites, totalTimeTakenInNs, totalWrites, config, writer, factory));
    }
    final CountDownLatch latch = new CountDownLatch(numberOfWriters);
    final AtomicBoolean shutdown = new AtomicBoolean(false);
    // attach shutdown handler to catch control-c
    Runtime.getRuntime().addShutdownHook(new Thread() {
      public void run() {
        try {
          System.out.println("Shutdown invoked");
          shutdown.set(true);
          latch.await();
System.out.println("Total writes : " + totalWrites.get() + " Total time taken : " + totalTimeTakenInNs.get() + " Nano Seconds Average time taken per write " + ((double) totalWrites.get() / totalTimeTakenInNs.get()) / SystemTime.NsPerSec + " Seconds");
        } catch (Exception e) {
          System.out.println("Error while shutting down " + e);
        }
      }
    });
    Throttler throttler = new Throttler(writesPerSecond, 100, true, SystemTime.getInstance());
    Thread[] threadIndexPerf = new Thread[numberOfWriters];
    for (int i = 0; i < numberOfWriters; i++) {
      threadIndexPerf[i] = new Thread(new IndexWritePerfRun(indexWithMetrics, throttler, shutdown, latch, map));
      threadIndexPerf[i].start();
    }
    for (int i = 0; i < numberOfWriters; i++) {
      threadIndexPerf[i].join();
    }
  } catch (StoreException e) {
    System.err.println("Index creation error on exit " + e.getMessage());
  } catch (Exception e) {
    System.err.println("Error on exit " + e);
  } finally {
    if (writer != null) {
      try {
        writer.close();
      } catch (Exception e) {
        System.out.println("Error when closing the writer");
      }
    }
  }
}
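A sketch of how the tool might be invoked. The option names come from the parser definitions above; the package-qualified class name and the layout file paths are placeholders, not taken from this snippet:

java com.github.ambry.tools.perf.IndexWritePerformance --numberOfIndexes 2 --hardwareLayout /tmp/HardwareLayout.json --partitionLayout /tmp/PartitionLayout.json --numberOfWriters 4 --writesPerSecond 1000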
Use of com.github.ambry.commons.BlobIdFactory in project ambry by LinkedIn.
The class CloudBlobStoreTest, method testPutWithTtl.
/**
 * Test PUT (with TTL) and TtlUpdate record replication.
 * Replication may happen after both the PUT and the TtlUpdate, or after the TtlUpdate only.
 * The PUT may already be expired, have an expiration time below the upload threshold, or have an
 * expiration time at or above the upload threshold.
 * @throws Exception on any unexpected test error.
 */
@Test
public void testPutWithTtl() throws Exception {
  // Set up remote host
  MockClusterMap clusterMap = new MockClusterMap();
  MockHost remoteHost = getLocalAndRemoteHosts(clusterMap).getSecond();
  List<PartitionId> partitionIds = clusterMap.getWritablePartitionIds(null);
  PartitionId partitionId = partitionIds.get(0);
  StoreKeyFactory storeKeyFactory = new BlobIdFactory(clusterMap);
  MockStoreKeyConverterFactory storeKeyConverterFactory = new MockStoreKeyConverterFactory(null, null);
  storeKeyConverterFactory.setConversionMap(new HashMap<>());
  storeKeyConverterFactory.setReturnInputIfAbsent(true);
  MockStoreKeyConverterFactory.MockStoreKeyConverter storeKeyConverter = storeKeyConverterFactory.getStoreKeyConverter();
  Transformer transformer = new BlobIdTransformer(storeKeyFactory, storeKeyConverter);
  Map<DataNodeId, MockHost> hosts = new HashMap<>();
  hosts.put(remoteHost.dataNodeId, remoteHost);
  MockConnectionPool connectionPool = new MockConnectionPool(hosts, clusterMap, 4);
  // Generate BlobIds for the following PUTs.
  short blobIdVersion = CommonTestUtils.getCurrentBlobIdVersion();
  short accountId = Utils.getRandomShort(TestUtils.RANDOM);
  short containerId = Utils.getRandomShort(TestUtils.RANDOM);
  boolean toEncrypt = TestUtils.RANDOM.nextBoolean();
  List<BlobId> blobIdList = new ArrayList<>();
  for (int i = 0; i < 6; i++) {
    blobIdList.add(new BlobId(blobIdVersion, BlobId.BlobIdType.NATIVE, ClusterMap.UNKNOWN_DATACENTER_ID, accountId, containerId, partitionId, toEncrypt, BlobId.BlobDataType.DATACHUNK));
  }
  // Set up VCR
  Properties props = new Properties();
  setBasicProperties(props);
  props.setProperty("clustermap.port", "12300");
  props.setProperty("vcr.ssl.port", "12345");
  ReplicationConfig replicationConfig = new ReplicationConfig(new VerifiableProperties(props));
  ClusterMapConfig clusterMapConfig = new ClusterMapConfig(new VerifiableProperties(props));
  CloudConfig cloudConfig = new CloudConfig(new VerifiableProperties(props));
  CloudDataNode cloudDataNode = new CloudDataNode(cloudConfig, clusterMapConfig);
  LatchBasedInMemoryCloudDestination latchBasedInMemoryCloudDestination = new LatchBasedInMemoryCloudDestination(blobIdList, clusterMap);
  CloudReplica cloudReplica = new CloudReplica(partitionId, cloudDataNode);
  CloudBlobStore cloudBlobStore = new CloudBlobStore(new VerifiableProperties(props), partitionId, latchBasedInMemoryCloudDestination, clusterMap, new VcrMetrics(new MetricRegistry()));
  cloudBlobStore.start();
  // Create ReplicaThread and add RemoteReplicaInfo to it.
  ReplicationMetrics replicationMetrics = new ReplicationMetrics(new MetricRegistry(), Collections.emptyList());
  ReplicaThread replicaThread = new ReplicaThread("threadtest", new MockFindTokenHelper(storeKeyFactory, replicationConfig), clusterMap, new AtomicInteger(0), cloudDataNode, connectionPool, replicationConfig, replicationMetrics, null, storeKeyConverter, transformer, clusterMap.getMetricRegistry(), false, cloudDataNode.getDatacenterName(), new ResponseHandler(clusterMap), new MockTime(), null, null, null);
  for (ReplicaId replica : partitionId.getReplicaIds()) {
    if (replica.getDataNodeId() == remoteHost.dataNodeId) {
      RemoteReplicaInfo remoteReplicaInfo = new RemoteReplicaInfo(replica, cloudReplica, cloudBlobStore, new MockFindToken(0, 0), Long.MAX_VALUE, SystemTime.getInstance(), new Port(remoteHost.dataNodeId.getPort(), PortType.PLAINTEXT));
      replicaThread.addRemoteReplicaInfo(remoteReplicaInfo);
      break;
    }
  }
  long referenceTime = System.currentTimeMillis();
  // Case 1: Put already expired. Replication happens after Put and after TtlUpdate.
  // Upload to Cloud only after replicating ttlUpdate.
  BlobId id = blobIdList.get(0);
  addPutMessagesToReplicasOfPartition(id, accountId, containerId, partitionId, Collections.singletonList(remoteHost), referenceTime - 2000, referenceTime - 1000);
  replicaThread.replicate();
  assertFalse("Blob should not exist.", latchBasedInMemoryCloudDestination.doesBlobExist(id));
  addTtlUpdateMessagesToReplicasOfPartition(partitionId, id, Collections.singletonList(remoteHost), Utils.Infinite_Time);
  replicaThread.replicate();
  assertTrue("Blob should exist.", latchBasedInMemoryCloudDestination.doesBlobExist(id));
  // Case 2: Put already expired. Replication happens after TtlUpdate.
  // Upload to Cloud only after replicating ttlUpdate.
  id = blobIdList.get(1);
  addPutMessagesToReplicasOfPartition(id, accountId, containerId, partitionId, Collections.singletonList(remoteHost), referenceTime - 2000, referenceTime - 1000);
  addTtlUpdateMessagesToReplicasOfPartition(partitionId, id, Collections.singletonList(remoteHost), Utils.Infinite_Time);
  replicaThread.replicate();
  assertTrue("Blob should exist.", latchBasedInMemoryCloudDestination.doesBlobExist(id));
  // Case 3: Put TTL less than cloudConfig.vcrMinTtlDays. Replication happens after Put and after TtlUpdate.
  // Upload to Cloud only after replicating ttlUpdate.
  id = blobIdList.get(2);
  addPutMessagesToReplicasOfPartition(id, accountId, containerId, partitionId, Collections.singletonList(remoteHost), referenceTime, referenceTime + TimeUnit.DAYS.toMillis(cloudConfig.vcrMinTtlDays) - 1);
  replicaThread.replicate();
  if (isVcr) {
    assertFalse("Blob should not exist (vcr).", latchBasedInMemoryCloudDestination.doesBlobExist(id));
  } else {
    assertTrue("Blob should exist (not vcr).", latchBasedInMemoryCloudDestination.doesBlobExist(id));
  }
  addTtlUpdateMessagesToReplicasOfPartition(partitionId, id, Collections.singletonList(remoteHost), Utils.Infinite_Time);
  replicaThread.replicate();
  assertTrue("Blob should exist.", latchBasedInMemoryCloudDestination.doesBlobExist(id));
  // Case 4: Put TTL less than cloudConfig.vcrMinTtlDays. Replication happens after TtlUpdate.
  // Upload to Cloud only after replicating ttlUpdate.
  id = blobIdList.get(3);
  addPutMessagesToReplicasOfPartition(id, accountId, containerId, partitionId, Collections.singletonList(remoteHost), referenceTime, referenceTime + TimeUnit.DAYS.toMillis(cloudConfig.vcrMinTtlDays) - 1);
  addTtlUpdateMessagesToReplicasOfPartition(partitionId, id, Collections.singletonList(remoteHost), Utils.Infinite_Time);
  replicaThread.replicate();
  assertTrue("Blob should exist.", latchBasedInMemoryCloudDestination.doesBlobExist(id));
  // Case 5: Put TTL greater than or equal to cloudConfig.vcrMinTtlDays. Replication happens after Put and after TtlUpdate.
  // Upload to Cloud after Put and update ttl after TtlUpdate.
  id = blobIdList.get(4);
  addPutMessagesToReplicasOfPartition(id, accountId, containerId, partitionId, Collections.singletonList(remoteHost), referenceTime, referenceTime + TimeUnit.DAYS.toMillis(cloudConfig.vcrMinTtlDays));
  replicaThread.replicate();
  assertTrue(latchBasedInMemoryCloudDestination.doesBlobExist(id));
  addTtlUpdateMessagesToReplicasOfPartition(partitionId, id, Collections.singletonList(remoteHost), Utils.Infinite_Time);
  replicaThread.replicate();
  assertTrue("Blob should exist.", latchBasedInMemoryCloudDestination.doesBlobExist(id));
  // Case 6: Put TTL greater than or equal to cloudConfig.vcrMinTtlDays. Replication happens after TtlUpdate.
  // Upload to Cloud after TtlUpdate.
  id = blobIdList.get(5);
  addPutMessagesToReplicasOfPartition(id, accountId, containerId, partitionId, Collections.singletonList(remoteHost), referenceTime, referenceTime + TimeUnit.DAYS.toMillis(cloudConfig.vcrMinTtlDays));
  addTtlUpdateMessagesToReplicasOfPartition(partitionId, id, Collections.singletonList(remoteHost), Utils.Infinite_Time);
  replicaThread.replicate();
  assertTrue("Blob should exist.", latchBasedInMemoryCloudDestination.doesBlobExist(id));
  // Verify expiration time of all blobs.
  Map<String, CloudBlobMetadata> map = latchBasedInMemoryCloudDestination.getBlobMetadata(blobIdList);
  for (BlobId blobId : blobIdList) {
    assertEquals("Blob ttl should be infinite now.", Utils.Infinite_Time, map.get(blobId.toString()).getExpirationTime());
  }
}
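The six cases collapse into a single upload decision, sketched below with hypothetical variable names (this mirrors the test's assertions above; it is not actual CloudBlobStore code):

// Upload at PUT time only if the blob is still live and, in VCR mode, its TTL
// reaches the vcrMinTtlDays threshold; otherwise the upload is deferred until
// the TtlUpdate record is replicated.
boolean uploadedOnPut = !alreadyExpired && (!isVcr || ttlMillis >= TimeUnit.DAYS.toMillis(cloudConfig.vcrMinTtlDays));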
Use of com.github.ambry.commons.BlobIdFactory in project ambry by LinkedIn.
The class CloudTokenPersistorTest, method basicTest.
@Test
public void basicTest() throws Exception {
  Properties props = VcrTestUtil.createVcrProperties("DC1", "vcrClusterName", "zkConnectString", 12310, 12410, 12510, null);
  props.setProperty("replication.cloud.token.factory", replicationCloudTokenFactory);
  CloudConfig cloudConfig = new CloudConfig(new VerifiableProperties(props));
  ClusterMapConfig clusterMapConfig = new ClusterMapConfig(new VerifiableProperties(props));
  ClusterMap clusterMap = new MockClusterMap();
  DataNodeId dataNodeId = new CloudDataNode(cloudConfig, clusterMapConfig);
  Map<String, Set<PartitionInfo>> mountPathToPartitionInfoList = new HashMap<>();
  BlobIdFactory blobIdFactory = new BlobIdFactory(clusterMap);
  StoreFindTokenFactory factory = new StoreFindTokenFactory(blobIdFactory);
  PartitionId partitionId = clusterMap.getAllPartitionIds(null).get(0);
  ReplicaId cloudReplicaId = new CloudReplica(partitionId, dataNodeId);
  List<? extends ReplicaId> peerReplicas = cloudReplicaId.getPeerReplicaIds();
  List<RemoteReplicaInfo> remoteReplicas = new ArrayList<RemoteReplicaInfo>();
  List<RemoteReplicaInfo.ReplicaTokenInfo> replicaTokenInfos = new ArrayList<>();
  for (ReplicaId remoteReplica : peerReplicas) {
    RemoteReplicaInfo remoteReplicaInfo = new RemoteReplicaInfo(remoteReplica, cloudReplicaId, null, factory.getNewFindToken(), 10, SystemTime.getInstance(), remoteReplica.getDataNodeId().getPortToConnectTo());
    remoteReplicas.add(remoteReplicaInfo);
    replicaTokenInfos.add(new RemoteReplicaInfo.ReplicaTokenInfo(remoteReplicaInfo));
  }
  PartitionInfo partitionInfo = new PartitionInfo(remoteReplicas, partitionId, null, cloudReplicaId);
  mountPathToPartitionInfoList.computeIfAbsent(cloudReplicaId.getMountPath(), key -> ConcurrentHashMap.newKeySet()).add(partitionInfo);
  LatchBasedInMemoryCloudDestination cloudDestination = new LatchBasedInMemoryCloudDestination(Collections.emptyList(), AzureCloudDestinationFactory.getReplicationFeedType(new VerifiableProperties(props)), clusterMap);
  ReplicationConfig replicationConfig = new ReplicationConfig(new VerifiableProperties(props));
  CloudTokenPersistor cloudTokenPersistor = new CloudTokenPersistor("replicaTokens", mountPathToPartitionInfoList, new ReplicationMetrics(new MetricRegistry(), Collections.emptyList()), clusterMap, new FindTokenHelper(blobIdFactory, replicationConfig), cloudDestination);
  cloudTokenPersistor.persist(cloudReplicaId.getMountPath(), replicaTokenInfos);
  List<RemoteReplicaInfo.ReplicaTokenInfo> retrievedReplicaTokenInfos = cloudTokenPersistor.retrieve(cloudReplicaId.getMountPath());
  Assert.assertEquals("Number of tokens doesn't match.", replicaTokenInfos.size(), retrievedReplicaTokenInfos.size());
  for (int i = 0; i < replicaTokenInfos.size(); i++) {
    Assert.assertArrayEquals("Token is not correct.", replicaTokenInfos.get(i).getReplicaToken().toBytes(), retrievedReplicaTokenInfos.get(i).getReplicaToken().toBytes());
  }
}
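The round trip at the heart of this test, in isolation (a sketch assuming the persistor and token list built as above):

cloudTokenPersistor.persist(cloudReplicaId.getMountPath(), replicaTokenInfos);
List<RemoteReplicaInfo.ReplicaTokenInfo> restored = cloudTokenPersistor.retrieve(cloudReplicaId.getMountPath());
// Tokens are compared via toBytes(), their serialized form, since FindToken
// implementations may not override equals().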