
Example 16 with BlobIdFactory

Use of com.github.ambry.commons.BlobIdFactory in project ambry by LinkedIn.

From class DiskTokenPersistorTest, method setup.

/**
 * Creates the one-time setup for the tests.
 * @throws Exception if an exception occurs during setup.
 */
@BeforeClass
public static void setup() throws Exception {
    clusterMap = new MockClusterMap();
    mountPathToPartitionInfoList = new HashMap<>();
    mountPathToReplicaTokenInfos = new HashMap<>();
    BlobIdFactory blobIdFactory = new BlobIdFactory(clusterMap);
    StoreFindTokenFactory factory = new StoreFindTokenFactory(blobIdFactory);
    DataNodeId dataNodeId = clusterMap.getDataNodeIds().get(0);
    List<? extends ReplicaId> localReplicas = clusterMap.getReplicaIds(dataNodeId);
    replicaId = localReplicas.get(0);
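    // Note: the loop variable below shadows the static replicaId field assigned above.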
    for (ReplicaId replicaId : localReplicas) {
        List<? extends ReplicaId> peerReplicas = replicaId.getPeerReplicaIds();
        List<RemoteReplicaInfo> remoteReplicas = new ArrayList<>();
        for (ReplicaId remoteReplica : peerReplicas) {
            RemoteReplicaInfo remoteReplicaInfo = new RemoteReplicaInfo(remoteReplica, replicaId, null, factory.getNewFindToken(), 10, SystemTime.getInstance(), remoteReplica.getDataNodeId().getPortToConnectTo());
            remoteReplicas.add(remoteReplicaInfo);
            mountPathToReplicaTokenInfos.computeIfAbsent(replicaId.getMountPath(), k -> new ArrayList<>()).add(new RemoteReplicaInfo.ReplicaTokenInfo(remoteReplicaInfo));
        }
        PartitionInfo partitionInfo = new PartitionInfo(remoteReplicas, replicaId.getPartitionId(), null, replicaId);
        mountPathToPartitionInfoList.computeIfAbsent(replicaId.getMountPath(), key -> ConcurrentHashMap.newKeySet()).add(partitionInfo);
    }
    Properties replicationProperties = new Properties();
    replicationProperties.setProperty("replication.cloud.token.factory", MockFindTokenFactory.class.getName());
    ReplicationConfig replicationConfig = new ReplicationConfig(new VerifiableProperties(replicationProperties));
    findTokenHelper = new FindTokenHelper(blobIdFactory, replicationConfig);
    mockStoreManager = Mockito.mock(StoreManager.class);
    Mockito.when(mockStoreManager.checkLocalPartitionStatus(any(), any())).thenReturn(ServerErrorCode.No_Error);
}
Also used: StoreManager(com.github.ambry.server.StoreManager) ArgumentMatchers(org.mockito.ArgumentMatchers) BeforeClass(org.junit.BeforeClass) ServerErrorCode(com.github.ambry.server.ServerErrorCode) CrcOutputStream(com.github.ambry.utils.CrcOutputStream) DataNodeId(com.github.ambry.clustermap.DataNodeId) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) DataOutputStream(java.io.DataOutputStream) Map(java.util.Map) SystemTime(com.github.ambry.utils.SystemTime) ReplicationConfig(com.github.ambry.config.ReplicationConfig) MetricRegistry(com.codahale.metrics.MetricRegistry) Properties(java.util.Properties) Iterator(java.util.Iterator) VerifiableProperties(com.github.ambry.config.VerifiableProperties) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) FileOutputStream(java.io.FileOutputStream) Set(java.util.Set) ClusterMap(com.github.ambry.clustermap.ClusterMap) Utils(com.github.ambry.utils.Utils) IOException(java.io.IOException) Test(org.junit.Test) BlobIdFactory(com.github.ambry.commons.BlobIdFactory) File(java.io.File) Mockito(org.mockito.Mockito) List(java.util.List) ReplicaId(com.github.ambry.clustermap.ReplicaId) StoreFindTokenFactory(com.github.ambry.store.StoreFindTokenFactory) Assert(org.junit.Assert) Collections(java.util.Collections) MockClusterMap(com.github.ambry.clustermap.MockClusterMap)
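
For reference, a minimal sketch of the core wiring this setup exercises: a BlobIdFactory acting as the StoreKeyFactory behind a StoreFindTokenFactory, which mints the initial find token each RemoteReplicaInfo starts from. Only APIs visible in this example are used; the class name FindTokenSketch is illustrative, and placing FindToken in com.github.ambry.replication (consistent with the other replication imports above) is an assumption.

import com.github.ambry.clustermap.MockClusterMap;
import com.github.ambry.commons.BlobIdFactory;
import com.github.ambry.replication.FindToken;
import com.github.ambry.store.StoreFindTokenFactory;

public class FindTokenSketch {
    public static void main(String[] args) throws Exception {
        // Wire a BlobIdFactory into a StoreFindTokenFactory, exactly as the setup above does.
        MockClusterMap clusterMap = new MockClusterMap();
        BlobIdFactory blobIdFactory = new BlobIdFactory(clusterMap);
        StoreFindTokenFactory tokenFactory = new StoreFindTokenFactory(blobIdFactory);
        // getNewFindToken() mints the "start of partition" token that each
        // RemoteReplicaInfo above begins replication from.
        FindToken startToken = tokenFactory.getNewFindToken();
        System.out.println("Initial token is " + startToken.toBytes().length + " bytes");
    }
}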

Example 17 with BlobIdFactory

Use of com.github.ambry.commons.BlobIdFactory in project ambry by LinkedIn.

From class CloudRouterFactory, method getRequestHandlerPool.

/**
 * Utility method to build a {@link RequestHandlerPool}.
 * @param verifiableProperties the properties to use.
 * @param clusterMap the {@link ClusterMap} to use.
 * @param cloudDestination the {@link CloudDestination} to use.
 * @param cloudConfig the {@link CloudConfig} to use.
 * @return the constructed {@link RequestHandlerPool}.
 * @throws Exception if the construction fails.
 */
public RequestHandlerPool getRequestHandlerPool(VerifiableProperties verifiableProperties, ClusterMap clusterMap, CloudDestination cloudDestination, CloudConfig cloudConfig) throws Exception {
    ClusterMapConfig clusterMapConfig = new ClusterMapConfig(verifiableProperties);
    MetricRegistry registry = clusterMap.getMetricRegistry();
    DataNodeId nodeId = new CloudDataNode(cloudConfig, clusterMapConfig);
    VcrMetrics vcrMetrics = new VcrMetrics(registry);
    StoreManager cloudStorageManager = new CloudStorageManager(verifiableProperties, vcrMetrics, cloudDestination, clusterMap);
    LocalRequestResponseChannel channel = new LocalRequestResponseChannel();
    ServerMetrics serverMetrics = new ServerMetrics(registry, AmbryRequests.class);
    StoreKeyFactory storeKeyFactory = new BlobIdFactory(clusterMap);
    StoreKeyConverterFactory storeKeyConverterFactory = Utils.getObj(routerConfig.routerStoreKeyConverterFactory, verifiableProperties, registry);
    // A null notification system is passed into AmbryRequests so that replication events are not emitted from a
    // frontend.
    AmbryRequests requests = new AmbryRequests(cloudStorageManager, channel, clusterMap, nodeId, registry, serverMetrics, null, null, null, storeKeyFactory, storeKeyConverterFactory);
    return new RequestHandlerPool(routerConfig.routerRequestHandlerNumOfThreads, channel, requests);
}
Also used: CloudStorageManager(com.github.ambry.cloud.CloudStorageManager) LocalRequestResponseChannel(com.github.ambry.network.LocalRequestResponseChannel) VcrMetrics(com.github.ambry.cloud.VcrMetrics) MetricRegistry(com.codahale.metrics.MetricRegistry) AmbryRequests(com.github.ambry.protocol.AmbryRequests) CloudDataNode(com.github.ambry.clustermap.CloudDataNode) ClusterMapConfig(com.github.ambry.config.ClusterMapConfig) StoreManager(com.github.ambry.server.StoreManager) BlobIdFactory(com.github.ambry.commons.BlobIdFactory) StoreKeyFactory(com.github.ambry.store.StoreKeyFactory) StoreKeyConverterFactory(com.github.ambry.store.StoreKeyConverterFactory) RequestHandlerPool(com.github.ambry.protocol.RequestHandlerPool) ServerMetrics(com.github.ambry.commons.ServerMetrics) DataNodeId(com.github.ambry.clustermap.DataNodeId)
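
The line to notice for this aggregation is StoreKeyFactory storeKeyFactory = new BlobIdFactory(clusterMap): the request path only ever sees the StoreKeyFactory interface. A hedged sketch of the equivalent reflective construction, mirroring the Utils.getObj pattern this method already uses for the store key converter factory; the sketch class name is illustrative.

import com.github.ambry.clustermap.ClusterMap;
import com.github.ambry.clustermap.MockClusterMap;
import com.github.ambry.store.StoreKeyFactory;
import com.github.ambry.utils.Utils;

public class StoreKeyFactorySketch {
    public static void main(String[] args) throws Exception {
        ClusterMap clusterMap = new MockClusterMap();
        // Reflective construction: equivalent to new BlobIdFactory(clusterMap), but
        // driven by a class name, the same Utils.getObj dispatch used above.
        StoreKeyFactory storeKeyFactory =
            Utils.getObj("com.github.ambry.commons.BlobIdFactory", clusterMap);
        System.out.println(storeKeyFactory.getClass().getName());
    }
}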

Example 18 with BlobIdFactory

Use of com.github.ambry.commons.BlobIdFactory in project ambry by LinkedIn.

From class IndexWritePerformance, method main.

public static void main(String[] args) {
    FileWriter writer = null;
    try {
        OptionParser parser = new OptionParser();
        ArgumentAcceptingOptionSpec<Integer> numberOfIndexesOpt = parser.accepts("numberOfIndexes", "The number of indexes to create").withRequiredArg().describedAs("number_of_indexes").ofType(Integer.class);
        ArgumentAcceptingOptionSpec<String> hardwareLayoutOpt = parser.accepts("hardwareLayout", "The path of the hardware layout file").withRequiredArg().describedAs("hardware_layout").ofType(String.class);
        ArgumentAcceptingOptionSpec<String> partitionLayoutOpt = parser.accepts("partitionLayout", "The path of the partition layout file").withRequiredArg().describedAs("partition_layout").ofType(String.class);
        ArgumentAcceptingOptionSpec<Integer> numberOfWritersOpt = parser.accepts("numberOfWriters", "The number of writers that write to a random index concurrently").withRequiredArg().describedAs("The number of writers").ofType(Integer.class).defaultsTo(4);
        ArgumentAcceptingOptionSpec<Integer> writesPerSecondOpt = parser.accepts("writesPerSecond", "The rate at which writes need to be performed").withRequiredArg().describedAs("The number of writes per second").ofType(Integer.class).defaultsTo(1000);
        ArgumentAcceptingOptionSpec<Boolean> verboseLoggingOpt = parser.accepts("enableVerboseLogging", "Enables verbose logging").withOptionalArg().describedAs("Enable verbose logging").ofType(Boolean.class).defaultsTo(false);
        OptionSet options = parser.parse(args);
        ArrayList<OptionSpec> listOpt = new ArrayList<>();
        listOpt.add(numberOfIndexesOpt);
        listOpt.add(hardwareLayoutOpt);
        listOpt.add(partitionLayoutOpt);
        ToolUtils.ensureOrExit(listOpt, options, parser);
        int numberOfIndexes = options.valueOf(numberOfIndexesOpt);
        int numberOfWriters = options.valueOf(numberOfWritersOpt);
        int writesPerSecond = options.valueOf(writesPerSecondOpt);
        boolean enableVerboseLogging = options.has(verboseLoggingOpt);
        if (enableVerboseLogging) {
            System.out.println("Enabled verbose logging");
        }
        final AtomicLong totalTimeTakenInNs = new AtomicLong(0);
        final AtomicLong totalWrites = new AtomicLong(0);
        String hardwareLayoutPath = options.valueOf(hardwareLayoutOpt);
        String partitionLayoutPath = options.valueOf(partitionLayoutOpt);
        ClusterMapConfig clusterMapConfig = new ClusterMapConfig(new VerifiableProperties(new Properties()));
        ClusterMap map = ((ClusterAgentsFactory) Utils.getObj(clusterMapConfig.clusterMapClusterAgentsFactory, clusterMapConfig, hardwareLayoutPath, partitionLayoutPath)).getClusterMap();
        StoreKeyFactory factory = new BlobIdFactory(map);
        File logFile = new File(System.getProperty("user.dir"), "writeperflog");
        writer = new FileWriter(logFile);
        MetricRegistry metricRegistry = new MetricRegistry();
        StoreMetrics metrics = new StoreMetrics(metricRegistry);
        DiskSpaceAllocator diskSpaceAllocator = new DiskSpaceAllocator(false, null, 0, new StorageManagerMetrics(metricRegistry));
        Properties props = new Properties();
        props.setProperty("store.index.memory.size.bytes", "2097152");
        props.setProperty("store.segment.size.in.bytes", "10");
        StoreConfig config = new StoreConfig(new VerifiableProperties(props));
        Log log = new Log(System.getProperty("user.dir"), 10, diskSpaceAllocator, config, metrics, null);
        ScheduledExecutorService s = Utils.newScheduler(numberOfWriters, "index", false);
        ArrayList<BlobIndexMetrics> indexWithMetrics = new ArrayList<BlobIndexMetrics>(numberOfIndexes);
        for (int i = 0; i < numberOfIndexes; i++) {
            File indexFile = new File(System.getProperty("user.dir"), Integer.toString(i));
            if (indexFile.exists()) {
                for (File c : indexFile.listFiles()) {
                    c.delete();
                }
            } else {
                indexFile.mkdir();
            }
            System.out.println("Creating index folder " + indexFile.getAbsolutePath());
            writer.write("logdir-" + indexFile.getAbsolutePath() + "\n");
            indexWithMetrics.add(new BlobIndexMetrics(indexFile.getAbsolutePath(), s, log, enableVerboseLogging, totalWrites, totalTimeTakenInNs, totalWrites, config, writer, factory));
        }
        final CountDownLatch latch = new CountDownLatch(numberOfWriters);
        final AtomicBoolean shutdown = new AtomicBoolean(false);
        // attach shutdown handler to catch control-c
        Runtime.getRuntime().addShutdownHook(new Thread() {

            public void run() {
                try {
                    System.out.println("Shutdown invoked");
                    shutdown.set(true);
                    latch.await();
                    System.out.println("Total writes : " + totalWrites.get() + "  Total time taken : " + totalTimeTakenInNs.get() + " Nano Seconds  Average time taken per write " + ((double) totalWrites.get() / totalTimeTakenInNs.get()) / SystemTime.NsPerSec + " Seconds");
                } catch (Exception e) {
                    System.out.println("Error while shutting down " + e);
                }
            }
        });
        Throttler throttler = new Throttler(writesPerSecond, 100, true, SystemTime.getInstance());
        Thread[] threadIndexPerf = new Thread[numberOfWriters];
        for (int i = 0; i < numberOfWriters; i++) {
            threadIndexPerf[i] = new Thread(new IndexWritePerfRun(indexWithMetrics, throttler, shutdown, latch, map));
            threadIndexPerf[i].start();
        }
        for (int i = 0; i < numberOfWriters; i++) {
            threadIndexPerf[i].join();
        }
    } catch (StoreException e) {
        System.err.println("Index creation error on exit " + e.getMessage());
    } catch (Exception e) {
        System.err.println("Error on exit " + e);
    } finally {
        if (writer != null) {
            try {
                writer.close();
            } catch (Exception e) {
                System.out.println("Error when closing the writer");
            }
        }
    }
}
Also used: OptionSpec(joptsimple.OptionSpec) ArgumentAcceptingOptionSpec(joptsimple.ArgumentAcceptingOptionSpec) ClusterMap(com.github.ambry.clustermap.ClusterMap) FileWriter(java.io.FileWriter) ArrayList(java.util.ArrayList) Properties(java.util.Properties) VerifiableProperties(com.github.ambry.config.VerifiableProperties) OptionParser(joptsimple.OptionParser) ClusterAgentsFactory(com.github.ambry.clustermap.ClusterAgentsFactory) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) Throttler(com.github.ambry.utils.Throttler) ScheduledExecutorService(java.util.concurrent.ScheduledExecutorService) MetricRegistry(com.codahale.metrics.MetricRegistry) CountDownLatch(java.util.concurrent.CountDownLatch) ClusterMapConfig(com.github.ambry.config.ClusterMapConfig) BlobIdFactory(com.github.ambry.commons.BlobIdFactory) AtomicLong(java.util.concurrent.atomic.AtomicLong) StoreConfig(com.github.ambry.config.StoreConfig) OptionSet(joptsimple.OptionSet) File(java.io.File)
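
The pacing in the writer threads comes from the Throttler built near the end of main. A minimal sketch of that pattern in isolation: the constructor arguments match the call above (target rate, 100 ms check interval, enabled, time source), while maybeThrottle(double) as the per-operation accounting call is my reading of the Throttler API and should be treated as an assumption.

import com.github.ambry.utils.SystemTime;
import com.github.ambry.utils.Throttler;

public class ThrottleSketch {
    public static void main(String[] args) throws InterruptedException {
        int writesPerSecond = 1000;
        // Same construction as in main above.
        Throttler throttler = new Throttler(writesPerSecond, 100, true, SystemTime.getInstance());
        for (int i = 0; i < 5000; i++) {
            // doOneIndexWrite();  // hypothetical unit of work
            // Report one completed write; sleeps when the observed rate exceeds the target.
            throttler.maybeThrottle(1);
        }
    }
}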

Example 19 with BlobIdFactory

Use of com.github.ambry.commons.BlobIdFactory in project ambry by LinkedIn.

From class CloudBlobStoreTest, method testPutWithTtl.

/**
 * Tests PUT (with TTL) and TtlUpdate record replication.
 * Replication may happen after both the PUT and the TtlUpdate, or after the TtlUpdate only.
 * The PUT may already be expired, or its expiration time may be below the upload threshold,
 * or at or above it.
 * @throws Exception on any error during the test.
 */
@Test
public void testPutWithTtl() throws Exception {
    // Set up remote host
    MockClusterMap clusterMap = new MockClusterMap();
    MockHost remoteHost = getLocalAndRemoteHosts(clusterMap).getSecond();
    List<PartitionId> partitionIds = clusterMap.getWritablePartitionIds(null);
    PartitionId partitionId = partitionIds.get(0);
    StoreKeyFactory storeKeyFactory = new BlobIdFactory(clusterMap);
    MockStoreKeyConverterFactory storeKeyConverterFactory = new MockStoreKeyConverterFactory(null, null);
    storeKeyConverterFactory.setConversionMap(new HashMap<>());
    storeKeyConverterFactory.setReturnInputIfAbsent(true);
    MockStoreKeyConverterFactory.MockStoreKeyConverter storeKeyConverter = storeKeyConverterFactory.getStoreKeyConverter();
    Transformer transformer = new BlobIdTransformer(storeKeyFactory, storeKeyConverter);
    Map<DataNodeId, MockHost> hosts = new HashMap<>();
    hosts.put(remoteHost.dataNodeId, remoteHost);
    MockConnectionPool connectionPool = new MockConnectionPool(hosts, clusterMap, 4);
    // Generate BlobIds for following PUT.
    short blobIdVersion = CommonTestUtils.getCurrentBlobIdVersion();
    short accountId = Utils.getRandomShort(TestUtils.RANDOM);
    short containerId = Utils.getRandomShort(TestUtils.RANDOM);
    boolean toEncrypt = TestUtils.RANDOM.nextBoolean();
    List<BlobId> blobIdList = new ArrayList<>();
    for (int i = 0; i < 6; i++) {
        blobIdList.add(new BlobId(blobIdVersion, BlobId.BlobIdType.NATIVE, ClusterMap.UNKNOWN_DATACENTER_ID, accountId, containerId, partitionId, toEncrypt, BlobId.BlobDataType.DATACHUNK));
    }
    // Set up VCR
    Properties props = new Properties();
    setBasicProperties(props);
    props.setProperty("clustermap.port", "12300");
    props.setProperty("vcr.ssl.port", "12345");
    ReplicationConfig replicationConfig = new ReplicationConfig(new VerifiableProperties(props));
    ClusterMapConfig clusterMapConfig = new ClusterMapConfig(new VerifiableProperties(props));
    CloudConfig cloudConfig = new CloudConfig(new VerifiableProperties(props));
    CloudDataNode cloudDataNode = new CloudDataNode(cloudConfig, clusterMapConfig);
    LatchBasedInMemoryCloudDestination latchBasedInMemoryCloudDestination = new LatchBasedInMemoryCloudDestination(blobIdList, clusterMap);
    CloudReplica cloudReplica = new CloudReplica(partitionId, cloudDataNode);
    CloudBlobStore cloudBlobStore = new CloudBlobStore(new VerifiableProperties(props), partitionId, latchBasedInMemoryCloudDestination, clusterMap, new VcrMetrics(new MetricRegistry()));
    cloudBlobStore.start();
    // Create ReplicaThread and add RemoteReplicaInfo to it.
    ReplicationMetrics replicationMetrics = new ReplicationMetrics(new MetricRegistry(), Collections.emptyList());
    ReplicaThread replicaThread = new ReplicaThread("threadtest", new MockFindTokenHelper(storeKeyFactory, replicationConfig), clusterMap, new AtomicInteger(0), cloudDataNode, connectionPool, replicationConfig, replicationMetrics, null, storeKeyConverter, transformer, clusterMap.getMetricRegistry(), false, cloudDataNode.getDatacenterName(), new ResponseHandler(clusterMap), new MockTime(), null, null, null);
    for (ReplicaId replica : partitionId.getReplicaIds()) {
        if (replica.getDataNodeId() == remoteHost.dataNodeId) {
            RemoteReplicaInfo remoteReplicaInfo = new RemoteReplicaInfo(replica, cloudReplica, cloudBlobStore, new MockFindToken(0, 0), Long.MAX_VALUE, SystemTime.getInstance(), new Port(remoteHost.dataNodeId.getPort(), PortType.PLAINTEXT));
            replicaThread.addRemoteReplicaInfo(remoteReplicaInfo);
            break;
        }
    }
    long referenceTime = System.currentTimeMillis();
    // Case 1: Put already expired. Replication happens after Put and after TtlUpdate.
    // Upload to Cloud only after replicating ttlUpdate.
    BlobId id = blobIdList.get(0);
    addPutMessagesToReplicasOfPartition(id, accountId, containerId, partitionId, Collections.singletonList(remoteHost), referenceTime - 2000, referenceTime - 1000);
    replicaThread.replicate();
    assertFalse("Blob should not exist.", latchBasedInMemoryCloudDestination.doesBlobExist(id));
    addTtlUpdateMessagesToReplicasOfPartition(partitionId, id, Collections.singletonList(remoteHost), Utils.Infinite_Time);
    replicaThread.replicate();
    assertTrue("Blob should exist.", latchBasedInMemoryCloudDestination.doesBlobExist(id));
    // Case 2: Put already expired. Replication happens after TtlUpdate.
    // Upload to Cloud only after replicating ttlUpdate.
    id = blobIdList.get(1);
    addPutMessagesToReplicasOfPartition(id, accountId, containerId, partitionId, Collections.singletonList(remoteHost), referenceTime - 2000, referenceTime - 1000);
    addTtlUpdateMessagesToReplicasOfPartition(partitionId, id, Collections.singletonList(remoteHost), Utils.Infinite_Time);
    replicaThread.replicate();
    assertTrue("Blob should exist.", latchBasedInMemoryCloudDestination.doesBlobExist(id));
    // Case 3: Put TTL less than cloudConfig.vcrMinTtlDays. Replication happens after Put and after TtlUpdate.
    // Upload to Cloud only after replicating ttlUpdate.
    id = blobIdList.get(2);
    addPutMessagesToReplicasOfPartition(id, accountId, containerId, partitionId, Collections.singletonList(remoteHost), referenceTime, referenceTime + TimeUnit.DAYS.toMillis(cloudConfig.vcrMinTtlDays) - 1);
    replicaThread.replicate();
    if (isVcr) {
        assertFalse("Blob should not exist (vcr).", latchBasedInMemoryCloudDestination.doesBlobExist(id));
    } else {
        assertTrue("Blob should exist (not vcr).", latchBasedInMemoryCloudDestination.doesBlobExist(id));
    }
    addTtlUpdateMessagesToReplicasOfPartition(partitionId, id, Collections.singletonList(remoteHost), Utils.Infinite_Time);
    replicaThread.replicate();
    assertTrue("Blob should exist.", latchBasedInMemoryCloudDestination.doesBlobExist(id));
    // Case 4: Put TTL less than cloudConfig.vcrMinTtlDays. Replication happens after TtlUpdate.
    // Upload to Cloud only after replicating ttlUpdate.
    id = blobIdList.get(3);
    addPutMessagesToReplicasOfPartition(id, accountId, containerId, partitionId, Collections.singletonList(remoteHost), referenceTime, referenceTime + TimeUnit.DAYS.toMillis(cloudConfig.vcrMinTtlDays) - 1);
    addTtlUpdateMessagesToReplicasOfPartition(partitionId, id, Collections.singletonList(remoteHost), Utils.Infinite_Time);
    replicaThread.replicate();
    assertTrue("Blob should exist.", latchBasedInMemoryCloudDestination.doesBlobExist(id));
    // Case 5: Put TTL greater than or equals to cloudConfig.vcrMinTtlDays. Replication happens after Put and after TtlUpdate.
    // Upload to Cloud after Put and update ttl after TtlUpdate.
    id = blobIdList.get(4);
    addPutMessagesToReplicasOfPartition(id, accountId, containerId, partitionId, Collections.singletonList(remoteHost), referenceTime, referenceTime + TimeUnit.DAYS.toMillis(cloudConfig.vcrMinTtlDays));
    replicaThread.replicate();
    assertTrue(latchBasedInMemoryCloudDestination.doesBlobExist(id));
    addTtlUpdateMessagesToReplicasOfPartition(partitionId, id, Collections.singletonList(remoteHost), Utils.Infinite_Time);
    replicaThread.replicate();
    assertTrue("Blob should exist.", latchBasedInMemoryCloudDestination.doesBlobExist(id));
    // Case 6: Put TTL greater than or equals to cloudConfig.vcrMinTtlDays. Replication happens after TtlUpdate.
    // Upload to Cloud after TtlUpdate.
    id = blobIdList.get(5);
    addPutMessagesToReplicasOfPartition(id, accountId, containerId, partitionId, Collections.singletonList(remoteHost), referenceTime, referenceTime + TimeUnit.DAYS.toMillis(cloudConfig.vcrMinTtlDays));
    addTtlUpdateMessagesToReplicasOfPartition(partitionId, id, Collections.singletonList(remoteHost), Utils.Infinite_Time);
    replicaThread.replicate();
    assertTrue("Blob should exist.", latchBasedInMemoryCloudDestination.doesBlobExist(id));
    // Verify expiration time of all blobs.
    Map<String, CloudBlobMetadata> map = latchBasedInMemoryCloudDestination.getBlobMetadata(blobIdList);
    for (BlobId blobId : blobIdList) {
        assertEquals("Blob ttl should be infinite now.", Utils.Infinite_Time, map.get(blobId.toString()).getExpirationTime());
    }
}
Also used: ReplicaThread(com.github.ambry.replication.ReplicaThread) Transformer(com.github.ambry.store.Transformer) BlobIdTransformer(com.github.ambry.replication.BlobIdTransformer) RemoteReplicaInfo(com.github.ambry.replication.RemoteReplicaInfo) ResponseHandler(com.github.ambry.commons.ResponseHandler) HashMap(java.util.HashMap) MockConnectionPool(com.github.ambry.replication.MockConnectionPool) ReplicationMetrics(com.github.ambry.replication.ReplicationMetrics) Port(com.github.ambry.network.Port) ArrayList(java.util.ArrayList) CloudConfig(com.github.ambry.config.CloudConfig) Properties(java.util.Properties) VerifiableProperties(com.github.ambry.config.VerifiableProperties) StoreKeyFactory(com.github.ambry.store.StoreKeyFactory) MockTime(com.github.ambry.utils.MockTime) CloudReplica(com.github.ambry.clustermap.CloudReplica) MockStoreKeyConverterFactory(com.github.ambry.store.MockStoreKeyConverterFactory) ReplicationConfig(com.github.ambry.config.ReplicationConfig) MetricRegistry(com.codahale.metrics.MetricRegistry) MockPartitionId(com.github.ambry.clustermap.MockPartitionId) PartitionId(com.github.ambry.clustermap.PartitionId) CloudDataNode(com.github.ambry.clustermap.CloudDataNode) ClusterMapConfig(com.github.ambry.config.ClusterMapConfig) ReplicaId(com.github.ambry.clustermap.ReplicaId) BlobIdFactory(com.github.ambry.commons.BlobIdFactory) MockFindTokenHelper(com.github.ambry.replication.MockFindTokenHelper) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) MockHost(com.github.ambry.replication.MockHost) MockFindToken(com.github.ambry.replication.MockFindToken) DataNodeId(com.github.ambry.clustermap.DataNodeId) BlobId(com.github.ambry.commons.BlobId) MockClusterMap(com.github.ambry.clustermap.MockClusterMap) ReplicationTest(com.github.ambry.replication.ReplicationTest) Test(org.junit.Test)
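
The BlobIdFactory created at the top of this test is what lets the replica thread turn serialized ids back into StoreKeys. A hedged sketch of that round trip, reusing the BlobId constructor shape shown above; the version, account, and container values are illustrative placeholders, not values from the test.

import java.io.ByteArrayInputStream;
import java.io.DataInputStream;

import com.github.ambry.clustermap.ClusterMap;
import com.github.ambry.clustermap.MockClusterMap;
import com.github.ambry.clustermap.PartitionId;
import com.github.ambry.commons.BlobId;
import com.github.ambry.commons.BlobIdFactory;
import com.github.ambry.store.StoreKey;

public class BlobIdRoundTripSketch {
    public static void main(String[] args) throws Exception {
        MockClusterMap clusterMap = new MockClusterMap();
        PartitionId partitionId = clusterMap.getWritablePartitionIds(null).get(0);
        // Same constructor shape as the test above; (short) 6 stands in for the
        // current blob id version and is an assumption.
        BlobId original = new BlobId((short) 6, BlobId.BlobIdType.NATIVE,
            ClusterMap.UNKNOWN_DATACENTER_ID, (short) 101, (short) 102, partitionId,
            false, BlobId.BlobDataType.DATACHUNK);
        // Serialize, then let the factory reconstruct the key from the stream.
        DataInputStream stream = new DataInputStream(new ByteArrayInputStream(original.toBytes()));
        StoreKey decoded = new BlobIdFactory(clusterMap).getStoreKey(stream);
        System.out.println(original.getID().equals(decoded.getID()));  // expect true
    }
}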

Example 20 with BlobIdFactory

Use of com.github.ambry.commons.BlobIdFactory in project ambry by LinkedIn.

From class CloudTokenPersistorTest, method basicTest.

@Test
public void basicTest() throws Exception {
    Properties props = VcrTestUtil.createVcrProperties("DC1", "vcrClusterName", "zkConnectString", 12310, 12410, 12510, null);
    props.setProperty("replication.cloud.token.factory", replicationCloudTokenFactory);
    CloudConfig cloudConfig = new CloudConfig(new VerifiableProperties(props));
    ClusterMapConfig clusterMapConfig = new ClusterMapConfig(new VerifiableProperties(props));
    ClusterMap clusterMap = new MockClusterMap();
    DataNodeId dataNodeId = new CloudDataNode(cloudConfig, clusterMapConfig);
    Map<String, Set<PartitionInfo>> mountPathToPartitionInfoList = new HashMap<>();
    BlobIdFactory blobIdFactory = new BlobIdFactory(clusterMap);
    StoreFindTokenFactory factory = new StoreFindTokenFactory(blobIdFactory);
    PartitionId partitionId = clusterMap.getAllPartitionIds(null).get(0);
    ReplicaId cloudReplicaId = new CloudReplica(partitionId, dataNodeId);
    List<? extends ReplicaId> peerReplicas = cloudReplicaId.getPeerReplicaIds();
    List<RemoteReplicaInfo> remoteReplicas = new ArrayList<RemoteReplicaInfo>();
    List<RemoteReplicaInfo.ReplicaTokenInfo> replicaTokenInfos = new ArrayList<>();
    for (ReplicaId remoteReplica : peerReplicas) {
        RemoteReplicaInfo remoteReplicaInfo = new RemoteReplicaInfo(remoteReplica, cloudReplicaId, null, factory.getNewFindToken(), 10, SystemTime.getInstance(), remoteReplica.getDataNodeId().getPortToConnectTo());
        remoteReplicas.add(remoteReplicaInfo);
        replicaTokenInfos.add(new RemoteReplicaInfo.ReplicaTokenInfo(remoteReplicaInfo));
    }
    PartitionInfo partitionInfo = new PartitionInfo(remoteReplicas, partitionId, null, cloudReplicaId);
    mountPathToPartitionInfoList.computeIfAbsent(cloudReplicaId.getMountPath(), key -> ConcurrentHashMap.newKeySet()).add(partitionInfo);
    LatchBasedInMemoryCloudDestination cloudDestination = new LatchBasedInMemoryCloudDestination(Collections.emptyList(), AzureCloudDestinationFactory.getReplicationFeedType(new VerifiableProperties(props)), clusterMap);
    ReplicationConfig replicationConfig = new ReplicationConfig(new VerifiableProperties(props));
    CloudTokenPersistor cloudTokenPersistor = new CloudTokenPersistor("replicaTokens", mountPathToPartitionInfoList, new ReplicationMetrics(new MetricRegistry(), Collections.emptyList()), clusterMap, new FindTokenHelper(blobIdFactory, replicationConfig), cloudDestination);
    cloudTokenPersistor.persist(cloudReplicaId.getMountPath(), replicaTokenInfos);
    List<RemoteReplicaInfo.ReplicaTokenInfo> retrievedReplicaTokenInfos = cloudTokenPersistor.retrieve(cloudReplicaId.getMountPath());
    Assert.assertEquals("Number of tokens doesn't match.", replicaTokenInfos.size(), retrievedReplicaTokenInfos.size());
    for (int i = 0; i < replicaTokenInfos.size(); i++) {
        Assert.assertArrayEquals("Token is not correct.", replicaTokenInfos.get(i).getReplicaToken().toBytes(), retrievedReplicaTokenInfos.get(i).getReplicaToken().toBytes());
    }
}
Also used: Arrays(java.util.Arrays) AzureCloudDestinationFactory(com.github.ambry.cloud.azure.AzureCloudDestinationFactory) CloudReplica(com.github.ambry.clustermap.CloudReplica) DataNodeId(com.github.ambry.clustermap.DataNodeId) ReplicationMetrics(com.github.ambry.replication.ReplicationMetrics) RunWith(org.junit.runner.RunWith) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) CloudConfig(com.github.ambry.config.CloudConfig) Map(java.util.Map) SystemTime(com.github.ambry.utils.SystemTime) RemoteReplicaInfo(com.github.ambry.replication.RemoteReplicaInfo) CloudDataNode(com.github.ambry.clustermap.CloudDataNode) PartitionInfo(com.github.ambry.replication.PartitionInfo) Parameterized(org.junit.runners.Parameterized) ReplicationConfig(com.github.ambry.config.ReplicationConfig) FindTokenHelper(com.github.ambry.replication.FindTokenHelper) MetricRegistry(com.codahale.metrics.MetricRegistry) Properties(java.util.Properties) VerifiableProperties(com.github.ambry.config.VerifiableProperties) Collection(java.util.Collection) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) Set(java.util.Set) ClusterMap(com.github.ambry.clustermap.ClusterMap) Test(org.junit.Test) BlobIdFactory(com.github.ambry.commons.BlobIdFactory) List(java.util.List) ReplicaId(com.github.ambry.clustermap.ReplicaId) StoreFindTokenFactory(com.github.ambry.store.StoreFindTokenFactory) ClusterMapConfig(com.github.ambry.config.ClusterMapConfig) Assert(org.junit.Assert) Collections(java.util.Collections) MockClusterMap(com.github.ambry.clustermap.MockClusterMap) PartitionId(com.github.ambry.clustermap.PartitionId)
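
The FindTokenHelper handed to CloudTokenPersistor is what maps a persisted token back to the right factory on retrieval. A sketch of that wiring under stated assumptions: the accessor name getFindTokenFactoryFromReplicaType and the com.github.ambry.replication package for MockFindTokenFactory are my best reading of the API, not confirmed by the text above.

import java.util.Properties;

import com.github.ambry.clustermap.MockClusterMap;
import com.github.ambry.clustermap.ReplicaType;
import com.github.ambry.commons.BlobIdFactory;
import com.github.ambry.config.ReplicationConfig;
import com.github.ambry.config.VerifiableProperties;
import com.github.ambry.replication.FindTokenFactory;
import com.github.ambry.replication.FindTokenHelper;
import com.github.ambry.replication.MockFindTokenFactory;

public class FindTokenHelperSketch {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        // Same pluggable-factory config the tests above rely on.
        props.setProperty("replication.cloud.token.factory", MockFindTokenFactory.class.getName());
        ReplicationConfig replicationConfig = new ReplicationConfig(new VerifiableProperties(props));
        BlobIdFactory blobIdFactory = new BlobIdFactory(new MockClusterMap());
        FindTokenHelper helper = new FindTokenHelper(blobIdFactory, replicationConfig);
        // Assumed accessor: resolve the factory used for cloud-backed replicas,
        // i.e. the one configured above.
        FindTokenFactory cloudFactory = helper.getFindTokenFactoryFromReplicaType(ReplicaType.CLOUD_BACKED);
        System.out.println(cloudFactory.getClass().getName());
    }
}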

Aggregations

BlobIdFactory (com.github.ambry.commons.BlobIdFactory): 32
ArrayList (java.util.ArrayList): 28
MockClusterMap (com.github.ambry.clustermap.MockClusterMap): 22
HashMap (java.util.HashMap): 21
VerifiableProperties (com.github.ambry.config.VerifiableProperties): 20
ClusterMap (com.github.ambry.clustermap.ClusterMap): 19
List (java.util.List): 19
DataNodeId (com.github.ambry.clustermap.DataNodeId): 18
PartitionId (com.github.ambry.clustermap.PartitionId): 18
Map (java.util.Map): 18
Test (org.junit.Test): 18
MockPartitionId (com.github.ambry.clustermap.MockPartitionId): 17
BlobId (com.github.ambry.commons.BlobId): 17
StoreKeyFactory (com.github.ambry.store.StoreKeyFactory): 16
Properties (java.util.Properties): 16
MockStoreKeyConverterFactory (com.github.ambry.store.MockStoreKeyConverterFactory): 13
MetricRegistry (com.codahale.metrics.MetricRegistry): 12
ClusterMapConfig (com.github.ambry.config.ClusterMapConfig): 12
Transformer (com.github.ambry.store.Transformer): 12
MockDataNodeId (com.github.ambry.clustermap.MockDataNodeId): 11