Example 6 with FindTokenFactory

Use of com.github.ambry.replication.FindTokenFactory in project ambry by LinkedIn.

The class AzureIntegrationTest, method testFindEntriesSince.

/**
 * Test findEntriesSince with specified cloud token factory.
 * @param replicationCloudTokenFactory the factory to use.
 * @throws Exception on error
 */
private void testFindEntriesSince(String replicationCloudTokenFactory) throws Exception {
    logger.info("Testing findEntriesSince with {}", replicationCloudTokenFactory);
    testProperties.setProperty(ReplicationConfig.REPLICATION_CLOUD_TOKEN_FACTORY, replicationCloudTokenFactory);
    VerifiableProperties verifiableProperties = new VerifiableProperties(testProperties);
    ReplicationConfig replicationConfig = new ReplicationConfig(verifiableProperties);
    FindTokenFactory findTokenFactory = new FindTokenHelper(null, replicationConfig).getFindTokenFactoryFromReplicaType(ReplicaType.CLOUD_BACKED);
    azureDest = (AzureCloudDestination) new AzureCloudDestinationFactory(verifiableProperties, new MetricRegistry(), clusterMap).getCloudDestination();
    cleanup();
    PartitionId partitionId = new MockPartitionId(testPartition, MockClusterMap.DEFAULT_PARTITION_CLASS);
    String partitionPath = String.valueOf(testPartition);
    // Upload some blobs with different upload times
    int blobCount = 90;
    int chunkSize = 1000;
    int maxTotalSize = 20000;
    int expectedNumQueries = (blobCount * chunkSize) / maxTotalSize + 1;
    long now = System.currentTimeMillis();
    long startTime = now - TimeUnit.DAYS.toMillis(7);
    for (int j = 0; j < blobCount; j++) {
        BlobId blobId = new BlobId(BLOB_ID_V6, BlobIdType.NATIVE, dataCenterId, accountId, containerId, partitionId, false, BlobDataType.DATACHUNK);
        InputStream inputStream = getBlobInputStream(chunkSize);
        CloudBlobMetadata cloudBlobMetadata = new CloudBlobMetadata(blobId, startTime, Utils.Infinite_Time, chunkSize, CloudBlobMetadata.EncryptionOrigin.VCR, vcrKmsContext, cryptoAgentFactory, chunkSize, (short) 0);
        cloudBlobMetadata.setUploadTime(startTime + j * 1000);
        assertTrue("Expected upload to return true", uploadBlobWithRetry(blobId, chunkSize, cloudBlobMetadata, inputStream, cloudRequestAgent, azureDest));
    }
    FindToken findToken = findTokenFactory.getNewFindToken();
    // Call findEntriesSince in a loop until no new entries are returned
    FindResult findResult;
    int numQueries = 0;
    int totalBlobsReturned = 0;
    do {
        findResult = findEntriesSinceWithRetry(partitionPath, findToken, maxTotalSize);
        findToken = findResult.getUpdatedFindToken();
        if (!findResult.getMetadataList().isEmpty()) {
            numQueries++;
        }
        totalBlobsReturned += findResult.getMetadataList().size();
    } while (!noMoreFindSinceEntries(findResult, findToken));
    assertEquals("Wrong number of queries", expectedNumQueries, numQueries);
    assertEquals("Wrong number of blobs", blobCount, totalBlobsReturned);
    assertEquals("Wrong byte count", blobCount * chunkSize, findToken.getBytesRead());
    cleanup();
}
Also used : ReplicationConfig(com.github.ambry.config.ReplicationConfig) VerifiableProperties(com.github.ambry.config.VerifiableProperties) MockPartitionId(com.github.ambry.clustermap.MockPartitionId) FindTokenHelper(com.github.ambry.replication.FindTokenHelper) ByteArrayInputStream(java.io.ByteArrayInputStream) InputStream(java.io.InputStream) CloudBlobMetadata(com.github.ambry.cloud.CloudBlobMetadata) MetricRegistry(com.codahale.metrics.MetricRegistry) PartitionId(com.github.ambry.clustermap.PartitionId) FindTokenFactory(com.github.ambry.replication.FindTokenFactory) FindToken(com.github.ambry.replication.FindToken) BlobId(com.github.ambry.commons.BlobId) FindResult(com.github.ambry.cloud.FindResult)
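
The factory wiring in this test can be isolated into a small helper. Below is a minimal sketch, not taken from the test itself (the helper name is illustrative): it assumes the ReplicationConfig defaults are acceptable for everything except the cloud token factory property, and it uses only classes that already appear in the example and its import list, plus java.util.Properties.

// Sketch: build the FindTokenFactory for CLOUD_BACKED replicas and mint the starting token.
// replicationCloudTokenFactory is whatever factory class name the caller wants to plug in.
static FindToken initialCloudFindToken(String replicationCloudTokenFactory) throws Exception {
    Properties props = new Properties();
    props.setProperty(ReplicationConfig.REPLICATION_CLOUD_TOKEN_FACTORY, replicationCloudTokenFactory);
    ReplicationConfig replicationConfig = new ReplicationConfig(new VerifiableProperties(props));
    // Passing null for the StoreKeyFactory is sufficient on the cloud path, as the test above does.
    FindTokenFactory cloudTokenFactory =
        new FindTokenHelper(null, replicationConfig).getFindTokenFactoryFromReplicaType(ReplicaType.CLOUD_BACKED);
    // The fresh token marks the start of the partition; each findEntriesSince call returns an updated token.
    return cloudTokenFactory.getNewFindToken();
}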

Example 7 with FindTokenFactory

Use of com.github.ambry.replication.FindTokenFactory in project ambry by LinkedIn.

The class ServerHardDeleteTest, method ensureCleanupTokenCatchesUp.

/**
 * Waits and ensures that the hard delete cleanup token catches up to the expected token value.
 * @param path the path to the cleanup token.
 * @param mockClusterMap the {@link MockClusterMap} being used for the cluster.
 * @param expectedTokenValue the expected value that the cleanup token should contain. The method keeps reopening
 *                           the file and reading the value until this value is reached or a predefined timeout
 *                           expires.
 * @throws Exception if there were any I/O errors or the sleep gets interrupted.
 */
void ensureCleanupTokenCatchesUp(String path, MockClusterMap mockClusterMap, long expectedTokenValue) throws Exception {
    final int TIMEOUT = 10000;
    File cleanupTokenFile = new File(path, "cleanuptoken");
    StoreFindToken endToken;
    long parsedTokenValue = -1;
    long endTime = SystemTime.getInstance().milliseconds() + TIMEOUT;
    do {
        if (cleanupTokenFile.exists()) {
            /* The cleanup token format is as follows:
           --
           token_version
           startTokenForRecovery
           endTokenForRecovery
           pause flag
           numBlobsInRange
           --
           blob1_blobReadOptions {version, offset, sz, ttl, key}
           blob2_blobReadOptions
           ....
           blobN_blobReadOptions
           --
           length_of_blob1_messageStoreRecoveryInfo
           blob1_messageStoreRecoveryInfo {headerVersion, userMetadataVersion, userMetadataSize, blobRecordVersion,
            blobType, blobStreamSize}
           length_of_blob2_messageStoreRecoveryInfo
           blob2_messageStoreRecoveryInfo
           ....
           length_of_blobN_messageStoreRecoveryInfo
           blobN_messageStoreRecoveryInfo
           crc
           ---
         */
            CrcInputStream crcStream = new CrcInputStream(new FileInputStream(cleanupTokenFile));
            DataInputStream stream = new DataInputStream(crcStream);
            try {
                short version = stream.readShort();
                Assert.assertEquals(version, HardDeleter.Cleanup_Token_Version_V1);
                StoreKeyFactory storeKeyFactory = Utils.getObj("com.github.ambry.commons.BlobIdFactory", mockClusterMap);
                FindTokenFactory factory = Utils.getObj("com.github.ambry.store.StoreFindTokenFactory", storeKeyFactory);
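                // read and discard startTokenForRecovery; only endTokenForRecovery is needed below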
                factory.getFindToken(stream);
                endToken = (StoreFindToken) factory.getFindToken(stream);
                Offset endTokenOffset = endToken.getOffset();
                parsedTokenValue = endTokenOffset == null ? -1 : endTokenOffset.getOffset();
                boolean pauseFlag = stream.readByte() == (byte) 1;
                int num = stream.readInt();
                List<StoreKey> storeKeyList = new ArrayList<StoreKey>(num);
                for (int i = 0; i < num; i++) {
                    // Read BlobReadOptions
                    short blobReadOptionsVersion = stream.readShort();
                    switch(blobReadOptionsVersion) {
                        case 1:
                            Offset.fromBytes(stream);
                            stream.readLong();
                            stream.readLong();
                            StoreKey key = storeKeyFactory.getStoreKey(stream);
                            storeKeyList.add(key);
                            break;
                        default:
                            Assert.fail("Unrecognized BlobReadOptions version: " + blobReadOptionsVersion);
                    }
                }
                for (int i = 0; i < num; i++) {
                    int length = stream.readInt();
                    short headerVersion = stream.readShort();
                    short userMetadataVersion = stream.readShort();
                    int userMetadataSize = stream.readInt();
                    short blobRecordVersion = stream.readShort();
                    if (blobRecordVersion == MessageFormatRecord.Blob_Version_V2) {
                        short blobType = stream.readShort();
                    }
                    long blobStreamSize = stream.readLong();
                    StoreKey key = storeKeyFactory.getStoreKey(stream);
                    Assert.assertTrue(storeKeyList.get(i).equals(key));
                }
                long crc = crcStream.getValue();
                Assert.assertEquals(crc, stream.readLong());
                Thread.sleep(1000);
            } finally {
                stream.close();
            }
        }
    } while (SystemTime.getInstance().milliseconds() < endTime && parsedTokenValue < expectedTokenValue);
    Assert.assertEquals(expectedTokenValue, parsedTokenValue);
}
Also used : ArrayList(java.util.ArrayList) StoreFindToken(com.github.ambry.store.StoreFindToken) DataInputStream(java.io.DataInputStream) FindTokenFactory(com.github.ambry.replication.FindTokenFactory) StoreKey(com.github.ambry.store.StoreKey) FileInputStream(java.io.FileInputStream) Offset(com.github.ambry.store.Offset) CrcInputStream(com.github.ambry.utils.CrcInputStream) StoreKeyFactory(com.github.ambry.store.StoreKeyFactory) File(java.io.File)
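
The reflective construction above can also be reused on its own to inspect any file that begins with a serialized store token. A minimal sketch under that assumption (readStoreTokenOffset and tokenFile are illustrative names; the ClusterMap parameter plays the role of the mockClusterMap used above, and only calls already shown in the example are used):

// Sketch: deserialize a single StoreFindToken and return the log offset it points at.
static long readStoreTokenOffset(File tokenFile, ClusterMap clusterMap) throws Exception {
    StoreKeyFactory storeKeyFactory = Utils.getObj("com.github.ambry.commons.BlobIdFactory", clusterMap);
    FindTokenFactory factory = Utils.getObj("com.github.ambry.store.StoreFindTokenFactory", storeKeyFactory);
    try (DataInputStream tokenStream = new DataInputStream(new FileInputStream(tokenFile))) {
        StoreFindToken token = (StoreFindToken) factory.getFindToken(tokenStream);
        Offset offset = token.getOffset();
        // an uninitialized token carries no offset; mirror the -1 convention used in the test above
        return offset == null ? -1 : offset.getOffset();
    }
}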

Example 8 with FindTokenFactory

Use of com.github.ambry.replication.FindTokenFactory in project ambry by LinkedIn.

The class DumpReplicaTokenTool, method dumpReplicaToken.

/**
 * Dumps the replica token file.
 * @throws Exception on error
 */
private void dumpReplicaToken() throws Exception {
    logger.info("Dumping replica token file {}", fileToRead);
    DataInputStream stream = new DataInputStream(new FileInputStream(fileToRead));
    short version = stream.readShort();
    switch(version) {
        case 0:
            int Crc_Size = 8;
            StoreKeyFactory storeKeyFactory = Utils.getObj("com.github.ambry.commons.BlobIdFactory", clusterMap);
            FindTokenFactory findTokenFactory = Utils.getObj("com.github.ambry.store.StoreFindTokenFactory", storeKeyFactory);
            while (stream.available() > Crc_Size) {
                // read partition id
                PartitionId partitionId = clusterMap.getPartitionIdFromStream(stream);
                // read remote node host name
                String hostname = Utils.readIntString(stream);
                // read remote replica path
                String replicaPath = Utils.readIntString(stream);
                // read remote port
                int port = stream.readInt();
                // read total bytes read from local store
                long totalBytesReadFromLocalStore = stream.readLong();
                // read replica token
                FindToken token = findTokenFactory.getFindToken(stream);
                logger.info("partitionId {} hostname {} replicaPath {} port {} totalBytesReadFromLocalStore {} token {}", partitionId, hostname, replicaPath, port, totalBytesReadFromLocalStore, token);
            }
            logger.info("crc {}", stream.readLong());
            break;
        default:
            logger.error("Version {} unsupported ", version);
    }
}
Also used : FindToken(com.github.ambry.replication.FindToken) DataInputStream(java.io.DataInputStream) PartitionId(com.github.ambry.clustermap.PartitionId) FindTokenFactory(com.github.ambry.replication.FindTokenFactory) FileInputStream(java.io.FileInputStream)
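
Since getFindToken is the read half of the factory, a quick sanity check in tests is to round-trip a token through its byte form. The following is a minimal sketch, not taken from the tool above (roundTripStoreToken is an illustrative name; storeKeyFactory is assumed to be built reflectively as in the earlier examples, and toBytes() comes from the FindToken interface):

// Sketch: serialize a fresh store token and read it back through the same factory.
static void roundTripStoreToken(StoreKeyFactory storeKeyFactory) throws Exception {
    FindTokenFactory factory = Utils.getObj("com.github.ambry.store.StoreFindTokenFactory", storeKeyFactory);
    FindToken original = factory.getNewFindToken();
    // toBytes() yields the persisted form; getFindToken() is its inverse
    FindToken restored = factory.getFindToken(new DataInputStream(new ByteArrayInputStream(original.toBytes())));
    Assert.assertEquals(original.getBytesRead(), restored.getBytesRead());
}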

Aggregations

FindTokenFactory (com.github.ambry.replication.FindTokenFactory) 8
PartitionId (com.github.ambry.clustermap.PartitionId) 5
FindToken (com.github.ambry.replication.FindToken) 5
DataInputStream (java.io.DataInputStream) 4
FileInputStream (java.io.FileInputStream) 4
ReplicaType (com.github.ambry.clustermap.ReplicaType) 3
CrcInputStream (com.github.ambry.utils.CrcInputStream) 3
ArrayList (java.util.ArrayList) 3
MockPartitionId (com.github.ambry.clustermap.MockPartitionId) 2
ReplicaId (com.github.ambry.clustermap.ReplicaId) 2
Offset (com.github.ambry.store.Offset) 2
StoreFindToken (com.github.ambry.store.StoreFindToken) 2
StoreKeyFactory (com.github.ambry.store.StoreKeyFactory) 2
File (java.io.File) 2
MetricRegistry (com.codahale.metrics.MetricRegistry) 1
CloudBlobMetadata (com.github.ambry.cloud.CloudBlobMetadata) 1
FindResult (com.github.ambry.cloud.FindResult) 1
CloudReplica (com.github.ambry.clustermap.CloudReplica) 1
MockDataNodeId (com.github.ambry.clustermap.MockDataNodeId) 1
MockReplicaId (com.github.ambry.clustermap.MockReplicaId) 1