Search in sources:

Example 1 with Offset

use of com.github.ambry.store.Offset in project ambry by linkedin.

the class ServerTestUtil method checkReplicaTokens.

/**
 * Repeatedly check the replication token files until the target offset is found for every remote replica of a
 * given partition. Fail if {@code numTries} attempts are exhausted or a token offset larger than the target
 * is found.
 * @param clusterMap the cluster map that contains the data node to inspect
 * @param dataNodeId the data node to inspect
 * @param targetOffset the token offset to look for in the {@code targetPartition}
 * @param targetPartition the name of the partition in which to look for the {@code targetOffset}
 * @throws Exception if reading the token files fails or the sleep between retries is interrupted
 */
private static void checkReplicaTokens(MockClusterMap clusterMap, DataNodeId dataNodeId, long targetOffset, String targetPartition) throws Exception {
    List<String> mountPaths = ((MockDataNodeId) dataNodeId).getMountPaths();
    // we should have an entry for each partition - remote replica pair
    Set<String> completeSetToCheck = new HashSet<>();
    List<ReplicaId> replicaIds = clusterMap.getReplicaIds(dataNodeId);
    int numRemoteNodes = 0;
    for (ReplicaId replicaId : replicaIds) {
        List<? extends ReplicaId> peerReplicas = replicaId.getPeerReplicaIds();
        if (replicaId.getPartitionId().isEqual(targetPartition)) {
            numRemoteNodes = peerReplicas.size();
        }
        for (ReplicaId peerReplica : peerReplicas) {
            completeSetToCheck.add(replicaId.getPartitionId().toString() + peerReplica.getDataNodeId().getHostname() + peerReplica.getDataNodeId().getPort());
        }
    }
    StoreKeyFactory storeKeyFactory = Utils.getObj("com.github.ambry.commons.BlobIdFactory", clusterMap);
    FindTokenFactory factory = Utils.getObj("com.github.ambry.store.StoreFindTokenFactory", storeKeyFactory);
    int numTries = 4;
    boolean foundTarget = false;
    while (!foundTarget && numTries > 0) {
        Thread.sleep(5000);
        numTries--;
        Set<String> setToCheck = new HashSet<String>(completeSetToCheck);
        int numFound = 0;
        for (String mountPath : mountPaths) {
            File replicaTokenFile = new File(mountPath, "replicaTokens");
            if (replicaTokenFile.exists()) {
                CrcInputStream crcStream = new CrcInputStream(new FileInputStream(replicaTokenFile));
                DataInputStream dataInputStream = new DataInputStream(crcStream);
                try {
                    short version = dataInputStream.readShort();
                    assertEquals(1, version);
                    while (dataInputStream.available() > 8) {
                        // read partition id
                        PartitionId partitionId = clusterMap.getPartitionIdFromStream(dataInputStream);
                        // read remote node host name
                        String hostname = Utils.readIntString(dataInputStream);
                        // read remote replica path
                        Utils.readIntString(dataInputStream);
                        // read remote port
                        int port = dataInputStream.readInt();
                        assertTrue(setToCheck.contains(partitionId.toString() + hostname + port));
                        setToCheck.remove(partitionId.toString() + hostname + port);
                        // read total bytes read from local store
                        dataInputStream.readLong();
                        // read replica type
                        ReplicaType replicaType = ReplicaType.values()[dataInputStream.readShort()];
                        // read replica token
                        StoreFindToken token = (StoreFindToken) factory.getFindToken(dataInputStream);
                        System.out.println("partitionId " + partitionId + " hostname " + hostname + " port " + port + " token " + token);
                        Offset endTokenOffset = token.getOffset();
                        long parsedToken = endTokenOffset == null ? -1 : endTokenOffset.getOffset();
                        System.out.println("The parsed token is " + parsedToken);
                        if (partitionId.isEqual(targetPartition)) {
                            assertFalse("Parsed offset: " + parsedToken + " must not be larger than target value: " + targetOffset, parsedToken > targetOffset);
                            if (parsedToken == targetOffset) {
                                numFound++;
                            }
                        } else {
                            assertEquals("Tokens should remain at -1 offsets on unmodified partitions", -1, parsedToken);
                        }
                    }
                    long crc = crcStream.getValue();
                    assertEquals(crc, dataInputStream.readLong());
                } catch (IOException e) {
                    fail();
                } finally {
                    dataInputStream.close();
                }
            }
        }
        if (numFound == numRemoteNodes) {
            foundTarget = true;
        }
    }
    if (!foundTarget) {
        fail("Could not find target token offset: " + targetOffset);
    }
}
Also used : StoreFindToken(com.github.ambry.store.StoreFindToken) IOException(java.io.IOException) NettyByteBufDataInputStream(com.github.ambry.utils.NettyByteBufDataInputStream) DataInputStream(java.io.DataInputStream) MockPartitionId(com.github.ambry.clustermap.MockPartitionId) PartitionId(com.github.ambry.clustermap.PartitionId) FindTokenFactory(com.github.ambry.replication.FindTokenFactory) ReplicaId(com.github.ambry.clustermap.ReplicaId) MockReplicaId(com.github.ambry.clustermap.MockReplicaId) FileInputStream(java.io.FileInputStream) Offset(com.github.ambry.store.Offset) StoreKeyFactory(com.github.ambry.store.StoreKeyFactory) CrcInputStream(com.github.ambry.utils.CrcInputStream) ReplicaType(com.github.ambry.clustermap.ReplicaType) MockDataNodeId(com.github.ambry.clustermap.MockDataNodeId) File(java.io.File) HashSet(java.util.HashSet)
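
Both examples on this page reduce to the same step: build a BlobIdFactory and a StoreFindTokenFactory reflectively through Utils.getObj, deserialize a StoreFindToken from the stream, and treat a null Offset (an uninitialized token) as -1. A minimal, self-contained sketch of that shared step follows; the class and method names are illustrative and not part of ServerTestUtil, while the individual calls are the ones used in the example above.

import com.github.ambry.clustermap.ClusterMap;
import com.github.ambry.replication.FindTokenFactory;
import com.github.ambry.store.Offset;
import com.github.ambry.store.StoreFindToken;
import com.github.ambry.store.StoreKeyFactory;
import com.github.ambry.utils.Utils;
import java.io.DataInputStream;

final class TokenOffsetReader {
    /**
     * Illustrative helper (not part of ServerTestUtil): deserialize one StoreFindToken from the given
     * stream and return the raw offset within its log segment, or -1 if the token carries no offset yet.
     */
    static long readTokenOffset(ClusterMap clusterMap, DataInputStream stream) throws Exception {
        // the factories are created reflectively, exactly as in the test code above
        StoreKeyFactory storeKeyFactory = Utils.getObj("com.github.ambry.commons.BlobIdFactory", clusterMap);
        FindTokenFactory factory = Utils.getObj("com.github.ambry.store.StoreFindTokenFactory", storeKeyFactory);
        StoreFindToken token = (StoreFindToken) factory.getFindToken(stream);
        Offset endTokenOffset = token.getOffset();
        // an uninitialized token has no offset; both tests map that case to -1
        return endTokenOffset == null ? -1 : endTokenOffset.getOffset();
    }
}

In Example 1 the resulting value is compared against targetOffset for the partition under test; in Example 2 the same conversion is applied to the hard delete cleanup token and compared against expectedTokenValue.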

Example 2 with Offset

use of com.github.ambry.store.Offset in project ambry by linkedin.

the class ServerHardDeleteTest method ensureCleanupTokenCatchesUp.

/**
 * Waits and ensures that the hard delete cleanup token catches up to the expected token value.
 * @param path the path to the cleanup token.
 * @param mockClusterMap the {@link MockClusterMap} being used for the cluster.
 * @param expectedTokenValue the expected value that the cleanup token should contain. The method keeps reopening
 *                           the file and re-reading the value until this value is reached or a predefined
 *                           timeout expires.
 * @throws Exception if there are any I/O errors or the sleep between reads is interrupted.
 */
void ensureCleanupTokenCatchesUp(String path, MockClusterMap mockClusterMap, long expectedTokenValue) throws Exception {
    final int TIMEOUT = 10000;
    File cleanupTokenFile = new File(path, "cleanuptoken");
    StoreFindToken endToken;
    long parsedTokenValue = -1;
    long endTime = SystemTime.getInstance().milliseconds() + TIMEOUT;
    do {
        if (cleanupTokenFile.exists()) {
            /* The cleanup token format is as follows:
           --
           token_version
           startTokenForRecovery
           endTokenForRecovery
           numBlobsInRange
           pause flag
           --
           blob1_blobReadOptions {version, offset, sz, ttl, key}
           blob2_blobReadOptions
           ....
           blobN_blobReadOptions
           --
           length_of_blob1_messageStoreRecoveryInfo
           blob1_messageStoreRecoveryInfo {headerVersion, userMetadataVersion, userMetadataSize, blobRecordVersion,
            blobType, blobStreamSize}
           length_of_blob2_messageStoreRecoveryInfo
           blob2_messageStoreRecoveryInfo
           ....
           length_of_blobN_messageStoreRecoveryInfo
           blobN_messageStoreRecoveryInfo
           crc
           ---
         */
            CrcInputStream crcStream = new CrcInputStream(new FileInputStream(cleanupTokenFile));
            DataInputStream stream = new DataInputStream(crcStream);
            try {
                short version = stream.readShort();
                Assert.assertEquals(version, HardDeleter.Cleanup_Token_Version_V1);
                StoreKeyFactory storeKeyFactory = Utils.getObj("com.github.ambry.commons.BlobIdFactory", mockClusterMap);
                FindTokenFactory factory = Utils.getObj("com.github.ambry.store.StoreFindTokenFactory", storeKeyFactory);
                factory.getFindToken(stream);
                endToken = (StoreFindToken) factory.getFindToken(stream);
                Offset endTokenOffset = endToken.getOffset();
                parsedTokenValue = endTokenOffset == null ? -1 : endTokenOffset.getOffset();
                boolean pauseFlag = stream.readByte() == (byte) 1;
                int num = stream.readInt();
                List<StoreKey> storeKeyList = new ArrayList<StoreKey>(num);
                for (int i = 0; i < num; i++) {
                    // Read BlobReadOptions
                    short blobReadOptionsVersion = stream.readShort();
                    switch(blobReadOptionsVersion) {
                        case 1:
                            Offset.fromBytes(stream);
                            stream.readLong();
                            stream.readLong();
                            StoreKey key = storeKeyFactory.getStoreKey(stream);
                            storeKeyList.add(key);
                            break;
                        default:
                            Assert.assertFalse(true);
                    }
                }
                for (int i = 0; i < num; i++) {
                    int length = stream.readInt();
                    short headerVersion = stream.readShort();
                    short userMetadataVersion = stream.readShort();
                    int userMetadataSize = stream.readInt();
                    short blobRecordVersion = stream.readShort();
                    if (blobRecordVersion == MessageFormatRecord.Blob_Version_V2) {
                        short blobType = stream.readShort();
                    }
                    long blobStreamSize = stream.readLong();
                    StoreKey key = storeKeyFactory.getStoreKey(stream);
                    Assert.assertTrue(storeKeyList.get(i).equals(key));
                }
                long crc = crcStream.getValue();
                Assert.assertEquals(crc, stream.readLong());
                Thread.sleep(1000);
            } finally {
                stream.close();
            }
        }
    } while (SystemTime.getInstance().milliseconds() < endTime && parsedTokenValue < expectedTokenValue);
    Assert.assertEquals(expectedTokenValue, parsedTokenValue);
}
Also used : ArrayList(java.util.ArrayList) StoreFindToken(com.github.ambry.store.StoreFindToken) DataInputStream(java.io.DataInputStream) FindTokenFactory(com.github.ambry.replication.FindTokenFactory) StoreKey(com.github.ambry.store.StoreKey) FileInputStream(java.io.FileInputStream) Offset(com.github.ambry.store.Offset) CrcInputStream(com.github.ambry.utils.CrcInputStream) StoreKeyFactory(com.github.ambry.store.StoreKeyFactory) File(java.io.File)
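
Stripped of the token parsing, ensureCleanupTokenCatchesUp is a poll-with-deadline loop: re-read a value, sleep, and stop once the value catches up to the expectation or the deadline passes. Below is a minimal sketch of that loop shape, with the reading of the cleanup token abstracted behind a caller-supplied supplier; the class, method name and parameters are illustrative and not part of Ambry.

import java.util.function.LongSupplier;

final class PollUtil {
    /**
     * Illustrative helper (not part of Ambry): keep re-evaluating {@code currentValue} until it reaches
     * {@code expected} or {@code timeoutMs} elapses, sleeping {@code pollIntervalMs} between reads.
     * Returns the last observed value so the caller can assert on it, as the test above does.
     */
    static long pollUntilCaughtUp(LongSupplier currentValue, long expected, long timeoutMs, long pollIntervalMs)
            throws InterruptedException {
        long deadline = System.currentTimeMillis() + timeoutMs;
        long observed = currentValue.getAsLong();
        while (observed < expected && System.currentTimeMillis() < deadline) {
            Thread.sleep(pollIntervalMs);
            observed = currentValue.getAsLong();
        }
        return observed;
    }
}

In the test above, the supplier would correspond to the cleanup-token parsing inside ensureCleanupTokenCatchesUp, and the deadline mirrors its TIMEOUT of 10000 ms.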

Aggregations

FindTokenFactory (com.github.ambry.replication.FindTokenFactory) 2
Offset (com.github.ambry.store.Offset) 2
StoreFindToken (com.github.ambry.store.StoreFindToken) 2
StoreKeyFactory (com.github.ambry.store.StoreKeyFactory) 2
CrcInputStream (com.github.ambry.utils.CrcInputStream) 2
DataInputStream (java.io.DataInputStream) 2
File (java.io.File) 2
FileInputStream (java.io.FileInputStream) 2
MockDataNodeId (com.github.ambry.clustermap.MockDataNodeId) 1
MockPartitionId (com.github.ambry.clustermap.MockPartitionId) 1
MockReplicaId (com.github.ambry.clustermap.MockReplicaId) 1
PartitionId (com.github.ambry.clustermap.PartitionId) 1
ReplicaId (com.github.ambry.clustermap.ReplicaId) 1
ReplicaType (com.github.ambry.clustermap.ReplicaType) 1
StoreKey (com.github.ambry.store.StoreKey) 1
NettyByteBufDataInputStream (com.github.ambry.utils.NettyByteBufDataInputStream) 1
IOException (java.io.IOException) 1
ArrayList (java.util.ArrayList) 1
HashSet (java.util.HashSet) 1