Example 1 with FindTokenFactory

Use of com.github.ambry.replication.FindTokenFactory in project ambry by linkedin.

From class HardDeleteVerifier, method getOffsetFromCleanupToken.

private long getOffsetFromCleanupToken(File cleanupTokenFile) throws Exception {
    long parsedTokenValue = -1;
    if (cleanupTokenFile.exists()) {
        CrcInputStream crcStream = new CrcInputStream(new FileInputStream(cleanupTokenFile));
        DataInputStream stream = new DataInputStream(crcStream);
        try {
            // The format of the cleanup token is documented in PersistentIndex.persistCleanupToken()
            short version = stream.readShort();
            if (version != HARD_DELETE_TOKEN_V0) {
                throw new IllegalStateException("Unknown version encountered while parsing cleanup token");
            }
            StoreKeyFactory storeKeyFactory = Utils.getObj("com.github.ambry.commons.BlobIdFactory", map);
            FindTokenFactory factory = Utils.getObj("com.github.ambry.store.StoreFindTokenFactory", storeKeyFactory);
            FindToken startToken = factory.getFindToken(stream);
            // read past the end token.
            factory.getFindToken(stream);
            ByteBuffer bytebufferToken = ByteBuffer.wrap(startToken.toBytes());
            short tokenVersion = bytebufferToken.getShort();
            if (tokenVersion != 0) {
                throw new IllegalArgumentException("token version: " + tokenVersion + " is unknown");
            }
            int sessionIdSize = bytebufferToken.getInt();
            bytebufferToken.position(bytebufferToken.position() + sessionIdSize);
            parsedTokenValue = bytebufferToken.getLong();
            if (parsedTokenValue == -1) {
                /* Index based token, get index start offset */
                parsedTokenValue = bytebufferToken.getLong();
            }
            /* Just read the remaining fields and verify that the crc matches. We don't really
               need the fields for this test. */
            int num = stream.readInt();
            List<StoreKey> storeKeyList = new ArrayList<StoreKey>(num);
            for (int i = 0; i < num; i++) {
                // Read BlobReadOptions
                short blobReadOptionsVersion = stream.readShort();
                if (blobReadOptionsVersion != 0) {
                    throw new IllegalStateException("Unknown blobReadOptionsVersion: " + blobReadOptionsVersion);
                }
                // offset, size and ttl of this entry; read past them, the values are not needed here
                long offset = stream.readLong();
                long sz = stream.readLong();
                long ttl = stream.readLong();
                StoreKey key = storeKeyFactory.getStoreKey(stream);
                storeKeyList.add(key);
            }
            for (int i = 0; i < num; i++) {
                // read past the header, user metadata and blob record fields for this entry
                int length = stream.readInt();
                short headerVersion = stream.readShort();
                short userMetadataVersion = stream.readShort();
                int userMetadataSize = stream.readInt();
                short blobRecordVersion = stream.readShort();
                long blobStreamSize = stream.readLong();
                StoreKey key = storeKeyFactory.getStoreKey(stream);
                if (!storeKeyList.get(i).equals(key)) {
                    throw new IllegalStateException("Parsed key mismatch");
                }
            }
            long crc = crcStream.getValue();
            if (crc != stream.readLong()) {
                throw new IllegalStateException("Crc mismatch while reading cleanup token");
            }
        } finally {
            stream.close();
        }
    } else {
        throw new IllegalStateException("No cleanup token");
    }
    return parsedTokenValue;
}
Also used : ArrayList(java.util.ArrayList) DataInputStream(java.io.DataInputStream) FindTokenFactory(com.github.ambry.replication.FindTokenFactory) ByteBuffer(java.nio.ByteBuffer) FileInputStream(java.io.FileInputStream) CrcInputStream(com.github.ambry.utils.CrcInputStream) FindToken(com.github.ambry.replication.FindToken)
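
The detail worth noting here is that both factories are constructed reflectively from class-name strings via Utils.getObj, so the verifier never depends on the concrete token implementation at compile time. Below is a minimal, hedged sketch of just that wiring; the readStartToken helper and its clusterMap/tokenFile arguments are illustrative and not part of the Ambry code, and imports are omitted in keeping with the snippet style on this page.

private static FindToken readStartToken(ClusterMap clusterMap, File tokenFile) throws Exception {
    try (DataInputStream stream = new DataInputStream(new FileInputStream(tokenFile))) {
        // skip the cleanup token version short that precedes the serialized tokens
        stream.readShort();
        // build the key factory and the token factory from class names, as in the method above
        StoreKeyFactory storeKeyFactory = Utils.getObj("com.github.ambry.commons.BlobIdFactory", clusterMap);
        FindTokenFactory factory = Utils.getObj("com.github.ambry.store.StoreFindTokenFactory", storeKeyFactory);
        // deserialize the start token; the end token would follow in the same stream
        return factory.getFindToken(stream);
    }
}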

Example 2 with FindTokenFactory

Use of com.github.ambry.replication.FindTokenFactory in project ambry by linkedin.

From class ReplicaMetadataRequestInfo, method readFrom.

public static ReplicaMetadataRequestInfo readFrom(DataInputStream stream, ClusterMap clusterMap, FindTokenHelper findTokenHelper, short requestVersion) throws IOException {
    String hostName = Utils.readIntString(stream);
    String replicaPath = Utils.readIntString(stream);
    ReplicaType replicaType;
    if (requestVersion == ReplicaMetadataRequest.Replica_Metadata_Request_Version_V2) {
        replicaType = ReplicaType.values()[stream.readShort()];
    } else {
        // before version 2 we only have disk based replicas
        replicaType = ReplicaType.DISK_BACKED;
    }
    PartitionId partitionId = clusterMap.getPartitionIdFromStream(stream);
    FindTokenFactory findTokenFactory = findTokenHelper.getFindTokenFactoryFromReplicaType(replicaType);
    FindToken token = findTokenFactory.getFindToken(stream);
    return new ReplicaMetadataRequestInfo(partitionId, token, hostName, replicaPath, replicaType, requestVersion);
}
Also used : ReplicaType(com.github.ambry.clustermap.ReplicaType) FindToken(com.github.ambry.replication.FindToken) PartitionId(com.github.ambry.clustermap.PartitionId) FindTokenFactory(com.github.ambry.replication.FindTokenFactory)
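
A hedged usage sketch of the method above, as it might be invoked from a request handler: the stream, clusterMap and findTokenHelper variables are assumed to already exist in that context, and the version constant is the one checked inside readFrom.

// Deserialize one replica metadata request entry off the wire (assumed to be a V2 request).
ReplicaMetadataRequestInfo requestInfo =
    ReplicaMetadataRequestInfo.readFrom(stream, clusterMap, findTokenHelper,
        ReplicaMetadataRequest.Replica_Metadata_Request_Version_V2);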

Example 3 with FindTokenFactory

Use of com.github.ambry.replication.FindTokenFactory in project ambry by linkedin.

From class ReplicaMetadataResponseInfo, method readFrom.

public static ReplicaMetadataResponseInfo readFrom(DataInputStream stream, FindTokenHelper helper, ClusterMap clusterMap, short replicaMetadataResponseVersion) throws IOException {
    PartitionId partitionId = clusterMap.getPartitionIdFromStream(stream);
    ReplicaType replicaType;
    if (replicaMetadataResponseVersion == ReplicaMetadataResponse.REPLICA_METADATA_RESPONSE_VERSION_V_6) {
        replicaType = ReplicaType.values()[stream.readShort()];
    } else {
        // before REPLICA_METADATA_RESPONSE_VERSION_V_6 there were only disk based replicas
        replicaType = ReplicaType.DISK_BACKED;
    }
    ServerErrorCode error = ServerErrorCode.values()[stream.readShort()];
    if (error != ServerErrorCode.No_Error) {
        return new ReplicaMetadataResponseInfo(partitionId, replicaType, error, replicaMetadataResponseVersion);
    } else {
        FindTokenFactory findTokenFactory = helper.getFindTokenFactoryFromReplicaType(replicaType);
        FindToken token = findTokenFactory.getFindToken(stream);
        MessageInfoAndMetadataListSerde messageInfoAndMetadataList = MessageInfoAndMetadataListSerde.deserializeMessageInfoAndMetadataList(stream, clusterMap, getMessageInfoAndMetadataListSerDeVersion(replicaMetadataResponseVersion));
        long remoteReplicaLag = stream.readLong();
        return new ReplicaMetadataResponseInfo(partitionId, replicaType, token, messageInfoAndMetadataList.getMessageInfoList(), remoteReplicaLag, replicaMetadataResponseVersion);
    }
}
Also used : ReplicaType(com.github.ambry.clustermap.ReplicaType) FindToken(com.github.ambry.replication.FindToken) PartitionId(com.github.ambry.clustermap.PartitionId) FindTokenFactory(com.github.ambry.replication.FindTokenFactory) ServerErrorCode(com.github.ambry.server.ServerErrorCode)

Example 4 with FindTokenFactory

Use of com.github.ambry.replication.FindTokenFactory in project ambry by linkedin.

From class ServerTestUtil, method checkReplicaTokens.

/**
 * Repeatedly check the replication token file until a certain offset value on all nodes on a certain
 * partition is found.  Fail if {@code numTries} is exceeded or a token offset larger than the target
 * is found.
 * @param clusterMap the cluster map that contains the data node to inspect
 * @param dataNodeId the data node to inspect
 * @param targetOffset the token offset to look for in the {@code targetPartition}
 * @param targetPartition the name of the partition to look for the {@code targetOffset}
 * @throws Exception
 */
private static void checkReplicaTokens(MockClusterMap clusterMap, DataNodeId dataNodeId, long targetOffset, String targetPartition) throws Exception {
    List<String> mountPaths = ((MockDataNodeId) dataNodeId).getMountPaths();
    // we should have an entry for each partition - remote replica pair
    Set<String> completeSetToCheck = new HashSet<>();
    List<ReplicaId> replicaIds = clusterMap.getReplicaIds(dataNodeId);
    int numRemoteNodes = 0;
    for (ReplicaId replicaId : replicaIds) {
        List<? extends ReplicaId> peerReplicas = replicaId.getPeerReplicaIds();
        if (replicaId.getPartitionId().isEqual(targetPartition)) {
            numRemoteNodes = peerReplicas.size();
        }
        for (ReplicaId peerReplica : peerReplicas) {
            completeSetToCheck.add(replicaId.getPartitionId().toString() + peerReplica.getDataNodeId().getHostname() + peerReplica.getDataNodeId().getPort());
        }
    }
    StoreKeyFactory storeKeyFactory = Utils.getObj("com.github.ambry.commons.BlobIdFactory", clusterMap);
    FindTokenFactory factory = Utils.getObj("com.github.ambry.store.StoreFindTokenFactory", storeKeyFactory);
    int numTries = 4;
    boolean foundTarget = false;
    while (!foundTarget && numTries > 0) {
        Thread.sleep(5000);
        numTries--;
        Set<String> setToCheck = new HashSet<String>(completeSetToCheck);
        int numFound = 0;
        for (String mountPath : mountPaths) {
            File replicaTokenFile = new File(mountPath, "replicaTokens");
            if (replicaTokenFile.exists()) {
                CrcInputStream crcStream = new CrcInputStream(new FileInputStream(replicaTokenFile));
                DataInputStream dataInputStream = new DataInputStream(crcStream);
                try {
                    short version = dataInputStream.readShort();
                    assertEquals(1, version);
                    while (dataInputStream.available() > 8) {
                        // read partition id
                        PartitionId partitionId = clusterMap.getPartitionIdFromStream(dataInputStream);
                        // read remote node host name
                        String hostname = Utils.readIntString(dataInputStream);
                        // read remote replica path
                        Utils.readIntString(dataInputStream);
                        // read remote port
                        int port = dataInputStream.readInt();
                        assertTrue(setToCheck.contains(partitionId.toString() + hostname + port));
                        setToCheck.remove(partitionId.toString() + hostname + port);
                        // read total bytes read from local store
                        dataInputStream.readLong();
                        // read replica type
                        ReplicaType replicaType = ReplicaType.values()[dataInputStream.readShort()];
                        // read replica token
                        StoreFindToken token = (StoreFindToken) factory.getFindToken(dataInputStream);
                        System.out.println("partitionId " + partitionId + " hostname " + hostname + " port " + port + " token " + token);
                        Offset endTokenOffset = token.getOffset();
                        long parsedToken = endTokenOffset == null ? -1 : endTokenOffset.getOffset();
                        System.out.println("The parsed token is " + parsedToken);
                        if (partitionId.isEqual(targetPartition)) {
                            assertFalse("Parsed offset: " + parsedToken + " must not be larger than target value: " + targetOffset, parsedToken > targetOffset);
                            if (parsedToken == targetOffset) {
                                numFound++;
                            }
                        } else {
                            assertEquals("Tokens should remain at -1 offsets on unmodified partitions", -1, parsedToken);
                        }
                    }
                    long crc = crcStream.getValue();
                    assertEquals(crc, dataInputStream.readLong());
                } catch (IOException e) {
                    fail("Unexpected IOException while reading replica token file: " + e.getMessage());
                } finally {
                    dataInputStream.close();
                }
            }
        }
        if (numFound == numRemoteNodes) {
            foundTarget = true;
        }
    }
    if (!foundTarget) {
        fail("Could not find target token offset: " + targetOffset);
    }
}
Also used : StoreFindToken(com.github.ambry.store.StoreFindToken) IOException(java.io.IOException) NettyByteBufDataInputStream(com.github.ambry.utils.NettyByteBufDataInputStream) DataInputStream(java.io.DataInputStream) MockPartitionId(com.github.ambry.clustermap.MockPartitionId) PartitionId(com.github.ambry.clustermap.PartitionId) FindTokenFactory(com.github.ambry.replication.FindTokenFactory) ReplicaId(com.github.ambry.clustermap.ReplicaId) MockReplicaId(com.github.ambry.clustermap.MockReplicaId) FileInputStream(java.io.FileInputStream) Offset(com.github.ambry.store.Offset) StoreKeyFactory(com.github.ambry.store.StoreKeyFactory) CrcInputStream(com.github.ambry.utils.CrcInputStream) ReplicaType(com.github.ambry.clustermap.ReplicaType) MockDataNodeId(com.github.ambry.clustermap.MockDataNodeId) File(java.io.File) HashSet(java.util.HashSet)
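
For orientation, a hedged sketch of how this helper might be called from a server test; clusterMap, dataNodeId, expectedOffset and targetPartitionName are placeholders standing in for values from the surrounding test setup.

// Block until every peer replica of the target partition has persisted a token at exactly
// expectedOffset; the helper fails after four 5-second attempts or if a larger offset appears.
checkReplicaTokens(clusterMap, dataNodeId, expectedOffset, targetPartitionName);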

Example 5 with FindTokenFactory

Use of com.github.ambry.replication.FindTokenFactory in project ambry by linkedin.

From class VcrReplicationManager, method addReplica.

/**
 * Add a replica of given {@link PartitionId} and its {@link RemoteReplicaInfo}s to backup list.
 * @param partitionId the {@link PartitionId} of the replica to add.
 * @throws ReplicationException if replicas initialization failed.
 */
void addReplica(PartitionId partitionId) throws ReplicationException {
    if (partitionToPartitionInfo.containsKey(partitionId)) {
        throw new ReplicationException("Partition " + partitionId + " already exists on " + dataNodeId);
    }
    ReplicaId cloudReplica = new CloudReplica(partitionId, vcrClusterParticipant.getCurrentDataNodeId());
    if (!storeManager.addBlobStore(cloudReplica)) {
        logger.error("Can't start cloudstore for replica {}", cloudReplica);
        throw new ReplicationException("Can't start cloudstore for replica " + cloudReplica);
    }
    List<? extends ReplicaId> peerReplicas = cloudReplica.getPeerReplicaIds();
    List<RemoteReplicaInfo> remoteReplicaInfos = new ArrayList<>();
    Store store = storeManager.getStore(partitionId);
    if (peerReplicas != null) {
        for (ReplicaId peerReplica : peerReplicas) {
            if (!shouldReplicateFromDc(peerReplica.getDataNodeId().getDatacenterName())) {
                continue;
            }
            // We need to ensure that a replica token gets persisted only after the corresponding data in the
            // store gets flushed to cloud. We use the store flush interval multiplied by a constant factor
            // to determine the token flush interval
            FindTokenFactory findTokenFactory = tokenHelper.getFindTokenFactoryFromReplicaType(peerReplica.getReplicaType());
            RemoteReplicaInfo remoteReplicaInfo = new RemoteReplicaInfo(peerReplica, cloudReplica, store, findTokenFactory.getNewFindToken(), storeConfig.storeDataFlushIntervalSeconds * SystemTime.MsPerSec * Replication_Delay_Multiplier, SystemTime.getInstance(), peerReplica.getDataNodeId().getPortToConnectTo());
            replicationMetrics.addMetricsForRemoteReplicaInfo(remoteReplicaInfo, trackPerDatacenterLagInMetric);
            remoteReplicaInfos.add(remoteReplicaInfo);
        }
        rwLock.writeLock().lock();
        try {
            updatePartitionInfoMaps(remoteReplicaInfos, cloudReplica);
            partitionStoreMap.put(partitionId.toPathString(), store);
            // Reload the replication token if it exists.
            int tokenReloadFailCount = reloadReplicationTokenIfExists(cloudReplica, remoteReplicaInfos);
            vcrMetrics.tokenReloadWarnCount.inc(tokenReloadFailCount);
            // Add remoteReplicaInfos to {@link ReplicaThread}.
            addRemoteReplicaInfoToReplicaThread(remoteReplicaInfos, true);
            if (replicationConfig.replicationTrackPerPartitionLagFromRemote) {
                replicationMetrics.addLagMetricForPartition(partitionId, true);
            }
        } finally {
            rwLock.writeLock().unlock();
        }
    } else {
        try {
            storeManager.shutdownBlobStore(partitionId);
            storeManager.removeBlobStore(partitionId);
        } finally {
            throw new ReplicationException("Failed to add Partition " + partitionId + " on " + dataNodeId + " , because no peer replicas found.");
        }
    }
}
Also used : CloudReplica(com.github.ambry.clustermap.CloudReplica) RemoteReplicaInfo(com.github.ambry.replication.RemoteReplicaInfo) ArrayList(java.util.ArrayList) Store(com.github.ambry.store.Store) ReplicationException(com.github.ambry.replication.ReplicationException) FindTokenFactory(com.github.ambry.replication.FindTokenFactory) ReplicaId(com.github.ambry.clustermap.ReplicaId)
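
The heart of the bootstrap above is picking a token factory that matches the peer's replica type and starting from an empty token. A minimal sketch of just that step, with tokenHelper and peerReplica assumed to exist as in the method:

// Choose the factory for the peer's replica type (disk-backed vs. cloud-backed) and start
// replication from a brand-new, empty token instead of a previously persisted one.
FindTokenFactory findTokenFactory = tokenHelper.getFindTokenFactoryFromReplicaType(peerReplica.getReplicaType());
FindToken initialToken = findTokenFactory.getNewFindToken();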

Aggregations

FindTokenFactory (com.github.ambry.replication.FindTokenFactory): 8
PartitionId (com.github.ambry.clustermap.PartitionId): 5
FindToken (com.github.ambry.replication.FindToken): 5
DataInputStream (java.io.DataInputStream): 4
FileInputStream (java.io.FileInputStream): 4
ReplicaType (com.github.ambry.clustermap.ReplicaType): 3
CrcInputStream (com.github.ambry.utils.CrcInputStream): 3
ArrayList (java.util.ArrayList): 3
MockPartitionId (com.github.ambry.clustermap.MockPartitionId): 2
ReplicaId (com.github.ambry.clustermap.ReplicaId): 2
Offset (com.github.ambry.store.Offset): 2
StoreFindToken (com.github.ambry.store.StoreFindToken): 2
StoreKeyFactory (com.github.ambry.store.StoreKeyFactory): 2
File (java.io.File): 2
MetricRegistry (com.codahale.metrics.MetricRegistry): 1
CloudBlobMetadata (com.github.ambry.cloud.CloudBlobMetadata): 1
FindResult (com.github.ambry.cloud.FindResult): 1
CloudReplica (com.github.ambry.clustermap.CloudReplica): 1
MockDataNodeId (com.github.ambry.clustermap.MockDataNodeId): 1
MockReplicaId (com.github.ambry.clustermap.MockReplicaId): 1