Example 11 with BlobIdFactory

use of com.github.ambry.commons.BlobIdFactory in project ambry by linkedin.

the class ReplicationTest method replicaThreadLifeVersionLocalLessThanRemote_FinalState_Delete.

/**
 * Tests when the lifeVersion in local is less than the lifeVersion in remote and the final state from remote
 * is delete.
 * @throws Exception
 */
@Test
public void replicaThreadLifeVersionLocalLessThanRemote_FinalState_Delete() throws Exception {
    MockClusterMap clusterMap = new MockClusterMap();
    Pair<MockHost, MockHost> localAndRemoteHosts = getLocalAndRemoteHosts(clusterMap);
    MockHost localHost = localAndRemoteHosts.getFirst();
    MockHost remoteHost = localAndRemoteHosts.getSecond();
    MockStoreKeyConverterFactory storeKeyConverterFactory = new MockStoreKeyConverterFactory(null, null);
    storeKeyConverterFactory.setConversionMap(new HashMap<>());
    storeKeyConverterFactory.setReturnInputIfAbsent(true);
    MockStoreKeyConverterFactory.MockStoreKeyConverter storeKeyConverter = storeKeyConverterFactory.getStoreKeyConverter();
    Map<StoreKey, StoreKey> conversionMap = new HashMap<>();
    storeKeyConverter.setConversionMap(conversionMap);
    StoreKeyFactory storeKeyFactory = new BlobIdFactory(clusterMap);
    Transformer transformer = new BlobIdTransformer(storeKeyFactory, storeKeyConverter);
    Map<PartitionId, List<StoreKey>> idsByPartition = new HashMap<>();
    Map<PartitionId, StoreKey> idsToBeIgnoredByPartition = new HashMap<>();
    List<PartitionId> partitionIds = clusterMap.getWritablePartitionIds(null);
    // Each partition gets five blobs: one whose Put and Delete exist only on remote (so it is missing locally),
    // and four whose final remote state is a Delete with a higher lifeVersion than local's (with or without ttl update).
    for (int i = 0; i < partitionIds.size(); i++) {
        PartitionId partitionId = partitionIds.get(i);
        List<StoreKey> ids = addPutMessagesToReplicasOfPartition(partitionId, Arrays.asList(remoteHost), 1);
        // Adding a Put and Delete to remote but nothing in local
        StoreKey id = ids.get(0);
        addDeleteMessagesToReplicasOfPartition(partitionId, id, Collections.singletonList(remoteHost), (short) 1, EXPIRY_TIME_MS);
        idsToBeIgnoredByPartition.put(partitionId, id);
        // Add a Put to both hosts, then a Delete to each, with a lower lifeVersion (0) on local than on remote (1)
        id = addPutMessagesToReplicasOfPartition(partitionId, Arrays.asList(remoteHost, localHost), 1).get(0);
        ids.add(id);
        addDeleteMessagesToReplicasOfPartition(partitionId, id, Collections.singletonList(localHost), (short) 0, EXPIRY_TIME_MS);
        addDeleteMessagesToReplicasOfPartition(partitionId, id, Collections.singletonList(remoteHost), (short) 1, EXPIRY_TIME_MS);
        // Adding one Put and Delete to remote and add the same put to local host
        id = addPutMessagesToReplicasOfPartition(partitionId, Arrays.asList(remoteHost, localHost), 1).get(0);
        ids.add(id);
        addDeleteMessagesToReplicasOfPartition(partitionId, id, Collections.singletonList(remoteHost), (short) 1, EXPIRY_TIME_MS);
        // Adding one Put and Delete to remote and add same Put and a TtlUpdate to local host
        id = addPutMessagesToReplicasOfPartition(partitionId, Arrays.asList(remoteHost, localHost), 1).get(0);
        ids.add(id);
        addTtlUpdateMessagesToReplicasOfPartition(partitionId, id, Collections.singletonList(localHost), UPDATED_EXPIRY_TIME_MS, (short) 0);
        addDeleteMessagesToReplicasOfPartition(partitionId, id, Collections.singletonList(remoteHost), (short) 1, EXPIRY_TIME_MS);
        // Adding one Put and Delete to remote and add same Put and a Delete and Undelete to local.
        id = addPutMessagesToReplicasOfPartition(partitionId, Arrays.asList(remoteHost, localHost), 1).get(0);
        ids.add(id);
        addDeleteMessagesToReplicasOfPartition(partitionId, id, Collections.singletonList(localHost), (short) 0, EXPIRY_TIME_MS);
        addUndeleteMessagesToReplicasOfPartition(partitionId, id, Collections.singletonList(remoteHost), (short) 1);
        addDeleteMessagesToReplicasOfPartition(partitionId, id, Collections.singletonList(remoteHost), (short) 2, EXPIRY_TIME_MS);
        idsByPartition.put(partitionId, ids);
    }
    int batchSize = 100;
    Pair<Map<DataNodeId, List<RemoteReplicaInfo>>, ReplicaThread> replicasAndThread = getRemoteReplicasAndReplicaThread(batchSize, clusterMap, localHost, remoteHost, storeKeyConverter, transformer, null, null);
    List<RemoteReplicaInfo> remoteReplicaInfos = replicasAndThread.getFirst().get(remoteHost.dataNodeId);
    ReplicaThread replicaThread = replicasAndThread.getSecond();
    // The remote's new messages are all deletes, so there are no missing keys.
    List<ReplicaThread.ExchangeMetadataResponse> response = replicaThread.exchangeMetadata(new MockConnectionPool.MockConnection(remoteHost, batchSize), remoteReplicaInfos);
    assertEquals("Response should contain a response for each replica", remoteReplicaInfos.size(), response.size());
    for (int i = 0; i < response.size(); i++) {
        assertEquals(0, response.get(i).missingStoreMessages.size());
        remoteReplicaInfos.get(i).setToken(response.get(i).remoteToken);
    }
    // Before exchanging metadata, the local host has 7 message infos per partition (4 puts, 2 deletes, 1 ttl update).
    // Exchanging metadata applies another 4 (all deletes) from the remote, for 11 in total.
    for (Map.Entry<PartitionId, List<MessageInfo>> localInfoEntry : localHost.infosByPartition.entrySet()) {
        assertEquals("MessageInfo number mismatch", 11, localInfoEntry.getValue().size());
    }
    for (Map.Entry<PartitionId, List<StoreKey>> idsEntry : idsByPartition.entrySet()) {
        List<MessageInfo> remoteInfos = remoteHost.infosByPartition.get(idsEntry.getKey());
        List<MessageInfo> localInfos = localHost.infosByPartition.get(idsEntry.getKey());
        for (StoreKey id : idsEntry.getValue()) {
            if (!idsToBeIgnoredByPartition.get(idsEntry.getKey()).equals(id)) {
                MessageInfo localInfo = getMergedMessageInfo(id, localInfos);
                MessageInfo remoteInfo = getMergedMessageInfo(id, remoteInfos);
                assertTrue(localInfo.isDeleted());
                assertTrue(remoteInfo.isDeleted());
                assertEquals(localInfo.getLifeVersion(), remoteInfo.getLifeVersion());
            }
        }
    }
}
Also used: ValidatingTransformer (com.github.ambry.messageformat.ValidatingTransformer), Transformer (com.github.ambry.store.Transformer), HashMap (java.util.HashMap), StoreKeyFactory (com.github.ambry.store.StoreKeyFactory), List (java.util.List), ArrayList (java.util.ArrayList), MockStoreKeyConverterFactory (com.github.ambry.store.MockStoreKeyConverterFactory), MockPartitionId (com.github.ambry.clustermap.MockPartitionId), PartitionId (com.github.ambry.clustermap.PartitionId), StoreKey (com.github.ambry.store.StoreKey), BlobIdFactory (com.github.ambry.commons.BlobIdFactory), MessageInfo (com.github.ambry.store.MessageInfo), Map (java.util.Map), ClusterMap (com.github.ambry.clustermap.ClusterMap), MockClusterMap (com.github.ambry.clustermap.MockClusterMap), Test (org.junit.Test)
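Worth spelling out the rule this test exercises: every Delete or Undelete stamps the blob with a lifeVersion, and the state carrying the strictly higher lifeVersion supersedes the other, so the local replica converges on the remote's final Delete. A minimal self-contained sketch of that resolution rule (hypothetical types for illustration; this is not ambry's API):

enum BlobState { PUT, DELETE, UNDELETE }

class LifeVersionSketch {

    // A state stamped with a strictly higher lifeVersion supersedes one with a lower lifeVersion.
    static BlobState resolve(short localLifeVersion, BlobState localState, short remoteLifeVersion, BlobState remoteState) {
        return remoteLifeVersion > localLifeVersion ? remoteState : localState;
    }

    public static void main(String[] args) {
        // Mirrors the test's second blob: local holds a DELETE at lifeVersion 0,
        // remote holds a DELETE at lifeVersion 1, so local adopts the remote state
        // and ends up with the same lifeVersion as remote.
        BlobState finalState = resolve((short) 0, BlobState.DELETE, (short) 1, BlobState.DELETE);
        System.out.println("final local state: " + finalState);
    }
}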

Example 12 with BlobIdFactory

use of com.github.ambry.commons.BlobIdFactory in project ambry by linkedin.

the class ReplicationTest method replicaThreadSleepTest.

@Test
public void replicaThreadSleepTest() throws Exception {
    MockClusterMap clusterMap = new MockClusterMap();
    Pair<MockHost, MockHost> localAndRemoteHosts = getLocalAndRemoteHosts(clusterMap);
    MockHost localHost = localAndRemoteHosts.getFirst();
    MockHost remoteHost = localAndRemoteHosts.getSecond();
    long expectedThrottleDurationMs = localHost.dataNodeId.getDatacenterName().equals(remoteHost.dataNodeId.getDatacenterName()) ? replicationConfig.replicationIntraReplicaThreadThrottleSleepDurationMs : replicationConfig.replicationInterReplicaThreadThrottleSleepDurationMs;
    MockStoreKeyConverterFactory storeKeyConverterFactory = new MockStoreKeyConverterFactory(null, null);
    storeKeyConverterFactory.setConversionMap(new HashMap<>());
    storeKeyConverterFactory.setReturnInputIfAbsent(true);
    MockStoreKeyConverterFactory.MockStoreKeyConverter storeKeyConverter = storeKeyConverterFactory.getStoreKeyConverter();
    StoreKeyFactory storeKeyFactory = new BlobIdFactory(clusterMap);
    Transformer transformer = new BlobIdTransformer(storeKeyFactory, storeKeyConverter);
    int batchSize = 4;
    Pair<Map<DataNodeId, List<RemoteReplicaInfo>>, ReplicaThread> replicasAndThread = getRemoteReplicasAndReplicaThread(batchSize, clusterMap, localHost, remoteHost, storeKeyConverter, transformer, null, null);
    Map<DataNodeId, List<RemoteReplicaInfo>> replicasToReplicate = replicasAndThread.getFirst();
    ReplicaThread replicaThread = replicasAndThread.getSecond();
    // populate data: add 1 message to each partition on both hosts.
    for (PartitionId partitionId : clusterMap.getAllPartitionIds(null)) {
        addPutMessagesToReplicasOfPartition(partitionId, Arrays.asList(localHost, remoteHost), 1);
    }
    // tests to verify replica thread throttling and idling functions in the following steps:
    // 1. all replicas are in sync; thread-level sleep and replica quarantine are both enabled.
    // 2. add put messages to some replicas and verify that replication for them remains disabled.
    // 3. forward the time so replication for the replicas is re-enabled and check that replication resumes.
    // 4. add more put messages to ensure replication happens continuously when needed and is throttled appropriately.
    // 1. verify that the replica thread sleeps and replicas are temporarily disabled when all replicas are synced.
    List<List<RemoteReplicaInfo>> replicasToReplicateList = new ArrayList<>(replicasToReplicate.values());
    // replicate is called and time is moved forward to prepare the replicas for testing.
    replicaThread.replicate();
    time.sleep(replicationConfig.replicationSyncedReplicaBackoffDurationMs + 1);
    long currentTimeMs = time.milliseconds();
    replicaThread.replicate();
    for (List<RemoteReplicaInfo> replicaInfos : replicasToReplicateList) {
        for (RemoteReplicaInfo replicaInfo : replicaInfos) {
            assertEquals("Unexpected re-enable replication time", currentTimeMs + replicationConfig.replicationSyncedReplicaBackoffDurationMs, replicaInfo.getReEnableReplicationTime());
        }
    }
    currentTimeMs = time.milliseconds();
    replicaThread.replicate();
    assertEquals("Replicas are in sync, replica thread should sleep by replication.thread.idle.sleep.duration.ms", currentTimeMs + replicationConfig.replicationReplicaThreadIdleSleepDurationMs, time.milliseconds());
    // 2. add 3 messages to one partition on the remote host only and verify that replication for all replicas gets disabled.
    PartitionId partitionId = clusterMap.getWritablePartitionIds(null).get(0);
    addPutMessagesToReplicasOfPartition(partitionId, Collections.singletonList(remoteHost), 3);
    int[] missingKeys = new int[replicasToReplicate.get(remoteHost.dataNodeId).size()];
    for (int i = 0; i < missingKeys.length; i++) {
        missingKeys[i] = replicasToReplicate.get(remoteHost.dataNodeId).get(i).getReplicaId().getPartitionId().isEqual(partitionId.toPathString()) ? 3 : 0;
    }
    currentTimeMs = time.milliseconds();
    replicaThread.replicate();
    assertEquals("Replication for all replicas should be disabled and the thread should sleep", currentTimeMs + replicationConfig.replicationReplicaThreadIdleSleepDurationMs, time.milliseconds());
    assertMissingKeys(missingKeys, batchSize, replicaThread, remoteHost, replicasToReplicate);
    // 3. forward the time, run replicate(), and verify that replication resumes.
    time.sleep(replicationConfig.replicationSyncedReplicaBackoffDurationMs);
    replicaThread.replicate();
    missingKeys = new int[replicasToReplicate.get(remoteHost.dataNodeId).size()];
    assertMissingKeys(missingKeys, batchSize, replicaThread, remoteHost, replicasToReplicate);
    // Since setting remoteReplicaInfo::setReEnableReplicationTime has moved inside replicaThread::exchangeMetadata, and
    // assertMissingKeys() above runs exchangeMetadata() for up-to-date replicas, each replica has its
    // reEnableReplicationTime pushed out by replicationSyncedReplicaBackoffDurationMs. Forward the time here.
    time.sleep(replicationConfig.replicationSyncedReplicaBackoffDurationMs);
    // 4. add more put messages and verify that replication continues and is throttled appropriately.
    addPutMessagesToReplicasOfPartition(partitionId, Arrays.asList(localHost, remoteHost), 3);
    currentTimeMs = time.milliseconds();
    replicaThread.replicate();
    assertEquals("Replica thread should sleep exactly " + expectedThrottleDurationMs + " since remote has new token", currentTimeMs + expectedThrottleDurationMs, time.milliseconds());
    assertMissingKeys(missingKeys, batchSize, replicaThread, remoteHost, replicasToReplicate);
    // Since setting remoteReplicaInfo::setReEnableReplicationTime has moved inside replicaThread::exchangeMetadata, and
    // assertMissingKeys() above runs exchangeMetadata() for up-to-date replicas, each replica has its
    // reEnableReplicationTime pushed out by replicationSyncedReplicaBackoffDurationMs. Forward the time here.
    time.sleep(replicationConfig.replicationSyncedReplicaBackoffDurationMs);
    // verify that throttling on the replica thread is disabled when relevant configs are 0.
    Properties properties = new Properties();
    properties.setProperty("replication.intra.replica.thread.throttle.sleep.duration.ms", "0");
    properties.setProperty("replication.inter.replica.thread.throttle.sleep.duration.ms", "0");
    replicationConfig = new ReplicationConfig(new VerifiableProperties(properties));
    replicasAndThread = getRemoteReplicasAndReplicaThread(batchSize, clusterMap, localHost, remoteHost, storeKeyConverter, transformer, null, null);
    replicaThread = replicasAndThread.getSecond();
    currentTimeMs = time.milliseconds();
    replicaThread.replicate();
    assertEquals("Replica thread should not sleep when throttling is disabled and replicas are out of sync", currentTimeMs, time.milliseconds());
}
Also used: ValidatingTransformer (com.github.ambry.messageformat.ValidatingTransformer), Transformer (com.github.ambry.store.Transformer), ArrayList (java.util.ArrayList), Properties (java.util.Properties), VerifiableProperties (com.github.ambry.config.VerifiableProperties), StoreKeyFactory (com.github.ambry.store.StoreKeyFactory), List (java.util.List), MockStoreKeyConverterFactory (com.github.ambry.store.MockStoreKeyConverterFactory), ReplicationConfig (com.github.ambry.config.ReplicationConfig), MockPartitionId (com.github.ambry.clustermap.MockPartitionId), PartitionId (com.github.ambry.clustermap.PartitionId), BlobIdFactory (com.github.ambry.commons.BlobIdFactory), Map (java.util.Map), HashMap (java.util.HashMap), ClusterMap (com.github.ambry.clustermap.ClusterMap), MockClusterMap (com.github.ambry.clustermap.MockClusterMap), DataNodeId (com.github.ambry.clustermap.DataNodeId), MockDataNodeId (com.github.ambry.clustermap.MockDataNodeId), Test (org.junit.Test)
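The throttle durations asserted in this test come from ReplicationConfig, and the intra- vs inter-datacenter choice is just a comparison of the two hosts' datacenter names. A minimal sketch of that selection, reusing the property names the test itself sets (the datacenter names below are made up for illustration):

import java.util.Properties;

class ThrottleSelectionSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        // The same property names the test sets to "0" to disable throttling.
        props.setProperty("replication.intra.replica.thread.throttle.sleep.duration.ms", "100");
        props.setProperty("replication.inter.replica.thread.throttle.sleep.duration.ms", "500");
        long intraMs = Long.parseLong(props.getProperty("replication.intra.replica.thread.throttle.sleep.duration.ms"));
        long interMs = Long.parseLong(props.getProperty("replication.inter.replica.thread.throttle.sleep.duration.ms"));
        // Hypothetical datacenter names; the test derives them from the mock data nodes.
        String localDc = "DC1";
        String remoteDc = "DC1";
        // Same datacenter -> the intra-DC throttle applies; otherwise the inter-DC value is used.
        long expectedThrottleDurationMs = localDc.equals(remoteDc) ? intraMs : interMs;
        System.out.println("expected throttle sleep: " + expectedThrottleDurationMs + " ms");
    }
}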

Example 13 with BlobIdFactory

use of com.github.ambry.commons.BlobIdFactory in project ambry by linkedin.

the class ConsistencyCheckerTool method main.

public static void main(String[] args) throws Exception {
    VerifiableProperties properties = ToolUtils.getVerifiableProperties(args);
    ConsistencyCheckerToolConfig config = new ConsistencyCheckerToolConfig(properties);
    ClusterMapConfig clusterMapConfig = new ClusterMapConfig(properties);
    ServerConfig serverConfig = new ServerConfig(properties);
    try (ClusterMap clusterMap = new StaticClusterAgentsFactory(clusterMapConfig, config.hardwareLayoutFilePath, config.partitionLayoutFilePath).getClusterMap()) {
        StoreToolsMetrics metrics = new StoreToolsMetrics(clusterMap.getMetricRegistry());
        StoreConfig storeConfig = new StoreConfig(properties);
        // this tool supports only blob IDs. It can become generic if StoreKeyFactory provides a deserFromString method.
        BlobIdFactory blobIdFactory = new BlobIdFactory(clusterMap);
        Set<StoreKey> filterKeySet = new HashSet<>();
        for (String key : config.filterSet) {
            filterKeySet.add(new BlobId(key, clusterMap));
        }
        Time time = SystemTime.getInstance();
        Throttler throttler = new Throttler(config.indexEntriesToProcessPerSec, 1000, true, time);
        StoreKeyConverterFactory storeKeyConverterFactory = Utils.getObj(serverConfig.serverStoreKeyConverterFactory, properties, clusterMap.getMetricRegistry());
        ConsistencyCheckerTool consistencyCheckerTool = new ConsistencyCheckerTool(clusterMap, blobIdFactory, storeConfig, filterKeySet, throttler, metrics, time, storeKeyConverterFactory.getStoreKeyConverter());
        boolean success = consistencyCheckerTool.checkConsistency(config.pathOfInput.listFiles(File::isDirectory)).getFirst();
        System.exit(success ? 0 : 1);
    }
}
Also used: ClusterMap (com.github.ambry.clustermap.ClusterMap), VerifiableProperties (com.github.ambry.config.VerifiableProperties), SystemTime (com.github.ambry.utils.SystemTime), Time (com.github.ambry.utils.Time), ClusterMapConfig (com.github.ambry.config.ClusterMapConfig), BlobIdFactory (com.github.ambry.commons.BlobIdFactory), ServerConfig (com.github.ambry.config.ServerConfig), StaticClusterAgentsFactory (com.github.ambry.clustermap.StaticClusterAgentsFactory), StoreConfig (com.github.ambry.config.StoreConfig), BlobId (com.github.ambry.commons.BlobId), File (java.io.File), HashSet (java.util.HashSet), Throttler (com.github.ambry.utils.Throttler)
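The Throttler built above limits how many index entries per second the tool processes (DumpDataTool's throttler.maybeThrottle(entries.size()) further below uses it the same way). A minimal self-contained sketch of the rate-limiting idea; this is an illustration, not ambry's Throttler implementation:

class ThrottlerSketch {
    private final double desiredRatePerSec;
    private final long startMs = System.currentTimeMillis();
    private long processed = 0;

    ThrottlerSketch(double desiredRatePerSec) {
        this.desiredRatePerSec = desiredRatePerSec;
    }

    // Record that 'units' items were just processed and sleep if we are ahead of the target rate.
    void maybeThrottle(int units) throws InterruptedException {
        processed += units;
        long elapsedMs = System.currentTimeMillis() - startMs;
        long earliestAllowedMs = (long) (processed * 1000 / desiredRatePerSec);
        if (earliestAllowedMs > elapsedMs) {
            Thread.sleep(earliestAllowedMs - elapsedMs);
        }
    }

    public static void main(String[] args) throws InterruptedException {
        ThrottlerSketch throttler = new ThrottlerSketch(1000); // cap at ~1000 entries/sec
        for (int batch = 0; batch < 5; batch++) {
            // ... process a batch of 500 index entries, then account for it ...
            throttler.maybeThrottle(500);
        }
        System.out.println("done");
    }
}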

Example 14 with BlobIdFactory

use of com.github.ambry.commons.BlobIdFactory in project ambry by linkedin.

the class DumpCompactionLogTool method main.

public static void main(String[] args) throws Exception {
    VerifiableProperties verifiableProperties = ToolUtils.getVerifiableProperties(args);
    DumpCompactionLogConfig config = new DumpCompactionLogConfig(verifiableProperties);
    ClusterMapConfig clusterMapConfig = new ClusterMapConfig(verifiableProperties);
    try (ClusterMap clusterMap = ((ClusterAgentsFactory) Utils.getObj(clusterMapConfig.clusterMapClusterAgentsFactory, clusterMapConfig, config.hardwareLayoutFilePath, config.partitionLayoutFilePath)).getClusterMap()) {
        File file = new File(config.compactionLogFilePath);
        BlobIdFactory blobIdFactory = new BlobIdFactory(clusterMap);
        StoreConfig storeConfig = new StoreConfig(verifiableProperties);
        Time time = SystemTime.getInstance();
        CompactionLog compactionLog = new CompactionLog(file, blobIdFactory, time, storeConfig);
        System.out.println(compactionLog);
    }
}
Also used: ClusterMap (com.github.ambry.clustermap.ClusterMap), VerifiableProperties (com.github.ambry.config.VerifiableProperties), StoreConfig (com.github.ambry.config.StoreConfig), SystemTime (com.github.ambry.utils.SystemTime), Time (com.github.ambry.utils.Time), ClusterAgentsFactory (com.github.ambry.clustermap.ClusterAgentsFactory), File (java.io.File), ClusterMapConfig (com.github.ambry.config.ClusterMapConfig), BlobIdFactory (com.github.ambry.commons.BlobIdFactory)
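Utils.getObj above (and in the other tools) instantiates the factory class named in configuration via reflection, picking a constructor compatible with the supplied arguments. A simplified sketch of that pattern; the method body here is illustrative, not ambry's Utils implementation:

import java.lang.reflect.Constructor;

class GetObjSketch {

    // Instantiate 'className' by finding a constructor whose parameters accept the given arguments.
    @SuppressWarnings("unchecked")
    static <T> T getObj(String className, Object... args) throws Exception {
        for (Constructor<?> ctor : Class.forName(className).getDeclaredConstructors()) {
            Class<?>[] params = ctor.getParameterTypes();
            if (params.length != args.length) {
                continue;
            }
            boolean matches = true;
            for (int i = 0; i < params.length; i++) {
                if (args[i] != null && !params[i].isAssignableFrom(args[i].getClass())) {
                    matches = false;
                    break;
                }
            }
            if (matches) {
                return (T) ctor.newInstance(args);
            }
        }
        throw new NoSuchMethodException("No matching constructor on " + className);
    }

    public static void main(String[] args) throws Exception {
        // Stand-in for a factory class name read from config.
        StringBuilder sb = getObj("java.lang.StringBuilder", "hello");
        System.out.println(sb.append(", world"));
    }
}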

Example 15 with BlobIdFactory

use of com.github.ambry.commons.BlobIdFactory in project ambry by linkedin.

the class DumpDataTool method compareIndexEntriesToLogContent.

/**
 * Compares every entry in an index file with the corresponding records in the log, checking whether each blob
 * in the index can be successfully deserialized from the log.
 * @param indexFile the file that represents the index segment.
 * @param checkLogEndOffsetMatch if {@code true}, checks that the end offset of the log matches the end offset of the
 *                               index.
 * @throws Exception
 */
private void compareIndexEntriesToLogContent(File indexFile, boolean checkLogEndOffsetMatch) throws Exception {
    if (!indexFile.exists()) {
        throw new IllegalArgumentException("File does not exist " + indexFile);
    }
    final Timer.Context context = metrics.compareIndexFileToLogTimeMs.time();
    try {
        logger.info("Dumping index {}", indexFile.getAbsolutePath());
        StoreKeyFactory storeKeyFactory = new BlobIdFactory(clusterMap);
        StoreConfig config = new StoreConfig(new VerifiableProperties(new Properties()));
        MetricRegistry metricRegistry = new MetricRegistry();
        StoreMetrics storeMetrics = new StoreMetrics(metricRegistry);
        IndexSegment segment = new IndexSegment(indexFile, false, storeKeyFactory, config, storeMetrics, new Journal(indexFile.getParent(), 0, 0), time);
        Offset startOffset = segment.getStartOffset();
        TreeMap<Long, Long> coveredRanges = new TreeMap<>();
        String logFileName = segment.getLogSegmentName().toFilename();
        File logFile = new File(indexFile.getParent(), logFileName);
        if (!logFile.exists()) {
            throw new IllegalStateException("Log file does not exist " + logFile);
        }
        RandomAccessFile randomAccessFile = new RandomAccessFile(logFile, "r");
        long logFileSize = randomAccessFile.getChannel().size();
        List<MessageInfo> entries = new ArrayList<>();
        segment.getEntriesSince(null, new FindEntriesCondition(Long.MAX_VALUE), entries, new AtomicLong(0), false);
        for (MessageInfo entry : entries) {
            StoreKey key = entry.getStoreKey();
            IndexValue value = segment.find(key).last();
            boolean isDeleted = value.isFlagSet(IndexValue.Flags.Delete_Index);
            if (value.getOffset().getOffset() < logFileSize) {
                boolean success = readFromLogAndVerify(randomAccessFile, key.getID(), value, coveredRanges);
                if (success) {
                    if (isDeleted) {
                        long originalOffset = value.getOriginalMessageOffset();
                        if (originalOffset != -1) {
                            if (!coveredRanges.containsKey(originalOffset)) {
                                if (startOffset.getOffset() > originalOffset) {
                                    logger.trace("Put Record at {} with delete msg offset {} ignored because it is prior to startOffset {}", originalOffset, value.getOffset(), startOffset);
                                } else {
                                    try {
                                        DumpDataHelper.LogBlobRecordInfo logBlobRecordInfo = DumpDataHelper.readSingleRecordFromLog(randomAccessFile, originalOffset, clusterMap, currentTimeInMs, metrics);
                                        coveredRanges.put(originalOffset, originalOffset + logBlobRecordInfo.totalRecordSize);
                                        logger.trace("PUT Record {} with start offset {} and end offset {} for a delete msg {} at offset {} ", logBlobRecordInfo.blobId, originalOffset, (originalOffset + logBlobRecordInfo.totalRecordSize), key.getID(), value.getOffset());
                                        if (!logBlobRecordInfo.blobId.getID().equals(key.getID())) {
                                            logger.error("BlobId value mismatch between delete record {} and put record {}", key.getID(), logBlobRecordInfo.blobId.getID());
                                        }
                                    } catch (IllegalArgumentException e) {
                                        metrics.logDeserializationError.inc();
                                        logger.error("Illegal arg exception thrown at  {}, while reading blob starting at offset {} with exception: ", randomAccessFile.getChannel().position(), originalOffset, e);
                                    } catch (MessageFormatException e) {
                                        metrics.logDeserializationError.inc();
                                        logger.error("MessageFormat exception thrown at  {} while reading blob starting at offset {} with exception: ", randomAccessFile.getChannel().position(), originalOffset, e);
                                    } catch (EOFException e) {
                                        metrics.endOfFileOnDumpLogError.inc();
                                        logger.error("EOFException thrown at {} ", randomAccessFile.getChannel().position(), e);
                                    } catch (Exception e) {
                                        metrics.unknownErrorOnDumpIndex.inc();
                                        logger.error("Unknown exception thrown {} ", e.getMessage(), e);
                                    }
                                }
                            }
                        }
                    }
                } else {
                    metrics.indexToLogBlobRecordComparisonFailure.inc();
                    logger.error("Failed for key {} with value {} ", key, value);
                }
            } else {
                logger.trace("Blob's {} offset {} is outside of log size {}, with a diff of {}", key, value.getOffset().getOffset(), logFileSize, (value.getOffset().getOffset() - logFileSize));
            }
        }
        throttler.maybeThrottle(entries.size());
        long indexEndOffset = segment.getEndOffset().getOffset();
        if (checkLogEndOffsetMatch && indexEndOffset != randomAccessFile.length()) {
            metrics.indexLogEndOffsetMisMatchError.inc();
            logger.error("Log end offset {} and index end offset {} do not match", randomAccessFile.length(), indexEndOffset);
        }
        logRangesNotCovered(coveredRanges, indexEndOffset);
    } finally {
        context.stop();
    }
}
Also used: ArrayList (java.util.ArrayList), Properties (java.util.Properties), VerifiableProperties (com.github.ambry.config.VerifiableProperties), EOFException (java.io.EOFException), MessageFormatException (com.github.ambry.messageformat.MessageFormatException), MetricRegistry (com.codahale.metrics.MetricRegistry), TreeMap (java.util.TreeMap), IOException (java.io.IOException), BlobIdFactory (com.github.ambry.commons.BlobIdFactory), AtomicLong (java.util.concurrent.atomic.AtomicLong), Timer (com.codahale.metrics.Timer), RandomAccessFile (java.io.RandomAccessFile), StoreConfig (com.github.ambry.config.StoreConfig), File (java.io.File)
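The coveredRanges TreeMap above records, for every record verified in the log, its start offset mapped to its end offset; logRangesNotCovered then only needs one ordered scan to report gaps. A self-contained sketch of that scan (the gap-reporting logic is paraphrased, not copied from DumpDataTool):

import java.util.Map;
import java.util.TreeMap;

class CoveredRangesSketch {
    public static void main(String[] args) {
        TreeMap<Long, Long> coveredRanges = new TreeMap<>();
        coveredRanges.put(0L, 100L);   // record 1 occupies offsets [0, 100)
        coveredRanges.put(100L, 250L); // record 2 occupies offsets [100, 250)
        coveredRanges.put(400L, 500L); // record 3 occupies offsets [400, 500): a gap precedes it

        long indexEndOffset = 500L;
        long cursor = 0L;
        for (Map.Entry<Long, Long> range : coveredRanges.entrySet()) {
            if (range.getKey() > cursor) {
                System.out.println("Range not covered: [" + cursor + ", " + range.getKey() + ")");
            }
            cursor = Math.max(cursor, range.getValue());
        }
        if (cursor < indexEndOffset) {
            System.out.println("Range not covered: [" + cursor + ", " + indexEndOffset + ")");
        }
        // Prints: Range not covered: [250, 400)
    }
}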

Aggregations

BlobIdFactory (com.github.ambry.commons.BlobIdFactory): 32
ArrayList (java.util.ArrayList): 28
MockClusterMap (com.github.ambry.clustermap.MockClusterMap): 22
HashMap (java.util.HashMap): 21
VerifiableProperties (com.github.ambry.config.VerifiableProperties): 20
ClusterMap (com.github.ambry.clustermap.ClusterMap): 19
List (java.util.List): 19
DataNodeId (com.github.ambry.clustermap.DataNodeId): 18
PartitionId (com.github.ambry.clustermap.PartitionId): 18
Map (java.util.Map): 18
Test (org.junit.Test): 18
MockPartitionId (com.github.ambry.clustermap.MockPartitionId): 17
BlobId (com.github.ambry.commons.BlobId): 17
StoreKeyFactory (com.github.ambry.store.StoreKeyFactory): 16
Properties (java.util.Properties): 16
MockStoreKeyConverterFactory (com.github.ambry.store.MockStoreKeyConverterFactory): 13
MetricRegistry (com.codahale.metrics.MetricRegistry): 12
ClusterMapConfig (com.github.ambry.config.ClusterMapConfig): 12
Transformer (com.github.ambry.store.Transformer): 12
MockDataNodeId (com.github.ambry.clustermap.MockDataNodeId): 11