
Example 46 with Container

use of com.github.ambry.account.Container in project ambry by linkedin.

the class RestUtilsTest method getBlobPropertiesGoodInputTest.

/**
 * Tests building of {@link BlobProperties} given good input (all arguments in the number and format expected).
 * @throws Exception
 */
@Test
public void getBlobPropertiesGoodInputTest() throws Exception {
    JSONObject headers = new JSONObject();
    Container[] containers = { Container.DEFAULT_PRIVATE_CONTAINER, Container.DEFAULT_PUBLIC_CONTAINER };
    for (Container container : containers) {
        setAmbryHeadersForPut(headers, Long.toString(RANDOM.nextInt(10000)), generateRandomString(10), container, "image/gif", generateRandomString(10));
        verifyBlobPropertiesConstructionSuccess(headers);
    }
}
Also used : Container(com.github.ambry.account.Container) JSONObject(org.json.JSONObject) Test(org.junit.Test)
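
The helpers setAmbryHeadersForPut and verifyBlobPropertiesConstructionSuccess are defined elsewhere in RestUtilsTest and are not shown above. Below is a minimal sketch of what a header-populating helper with this call-site shape could look like; the x-ambry-* header names and the exact fields written are assumptions made for illustration, not the project's actual implementation.

// Hypothetical sketch only: populate the request headers that blob-properties construction reads.
// The header key names below are assumptions inferred from the call site above.
private static void setAmbryHeadersForPut(JSONObject headers, String ttlInSecs, String serviceId, Container container, String contentType, String ownerId) throws Exception {
    headers.put("x-ambry-ttl", ttlInSecs);
    headers.put("x-ambry-service-id", serviceId);
    headers.put("x-ambry-content-type", contentType);
    headers.put("x-ambry-owner-id", ownerId);
    headers.put("x-ambry-target-container-name", container.getName());
}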

Example 47 with Container

use of com.github.ambry.account.Container in project ambry by linkedin.

the class DeprecatedContainerCloudSyncTask method run.

@Override
public TaskResult run() {
    TaskResult taskResult = null;
    Timer.Context deprecationTaskRunTimer = vcrMetrics.deprecationTaskRunTime.time();
    try {
        logger.info("DeprecatedContainerCloudSyncTask run started.");
        Timer.Context accountServiceFetchTimer = vcrMetrics.accountServiceFetchTime.time();
        Set<Container> deprecatedContainers = AccountUtils.getDeprecatedContainers(accountService, containerDeletionRetentionDays);
        accountServiceFetchTimer.stop();
        logger.info("Attempting deprecation of {} containers.", deprecatedContainers.size());
        cloudDestination.deprecateContainers(deprecatedContainers);
        taskResult = new TaskResult(TaskResult.Status.COMPLETED, "DeprecatedContainerCloudSyncTask completed successfully.");
    } catch (CloudStorageException cloudStorageException) {
        logger.error("Error in updating deprecated containers from account service to cloud: ", cloudStorageException);
        taskResult = new TaskResult(TaskResult.Status.FAILED, "DeprecatedContainerCloudSyncTask failed due to: " + cloudStorageException.getMessage());
    } finally {
        logger.info("DeprecatedContainerCloudSyncTask done.");
        deprecationTaskRunTimer.stop();
    }
    return taskResult;
}
Also used : Container(com.github.ambry.account.Container) Timer(com.codahale.metrics.Timer) TaskResult(org.apache.helix.task.TaskResult)
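
AccountUtils.getDeprecatedContainers is only invoked here, so its selection logic is not visible in this example. The following is a hedged sketch of the kind of filtering it is expected to perform, built only from the statuses and getters used elsewhere on this page (getContainersByStatus, getDeleteTriggerTime) and assuming the usual imports (java.util.Set, java.util.HashSet, java.util.concurrent.TimeUnit); it is an illustration, not Ambry's actual implementation.

// Illustrative sketch only: pick containers that have been in DELETE_IN_PROGRESS longer than the retention window.
static Set<Container> selectDeprecatedContainers(AccountService accountService, long containerDeletionRetentionDays) {
    Set<Container> deprecated = new HashSet<>();
    long cutoffMs = System.currentTimeMillis() - TimeUnit.DAYS.toMillis(containerDeletionRetentionDays);
    for (Container container : accountService.getContainersByStatus(Container.ContainerStatus.DELETE_IN_PROGRESS)) {
        // eligible once the delete trigger time has aged past the retention period
        if (container.getDeleteTriggerTime() <= cutoffMs) {
            deprecated.add(container);
        }
    }
    return deprecated;
}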

Example 48 with Container

use of com.github.ambry.account.Container in project ambry by linkedin.

the class BlobStoreCompactorTest method testCloseLastLogSegmentIfQualified.

@Test
public void testCloseLastLogSegmentIfQualified() throws Exception {
    // create first active log segment
    refreshState(false, false, true);
    // leave space for five log segments so the last log segment can be auto-closed
    long requiredCount = state.log.getCapacityInBytes() / state.log.getSegmentCapacity() - 5;
    writeDataToMeetRequiredSegmentCountForAutoCloseLogSegmentTest(requiredCount);
    // mark the containers of all blobs in the last index segment as DELETE_IN_PROGRESS.
    ConcurrentSkipListMap<Offset, IndexSegment> allIndexSegments = state.index.getIndexSegments();
    IndexSegment indexSegment = allIndexSegments.floorEntry(state.log.getEndOffset()).getValue();
    ListIterator<IndexEntry> segmentListIter = indexSegment.listIterator(indexSegment.size());
    Set<Container> deleteInProgressSet = new HashSet<>();
    int cnt = 0;
    while (segmentListIter.hasNext()) {
        IndexEntry indexEntry = segmentListIter.next();
        MockId mockId = (MockId) indexEntry.getKey();
        Container container = new ContainerBuilder(mockId.getContainerId(), "containerName" + cnt++, Container.ContainerStatus.DELETE_IN_PROGRESS, null, mockId.getAccountId()).build();
        deleteInProgressSet.add(container);
    }
    Mockito.when(accountService.getContainersByStatus(Container.ContainerStatus.DELETE_IN_PROGRESS)).thenReturn(deleteInProgressSet);
    // reload index to make sure journal is on only the latest log segment
    state.reloadIndex(true, false);
    List<LogSegmentName> segmentsUnderCompaction = getLogSegments(0, state.index.getLogSegmentCount() - 1);
    // Prepare the compactionPolicyInfo before compaction.
    CompactionPolicySwitchInfo compactionPolicySwitchInfo = new CompactionPolicySwitchInfo(System.currentTimeMillis(), true);
    backUpCompactionPolicyInfo(tempDir.toString(), compactionPolicySwitchInfo);
    // instantiate compactor.
    compactor = getCompactor(state.log, DISK_IO_SCHEDULER, null, true);
    compactor.initialize(state.index);
    compactAndVerifyForContainerDeletion(segmentsUnderCompaction, state.time.milliseconds(), false);
    // indicate that the next round should not use the compact-all policy
    compactionPolicySwitchInfo.setNextRoundIsCompactAllPolicy(false);
    // reload index to make sure journal is on only the latest log segment
    backUpCompactionPolicyInfo(tempDir.toString(), compactionPolicySwitchInfo);
    segmentsUnderCompaction = getLogSegments(0, state.log.getCapacityInBytes() / state.log.getSegmentCapacity() - state.log.getRemainingUnallocatedSegments() - 1);
    compactAndVerifyForContainerDeletion(segmentsUnderCompaction, state.time.milliseconds(), true);
    // Edge case test: if the last log segment is empty, no need to auto close it.
    int beforeAutoCloseLogSegmentsCnt = state.log.getLogSegmentCount();
    compactionPolicySwitchInfo = new CompactionPolicySwitchInfo(System.currentTimeMillis(), true);
    backUpCompactionPolicyInfo(tempDir.toString(), compactionPolicySwitchInfo);
    compactor = getCompactor(state.log, DISK_IO_SCHEDULER, null, true);
    compactor.initialize(state.index);
    if (state.log.autoCloseLastLogSegmentIfQualified()) {
        // refresh journal.
        state.index.journal.cleanUpJournal();
    }
    int afterAutoCloseLogSegmentsCnt = state.log.getLogSegmentCount();
    assertEquals("No segments should be created since last log segment is empty", beforeAutoCloseLogSegmentsCnt, afterAutoCloseLogSegmentsCnt);
    // make sure new data will be added into the last log segment
    long logSegmentCountBeforeNewDataAddIntoAutoClosedLogSegment = state.index.getLogSegmentCount();
    state.addPutEntries(1, PUT_RECORD_SIZE, Utils.Infinite_Time);
    long logSegmentCountAfterNewDataAddIntoAutoClosedLogSegment = state.index.getLogSegmentCount();
    assertEquals("Log Segment count should be increased after some data has been added to the last log segment.", logSegmentCountBeforeNewDataAddIntoAutoClosedLogSegment, logSegmentCountAfterNewDataAddIntoAutoClosedLogSegment - 1);
    assertEquals("Last index segment should belongs to auto closed log segment", state.index.getIndexSegments().lastEntry().getValue().getLogSegmentName(), state.log.getLastSegment().getName());
    // close the last log segment again.
    beforeAutoCloseLogSegmentsCnt = state.log.getLogSegmentCount();
    if (state.log.autoCloseLastLogSegmentIfQualified()) {
        // refresh journal.
        state.index.journal.cleanUpJournal();
    }
    afterAutoCloseLogSegmentsCnt = state.log.getLogSegmentCount();
    assertEquals("One log segment should be created since last log segment is not empty", beforeAutoCloseLogSegmentsCnt, afterAutoCloseLogSegmentsCnt - 1);
    // make sure new data will be added into the last log segment
    logSegmentCountBeforeNewDataAddIntoAutoClosedLogSegment = state.index.getLogSegmentCount();
    state.addPutEntries(1, PUT_RECORD_SIZE, Utils.Infinite_Time);
    logSegmentCountAfterNewDataAddIntoAutoClosedLogSegment = state.index.getLogSegmentCount();
    assertEquals("Log Segment count should be increased after some data has been added to the last log segment.", logSegmentCountBeforeNewDataAddIntoAutoClosedLogSegment, logSegmentCountAfterNewDataAddIntoAutoClosedLogSegment - 1);
    assertEquals("Last index segment should belongs to auto closed log segment", state.index.getIndexSegments().lastEntry().getValue().getLogSegmentName(), state.log.getLastSegment().getName());
    // make sure the log can be reloaded, mimicking the flow after a restart/deployment
    state.reloadLog(true);
}
Also used : Container(com.github.ambry.account.Container) ContainerBuilder(com.github.ambry.account.ContainerBuilder) HashSet(java.util.HashSet) Test(org.junit.Test)
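
compactAndVerifyForContainerDeletion and backUpCompactionPolicyInfo are test helpers that are not shown in this snippet. Below is a hypothetical sketch of what a compact-and-verify step for container deletion might do, reusing only calls that appear in Example 49 (CompactionDetails, compactor.compact, state.index.findKey); the helper name and signature are assumptions, not the test's real helper.

// Hypothetical sketch: run compaction over the given segments and verify that records owned by
// INACTIVE / DELETE_IN_PROGRESS containers are no longer found in the index.
private void compactAndVerifyContainerDeletionSketch(List<LogSegmentName> segmentsUnderCompaction, long referenceTimeMs, Set<MockId> keysInDeletedContainers) throws Exception {
    CompactionDetails details = new CompactionDetails(referenceTimeMs, segmentsUnderCompaction, null);
    try {
        compactor.compact(details, bundleReadBuffer);
    } finally {
        compactor.close(0);
    }
    for (MockId key : keysInDeletedContainers) {
        assertNull("There should be no record of " + key, state.index.findKey(key));
    }
}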

Example 49 with Container

use of com.github.ambry.account.Container in project ambry by linkedin.

the class BlobStoreCompactorTest method containerDeletionTest.

/**
 * Tests compaction on a log that contains PUT records whose corresponding containers are marked INACTIVE or DELETE_IN_PROGRESS.
 * @throws Exception
 */
@Test
public void containerDeletionTest() throws Exception {
    refreshState(false, false, false);
    state.properties.setProperty("store.container.deletion.enabled", "true");
    state.properties.setProperty("store.index.max.number.of.inmem.elements", "5");
    state.initIndex(null);
    long notExpiredMs = state.time.milliseconds() + TimeUnit.SECONDS.toMillis(Short.MAX_VALUE);
    // LS (Log Segment) 0
    // IS (Index Segment) 0.1
    // p1 DeleteInProgress
    // p2 Inactive
    IndexEntry p1 = state.addPutEntries(1, PUT_RECORD_SIZE, notExpiredMs).get(0);
    IndexEntry p2 = state.addPutEntries(1, PUT_RECORD_SIZE, notExpiredMs).get(0);
    state.addPutEntries(1, PUT_RECORD_SIZE, notExpiredMs).get(0);
    state.addPutEntries(1, PUT_RECORD_SIZE, notExpiredMs).get(0);
    state.addPutEntries(1, PUT_RECORD_SIZE, notExpiredMs).get(0);
    // IS (Index Segment) 0.2
    // p3 DeleteInProgress
    // p4 Inactive
    IndexEntry p3 = state.addPutEntries(1, PUT_RECORD_SIZE, notExpiredMs).get(0);
    IndexEntry p4 = state.addPutEntries(1, PUT_RECORD_SIZE, notExpiredMs).get(0);
    long lastRecSize = state.log.getSegmentCapacity() - state.index.getCurrentEndOffset().getOffset();
    state.addPutEntries(1, lastRecSize, notExpiredMs).get(0);
    // LS 1
    state.addPutEntries(1, PUT_RECORD_SIZE, notExpiredMs).get(0);
    // Make sure we have two log segments
    writeDataToMeetRequiredSegmentCount(2, null);
    // collect the index entries whose containers will be marked as DELETE_IN_PROGRESS.
    List<IndexEntry> indexEntriesDeleteInProgress = new ArrayList<>();
    indexEntriesDeleteInProgress.add(p1);
    indexEntriesDeleteInProgress.add(p3);
    // collect the index entries whose containers will be marked as INACTIVE.
    List<IndexEntry> indexEntriesInactive = new ArrayList<>();
    indexEntriesInactive.add(p2);
    indexEntriesInactive.add(p4);
    // Mock indexEntries' corresponding container as INACTIVE/DELETE_IN_PROGRESS.
    Set<Container> deleteInProgressSet = new HashSet<>();
    Set<MockId> deletedInProgressKeys = new HashSet<>();
    Set<Container> InactiveSet = new HashSet<>();
    Set<MockId> InactiveKeys = new HashSet<>();
    long cleanedUpSize = 0;
    for (IndexEntry indexEntry : indexEntriesDeleteInProgress) {
        Container container = Mockito.mock(Container.class);
        Mockito.when(container.getParentAccountId()).thenReturn(indexEntry.getValue().getAccountId());
        Mockito.when(container.getId()).thenReturn(indexEntry.getValue().getContainerId());
        Mockito.when(container.getDeleteTriggerTime()).thenReturn(System.currentTimeMillis() - TimeUnit.DAYS.toMillis(config.storeContainerDeletionRetentionDays));
        deleteInProgressSet.add(container);
        deletedInProgressKeys.add((MockId) indexEntry.getKey());
        cleanedUpSize += indexEntry.getValue().getSize();
    }
    for (IndexEntry indexEntry : indexEntriesInactive) {
        Container container = Mockito.mock(Container.class);
        Mockito.when(container.getParentAccountId()).thenReturn(indexEntry.getValue().getAccountId());
        Mockito.when(container.getId()).thenReturn(indexEntry.getValue().getContainerId());
        inactiveSet.add(container);
        inactiveKeys.add((MockId) indexEntry.getKey());
        cleanedUpSize += indexEntry.getValue().getSize();
    }
    Mockito.when(accountService.getContainersByStatus(Container.ContainerStatus.DELETE_IN_PROGRESS)).thenReturn(deleteInProgressSet);
    Mockito.when(accountService.getContainersByStatus(Container.ContainerStatus.INACTIVE)).thenReturn(inactiveSet);
    // get everything except the last log segment entries out of the journal
    state.reloadIndex(true, false);
    List<LogSegmentName> segmentsUnderCompaction = getLogSegments(0, 1);
    long logSegmentSizeSumBeforeCompaction = getSumOfLogSegmentEndOffsets();
    CompactionDetails details = new CompactionDetails(notExpiredMs, segmentsUnderCompaction, null);
    LogSegmentName logSegmentName = p1.getValue().getOffset().getName();
    LogSegmentName compactedLogSegmentName = logSegmentName.getNextGenerationName();
    long endOffsetOfSegmentBeforeCompaction = state.log.getSegment(logSegmentName).getEndOffset();
    compactor = getCompactor(state.log, DISK_IO_SCHEDULER, null, false);
    compactor.initialize(state.index);
    int indexSegmentCountBeforeCompaction = state.index.getIndexSegments().size();
    try {
        compactor.compact(details, bundleReadBuffer);
    } finally {
        compactor.close(0);
    }
    assertFalse("Sum of size of log segments did not change after compaction", logSegmentSizeSumBeforeCompaction == getSumOfLogSegmentEndOffsets());
    // inactive/deleted indexEntries should not be found
    for (MockId deletedKey : deletedInProgressKeys) {
        assertNull("There should be no record of " + deletedKey, state.index.findKey(deletedKey));
    }
    for (MockId inactiveKey : inactiveKeys) {
        assertNull("There should be no record of " + inactiveKey, state.index.findKey(inactiveKey));
    }
    assertEquals("End offset of log segment not as expected after compaction", endOffsetOfSegmentBeforeCompaction - cleanedUpSize, state.log.getSegment(compactedLogSegmentName).getEndOffset());
    int indexSegmentDiff = 1;
    assertEquals("Index Segment not as expected after compaction", indexSegmentCountBeforeCompaction - indexSegmentDiff, state.index.getIndexSegments().size());
}
Also used : ArrayList(java.util.ArrayList) Container(com.github.ambry.account.Container) HashSet(java.util.HashSet) Test(org.junit.Test)

Example 50 with Container

use of com.github.ambry.account.Container in project ambry by linkedin.

the class HelixAccountServiceTest method testSelectInactiveContainer.

/**
 * Tests selecting INACTIVE {@link Container}s from DELETE_IN_PROGRESS {@link Container}s.
 */
@Test
public void testSelectInactiveContainer() throws Exception {
    // generates store stats
    int accountCount = 1;
    int containerCount = 3;
    StatsSnapshot statsSnapshot = generateStoreStats(accountCount, containerCount, random, StatsReportType.ACCOUNT_REPORT);
    // a set that records the account ids that have already been taken.
    Set<Short> accountIdSet = new HashSet<>();
    // generate reference accounts and containers to populate idToRefAccountMap and idToRefContainerMap.
    refAccountId = Utils.getRandomShort(random);
    accountIdSet.add(refAccountId);
    generateRefAccounts(idToRefAccountMap, idToRefContainerMap, accountIdSet, 2, 3);
    accountService = mockHelixAccountServiceFactory.getAccountService();
    accountService.updateAccounts(idToRefAccountMap.values());
    assertAccountsInAccountService(idToRefAccountMap.values(), 2, accountService);
    Set<Container> expectContainerSet = new HashSet<>();
    List<Account> accountsToUpdate = new ArrayList<>();
    int accountId = 0;
    for (Account account : accountService.getAllAccounts()) {
        AccountBuilder accountBuilder = new AccountBuilder((short) accountId, "A[" + accountId + "]", AccountStatus.ACTIVE);
        int containerId = 0;
        for (Container container : account.getAllContainers()) {
            ContainerBuilder containerBuilder = new ContainerBuilder((short) containerId, "C[" + containerId + "]", ContainerStatus.DELETE_IN_PROGRESS, container.getDescription() + "--extra", (short) accountId);
            accountBuilder.addOrUpdateContainer(containerBuilder.build());
            containerId++;
        }
        accountsToUpdate.add(accountBuilder.build());
        if (accountId == 1) {
            expectContainerSet.addAll(accountsToUpdate.get(accountId).getAllContainers());
        }
        accountId++;
    }
    updateAccountsAndAssertAccountExistence(accountsToUpdate, 4, true);
    Set<Container> inactiveContainerSet = AccountUtils.selectInactiveContainerCandidates(statsSnapshot, accountService.getContainersByStatus(ContainerStatus.DELETE_IN_PROGRESS));
    assertEquals("Mismatch in container Set after detect", expectContainerSet, inactiveContainerSet);
    ((HelixAccountService) accountService).markContainersInactive(inactiveContainerSet);
    Account testAccount0 = accountService.getAccountById((short) 0);
    for (Container container : testAccount0.getAllContainers()) {
        assertEquals("Based on the stats report, container has not been compacted yet", ContainerStatus.DELETE_IN_PROGRESS, container.getStatus());
    }
    Account testAccount1 = accountService.getAccountById((short) 1);
    for (Container container : testAccount1.getAllContainers()) {
        assertEquals("Based on the stats report, inactive container status needs to be set as INACTIVE", ContainerStatus.INACTIVE, container.getStatus());
    }
}
Also used : HelixAccountService(com.github.ambry.account.HelixAccountService) Account(com.github.ambry.account.Account) ArrayList(java.util.ArrayList) Container(com.github.ambry.account.Container) StatsSnapshot(com.github.ambry.server.StatsSnapshot) HashSet(java.util.HashSet) Test(org.junit.Test)
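
AccountUtils.selectInactiveContainerCandidates takes the stats report and the DELETE_IN_PROGRESS containers and decides which of them can be flipped to INACTIVE. A heavily simplified sketch of that idea follows; it replaces StatsSnapshot with a plain nested map of valid data sizes (an assumption made purely for illustration), keyed with the same "A[accountId]" / "C[containerId]" names the test uses above.

// Simplified illustration only: a DELETE_IN_PROGRESS container with no remaining valid data in the
// stats report is a candidate to be marked INACTIVE. The nested-map stats shape and key format are
// assumptions, not the StatsSnapshot API.
static Set<Container> selectInactiveCandidates(Map<String, Map<String, Long>> validDataSizeByAccountAndContainer, Set<Container> deleteInProgressContainers) {
    Set<Container> candidates = new HashSet<>();
    for (Container container : deleteInProgressContainers) {
        Map<String, Long> containerSizes = validDataSizeByAccountAndContainer.get("A[" + container.getParentAccountId() + "]");
        Long validSize = containerSizes == null ? null : containerSizes.get("C[" + container.getId() + "]");
        if (validSize == null || validSize == 0L) {
            candidates.add(container);
        }
    }
    return candidates;
}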

Aggregations

Container (com.github.ambry.account.Container) 119
Account (com.github.ambry.account.Account) 88
Test (org.junit.Test) 61
ArrayList (java.util.ArrayList) 30
RestServiceException (com.github.ambry.rest.RestServiceException) 20
ContainerBuilder (com.github.ambry.account.ContainerBuilder) 17
JSONObject (org.json.JSONObject) 17
VerifiableProperties (com.github.ambry.config.VerifiableProperties) 16
HashSet (java.util.HashSet) 15
HashMap (java.util.HashMap) 14
Properties (java.util.Properties) 14
AccountBuilder (com.github.ambry.account.AccountBuilder) 13
RestRequest (com.github.ambry.rest.RestRequest) 13
ByteBuffer (java.nio.ByteBuffer) 13
Map (java.util.Map) 13
MetricRegistry (com.codahale.metrics.MetricRegistry) 12
TestUtils (com.github.ambry.utils.TestUtils) 12
Collections (java.util.Collections) 12
List (java.util.List) 12
Assert (org.junit.Assert) 12