
Example 6 with PersistentID

Use of org.apache.geode.cache.persistence.PersistentID in project geode by apache.

The class DiskStoreCommands, method backupDiskStore.

@CliCommand(value = CliStrings.BACKUP_DISK_STORE, help = CliStrings.BACKUP_DISK_STORE__HELP)
@CliMetaData(relatedTopic = { CliStrings.TOPIC_GEODE_DISKSTORE })
@ResourceOperation(resource = Resource.DATA, operation = Operation.READ)
public Result backupDiskStore(@CliOption(key = CliStrings.BACKUP_DISK_STORE__DISKDIRS, unspecifiedDefaultValue = CliMetaData.ANNOTATION_NULL_VALUE, help = CliStrings.BACKUP_DISK_STORE__DISKDIRS__HELP, mandatory = true) String targetDir, @CliOption(key = CliStrings.BACKUP_DISK_STORE__BASELINEDIR, help = CliStrings.BACKUP_DISK_STORE__BASELINEDIR__HELP) String baselineDir) {
    Result result = null;
    try {
        InternalCache cache = getCache();
        DM dm = cache.getDistributionManager();
        BackupStatus backupStatus = null;
        if (baselineDir != null && !baselineDir.isEmpty()) {
            backupStatus = AdminDistributedSystemImpl.backupAllMembers(dm, new File(targetDir), new File(baselineDir));
        } else {
            backupStatus = AdminDistributedSystemImpl.backupAllMembers(dm, new File(targetDir), null);
        }
        Map<DistributedMember, Set<PersistentID>> backedupMemberDiskstoreMap = backupStatus.getBackedUpDiskStores();
        Set<DistributedMember> backedupMembers = backedupMemberDiskstoreMap.keySet();
        CompositeResultData crd = ResultBuilder.createCompositeResultData();
        if (!backedupMembers.isEmpty()) {
            SectionResultData backedupDiskStoresSection = crd.addSection();
            backedupDiskStoresSection.setHeader(CliStrings.BACKUP_DISK_STORE_MSG_BACKED_UP_DISK_STORES);
            TabularResultData backedupDiskStoresTable = backedupDiskStoresSection.addTable();
            for (DistributedMember member : backedupMembers) {
                Set<PersistentID> backedupDiskStores = backedupMemberDiskstoreMap.get(member);
                boolean printMember = true;
                String memberName = member.getName();
                if (memberName == null || memberName.isEmpty()) {
                    memberName = member.getId();
                }
                for (PersistentID persistentId : backedupDiskStores) {
                    if (persistentId != null) {
                        String UUID = persistentId.getUUID().toString();
                        String hostName = persistentId.getHost().getHostName();
                        String directory = persistentId.getDirectory();
                        if (printMember) {
                            writeToBackupDisktoreTable(backedupDiskStoresTable, memberName, UUID, hostName, directory);
                            printMember = false;
                        } else {
                            writeToBackupDisktoreTable(backedupDiskStoresTable, "", UUID, hostName, directory);
                        }
                    }
                }
            }
        } else {
            SectionResultData noMembersBackedUp = crd.addSection();
            noMembersBackedUp.setHeader(CliStrings.BACKUP_DISK_STORE_MSG_NO_DISKSTORES_BACKED_UP);
        }
        Set<PersistentID> offlineDiskStores = backupStatus.getOfflineDiskStores();
        if (!offlineDiskStores.isEmpty()) {
            SectionResultData offlineDiskStoresSection = crd.addSection();
            TabularResultData offlineDiskStoresTable = offlineDiskStoresSection.addTable();
            offlineDiskStoresSection.setHeader(CliStrings.BACKUP_DISK_STORE_MSG_OFFLINE_DISK_STORES);
            for (PersistentID offlineDiskStore : offlineDiskStores) {
                offlineDiskStoresTable.accumulate(CliStrings.BACKUP_DISK_STORE_MSG_UUID, offlineDiskStore.getUUID().toString());
                offlineDiskStoresTable.accumulate(CliStrings.BACKUP_DISK_STORE_MSG_HOST, offlineDiskStore.getHost().getHostName());
                offlineDiskStoresTable.accumulate(CliStrings.BACKUP_DISK_STORE_MSG_DIRECTORY, offlineDiskStore.getDirectory());
            }
        }
        result = ResultBuilder.buildResult(crd);
    } catch (Exception e) {
        result = ResultBuilder.createGemFireErrorResult(e.getMessage());
    }
    return result;
}
Also used: Set (java.util.Set), HashSet (java.util.HashSet), CompositeResultData (org.apache.geode.management.internal.cli.result.CompositeResultData), TabularResultData (org.apache.geode.management.internal.cli.result.TabularResultData), InternalCache (org.apache.geode.internal.cache.InternalCache), DM (org.apache.geode.distributed.internal.DM), CommandResultException (org.apache.geode.management.internal.cli.result.CommandResultException), CacheExistsException (org.apache.geode.cache.CacheExistsException), MemberNotFoundException (org.apache.geode.management.internal.cli.util.MemberNotFoundException), DiskStoreNotFoundException (org.apache.geode.management.internal.cli.util.DiskStoreNotFoundException), FunctionInvocationTargetException (org.apache.geode.cache.execute.FunctionInvocationTargetException), ResultDataException (org.apache.geode.management.internal.cli.result.ResultDataException), GemFireIOException (org.apache.geode.GemFireIOException), IOException (java.io.IOException), Result (org.apache.geode.management.cli.Result), CliFunctionResult (org.apache.geode.management.internal.cli.functions.CliFunctionResult), InternalDistributedMember (org.apache.geode.distributed.internal.membership.InternalDistributedMember), DistributedMember (org.apache.geode.distributed.DistributedMember), SectionResultData (org.apache.geode.management.internal.cli.result.CompositeResultData.SectionResultData), File (java.io.File), BackupStatus (org.apache.geode.admin.BackupStatus), PersistentID (org.apache.geode.cache.persistence.PersistentID), CliCommand (org.springframework.shell.core.annotation.CliCommand), CliMetaData (org.apache.geode.management.cli.CliMetaData), ResourceOperation (org.apache.geode.management.internal.security.ResourceOperation)
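
The command above only formats what BackupStatus already exposes. Below is a minimal sketch of the same PersistentID accessors used outside of a gfsh command; the BackupStatusSummary class and summarize method are hypothetical names, not part of Geode.

// A minimal sketch, not part of the Geode sources above: summarizes a BackupStatus using
// only the PersistentID accessors exercised by backupDiskStore.
import java.util.Map;
import java.util.Set;

import org.apache.geode.admin.BackupStatus;
import org.apache.geode.cache.persistence.PersistentID;
import org.apache.geode.distributed.DistributedMember;

public class BackupStatusSummary {

    // Builds a one-line-per-disk-store summary of a completed backup.
    public static String summarize(BackupStatus status) {
        StringBuilder sb = new StringBuilder();
        Map<DistributedMember, Set<PersistentID>> backedUp = status.getBackedUpDiskStores();
        for (Map.Entry<DistributedMember, Set<PersistentID>> entry : backedUp.entrySet()) {
            DistributedMember member = entry.getKey();
            String name = member.getName();
            // Fall back to the member id when no name was configured, as the command does.
            String label = (name == null || name.isEmpty()) ? member.getId() : name;
            for (PersistentID id : entry.getValue()) {
                sb.append(label).append(' ')
                        .append(id.getUUID()).append(' ')
                        .append(id.getHost().getHostName()).append(' ')
                        .append(id.getDirectory())
                        .append(System.lineSeparator());
            }
        }
        // Disk stores known to the cluster but offline during the backup.
        for (PersistentID offline : status.getOfflineDiskStores()) {
            sb.append("OFFLINE ")
                    .append(offline.getUUID()).append(' ')
                    .append(offline.getHost().getHostName()).append(' ')
                    .append(offline.getDirectory())
                    .append(System.lineSeparator());
        }
        return sb.toString();
    }
}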

Example 7 with PersistentID

Use of org.apache.geode.cache.persistence.PersistentID in project geode by apache.

The class DiskStoreCommands, method compactDiskStore.

@CliCommand(value = CliStrings.COMPACT_DISK_STORE, help = CliStrings.COMPACT_DISK_STORE__HELP)
@CliMetaData(shellOnly = false, relatedTopic = { CliStrings.TOPIC_GEODE_DISKSTORE })
@ResourceOperation(resource = Resource.DATA, operation = Operation.MANAGE)
public Result compactDiskStore(@CliOption(key = CliStrings.COMPACT_DISK_STORE__NAME, mandatory = true, optionContext = ConverterHint.DISKSTORE, help = CliStrings.COMPACT_DISK_STORE__NAME__HELP) String diskStoreName, @CliOption(key = CliStrings.COMPACT_DISK_STORE__GROUP, unspecifiedDefaultValue = CliMetaData.ANNOTATION_NULL_VALUE, help = CliStrings.COMPACT_DISK_STORE__GROUP__HELP) String[] groups) {
    Result result = null;
    try {
        // disk store exists validation
        if (!diskStoreExists(diskStoreName)) {
            result = ResultBuilder.createUserErrorResult(CliStrings.format(CliStrings.COMPACT_DISK_STORE__DISKSTORE_0_DOESNOT_EXIST, new Object[] { diskStoreName }));
        } else {
            InternalDistributedSystem ds = getCache().getInternalDistributedSystem();
            Map<DistributedMember, PersistentID> overallCompactInfo = new HashMap<DistributedMember, PersistentID>();
            Set<?> otherMembers = ds.getDistributionManager().getOtherNormalDistributionManagerIds();
            Set<InternalDistributedMember> allMembers = new HashSet<InternalDistributedMember>();
            for (Object member : otherMembers) {
                allMembers.add((InternalDistributedMember) member);
            }
            allMembers.add(ds.getDistributedMember());
            otherMembers = null;
            String groupInfo = "";
            // if groups are specified, find members in the specified group
            if (groups != null && groups.length > 0) {
                groupInfo = CliStrings.format(CliStrings.COMPACT_DISK_STORE__MSG__FOR_GROUP, new Object[] { Arrays.toString(groups) + "." });
                final Set<InternalDistributedMember> selectedMembers = new HashSet<InternalDistributedMember>();
                List<String> targetedGroups = Arrays.asList(groups);
                for (Iterator<InternalDistributedMember> iterator = allMembers.iterator(); iterator.hasNext(); ) {
                    InternalDistributedMember member = iterator.next();
                    List<String> memberGroups = member.getGroups();
                    if (!Collections.disjoint(targetedGroups, memberGroups)) {
                        selectedMembers.add(member);
                    }
                }
                allMembers = selectedMembers;
            }
            // make sure there is at least one member to compact
            if (allMembers.isEmpty()) {
                result = ResultBuilder.createUserErrorResult(CliStrings.format(CliStrings.COMPACT_DISK_STORE__NO_MEMBERS_FOUND_IN_SPECIFED_GROUP, new Object[] { Arrays.toString(groups) }));
            } else {
                // first invoke on local member if it exists in the targeted set
                if (allMembers.remove(ds.getDistributedMember())) {
                    PersistentID compactedDiskStoreId = CompactRequest.compactDiskStore(diskStoreName);
                    if (compactedDiskStoreId != null) {
                        overallCompactInfo.put(ds.getDistributedMember(), compactedDiskStoreId);
                    }
                }
                // Send a CompactRequest to any remaining (remote) members
                if (!allMembers.isEmpty()) {
                    // Invoke compact on all 'other' members
                    Map<DistributedMember, PersistentID> memberCompactInfo = CompactRequest.send(ds.getDistributionManager(), diskStoreName, allMembers);
                    if (memberCompactInfo != null && !memberCompactInfo.isEmpty()) {
                        overallCompactInfo.putAll(memberCompactInfo);
                        memberCompactInfo.clear();
                    }
                    String notExecutedMembers = CompactRequest.getNotExecutedMembers();
                    LogWrapper.getInstance().info("compact disk-store \"" + diskStoreName + "\" message was scheduled to be sent to, but was not sent to: " + notExecutedMembers);
                }
                // If compaction happened at all, then prepare the summary
                if (overallCompactInfo != null && !overallCompactInfo.isEmpty()) {
                    CompositeResultData compositeResultData = ResultBuilder.createCompositeResultData();
                    SectionResultData section = null;
                    Set<Entry<DistributedMember, PersistentID>> entries = overallCompactInfo.entrySet();
                    for (Entry<DistributedMember, PersistentID> entry : entries) {
                        String memberId = entry.getKey().getId();
                        section = compositeResultData.addSection(memberId);
                        section.addData("On Member", memberId);
                        PersistentID persistentID = entry.getValue();
                        if (persistentID != null) {
                            SectionResultData subSection = section.addSection("DiskStore" + memberId);
                            subSection.addData("UUID", persistentID.getUUID());
                            subSection.addData("Host", persistentID.getHost().getHostName());
                            subSection.addData("Directory", persistentID.getDirectory());
                        }
                    }
                    compositeResultData.setHeader("Compacted " + diskStoreName + groupInfo);
                    result = ResultBuilder.buildResult(compositeResultData);
                } else {
                    result = ResultBuilder.createInfoResult(CliStrings.COMPACT_DISK_STORE__COMPACTION_ATTEMPTED_BUT_NOTHING_TO_COMPACT);
                }
            } // end of 'at least one member' check
        } // end of 'disk store exists' check
    } catch (RuntimeException e) {
        LogWrapper.getInstance().info(e.getMessage(), e);
        result = ResultBuilder.createGemFireErrorResult(CliStrings.format(CliStrings.COMPACT_DISK_STORE__ERROR_WHILE_COMPACTING_REASON_0, new Object[] { e.getMessage() }));
    }
    return result;
}
Also used: CompositeResultData (org.apache.geode.management.internal.cli.result.CompositeResultData), HashMap (java.util.HashMap), Result (org.apache.geode.management.cli.Result), CliFunctionResult (org.apache.geode.management.internal.cli.functions.CliFunctionResult), Entry (java.util.Map.Entry), InternalDistributedMember (org.apache.geode.distributed.internal.membership.InternalDistributedMember), DistributedMember (org.apache.geode.distributed.DistributedMember), SectionResultData (org.apache.geode.management.internal.cli.result.CompositeResultData.SectionResultData), InternalDistributedSystem (org.apache.geode.distributed.internal.InternalDistributedSystem), PersistentID (org.apache.geode.cache.persistence.PersistentID), HashSet (java.util.HashSet), CliCommand (org.springframework.shell.core.annotation.CliCommand), CliMetaData (org.apache.geode.management.cli.CliMetaData), ResourceOperation (org.apache.geode.management.internal.security.ResourceOperation)
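
The group filter inside compactDiskStore boils down to a Collections.disjoint() test against each member's group list. Below is a minimal standalone sketch of that step, assuming the same internal membership type used above; the GroupFilter class name is hypothetical.

// A minimal sketch, not part of the Geode sources above: selects the members whose group list
// intersects the requested groups, mirroring the Collections.disjoint() filter in compactDiskStore.
import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

import org.apache.geode.distributed.internal.membership.InternalDistributedMember;

public final class GroupFilter {

    // Returns the subset of 'allMembers' belonging to at least one of 'groups';
    // a null or empty group array selects every member.
    public static Set<InternalDistributedMember> membersInGroups(
            Set<InternalDistributedMember> allMembers, String[] groups) {
        if (groups == null || groups.length == 0) {
            return new HashSet<>(allMembers);
        }
        List<String> targetedGroups = Arrays.asList(groups);
        Set<InternalDistributedMember> selected = new HashSet<>();
        for (InternalDistributedMember member : allMembers) {
            if (!Collections.disjoint(targetedGroups, member.getGroups())) {
                selected.add(member);
            }
        }
        return selected;
    }

    private GroupFilter() {
    }
}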

Example 8 with PersistentID

Use of org.apache.geode.cache.persistence.PersistentID in project geode by apache.

The class IncrementalBackupDUnitTest, method testMissingMemberInBaseline.

/**
   * Successful if a member performs a full backup when its backup data is not present in the
   * baseline (for whatever reason). This also tests what happens when a member is offline during
   * the baseline backup.
   * 
   * The test is regarded as successful when all of the missing member's oplog files are backed up
   * during an incremental backup. This means that the member performed a full backup because its
   * oplogs were missing from the baseline.
   */
@Test
public void testMissingMemberInBaseline() throws Exception {
    // Simulate the missing member by forcing a persistent member
    // to go offline.
    final PersistentID missingMember = disconnect(Host.getHost(0).getVM(0), Host.getHost(0).getVM(1));
    /*
     * Perform baseline and make sure that the list of offline disk stores contains our missing
     * member.
     */
    BackupStatus baselineStatus = performBaseline();
    assertBackupStatus(baselineStatus);
    assertNotNull(baselineStatus.getOfflineDiskStores());
    assertEquals(2, baselineStatus.getOfflineDiskStores().size());
    // Find all of the missing member's oplogs in its disk store directory structure
    // (*.crf, *.krf, *.drf)
    Collection<File> missingMemberOplogFiles = FileUtils.listFiles(new File(missingMember.getDirectory()), new RegexFileFilter(OPLOG_REGEX), DirectoryFileFilter.DIRECTORY);
    assertFalse(missingMemberOplogFiles.isEmpty());
    /*
     * Restart our missing member and make sure it is back online and part of the distributed system
     */
    openCache(Host.getHost(0).getVM(0));
    /*
     * After reconnecting make sure the other members agree that the missing member is back online.
     */
    final Set<PersistentID> missingMembers = new HashSet<>();
    Wait.waitForCriterion(new WaitCriterion() {

        @Override
        public boolean done() {
            missingMembers.clear();
            missingMembers.addAll(getMissingMembers(Host.getHost(0).getVM(1)));
            return !missingMembers.contains(missingMember);
        }

        @Override
        public String description() {
            return "[testMissingMemberInBasline] Wait for missing member.";
        }
    }, 10000, 500, false);
    assertEquals(0, missingMembers.size());
    /*
     * Perform an incremental backup and make sure we have no offline disk stores.
     */
    BackupStatus incrementalStatus = performIncremental();
    assertBackupStatus(incrementalStatus);
    assertNotNull(incrementalStatus.getOfflineDiskStores());
    assertEquals(0, incrementalStatus.getOfflineDiskStores().size());
    // Get the missing member's member id which is different from the PersistentID
    String memberId = getMemberId(Host.getHost(0).getVM(0));
    assertNotNull(memberId);
    // Get list of backed up oplog files in the incremental backup for the missing member
    File incrementalMemberDir = getBackupDirForMember(getIncrementalDir(), memberId);
    Collection<File> backupOplogFiles = FileUtils.listFiles(incrementalMemberDir, new RegexFileFilter(OPLOG_REGEX), DirectoryFileFilter.DIRECTORY);
    assertFalse(backupOplogFiles.isEmpty());
    // Transform missing member oplogs to just their file names.
    List<String> missingMemberOplogNames = new LinkedList<>();
    TransformUtils.transform(missingMemberOplogFiles, missingMemberOplogNames, TransformUtils.fileNameTransformer);
    // Transform missing member's incremental backup oplogs to just their file names.
    List<String> backupOplogNames = new LinkedList<>();
    TransformUtils.transform(backupOplogFiles, backupOplogNames, TransformUtils.fileNameTransformer);
    /*
     * Make sure that the incremental backup for the missing member contains all of the operation
     * logs for that member. This proves that a full backup was performed for that member.
     */
    assertTrue(backupOplogNames.containsAll(missingMemberOplogNames));
}
Also used: WaitCriterion (org.apache.geode.test.dunit.WaitCriterion), RegexFileFilter (org.apache.commons.io.filefilter.RegexFileFilter), File (java.io.File), LinkedList (java.util.LinkedList), PersistentID (org.apache.geode.cache.persistence.PersistentID), BackupStatus (org.apache.geode.admin.BackupStatus), HashSet (java.util.HashSet), DistributedTest (org.apache.geode.test.junit.categories.DistributedTest), Test (org.junit.Test)
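
The assertions in this test repeatedly turn a disk-store directory into a list of oplog file names via commons-io. Below is a minimal sketch of that step; the OplogNames class is hypothetical, and the regex value is an assumption based on the "(*.crf, *.krf, *.drf)" comments above, since the test's own OPLOG_REGEX constant is not shown here.

// A minimal sketch, not part of the Geode sources above: collects the file names of every oplog
// under a disk-store directory, the way the DUnit tests do before comparing backups.
import java.io.File;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;

import org.apache.commons.io.FileUtils;
import org.apache.commons.io.filefilter.DirectoryFileFilter;
import org.apache.commons.io.filefilter.RegexFileFilter;

public final class OplogNames {

    // Assumed pattern for Geode operation-log files: *.crf, *.krf, *.drf.
    private static final String OPLOG_REGEX = ".*\\.(crf|krf|drf)$";

    // Recursively lists oplog files under 'diskStoreDir' and returns just their names.
    public static List<String> collect(File diskStoreDir) {
        Collection<File> oplogs = FileUtils.listFiles(
                diskStoreDir, new RegexFileFilter(OPLOG_REGEX), DirectoryFileFilter.DIRECTORY);
        List<String> names = new ArrayList<>(oplogs.size());
        for (File oplog : oplogs) {
            names.add(oplog.getName());
        }
        return names;
    }

    private OplogNames() {
    }
}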

Example 9 with PersistentID

Use of org.apache.geode.cache.persistence.PersistentID in project geode by apache.

The class IncrementalBackupDUnitTest, method testIncrementalBackup.

/**
   * This tests the basic features of incremental backup. This means that operation logs that are
   * present in both the baseline and member's disk store should not be copied during the
   * incremental backup. Additionally, the restore script should reference and copy operation logs
   * from the baseline backup.
   */
@Test
public void testIncrementalBackup() throws Exception {
    String memberId = getMemberId(Host.getHost(0).getVM(1));
    File memberDir = getVMDir(Host.getHost(0).getVM(1));
    assertNotNull(memberDir);
    // Find all of the member's oplogs in the disk directory (*.crf,*.krf,*.drf)
    Collection<File> memberOplogFiles = FileUtils.listFiles(memberDir, new RegexFileFilter(OPLOG_REGEX), DirectoryFileFilter.DIRECTORY);
    assertFalse(memberOplogFiles.isEmpty());
    // Perform a full backup and wait for it to finish
    assertBackupStatus(performBaseline());
    waitForBackup(Host.getHost(0).getVM(1));
    // Find all of the member's oplogs in the baseline (*.crf,*.krf,*.drf)
    Collection<File> memberBaselineOplogs = FileUtils.listFiles(getBackupDirForMember(getBaselineDir(), memberId), new RegexFileFilter(OPLOG_REGEX), DirectoryFileFilter.DIRECTORY);
    assertFalse(memberBaselineOplogs.isEmpty());
    List<String> memberBaselineOplogNames = new LinkedList<>();
    TransformUtils.transform(memberBaselineOplogs, memberBaselineOplogNames, TransformUtils.fileNameTransformer);
    // Perform an incremental backup and wait for it to finish
    // Doing this preserves the new oplogs created by the baseline backup
    loadMoreData();
    assertBackupStatus(performIncremental());
    waitForBackup(Host.getHost(0).getVM(1));
    // Find all of the member's oplogs in the incremental (*.crf,*.krf,*.drf)
    Collection<File> memberIncrementalOplogs = FileUtils.listFiles(getBackupDirForMember(getIncrementalDir(), memberId), new RegexFileFilter(OPLOG_REGEX), DirectoryFileFilter.DIRECTORY);
    assertFalse(memberIncrementalOplogs.isEmpty());
    List<String> memberIncrementalOplogNames = new LinkedList<>();
    TransformUtils.transform(memberIncrementalOplogs, memberIncrementalOplogNames, TransformUtils.fileNameTransformer);
    log("BASELINE OPLOGS = " + memberBaselineOplogNames);
    log("INCREMENTAL OPLOGS = " + memberIncrementalOplogNames);
    /*
     * Assert that the incremental backup does not contain baseline operation logs that the member
     * still has copies of.
     */
    for (String oplog : memberBaselineOplogNames) {
        assertFalse(memberIncrementalOplogNames.contains(oplog));
    }
    // Perform a second incremental and wait for it to finish.
    // Doing this preserves the new oplogs created by the incremental backup
    loadMoreData();
    assertBackupStatus(performIncremental2());
    waitForBackup(Host.getHost(0).getVM(1));
    Collection<File> memberIncremental2Oplogs = FileUtils.listFiles(getBackupDirForMember(getIncremental2Dir(), memberId), new RegexFileFilter(OPLOG_REGEX), DirectoryFileFilter.DIRECTORY);
    assertFalse(memberIncremental2Oplogs.isEmpty());
    List<String> memberIncremental2OplogNames = new LinkedList<>();
    TransformUtils.transform(memberIncremental2Oplogs, memberIncremental2OplogNames, TransformUtils.fileNameTransformer);
    log("INCREMENTAL 2 OPLOGS = " + memberIncremental2OplogNames);
    /*
     * Assert that the second incremental backup does not contain operation logs copied into the
     * baseline.
     */
    for (String oplog : memberBaselineOplogNames) {
        assertFalse(memberIncremental2OplogNames.contains(oplog));
    }
    /*
     * Also assert that the second incremental backup does not contain operation logs copied into
     * the member's first incremental backup.
     */
    for (String oplog : memberIncrementalOplogNames) {
        assertFalse(memberIncremental2OplogNames.contains(oplog));
    }
    // Shut down our member so we can perform a restore
    PersistentID id = getPersistentID(Host.getHost(0).getVM(1));
    closeCache(Host.getHost(0).getVM(1));
    // Execute the restore
    performRestore(new File(id.getDirectory()), getBackupDirForMember(getIncremental2Dir(), memberId));
    /*
     * Collect all of the restored operation logs.
     */
    Collection<File> restoredOplogs = FileUtils.listFiles(new File(id.getDirectory()), new RegexFileFilter(OPLOG_REGEX), DirectoryFileFilter.DIRECTORY);
    assertFalse(restoredOplogs.isEmpty());
    List<String> restoredOplogNames = new LinkedList<>();
    TransformUtils.transform(restoredOplogs, restoredOplogNames, TransformUtils.fileNameTransformer);
    /*
     * Assert that baseline operation logs have been copied over to the member's disk directory.
     */
    for (String oplog : memberBaselineOplogNames) {
        assertTrue(restoredOplogNames.contains(oplog));
    }
    /*
     * Assert that the incremental operation logs have been copied over to the member's disk
     * directory.
     */
    for (String oplog : memberIncrementalOplogNames) {
        assertTrue(restoredOplogNames.contains(oplog));
    }
    /*
     * Assert that the second incremental operation logs have been copied over to the member's disk
     * directory.
     */
    for (String oplog : memberIncremental2OplogNames) {
        assertTrue(restoredOplogNames.contains(oplog));
    }
    /*
     * Reconnect the member.
     */
    openCache(Host.getHost(0).getVM(1));
}
Also used: RegexFileFilter (org.apache.commons.io.filefilter.RegexFileFilter), File (java.io.File), LinkedList (java.util.LinkedList), PersistentID (org.apache.geode.cache.persistence.PersistentID), DistributedTest (org.apache.geode.test.junit.categories.DistributedTest), Test (org.junit.Test)
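
The loops of assertFalse/assertTrue calls above encode two invariants: an incremental backup must not re-copy oplogs already present in the baseline (or an earlier incremental), and a restore must bring every backed-up oplog back into the member's disk directory. Below is a minimal sketch expressing the same checks as set operations; the class and method names are hypothetical.

// A minimal sketch, not part of the Geode sources above: the incremental-backup invariants
// asserted in testIncrementalBackup, written as simple collection checks.
import java.util.Collection;
import java.util.Collections;

public final class IncrementalBackupChecks {

    // True if none of the baseline oplog names were copied again by the incremental backup.
    public static boolean incrementalSkipsBaseline(Collection<String> baselineOplogNames,
                                                   Collection<String> incrementalOplogNames) {
        return Collections.disjoint(baselineOplogNames, incrementalOplogNames);
    }

    // True if the restored disk directory contains every oplog from all prior backups.
    public static boolean restoreIsComplete(Collection<String> restoredOplogNames,
                                            Collection<String> baselineOplogNames,
                                            Collection<String> incrementalOplogNames) {
        return restoredOplogNames.containsAll(baselineOplogNames)
                && restoredOplogNames.containsAll(incrementalOplogNames);
    }

    private IncrementalBackupChecks() {
    }
}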

Example 10 with PersistentID

Use of org.apache.geode.cache.persistence.PersistentID in project geode by apache.

The class PersistentRecoveryOrderDUnitTest, method testCompactFromAdmin.

/**
   * Tests that invoking compactAllDiskStores through the admin API compacts the disk store on
   * every member, so that a subsequent local forceCompaction finds nothing left to compact.
   */
@Test
public void testCompactFromAdmin() throws Exception {
    Host host = Host.getHost(0);
    final VM vm0 = host.getVM(0);
    VM vm1 = host.getVM(1);
    VM vm2 = host.getVM(2);
    createPersistentRegionWithoutCompaction(vm0);
    createPersistentRegionWithoutCompaction(vm1);
    vm1.invoke(new SerializableRunnable("Create some data") {

        public void run() {
            GemFireCacheImpl cache = (GemFireCacheImpl) getCache();
            Region region = cache.getRegion(REGION_NAME);
            for (int i = 0; i < 1024; i++) {
                region.put(i, new byte[1024]);
            }
            for (int i = 2; i < 1024; i++) {
                assertTrue(region.destroy(i) != null);
            }
            DiskStore store = cache.findDiskStore(REGION_NAME);
            store.forceRoll();
        }
    });
    // vm1.invoke(new SerializableRunnable("compact") {
    // public void run() {
    // Cache cache = getCache();
    // DiskStore ds = cache.findDiskStore(REGION_NAME);
    // assertTrue(ds.forceCompaction());
    // }
    // });
    //
    // vm0.invoke(new SerializableRunnable("compact") {
    // public void run() {
    // Cache cache = getCache();
    // DiskStore ds = cache.findDiskStore(REGION_NAME);
    // assertTrue(ds.forceCompaction());
    // }
    // });
    vm2.invoke(new SerializableRunnable("Compact") {

        public void run() {
            GemFireCacheImpl cache = (GemFireCacheImpl) getCache();
            DistributedSystemConfig config;
            AdminDistributedSystem adminDS = null;
            try {
                config = AdminDistributedSystemFactory.defineDistributedSystem(getSystem(), "");
                adminDS = AdminDistributedSystemFactory.getDistributedSystem(config);
                adminDS.connect();
                Map<DistributedMember, Set<PersistentID>> missingIds = adminDS.compactAllDiskStores();
                assertEquals(2, missingIds.size());
                for (Set<PersistentID> value : missingIds.values()) {
                    assertEquals(1, value.size());
                }
            } catch (AdminException e) {
                throw new RuntimeException(e);
            } finally {
                if (adminDS != null) {
                    adminDS.disconnect();
                }
            }
        }
    });
    SerializableRunnable compactVM = new SerializableRunnable("compact") {

        public void run() {
            Cache cache = getCache();
            DiskStore ds = cache.findDiskStore(REGION_NAME);
            assertFalse(ds.forceCompaction());
        }
    };
    vm0.invoke(compactVM);
    vm1.invoke(compactVM);
}
Also used: AdminException (org.apache.geode.admin.AdminException), Set (java.util.Set), SerializableRunnable (org.apache.geode.test.dunit.SerializableRunnable), Host (org.apache.geode.test.dunit.Host), AdminDistributedSystem (org.apache.geode.admin.AdminDistributedSystem), DiskStore (org.apache.geode.cache.DiskStore), DistributedSystemConfig (org.apache.geode.admin.DistributedSystemConfig), VM (org.apache.geode.test.dunit.VM), GemFireCacheImpl (org.apache.geode.internal.cache.GemFireCacheImpl), LocalRegion (org.apache.geode.internal.cache.LocalRegion), DistributedRegion (org.apache.geode.internal.cache.DistributedRegion), DiskRegion (org.apache.geode.internal.cache.DiskRegion), Region (org.apache.geode.cache.Region), Map (java.util.Map), HashMap (java.util.HashMap), PersistentID (org.apache.geode.cache.persistence.PersistentID), Cache (org.apache.geode.cache.Cache), DistributedTest (org.apache.geode.test.junit.categories.DistributedTest), FlakyTest (org.apache.geode.test.junit.categories.FlakyTest), Test (org.junit.Test)
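
The vm2 runnable above is the core of the test: connect an AdminDistributedSystem and call compactAllDiskStores. Below is a minimal standalone sketch of that flow, assuming the caller supplies an already-connected DistributedSystem; the AdminCompaction class name is hypothetical.

// A minimal sketch, not part of the Geode sources above: mirrors the admin API calls made in the
// test's "Compact" runnable and reports which members compacted a disk store.
import java.util.Map;
import java.util.Set;

import org.apache.geode.admin.AdminDistributedSystem;
import org.apache.geode.admin.AdminDistributedSystemFactory;
import org.apache.geode.admin.AdminException;
import org.apache.geode.admin.DistributedSystemConfig;
import org.apache.geode.cache.persistence.PersistentID;
import org.apache.geode.distributed.DistributedMember;
import org.apache.geode.distributed.DistributedSystem;

public final class AdminCompaction {

    // Connects an AdminDistributedSystem to 'system', compacts all disk stores,
    // and always disconnects, as the test's runnable does.
    public static Map<DistributedMember, Set<PersistentID>> compactAll(DistributedSystem system) {
        AdminDistributedSystem adminDS = null;
        try {
            DistributedSystemConfig config =
                    AdminDistributedSystemFactory.defineDistributedSystem(system, "");
            adminDS = AdminDistributedSystemFactory.getDistributedSystem(config);
            adminDS.connect();
            return adminDS.compactAllDiskStores();
        } catch (AdminException e) {
            throw new RuntimeException(e);
        } finally {
            if (adminDS != null) {
                adminDS.disconnect();
            }
        }
    }

    private AdminCompaction() {
    }
}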

Aggregations

PersistentID (org.apache.geode.cache.persistence.PersistentID): 29
HashSet (java.util.HashSet): 11
File (java.io.File): 8
Set (java.util.Set): 8
IOException (java.io.IOException): 7
DiskStore (org.apache.geode.cache.DiskStore): 7
InternalCache (org.apache.geode.internal.cache.InternalCache): 6
DistributedTest (org.apache.geode.test.junit.categories.DistributedTest): 6
Test (org.junit.Test): 6
HashMap (java.util.HashMap): 5
DistributedMember (org.apache.geode.distributed.DistributedMember): 5
Map (java.util.Map): 4
AdminException (org.apache.geode.admin.AdminException): 4
InternalDistributedMember (org.apache.geode.distributed.internal.membership.InternalDistributedMember): 4
LinkedList (java.util.LinkedList): 3
TreeSet (java.util.TreeSet): 3
RegexFileFilter (org.apache.commons.io.filefilter.RegexFileFilter): 3
AdminDistributedSystem (org.apache.geode.admin.AdminDistributedSystem): 3
BackupStatus (org.apache.geode.admin.BackupStatus): 3
DistributedSystemConfig (org.apache.geode.admin.DistributedSystemConfig): 3