Search in sources :

Example 31 with StoredTabletFile

Use of org.apache.accumulo.core.metadata.StoredTabletFile in the Apache Accumulo project.

The following example shows the calculateTotalSize method of the BasicCompactionStrategy class.

/**
 * Calculates the total size, in bytes, of the input files selected by the given compaction plan.
 *
 * @param request the compaction request, which supplies the size metadata for every candidate file
 * @param plan the compaction plan whose {@code inputFiles} are summed
 * @return the total size in bytes of the plan's input files
 * @throws IllegalStateException if the plan references a file the request does not know about
 */
private Long calculateTotalSize(MajorCompactionRequest request, CompactionPlan plan) {
    long totalSize = 0;
    Map<StoredTabletFile, DataFileValue> allFiles = request.getFiles();
    for (StoredTabletFile fileRef : plan.inputFiles) {
        DataFileValue dfv = allFiles.get(fileRef);
        // Fail with a descriptive message instead of an uninformative NullPointerException
        // when a plan input file is missing from the request's file map.
        if (dfv == null) {
            throw new IllegalStateException("Compaction plan input file " + fileRef + " not present in request files");
        }
        totalSize += dfv.getSize();
    }
    return totalSize;
}
Also used : DataFileValue(org.apache.accumulo.core.metadata.schema.DataFileValue) StoredTabletFile(org.apache.accumulo.core.metadata.StoredTabletFile)

Example 32 with StoredTabletFile

Use of org.apache.accumulo.core.metadata.StoredTabletFile in the Apache Accumulo project.

The following example shows the testReplaceVolume method of the VolumeIT class.

/**
 * Verifies that volume replacement works: data written to volumes v1/v2 remains readable after
 * those volumes are renamed on disk (to v8/v9) and declared via INSTANCE_VOLUMES_REPLACEMENTS.
 *
 * @param client client connected to the running cluster
 * @param cleanShutdown if true, issue an orderly "stopAll" before stopping the cluster so all
 *        data is flushed; if false, stop abruptly so recovery must handle unflushed data
 */
private void testReplaceVolume(AccumuloClient client, boolean cleanShutdown) throws Exception {
    String[] tableNames = getUniqueNames(3);
    verifyVolumesUsed(client, tableNames[0], false, v1, v2);
    // write to 2nd table, but do not flush data to disk before shutdown
    try (AccumuloClient c2 = cluster.createAccumuloClient("root", new PasswordToken(ROOT_PASSWORD))) {
        writeData(tableNames[1], c2);
    }
    if (cleanShutdown)
        assertEquals(0, cluster.exec(Admin.class, "stopAll").getProcess().waitFor());
    cluster.stop();
    // Rename both volume directories on the local filesystem while the cluster is down,
    // simulating an operator physically relocating storage.
    File v1f = new File(v1.toUri());
    File v8f = new File(new File(v1.getParent().toUri()), "v8");
    assertTrue("Failed to rename " + v1f + " to " + v8f, v1f.renameTo(v8f));
    Path v8 = new Path(v8f.toURI());
    File v2f = new File(v2.toUri());
    File v9f = new File(new File(v2.getParent().toUri()), "v9");
    assertTrue("Failed to rename " + v2f + " to " + v9f, v2f.renameTo(v9f));
    Path v9 = new Path(v9f.toURI());
    // Point the instance at the new volumes and declare the old->new replacement mapping
    // ("old new" pairs, comma separated) so metadata references can be rewritten.
    updateConfig(config -> {
        config.setProperty(Property.INSTANCE_VOLUMES.getKey(), v8 + "," + v9);
        config.setProperty(Property.INSTANCE_VOLUMES_REPLACEMENTS.getKey(), v1 + " " + v8 + "," + v2 + " " + v9);
    });
    // start cluster and verify that volumes were replaced
    cluster.start();
    verifyVolumesUsed(client, tableNames[0], true, v8, v9);
    verifyVolumesUsed(client, tableNames[1], true, v8, v9);
    // verify writes to new dir
    client.tableOperations().compact(tableNames[0], null, null, true, true);
    client.tableOperations().compact(tableNames[1], null, null, true, true);
    verifyVolumesUsed(client, tableNames[0], true, v8, v9);
    verifyVolumesUsed(client, tableNames[1], true, v8, v9);
    client.tableOperations().compact(RootTable.NAME, new CompactionConfig().setWait(true));
    // check that root tablet is not on volume 1 or 2
    int count = 0;
    for (StoredTabletFile file : ((ClientContext) client).getAmple().readTablet(RootTable.EXTENT).getFiles()) {
        assertTrue(file.getMetaUpdateDelete().startsWith(v8.toString()) || file.getMetaUpdateDelete().startsWith(v9.toString()));
        count++;
    }
    // the root tablet must have at least one file, otherwise the loop above proved nothing
    assertTrue(count > 0);
    // clone + flush exercises metadata/root table writes against the replaced volumes
    client.tableOperations().clone(tableNames[1], tableNames[2], true, new HashMap<>(), new HashSet<>());
    client.tableOperations().flush(MetadataTable.NAME, null, null, true);
    client.tableOperations().flush(RootTable.NAME, null, null, true);
    verifyVolumesUsed(client, tableNames[0], true, v8, v9);
    verifyVolumesUsed(client, tableNames[1], true, v8, v9);
    verifyVolumesUsed(client, tableNames[2], true, v8, v9);
}
Also used : AccumuloClient(org.apache.accumulo.core.client.AccumuloClient) Path(org.apache.hadoop.fs.Path) PasswordToken(org.apache.accumulo.core.client.security.tokens.PasswordToken) ClientContext(org.apache.accumulo.core.clientImpl.ClientContext) CompactionConfig(org.apache.accumulo.core.client.admin.CompactionConfig) StoredTabletFile(org.apache.accumulo.core.metadata.StoredTabletFile) StoredTabletFile(org.apache.accumulo.core.metadata.StoredTabletFile) File(java.io.File)

Example 33 with StoredTabletFile

Use of org.apache.accumulo.core.metadata.StoredTabletFile in the Apache Accumulo project.

The following example shows the splitPartiallyAndRecover method of the SplitRecoveryIT class.

/**
 * Performs the first {@code steps} stages of a tablet split, then invokes split recovery
 * ({@code ManagerMetadataUtil.fixSplit}) and asserts the resulting metadata is consistent.
 *
 * Steps: 0 = only the parent tablet's split columns written; 1 = low-side child tablet added;
 * 2 = split finished on the high side. With fewer than 1 step, recovery should roll the split
 * back to the original extent; otherwise it should complete the split.
 *
 * @param context server context used for metadata reads/writes
 * @param extent the original (pre-split) tablet extent
 * @param high the high-side child extent
 * @param low the low-side child extent
 * @param splitRatio ratio recorded for the in-progress split
 * @param mapFiles data files of the original tablet, with their size metadata
 * @param midRow row at which the tablet is split
 * @param location tserver location string used to fabricate an assignment
 * @param steps how many stages of the split to perform before recovery (0, 1, or 2)
 * @param zl service lock required for metadata mutations
 */
private void splitPartiallyAndRecover(ServerContext context, KeyExtent extent, KeyExtent high, KeyExtent low, double splitRatio, SortedMap<StoredTabletFile, DataFileValue> mapFiles, Text midRow, String location, int steps, ServiceLock zl) throws Exception {
    // Partition the parent's files between the two children by the split point.
    SortedMap<StoredTabletFile, DataFileValue> lowDatafileSizes = new TreeMap<>();
    SortedMap<StoredTabletFile, DataFileValue> highDatafileSizes = new TreeMap<>();
    List<StoredTabletFile> highDatafilesToRemove = new ArrayList<>();
    MetadataTableUtil.splitDatafiles(midRow, splitRatio, new HashMap<>(), mapFiles, lowDatafileSizes, highDatafileSizes, highDatafilesToRemove);
    // Stage 0: mark the split as in progress on the (future) high tablet's metadata.
    MetadataTableUtil.splitTablet(high, extent.prevEndRow(), splitRatio, context, zl, Set.of());
    TServerInstance instance = new TServerInstance(location, zl.getSessionId());
    Assignment assignment = new Assignment(high, instance);
    TabletMutator tabletMutator = context.getAmple().mutateTablet(extent);
    tabletMutator.putLocation(assignment.server, LocationType.FUTURE);
    tabletMutator.mutate();
    if (steps >= 1) {
        // Stage 1: create the low-side child tablet with its share of the data files.
        Map<Long, List<TabletFile>> bulkFiles = getBulkFilesLoaded(context, high);
        ManagerMetadataUtil.addNewTablet(context, low, "lowDir", instance, lowDatafileSizes, bulkFiles, new MetadataTime(0, TimeType.LOGICAL), -1L, -1L, zl);
    }
    if (steps >= 2) {
        // Stage 2: finish the split by trimming the high side's files and clearing split markers.
        MetadataTableUtil.finishSplit(high, highDatafileSizes, highDatafilesToRemove, context, zl);
    }
    // Run recovery against whatever partial state the steps above left behind.
    TabletMetadata meta = context.getAmple().readTablet(high);
    KeyExtent fixedExtent = ManagerMetadataUtil.fixSplit(context, meta, zl);
    if (steps < 2)
        assertEquals(splitRatio, meta.getSplitRatio(), 0.0);
    if (steps >= 1) {
        // Recovery should have completed the split: the high child survives.
        assertEquals(high, fixedExtent);
        ensureTabletHasNoUnexpectedMetadataEntries(context, low, lowDatafileSizes);
        ensureTabletHasNoUnexpectedMetadataEntries(context, high, highDatafileSizes);
        // Both children must agree on which bulk-load transactions touched them.
        Map<Long, ? extends Collection<TabletFile>> lowBulkFiles = getBulkFilesLoaded(context, low);
        Map<Long, ? extends Collection<TabletFile>> highBulkFiles = getBulkFilesLoaded(context, high);
        if (!lowBulkFiles.equals(highBulkFiles)) {
            throw new Exception(" " + lowBulkFiles + " != " + highBulkFiles + " " + low + " " + high);
        }
        if (lowBulkFiles.isEmpty()) {
            throw new Exception(" no bulk files " + low);
        }
    } else {
        // Recovery should have rolled the split back to the original extent.
        assertEquals(extent, fixedExtent);
        ensureTabletHasNoUnexpectedMetadataEntries(context, extent, mapFiles);
    }
}
Also used : DataFileValue(org.apache.accumulo.core.metadata.schema.DataFileValue) ArrayList(java.util.ArrayList) TabletMutator(org.apache.accumulo.core.metadata.schema.Ample.TabletMutator) TreeMap(java.util.TreeMap) KeyExtent(org.apache.accumulo.core.dataImpl.KeyExtent) TServerInstance(org.apache.accumulo.core.metadata.TServerInstance) Assignment(org.apache.accumulo.server.manager.state.Assignment) TabletMetadata(org.apache.accumulo.core.metadata.schema.TabletMetadata) StoredTabletFile(org.apache.accumulo.core.metadata.StoredTabletFile) List(java.util.List) ArrayList(java.util.ArrayList) StoredTabletFile(org.apache.accumulo.core.metadata.StoredTabletFile) TabletFile(org.apache.accumulo.core.metadata.TabletFile) MetadataTime(org.apache.accumulo.core.metadata.schema.MetadataTime)

Example 34 with StoredTabletFile

Use of org.apache.accumulo.core.metadata.StoredTabletFile in the Apache Accumulo project.

The following example shows the ensureTabletHasNoUnexpectedMetadataEntries method of the SplitRecoveryIT class.

/**
 * Scans the metadata table rows for the given extent and verifies that only expected columns and
 * column families are present, that the prev-row column matches the extent, and that the tablet's
 * recorded data files match {@code expectedMapFiles}.
 *
 * @param context server context used to scan the metadata table
 * @param extent tablet whose metadata is validated
 * @param expectedMapFiles the data files (with sizes) the tablet is expected to reference
 * @throws Exception if an unexpected entry is found, an expected column is missing, or the
 *         data file entries do not match
 */
private void ensureTabletHasNoUnexpectedMetadataEntries(ServerContext context, KeyExtent extent, SortedMap<StoredTabletFile, DataFileValue> expectedMapFiles) throws Exception {
    try (Scanner scanner = new ScannerImpl(context, MetadataTable.ID, Authorizations.EMPTY)) {
        scanner.setRange(extent.toMetaRange());
        // Columns that must each appear exactly once; removed as they are seen.
        HashSet<ColumnFQ> expectedColumns = new HashSet<>();
        expectedColumns.add(ServerColumnFamily.DIRECTORY_COLUMN);
        expectedColumns.add(TabletColumnFamily.PREV_ROW_COLUMN);
        expectedColumns.add(ServerColumnFamily.TIME_COLUMN);
        expectedColumns.add(ServerColumnFamily.LOCK_COLUMN);
        // Families whose entries are allowed in any number.
        HashSet<Text> expectedColumnFamilies = new HashSet<>();
        expectedColumnFamilies.add(DataFileColumnFamily.NAME);
        expectedColumnFamilies.add(FutureLocationColumnFamily.NAME);
        expectedColumnFamilies.add(CurrentLocationColumnFamily.NAME);
        expectedColumnFamilies.add(LastLocationColumnFamily.NAME);
        expectedColumnFamilies.add(BulkFileColumnFamily.NAME);
        boolean sawPer = false;
        // Scanner is Iterable, so an enhanced-for replaces the explicit Iterator loop.
        for (Entry<Key, Value> entry : scanner) {
            Key key = entry.getKey();
            if (!key.getRow().equals(extent.toMetaRow())) {
                throw new Exception("Tablet " + extent + " contained unexpected " + MetadataTable.NAME + " entry " + key);
            }
            if (TabletColumnFamily.PREV_ROW_COLUMN.hasColumns(key)) {
                sawPer = true;
                if (!KeyExtent.fromMetaPrevRow(entry).equals(extent)) {
                    throw new Exception("Unexpected prev end row " + entry);
                }
            }
            if (expectedColumnFamilies.contains(key.getColumnFamily())) {
                continue;
            }
            if (expectedColumns.remove(new ColumnFQ(key))) {
                continue;
            }
            throw new Exception("Tablet " + extent + " contained unexpected " + MetadataTable.NAME + " entry " + key);
        }
        // Simplified from "size() > 1 || size() == 1", which is just a non-empty check.
        if (!expectedColumns.isEmpty()) {
            throw new Exception("Not all expected columns seen " + extent + " " + expectedColumns);
        }
        assertTrue(sawPer);
        SortedMap<StoredTabletFile, DataFileValue> fixedMapFiles = MetadataTableUtil.getFileAndLogEntries(context, extent).getSecond();
        verifySame(expectedMapFiles, fixedMapFiles);
    }
}
Also used : Scanner(org.apache.accumulo.core.client.Scanner) DataFileValue(org.apache.accumulo.core.metadata.schema.DataFileValue) Text(org.apache.hadoop.io.Text) ColumnFQ(org.apache.accumulo.core.util.ColumnFQ) ScannerImpl(org.apache.accumulo.core.clientImpl.ScannerImpl) Entry(java.util.Map.Entry) Value(org.apache.accumulo.core.data.Value) DataFileValue(org.apache.accumulo.core.metadata.schema.DataFileValue) StoredTabletFile(org.apache.accumulo.core.metadata.StoredTabletFile) Key(org.apache.accumulo.core.data.Key) HashSet(java.util.HashSet)

Example 35 with StoredTabletFile

Use of org.apache.accumulo.core.metadata.StoredTabletFile in the Apache Accumulo project.

The following example shows the verifySame method of the SplitRecoveryIT class.

/**
 * Verifies that two data file maps reference exactly the same files and that each file's
 * {@link DataFileValue} is equal in both maps.
 *
 * @param datafileSizes expected data file sizes
 * @param fixedDatafileSizes actual data file sizes read back from metadata
 * @throws Exception if the key sets differ or any file's value differs
 */
private void verifySame(SortedMap<StoredTabletFile, DataFileValue> datafileSizes, SortedMap<StoredTabletFile, DataFileValue> fixedDatafileSizes) throws Exception {
    // Set.equals is exactly "containsAll in both directions", so the original pair of
    // containsAll checks collapses to a single equality test.
    if (!datafileSizes.keySet().equals(fixedDatafileSizes.keySet())) {
        throw new Exception("Key sets not the same " + datafileSizes.keySet() + " !=  " + fixedDatafileSizes.keySet());
    }
    for (Entry<StoredTabletFile, DataFileValue> entry : datafileSizes.entrySet()) {
        DataFileValue dfv = entry.getValue();
        DataFileValue otherDfv = fixedDatafileSizes.get(entry.getKey());
        if (!dfv.equals(otherDfv)) {
            throw new Exception(entry.getKey() + " dfv not equal  " + dfv + "  " + otherDfv);
        }
    }
}
Also used : DataFileValue(org.apache.accumulo.core.metadata.schema.DataFileValue) StoredTabletFile(org.apache.accumulo.core.metadata.StoredTabletFile)

Aggregations

StoredTabletFile (org.apache.accumulo.core.metadata.StoredTabletFile)47 DataFileValue (org.apache.accumulo.core.metadata.schema.DataFileValue)25 TabletFile (org.apache.accumulo.core.metadata.TabletFile)18 IOException (java.io.IOException)12 KeyExtent (org.apache.accumulo.core.dataImpl.KeyExtent)11 HashMap (java.util.HashMap)9 HashSet (java.util.HashSet)9 Key (org.apache.accumulo.core.data.Key)9 ArrayList (java.util.ArrayList)8 TreeMap (java.util.TreeMap)8 Value (org.apache.accumulo.core.data.Value)8 Path (org.apache.hadoop.fs.Path)7 Text (org.apache.hadoop.io.Text)7 Pair (org.apache.accumulo.core.util.Pair)6 MajorCompactionRequest (org.apache.accumulo.tserver.compaction.MajorCompactionRequest)6 Test (org.junit.Test)6 LogEntry (org.apache.accumulo.core.tabletserver.log.LogEntry)5 UncheckedIOException (java.io.UncheckedIOException)4 CompactionConfig (org.apache.accumulo.core.client.admin.CompactionConfig)4 TServerInstance (org.apache.accumulo.core.metadata.TServerInstance)4