Example 11 with DataFileValue

use of org.apache.accumulo.core.metadata.schema.DataFileValue in project accumulo by apache.

the class DatafileManagerTest method testReserveMergingMinorCompactionFile_MaxExceeded.

/*
   * Test that no file is reserved for a merging minor compaction when even the smallest file exceeds table.compaction.minor.merge.file.size.max
   */
@Test
public void testReserveMergingMinorCompactionFile_MaxExceeded() throws IOException {
    String maxMergeFileSize = "1000B";
    EasyMock.expect(tablet.getTableConfiguration()).andReturn(tableConf);
    EasyMock.expect(tableConf.get(Property.TABLE_MINC_MAX_MERGE_FILE_SIZE)).andReturn(maxMergeFileSize);
    EasyMock.replay(tablet, tableConf);
    SortedMap<FileRef, DataFileValue> testFiles = createFileMap("largefile", "10M", "file2", "100M", "file3", "100M", "file4", "100M", "file5", "100M");
    DatafileManager dfm = new DatafileManager(tablet, testFiles);
    FileRef mergeFile = dfm.reserveMergingMinorCompactionFile();
    EasyMock.verify(tablet, tableConf);
    assertEquals(null, mergeFile);
}
Also used : DataFileValue(org.apache.accumulo.core.metadata.schema.DataFileValue) FileRef(org.apache.accumulo.server.fs.FileRef) Test(org.junit.Test)
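This test and the next one call a createFileMap helper that this listing does not show. A minimal sketch of what such a helper could look like follows; the HDFS path and the crude size parser are assumptions for illustration, not the project's actual helper.

import java.util.SortedMap;
import java.util.TreeMap;
import org.apache.accumulo.core.metadata.schema.DataFileValue;
import org.apache.accumulo.server.fs.FileRef;

// Hypothetical reconstruction of the test helper; the real version may differ.
private static SortedMap<FileRef, DataFileValue> createFileMap(String... namesAndSizes) {
    SortedMap<FileRef, DataFileValue> files = new TreeMap<>();
    for (int i = 0; i < namesAndSizes.length; i += 2) {
        String name = namesAndSizes[i];
        String size = namesAndSizes[i + 1];
        // crude parser for the "100B" / "10M" strings used in the tests above
        long bytes = size.endsWith("M")
                ? Long.parseLong(size.substring(0, size.length() - 1)) * 1024L * 1024L
                : Long.parseLong(size.substring(0, size.length() - 1));
        files.put(new FileRef("hdfs://nn:8020/accumulo/tables/5/t-0001/" + name + ".rf"),
                new DataFileValue(bytes, 1));
    }
    return files;
}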

Example 12 with DataFileValue

use of org.apache.accumulo.core.metadata.schema.DataFileValue in project accumulo by apache.

the class DatafileManagerTest method testReserveMergingMinorCompactionFile.

/*
   * Test that the smallest file is chosen for a merging minor compaction
   */
@Test
public void testReserveMergingMinorCompactionFile() throws IOException {
    String maxMergeFileSize = "1000B";
    EasyMock.expect(tablet.getTableConfiguration()).andReturn(tableConf);
    EasyMock.expect(tableConf.get(Property.TABLE_MINC_MAX_MERGE_FILE_SIZE)).andReturn(maxMergeFileSize);
    EasyMock.replay(tablet, tableConf);
    SortedMap<FileRef, DataFileValue> testFiles = createFileMap("smallfile", "100B", "file2", "100M", "file3", "100M", "file4", "100M", "file5", "100M");
    DatafileManager dfm = new DatafileManager(tablet, testFiles);
    FileRef mergeFile = dfm.reserveMergingMinorCompactionFile();
    EasyMock.verify(tablet, tableConf);
    assertEquals("smallfile", mergeFile.path().getName());
}
Also used : DataFileValue(org.apache.accumulo.core.metadata.schema.DataFileValue) FileRef(org.apache.accumulo.server.fs.FileRef) Test(org.junit.Test)

Example 13 with DataFileValue

use of org.apache.accumulo.core.metadata.schema.DataFileValue in project accumulo by apache.

the class TwoTierCompactionStrategyTest method testDefaultCompaction.

@Test
public void testDefaultCompaction() throws IOException {
    ttcs.init(opts);
    conf = DefaultConfiguration.getInstance();
    KeyExtent ke = new KeyExtent(Table.ID.of("0"), null, null);
    mcr = new MajorCompactionRequest(ke, MajorCompactionReason.NORMAL, conf);
    Map<FileRef, DataFileValue> fileMap = createFileMap("f1", "10M", "f2", "10M", "f3", "10M", "f4", "10M", "f5", "100M", "f6", "100M", "f7", "100M", "f8", "100M");
    mcr.setFiles(fileMap);
    Assert.assertTrue(ttcs.shouldCompact(mcr));
    Assert.assertEquals(8, mcr.getFiles().size());
    List<FileRef> filesToCompact = ttcs.getCompactionPlan(mcr).inputFiles;
    Assert.assertEquals(fileMap.keySet(), new HashSet<>(filesToCompact));
    Assert.assertEquals(8, filesToCompact.size());
    Assert.assertEquals(null, ttcs.getCompactionPlan(mcr).writeParameters.getCompressType());
}
Also used : DataFileValue(org.apache.accumulo.core.metadata.schema.DataFileValue) FileRef(org.apache.accumulo.server.fs.FileRef) KeyExtent(org.apache.accumulo.core.data.impl.KeyExtent) Test(org.junit.Test)
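The assertion that getCompressType() returns null only covers the default tier. Below is a hedged sketch of the large-file counterpart, reusing the ttcs, conf, and mcr fields and the createFileMap helper of the test class above; the option keys file.large.compress.threshold and file.large.compress.type and the expected "gz" result come from the TwoTierCompactionStrategy documentation, not from code shown on this page, so treat them as assumptions.

// Sketch only: option keys and the expected codec are assumed from the docs, not from this listing.
@Test
public void testLargeCompaction() throws IOException {
    Map<String, String> largeOpts = new HashMap<>();
    largeOpts.put("file.large.compress.threshold", "500M"); // switch codec once inputs exceed this size
    largeOpts.put("file.large.compress.type", "gz");
    ttcs.init(largeOpts);
    conf = DefaultConfiguration.getInstance();
    KeyExtent ke = new KeyExtent(Table.ID.of("0"), null, null);
    mcr = new MajorCompactionRequest(ke, MajorCompactionReason.NORMAL, conf);
    mcr.setFiles(createFileMap("f1", "1000M", "f2", "1000M", "f3", "1000M", "f4", "1000M"));
    Assert.assertTrue(ttcs.shouldCompact(mcr));
    // with the combined input size well above the threshold, the plan should pick the large-file codec
    Assert.assertEquals("gz", ttcs.getCompactionPlan(mcr).writeParameters.getCompressType());
}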

Example 14 with DataFileValue

use of org.apache.accumulo.core.metadata.schema.DataFileValue in project accumulo by apache.

the class MasterMetadataUtil method replaceDatafiles.

public static void replaceDatafiles(ClientContext context, KeyExtent extent, Set<FileRef> datafilesToDelete, Set<FileRef> scanFiles, FileRef path, Long compactionId, DataFileValue size, String address, TServerInstance lastLocation, ZooLock zooLock, boolean insertDeleteFlags) throws IOException {
    if (insertDeleteFlags) {
        // add delete flags for those paths before the data file reference is removed
        MetadataTableUtil.addDeleteEntries(extent, datafilesToDelete, context);
    }
    // replace data file references to old mapfiles with the new mapfiles
    Mutation m = new Mutation(extent.getMetadataEntry());
    for (FileRef pathToRemove : datafilesToDelete) m.putDelete(DataFileColumnFamily.NAME, pathToRemove.meta());
    for (FileRef scanFile : scanFiles) m.put(ScanFileColumnFamily.NAME, scanFile.meta(), new Value(new byte[0]));
    if (size.getNumEntries() > 0)
        m.put(DataFileColumnFamily.NAME, path.meta(), new Value(size.encode()));
    if (compactionId != null)
        TabletsSection.ServerColumnFamily.COMPACT_COLUMN.put(m, new Value(("" + compactionId).getBytes()));
    TServerInstance self = getTServerInstance(address, zooLock);
    self.putLastLocation(m);
    // remove the old location
    if (lastLocation != null && !lastLocation.equals(self))
        lastLocation.clearLastLocation(m);
    MetadataTableUtil.update(context, zooLock, m, extent);
}
Also used : FileRef(org.apache.accumulo.server.fs.FileRef) DataFileValue(org.apache.accumulo.core.metadata.schema.DataFileValue) Value(org.apache.accumulo.core.data.Value) Mutation(org.apache.accumulo.core.data.Mutation) TServerInstance(org.apache.accumulo.server.master.state.TServerInstance)
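For readers unfamiliar with the metadata schema, the mutation assembled above has roughly the following shape; the row, file paths, and sizes are invented for illustration only.

// Illustrative shape of the mutation built by replaceDatafiles; values are made up.
Mutation m = new Mutation(new Text("5;myrow"));                                            // the tablet's metadata row
m.putDelete(DataFileColumnFamily.NAME, new Text("/t-0001/F00000aa.rf"));                   // one delete per datafile to remove
m.put(ScanFileColumnFamily.NAME, new Text("/t-0001/F00000aa.rf"), new Value(new byte[0])); // one entry per file still held by scans
m.put(DataFileColumnFamily.NAME, new Text("/t-0001/F00000ab.rf"),
    new Value(new DataFileValue(4096, 100).encode()));                                     // the replacement file with its size and entry count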

Example 15 with DataFileValue

use of org.apache.accumulo.core.metadata.schema.DataFileValue in project accumulo by apache.

the class MasterMetadataUtil method fixSplit.

private static KeyExtent fixSplit(ClientContext context, Table.ID tableId, Text metadataEntry, Text metadataPrevEndRow, Value oper, double splitRatio, TServerInstance tserver, String time, long initFlushID, long initCompactID, ZooLock lock) throws AccumuloException, IOException {
    if (metadataPrevEndRow == null)
        // prev end row....
        throw new AccumuloException("Split tablet does not have prev end row, something is amiss, extent = " + metadataEntry);
    // check to see if prev tablet exist in metadata tablet
    Key prevRowKey = new Key(new Text(KeyExtent.getMetadataEntry(tableId, metadataPrevEndRow)));
    try (ScannerImpl scanner2 = new ScannerImpl(context, MetadataTable.ID, Authorizations.EMPTY)) {
        scanner2.setRange(new Range(prevRowKey, prevRowKey.followingKey(PartialKey.ROW)));
        VolumeManager fs = VolumeManagerImpl.get();
        if (!scanner2.iterator().hasNext()) {
            log.info("Rolling back incomplete split {} {}", metadataEntry, metadataPrevEndRow);
            MetadataTableUtil.rollBackSplit(metadataEntry, KeyExtent.decodePrevEndRow(oper), context, lock);
            return new KeyExtent(metadataEntry, KeyExtent.decodePrevEndRow(oper));
        } else {
            log.info("Finishing incomplete split {} {}", metadataEntry, metadataPrevEndRow);
            List<FileRef> highDatafilesToRemove = new ArrayList<>();
            SortedMap<FileRef, DataFileValue> origDatafileSizes = new TreeMap<>();
            SortedMap<FileRef, DataFileValue> highDatafileSizes = new TreeMap<>();
            SortedMap<FileRef, DataFileValue> lowDatafileSizes = new TreeMap<>();
            try (Scanner scanner3 = new ScannerImpl(context, MetadataTable.ID, Authorizations.EMPTY)) {
                Key rowKey = new Key(metadataEntry);
                scanner3.fetchColumnFamily(DataFileColumnFamily.NAME);
                scanner3.setRange(new Range(rowKey, rowKey.followingKey(PartialKey.ROW)));
                for (Entry<Key, Value> entry : scanner3) {
                    if (entry.getKey().compareColumnFamily(DataFileColumnFamily.NAME) == 0) {
                        origDatafileSizes.put(new FileRef(fs, entry.getKey()), new DataFileValue(entry.getValue().get()));
                    }
                }
            }
            MetadataTableUtil.splitDatafiles(metadataPrevEndRow, splitRatio, new HashMap<>(), origDatafileSizes, lowDatafileSizes, highDatafileSizes, highDatafilesToRemove);
            MetadataTableUtil.finishSplit(metadataEntry, highDatafileSizes, highDatafilesToRemove, context, lock);
            return new KeyExtent(metadataEntry, KeyExtent.encodePrevEndRow(metadataPrevEndRow));
        }
    }
}
Also used : AccumuloException(org.apache.accumulo.core.client.AccumuloException) VolumeManager(org.apache.accumulo.server.fs.VolumeManager) Scanner(org.apache.accumulo.core.client.Scanner) DataFileValue(org.apache.accumulo.core.metadata.schema.DataFileValue) ArrayList(java.util.ArrayList) Text(org.apache.hadoop.io.Text) Range(org.apache.accumulo.core.data.Range) TreeMap(java.util.TreeMap) KeyExtent(org.apache.accumulo.core.data.impl.KeyExtent) ScannerImpl(org.apache.accumulo.core.client.impl.ScannerImpl) FileRef(org.apache.accumulo.server.fs.FileRef) Value(org.apache.accumulo.core.data.Value) Key(org.apache.accumulo.core.data.Key) PartialKey(org.apache.accumulo.core.data.PartialKey)
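The scanner loop in fixSplit rebuilds each DataFileValue from the raw bytes stored in the metadata table. A minimal, standalone round-trip of that encoding (field values are illustrative) looks like:

import org.apache.accumulo.core.metadata.schema.DataFileValue;

public class DataFileValueRoundTrip {
    public static void main(String[] args) {
        DataFileValue original = new DataFileValue(1024, 500);  // size in bytes, number of entries
        byte[] encoded = original.encode();                     // the bytes stored in the metadata table's Value
        DataFileValue decoded = new DataFileValue(encoded);     // what the loop above reconstructs per entry
        System.out.println(decoded.getSize() + " " + decoded.getNumEntries()); // prints: 1024 500
    }
}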

Aggregations

DataFileValue (org.apache.accumulo.core.metadata.schema.DataFileValue): 44
FileRef (org.apache.accumulo.server.fs.FileRef): 32
Value (org.apache.accumulo.core.data.Value): 18
Text (org.apache.hadoop.io.Text): 14
KeyExtent (org.apache.accumulo.core.data.impl.KeyExtent): 13
Mutation (org.apache.accumulo.core.data.Mutation): 12
Test (org.junit.Test): 12
ArrayList (java.util.ArrayList): 9
Key (org.apache.accumulo.core.data.Key): 9
TreeMap (java.util.TreeMap): 8
HashMap (java.util.HashMap): 7
HashSet (java.util.HashSet): 6
Scanner (org.apache.accumulo.core.client.Scanner): 6
IOException (java.io.IOException): 5
VolumeManager (org.apache.accumulo.server.fs.VolumeManager): 5
ScannerImpl (org.apache.accumulo.core.client.impl.ScannerImpl): 4
Pair (org.apache.accumulo.core.util.Pair): 4
TServerInstance (org.apache.accumulo.server.master.state.TServerInstance): 4
Entry (java.util.Map.Entry): 3
CopyOnWriteArrayList (java.util.concurrent.CopyOnWriteArrayList): 3