
Example 16 with FileRef

Use of org.apache.accumulo.server.fs.FileRef in project accumulo by apache.

From class DatafileManagerTest, method testReserveMergingMinorCompactionFile.

/*
 * Test that the smallest file is chosen for a merging minor compaction.
 */
@Test
public void testReserveMergingMinorCompactionFile() throws IOException {
    String maxMergeFileSize = "1000B";
    EasyMock.expect(tablet.getTableConfiguration()).andReturn(tableConf);
    EasyMock.expect(tableConf.get(Property.TABLE_MINC_MAX_MERGE_FILE_SIZE)).andReturn(maxMergeFileSize);
    EasyMock.replay(tablet, tableConf);
    SortedMap<FileRef, DataFileValue> testFiles = createFileMap("smallfile", "100B", "file2", "100M", "file3", "100M", "file4", "100M", "file5", "100M");
    DatafileManager dfm = new DatafileManager(tablet, testFiles);
    FileRef mergeFile = dfm.reserveMergingMinorCompactionFile();
    EasyMock.verify(tablet, tableConf);
    assertEquals("smallfile", mergeFile.path().getName());
}
Also used: DataFileValue(org.apache.accumulo.core.metadata.schema.DataFileValue) FileRef(org.apache.accumulo.server.fs.FileRef) Test(org.junit.Test)
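
The test above relies on a createFileMap helper that this listing omits. A minimal sketch of such a helper, assuming alternating (name, size) arguments, an arbitrary HDFS path prefix, and size parsing via AccumuloConfiguration.getMemoryInBytes (present in the 1.x line this code comes from):

import java.util.SortedMap;
import java.util.TreeMap;
import org.apache.accumulo.core.conf.AccumuloConfiguration;
import org.apache.accumulo.core.metadata.schema.DataFileValue;
import org.apache.accumulo.server.fs.FileRef;

// Hypothetical stand-in for the createFileMap helper used by the test.
// Arguments alternate file name and human-readable size, e.g. "smallfile", "100B".
private SortedMap<FileRef, DataFileValue> createFileMap(String... pairs) {
    SortedMap<FileRef, DataFileValue> files = new TreeMap<>();
    for (int i = 0; i < pairs.length; i += 2) {
        // The path prefix is arbitrary; the test only inspects the file name.
        FileRef ref = new FileRef("hdfs://nn1/accumulo/tables/5/t-0001/" + pairs[i]);
        long size = AccumuloConfiguration.getMemoryInBytes(pairs[i + 1]);
        files.put(ref, new DataFileValue(size, 1 /* entry count, arbitrary */));
    }
    return files;
}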

Example 17 with FileRef

Use of org.apache.accumulo.server.fs.FileRef in project accumulo by apache.

From class CompactionPlanTest, method testDeleteNotInAllFiles.

@Test
public void testDeleteNotInAllFiles() {
    CompactionPlan cp1 = new CompactionPlan();
    FileRef fr1 = new FileRef("hdfs://nn1/accumulo/tables/1/t-1/1.rf");
    FileRef fr2 = new FileRef("hdfs://nn1/accumulo/tables/1/t-1/2.rf");
    FileRef fr3 = new FileRef("hdfs://nn1/accumulo/tables/1/t-2/3.rf");
    cp1.deleteFiles.add(fr1);
    cp1.deleteFiles.add(fr2);
    cp1.deleteFiles.add(fr3);
    Set<FileRef> allFiles = ImmutableSet.of(fr1, fr2);
    exception.expect(IllegalStateException.class);
    cp1.validate(allFiles);
}
Also used: FileRef(org.apache.accumulo.server.fs.FileRef) Test(org.junit.Test)
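
Both this test and the next call exception.expect(...), which presupposes a JUnit 4 ExpectedException rule that the listing does not show. The test class would need a field along these lines:

import org.junit.Rule;
import org.junit.rules.ExpectedException;

// Required for the exception.expect(IllegalStateException.class) calls
// in the two tests shown here.
@Rule
public ExpectedException exception = ExpectedException.none();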

Example 18 with FileRef

Use of org.apache.accumulo.server.fs.FileRef in project accumulo by apache.

From class CompactionPlanTest, method testInputNotInAllFiles.

@Test
public void testInputNotInAllFiles() {
    CompactionPlan cp1 = new CompactionPlan();
    FileRef fr1 = new FileRef("hdfs://nn1/accumulo/tables/1/t-1/1.rf");
    FileRef fr2 = new FileRef("hdfs://nn1/accumulo/tables/1/t-1/2.rf");
    FileRef fr3 = new FileRef("hdfs://nn1/accumulo/tables/1/t-2/3.rf");
    cp1.inputFiles.add(fr1);
    cp1.inputFiles.add(fr2);
    cp1.inputFiles.add(fr3);
    Set<FileRef> allFiles = ImmutableSet.of(fr1, fr2);
    exception.expect(IllegalStateException.class);
    cp1.validate(allFiles);
}
Also used: FileRef(org.apache.accumulo.server.fs.FileRef) Test(org.junit.Test)
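
Taken together, these two tests pin down the contract of CompactionPlan.validate: every file in inputFiles and deleteFiles must belong to the tablet's current file set, otherwise an IllegalStateException is thrown. A rough sketch of the check they exercise (not the project's actual implementation):

// Sketch of the subset check exercised by testDeleteNotInAllFiles
// and testInputNotInAllFiles above.
public void validate(Set<FileRef> allFiles) {
    if (!allFiles.containsAll(inputFiles)) {
        throw new IllegalStateException("plan inputs " + inputFiles
            + " are not all contained in " + allFiles);
    }
    if (!allFiles.containsAll(deleteFiles)) {
        throw new IllegalStateException("plan deletes " + deleteFiles
            + " are not all contained in " + allFiles);
    }
}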

Example 19 with FileRef

Use of org.apache.accumulo.server.fs.FileRef in project accumulo by apache.

From class TwoTierCompactionStrategyTest, method testDefaultCompaction.

@Test
public void testDefaultCompaction() throws IOException {
    ttcs.init(opts);
    conf = DefaultConfiguration.getInstance();
    KeyExtent ke = new KeyExtent(Table.ID.of("0"), null, null);
    mcr = new MajorCompactionRequest(ke, MajorCompactionReason.NORMAL, conf);
    Map<FileRef, DataFileValue> fileMap = createFileMap("f1", "10M", "f2", "10M", "f3", "10M", "f4", "10M", "f5", "100M", "f6", "100M", "f7", "100M", "f8", "100M");
    mcr.setFiles(fileMap);
    Assert.assertTrue(ttcs.shouldCompact(mcr));
    Assert.assertEquals(8, mcr.getFiles().size());
    List<FileRef> filesToCompact = ttcs.getCompactionPlan(mcr).inputFiles;
    Assert.assertEquals(fileMap.keySet(), new HashSet<>(filesToCompact));
    Assert.assertEquals(8, filesToCompact.size());
    Assert.assertNull(ttcs.getCompactionPlan(mcr).writeParameters.getCompressType());
}
Also used: DataFileValue(org.apache.accumulo.core.metadata.schema.DataFileValue) FileRef(org.apache.accumulo.server.fs.FileRef) KeyExtent(org.apache.accumulo.core.data.impl.KeyExtent) Test(org.junit.Test)
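
The opts map handed to ttcs.init(...) is not shown in this listing. TwoTierCompactionStrategy is driven by per-table strategy options; assuming the option keys file.large.compress.threshold and file.large.compress.type, a setup consistent with this test (the 440M total input size stays below the threshold, so the default codec applies and getCompressType() returns null) might look like:

import java.util.HashMap;
import java.util.Map;

// Hypothetical option map; the key names are assumptions based on the
// strategy's documented per-table options.
Map<String, String> opts = new HashMap<>();
opts.put("file.large.compress.threshold", "500M"); // switch codecs above this total size
opts.put("file.large.compress.type", "gz");        // codec for the large tier
ttcs.init(opts);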

Example 20 with FileRef

Use of org.apache.accumulo.server.fs.FileRef in project accumulo by apache.

From class TabletGroupWatcher, method deleteTablets.

private void deleteTablets(MergeInfo info) throws AccumuloException {
    KeyExtent extent = info.getExtent();
    String targetSystemTable = extent.isMeta() ? RootTable.NAME : MetadataTable.NAME;
    Master.log.debug("Deleting tablets for {}", extent);
    char timeType = '\0';
    KeyExtent followingTablet = null;
    if (extent.getEndRow() != null) {
        Key nextExtent = new Key(extent.getEndRow()).followingKey(PartialKey.ROW);
        followingTablet = getHighTablet(new KeyExtent(extent.getTableId(), nextExtent.getRow(), extent.getEndRow()));
        Master.log.debug("Found following tablet {}", followingTablet);
    }
    try {
        Connector conn = this.master.getConnector();
        Text start = extent.getPrevEndRow();
        if (start == null) {
            start = new Text();
        }
        Master.log.debug("Making file deletion entries for {}", extent);
        Range deleteRange = new Range(KeyExtent.getMetadataEntry(extent.getTableId(), start), false, KeyExtent.getMetadataEntry(extent.getTableId(), extent.getEndRow()), true);
        Scanner scanner = conn.createScanner(targetSystemTable, Authorizations.EMPTY);
        scanner.setRange(deleteRange);
        TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.fetch(scanner);
        TabletsSection.ServerColumnFamily.TIME_COLUMN.fetch(scanner);
        scanner.fetchColumnFamily(DataFileColumnFamily.NAME);
        scanner.fetchColumnFamily(TabletsSection.CurrentLocationColumnFamily.NAME);
        Set<FileRef> datafiles = new TreeSet<>();
        for (Entry<Key, Value> entry : scanner) {
            Key key = entry.getKey();
            if (key.compareColumnFamily(DataFileColumnFamily.NAME) == 0) {
                datafiles.add(new FileRef(this.master.fs, key));
                if (datafiles.size() > 1000) {
                    MetadataTableUtil.addDeleteEntries(extent, datafiles, master);
                    datafiles.clear();
                }
            } else if (TabletsSection.ServerColumnFamily.TIME_COLUMN.hasColumns(key)) {
                timeType = entry.getValue().toString().charAt(0);
            } else if (key.compareColumnFamily(TabletsSection.CurrentLocationColumnFamily.NAME) == 0) {
                throw new IllegalStateException("Tablet " + key.getRow() + " is assigned during a merge!");
            } else if (TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.hasColumns(key)) {
                // ACCUMULO-2974: include the table ID when converting a relative path to an absolute path.
                // The value already includes the leading path separator, so only the table ID needs to be prepended.
                String path = entry.getValue().toString();
                if (path.contains(":")) {
                    datafiles.add(new FileRef(path));
                } else {
                    datafiles.add(new FileRef(path, this.master.fs.getFullPath(FileType.TABLE, Path.SEPARATOR + extent.getTableId() + path)));
                }
                if (datafiles.size() > 1000) {
                    MetadataTableUtil.addDeleteEntries(extent, datafiles, master);
                    datafiles.clear();
                }
            }
        }
        MetadataTableUtil.addDeleteEntries(extent, datafiles, master);
        BatchWriter bw = conn.createBatchWriter(targetSystemTable, new BatchWriterConfig());
        try {
            deleteTablets(info, deleteRange, bw, conn);
        } finally {
            bw.close();
        }
        if (followingTablet != null) {
            Master.log.debug("Updating prevRow of {} to {}", followingTablet, extent.getPrevEndRow());
            bw = conn.createBatchWriter(targetSystemTable, new BatchWriterConfig());
            try {
                Mutation m = new Mutation(followingTablet.getMetadataEntry());
                TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.put(m, KeyExtent.encodePrevEndRow(extent.getPrevEndRow()));
                ChoppedColumnFamily.CHOPPED_COLUMN.putDelete(m);
                bw.addMutation(m);
                bw.flush();
            } finally {
                bw.close();
            }
        } else {
            // Recreate the default tablet to hold the end of the table
            Master.log.debug("Recreating the last tablet to point to {}", extent.getPrevEndRow());
            VolumeChooserEnvironment chooserEnv = new VolumeChooserEnvironment(extent.getTableId());
            String tdir = master.getFileSystem().choose(chooserEnv, ServerConstants.getBaseUris()) + Constants.HDFS_TABLES_DIR + Path.SEPARATOR + extent.getTableId() + Constants.DEFAULT_TABLET_LOCATION;
            MetadataTableUtil.addTablet(new KeyExtent(extent.getTableId(), null, extent.getPrevEndRow()), tdir, master, timeType, this.master.masterLock);
        }
    } catch (RuntimeException | IOException | TableNotFoundException | AccumuloSecurityException ex) {
        throw new AccumuloException(ex);
    }
}
Also used: Connector(org.apache.accumulo.core.client.Connector) Scanner(org.apache.accumulo.core.client.Scanner) AccumuloException(org.apache.accumulo.core.client.AccumuloException) Text(org.apache.hadoop.io.Text) IOException(java.io.IOException) Range(org.apache.accumulo.core.data.Range) KeyExtent(org.apache.accumulo.core.data.impl.KeyExtent) TableNotFoundException(org.apache.accumulo.core.client.TableNotFoundException) FileRef(org.apache.accumulo.server.fs.FileRef) VolumeChooserEnvironment(org.apache.accumulo.server.fs.VolumeChooserEnvironment) TreeSet(java.util.TreeSet) Value(org.apache.accumulo.core.data.Value) BatchWriterConfig(org.apache.accumulo.core.client.BatchWriterConfig) AccumuloSecurityException(org.apache.accumulo.core.client.AccumuloSecurityException) BatchWriter(org.apache.accumulo.core.client.BatchWriter) Mutation(org.apache.accumulo.core.data.Mutation) Key(org.apache.accumulo.core.data.Key) PartialKey(org.apache.accumulo.core.data.PartialKey)
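
The DIRECTORY_COLUMN branch above distinguishes absolute paths (which carry a filesystem scheme, hence contain ':') from the relative paths that ACCUMULO-2974 requires qualifying with the table ID. Pulled out as a hypothetical helper for readability (toFileRef is not part of the project):

// Hypothetical helper isolating the path resolution from deleteTablets.
// A relative directory value already starts with a path separator, so only
// the table ID is prepended before resolving against a table volume.
private FileRef toFileRef(VolumeManager fs, Table.ID tableId, String value) throws IOException {
    if (value.contains(":")) {
        return new FileRef(value); // already absolute
    }
    return new FileRef(value,
        fs.getFullPath(FileType.TABLE, Path.SEPARATOR + tableId + value));
}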

Aggregations

FileRef (org.apache.accumulo.server.fs.FileRef): 62 usages
DataFileValue (org.apache.accumulo.core.metadata.schema.DataFileValue): 36 usages
Value (org.apache.accumulo.core.data.Value): 17 usages
Key (org.apache.accumulo.core.data.Key): 16 usages
ArrayList (java.util.ArrayList): 15 usages
HashMap (java.util.HashMap): 13 usages
KeyExtent (org.apache.accumulo.core.data.impl.KeyExtent): 13 usages
IOException (java.io.IOException): 12 usages
Test (org.junit.Test): 12 usages
Text (org.apache.hadoop.io.Text): 11 usages
Mutation (org.apache.accumulo.core.data.Mutation): 10 usages
VolumeManager (org.apache.accumulo.server.fs.VolumeManager): 10 usages
Scanner (org.apache.accumulo.core.client.Scanner): 9 usages
PartialKey (org.apache.accumulo.core.data.PartialKey): 9 usages
TreeMap (java.util.TreeMap): 8 usages
FileSystem (org.apache.hadoop.fs.FileSystem): 8 usages
Path (org.apache.hadoop.fs.Path): 8 usages
HashSet (java.util.HashSet): 7 usages
IsolatedScanner (org.apache.accumulo.core.client.IsolatedScanner): 6 usages
ScannerImpl (org.apache.accumulo.core.client.impl.ScannerImpl): 6 usages