Example 31 with DataFileValue

Use of org.apache.accumulo.core.metadata.schema.DataFileValue in project accumulo by apache.

The class ConfigurableCompactionStrategyTest, method testOutputOptions: verifies that the compaction output options (data block size, HDFS block size, index block size, replication, and compression type) default to unset and, once configured through CompactionSettings, are reflected in the plan's write parameters.

// file selection options are adequately tested by ShellServerIT
@Test
public void testOutputOptions() throws Exception {
    MajorCompactionRequest mcr = new MajorCompactionRequest(new KeyExtent(Table.ID.of("1"), null, null), MajorCompactionReason.USER, null);
    Map<FileRef, DataFileValue> files = new HashMap<>();
    files.put(new FileRef("hdfs://nn1/accumulo/tables/1/t-009/F00001.rf"), new DataFileValue(50000, 400));
    mcr.setFiles(files);
    // test setting no output options
    ConfigurableCompactionStrategy ccs = new ConfigurableCompactionStrategy();
    Map<String, String> opts = new HashMap<>();
    ccs.init(opts);
    CompactionPlan plan = ccs.getCompactionPlan(mcr);
    Assert.assertEquals(0, plan.writeParameters.getBlockSize());
    Assert.assertEquals(0, plan.writeParameters.getHdfsBlockSize());
    Assert.assertEquals(0, plan.writeParameters.getIndexBlockSize());
    Assert.assertEquals(0, plan.writeParameters.getReplication());
    Assert.assertEquals(null, plan.writeParameters.getCompressType());
    // test setting all output options
    ccs = new ConfigurableCompactionStrategy();
    CompactionSettings.OUTPUT_BLOCK_SIZE_OPT.put(opts, "64K");
    CompactionSettings.OUTPUT_COMPRESSION_OPT.put(opts, "snappy");
    CompactionSettings.OUTPUT_HDFS_BLOCK_SIZE_OPT.put(opts, "256M");
    CompactionSettings.OUTPUT_INDEX_BLOCK_SIZE_OPT.put(opts, "32K");
    CompactionSettings.OUTPUT_REPLICATION_OPT.put(opts, "5");
    ccs.init(opts);
    plan = ccs.getCompactionPlan(mcr);
    Assert.assertEquals(ConfigurationTypeHelper.getFixedMemoryAsBytes("64K"), plan.writeParameters.getBlockSize());
    Assert.assertEquals(ConfigurationTypeHelper.getFixedMemoryAsBytes("256M"), plan.writeParameters.getHdfsBlockSize());
    Assert.assertEquals(ConfigurationTypeHelper.getFixedMemoryAsBytes("32K"), plan.writeParameters.getIndexBlockSize());
    Assert.assertEquals(5, plan.writeParameters.getReplication());
    Assert.assertEquals("snappy", plan.writeParameters.getCompressType());
}
Also used : CompactionPlan(org.apache.accumulo.tserver.compaction.CompactionPlan) DataFileValue(org.apache.accumulo.core.metadata.schema.DataFileValue) FileRef(org.apache.accumulo.server.fs.FileRef) HashMap(java.util.HashMap) KeyExtent(org.apache.accumulo.core.data.impl.KeyExtent) MajorCompactionRequest(org.apache.accumulo.tserver.compaction.MajorCompactionRequest) Test(org.junit.Test)
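
The snippets on this page construct DataFileValue instances directly and serialize them with encode() or encodeAsValue() before writing them to the metadata table. Below is a minimal round-trip sketch; the byte[]-based DataFileValue constructor and the getSize()/getNumEntries() accessors are assumed from their use elsewhere in Accumulo, so treat this as illustrative rather than code taken from the project.

import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.metadata.schema.DataFileValue;

public class DataFileValueRoundTrip {
    public static void main(String[] args) {
        // 50000 bytes on disk, 400 key/value entries (the same figures used in the test above)
        DataFileValue dfv = new DataFileValue(50000, 400);
        // serialize for storage in the metadata table, as the mutations on this page do
        Value encoded = dfv.encodeAsValue();
        // decode the stored bytes back into a DataFileValue (assumed byte[] constructor)
        DataFileValue decoded = new DataFileValue(encoded.get());
        System.out.println(decoded.getSize());       // 50000
        System.out.println(decoded.getNumEntries()); // 400
    }
}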

Example 32 with DataFileValue

Use of org.apache.accumulo.core.metadata.schema.DataFileValue in project accumulo by apache.

The class Initialize, method createEntriesForTablet: populates the initial metadata entries for a tablet, including its directory, logical time, previous end row, and a zero-size DataFileValue for each of its files.

private static void createEntriesForTablet(TreeMap<Key, Value> map, Tablet tablet) {
    Value EMPTY_SIZE = new DataFileValue(0, 0).encodeAsValue();
    Text extent = new Text(KeyExtent.getMetadataEntry(tablet.tableId, tablet.endRow));
    addEntry(map, extent, DIRECTORY_COLUMN, new Value(tablet.dir.getBytes(UTF_8)));
    addEntry(map, extent, TIME_COLUMN, new Value((TabletTime.LOGICAL_TIME_ID + "0").getBytes(UTF_8)));
    addEntry(map, extent, PREV_ROW_COLUMN, KeyExtent.encodePrevEndRow(tablet.prevEndRow));
    for (String file : tablet.files) {
        addEntry(map, extent, new ColumnFQ(DataFileColumnFamily.NAME, new Text(file)), EMPTY_SIZE);
    }
}
Also used : ColumnFQ(org.apache.accumulo.core.util.ColumnFQ) DataFileValue(org.apache.accumulo.core.metadata.schema.DataFileValue) Value(org.apache.accumulo.core.data.Value) DataFileValue(org.apache.accumulo.core.metadata.schema.DataFileValue) Text(org.apache.hadoop.io.Text)

Example 33 with DataFileValue

Use of org.apache.accumulo.core.metadata.schema.DataFileValue in project accumulo by apache.

The class MasterMetadataUtil, method addNewTablet: writes the metadata mutation for a newly created tablet, recording its directory, time, optional flush and compaction IDs, assigned location, data file sizes, and bulk-loaded file markers.

public static void addNewTablet(ClientContext context, KeyExtent extent, String path, TServerInstance location, Map<FileRef, DataFileValue> datafileSizes, Map<Long, ? extends Collection<FileRef>> bulkLoadedFiles, String time, long lastFlushID, long lastCompactID, ZooLock zooLock) {
    Mutation m = extent.getPrevRowUpdateMutation();
    TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.put(m, new Value(path.getBytes(UTF_8)));
    TabletsSection.ServerColumnFamily.TIME_COLUMN.put(m, new Value(time.getBytes(UTF_8)));
    if (lastFlushID > 0)
        TabletsSection.ServerColumnFamily.FLUSH_COLUMN.put(m, new Value(("" + lastFlushID).getBytes()));
    if (lastCompactID > 0)
        TabletsSection.ServerColumnFamily.COMPACT_COLUMN.put(m, new Value(("" + lastCompactID).getBytes()));
    if (location != null) {
        location.putLocation(m);
        location.clearFutureLocation(m);
    }
    for (Entry<FileRef, DataFileValue> entry : datafileSizes.entrySet()) {
        m.put(DataFileColumnFamily.NAME, entry.getKey().meta(), new Value(entry.getValue().encode()));
    }
    for (Entry<Long, ? extends Collection<FileRef>> entry : bulkLoadedFiles.entrySet()) {
        Value tidBytes = new Value(Long.toString(entry.getKey()).getBytes());
        for (FileRef ref : entry.getValue()) {
            m.put(TabletsSection.BulkFileColumnFamily.NAME, ref.meta(), new Value(tidBytes));
        }
    }
    MetadataTableUtil.update(context, zooLock, m, extent);
}
Also used : DataFileValue(org.apache.accumulo.core.metadata.schema.DataFileValue) FileRef(org.apache.accumulo.server.fs.FileRef) DataFileValue(org.apache.accumulo.core.metadata.schema.DataFileValue) Value(org.apache.accumulo.core.data.Value) Mutation(org.apache.accumulo.core.data.Mutation)

Example 34 with DataFileValue

Use of org.apache.accumulo.core.metadata.schema.DataFileValue in project accumulo by apache.

The class MasterMetadataUtil, method getUpdateForTabletDataFile: builds the mutation that records a newly written data file for a tablet, along with the tablet time, last location, scan files still in use, unused write-ahead log deletions, an optional merge-file deletion, and the flush ID.

/**
 * Create a mutation that records a new data file and related metadata for a tablet
 *
 * @return A Mutation to update a tablet from the given information
 */
private static Mutation getUpdateForTabletDataFile(KeyExtent extent, FileRef path, FileRef mergeFile, DataFileValue dfv, String time, Set<FileRef> filesInUseByScans, String address, ZooLock zooLock, Set<String> unusedWalLogs, TServerInstance lastLocation, long flushId) {
    Mutation m = new Mutation(extent.getMetadataEntry());
    if (dfv.getNumEntries() > 0) {
        m.put(DataFileColumnFamily.NAME, path.meta(), new Value(dfv.encode()));
        TabletsSection.ServerColumnFamily.TIME_COLUMN.put(m, new Value(time.getBytes(UTF_8)));
        // record this server as the tablet's last location
        TServerInstance self = getTServerInstance(address, zooLock);
        self.putLastLocation(m);
        // erase the old location
        if (lastLocation != null && !lastLocation.equals(self))
            lastLocation.clearLastLocation(m);
    }
    if (unusedWalLogs != null) {
        for (String entry : unusedWalLogs) {
            m.putDelete(LogColumnFamily.NAME, new Text(entry));
        }
    }
    for (FileRef scanFile : filesInUseByScans) m.put(ScanFileColumnFamily.NAME, scanFile.meta(), new Value(new byte[0]));
    if (mergeFile != null)
        m.putDelete(DataFileColumnFamily.NAME, mergeFile.meta());
    TabletsSection.ServerColumnFamily.FLUSH_COLUMN.put(m, new Value(Long.toString(flushId).getBytes(UTF_8)));
    return m;
}
Also used : FileRef(org.apache.accumulo.server.fs.FileRef) DataFileValue(org.apache.accumulo.core.metadata.schema.DataFileValue) Value(org.apache.accumulo.core.data.Value) Text(org.apache.hadoop.io.Text) Mutation(org.apache.accumulo.core.data.Mutation) TServerInstance(org.apache.accumulo.server.master.state.TServerInstance)

Example 35 with DataFileValue

Use of org.apache.accumulo.core.metadata.schema.DataFileValue in project accumulo by apache.

The class MetadataTableUtil, method finishSplit: finalizes a tablet split by deleting the split ratio, old previous-row, and chopped markers, recording the tablet's remaining data file sizes, and removing the high data files that no longer belong to it.

public static void finishSplit(Text metadataEntry, Map<FileRef, DataFileValue> datafileSizes, List<FileRef> highDatafilesToRemove, final ClientContext context, ZooLock zooLock) {
    Mutation m = new Mutation(metadataEntry);
    TabletsSection.TabletColumnFamily.SPLIT_RATIO_COLUMN.putDelete(m);
    TabletsSection.TabletColumnFamily.OLD_PREV_ROW_COLUMN.putDelete(m);
    ChoppedColumnFamily.CHOPPED_COLUMN.putDelete(m);
    for (Entry<FileRef, DataFileValue> entry : datafileSizes.entrySet()) {
        m.put(DataFileColumnFamily.NAME, entry.getKey().meta(), new Value(entry.getValue().encode()));
    }
    for (FileRef pathToRemove : highDatafilesToRemove) {
        m.putDelete(DataFileColumnFamily.NAME, pathToRemove.meta());
    }
    update(context, zooLock, m, new KeyExtent(metadataEntry, (Text) null));
}
Also used : DataFileValue(org.apache.accumulo.core.metadata.schema.DataFileValue) FileRef(org.apache.accumulo.server.fs.FileRef) Value(org.apache.accumulo.core.data.Value) DataFileValue(org.apache.accumulo.core.metadata.schema.DataFileValue) Text(org.apache.hadoop.io.Text) Mutation(org.apache.accumulo.core.data.Mutation) KeyExtent(org.apache.accumulo.core.data.impl.KeyExtent)
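
Examples 33 through 35 write encoded DataFileValue entries into metadata mutations under DataFileColumnFamily.NAME. As a complement, here is a minimal sketch of reading those entries back with a Scanner; the scanner setup and the byte[]-based DataFileValue constructor are assumptions made for illustration, not code taken from the examples above.

import java.util.Map.Entry;
import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.client.Scanner;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.metadata.MetadataTable;
import org.apache.accumulo.core.metadata.schema.DataFileValue;
import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
import org.apache.accumulo.core.security.Authorizations;

public class PrintDataFileSizes {
    // sketch: list each data file recorded in the metadata table with its size and entry count
    static void printDataFileSizes(Connector conn) throws Exception {
        Scanner scanner = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
        scanner.fetchColumnFamily(DataFileColumnFamily.NAME);
        for (Entry<Key, Value> entry : scanner) {
            // the column qualifier is the file path; the value is the encoded DataFileValue
            DataFileValue dfv = new DataFileValue(entry.getValue().get());
            System.out.println(entry.getKey().getColumnQualifier() + " " + dfv);
        }
    }
}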

Aggregations

DataFileValue (org.apache.accumulo.core.metadata.schema.DataFileValue): 44
FileRef (org.apache.accumulo.server.fs.FileRef): 32
Value (org.apache.accumulo.core.data.Value): 18
Text (org.apache.hadoop.io.Text): 14
KeyExtent (org.apache.accumulo.core.data.impl.KeyExtent): 13
Mutation (org.apache.accumulo.core.data.Mutation): 12
Test (org.junit.Test): 12
ArrayList (java.util.ArrayList): 9
Key (org.apache.accumulo.core.data.Key): 9
TreeMap (java.util.TreeMap): 8
HashMap (java.util.HashMap): 7
HashSet (java.util.HashSet): 6
Scanner (org.apache.accumulo.core.client.Scanner): 6
IOException (java.io.IOException): 5
VolumeManager (org.apache.accumulo.server.fs.VolumeManager): 5
ScannerImpl (org.apache.accumulo.core.client.impl.ScannerImpl): 4
Pair (org.apache.accumulo.core.util.Pair): 4
TServerInstance (org.apache.accumulo.server.master.state.TServerInstance): 4
Entry (java.util.Map.Entry): 3
CopyOnWriteArrayList (java.util.concurrent.CopyOnWriteArrayList): 3