
Example 46 with StoredTabletFile

Use of org.apache.accumulo.core.metadata.StoredTabletFile in project accumulo by apache.

The class ManagerMetadataUtil, method updateTabletDataFile:

/**
 * Update tablet file data from flush. Returns a StoredTabletFile if there are data entries.
 */
public static Optional<StoredTabletFile> updateTabletDataFile(ServerContext context,
        KeyExtent extent, TabletFile newDatafile, DataFileValue dfv, MetadataTime time,
        String address, ServiceLock zooLock, Set<String> unusedWalLogs,
        TServerInstance lastLocation, long flushId) {
    TabletMutator tablet = context.getAmple().mutateTablet(extent);
    // if there are no entries, the path doesn't get stored in the metadata table, only the flush ID
    Optional<StoredTabletFile> newFile = Optional.empty();
    // if entries are present, write the path to the metadata table
    if (dfv.getNumEntries() > 0) {
        tablet.putFile(newDatafile, dfv);
        tablet.putTime(time);
        newFile = Optional.of(newDatafile.insert());
        TServerInstance self = getTServerInstance(address, zooLock);
        tablet.putLocation(self, LocationType.LAST);
        // remove the old location
        if (lastLocation != null && !lastLocation.equals(self)) {
            tablet.deleteLocation(lastLocation, LocationType.LAST);
        }
    }
    tablet.putFlushId(flushId);
    unusedWalLogs.forEach(tablet::deleteWal);
    tablet.putZooLock(zooLock);
    tablet.mutate();
    return newFile;
}
Also used : TabletMutator(org.apache.accumulo.core.metadata.schema.Ample.TabletMutator) StoredTabletFile(org.apache.accumulo.core.metadata.StoredTabletFile) TServerInstance(org.apache.accumulo.core.metadata.TServerInstance)
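
For orientation, here is a minimal caller sketch, assuming the flush path already holds every handle that updateTabletDataFile needs; the wrapper method recordFlush and its boolean return are illustrative and not part of Accumulo, while the call to ManagerMetadataUtil.updateTabletDataFile is taken from the example above.

public static boolean recordFlush(ServerContext context, KeyExtent extent, TabletFile newDatafile,
        DataFileValue dfv, MetadataTime time, String address, ServiceLock zooLock,
        Set<String> unusedWalLogs, TServerInstance lastLocation, long flushId) {
    // hypothetical wrapper: delegate to the method shown above
    Optional<StoredTabletFile> newFile = ManagerMetadataUtil.updateTabletDataFile(context, extent,
            newDatafile, dfv, time, address, zooLock, unusedWalLogs, lastLocation, flushId);
    // a StoredTabletFile is present only when the flush wrote entries; an empty flush
    // persists just the flush id, so no file path is added to the metadata table
    return newFile.isPresent();
}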

Example 47 with StoredTabletFile

Use of org.apache.accumulo.core.metadata.StoredTabletFile in project accumulo by apache.

The class TabletMetadataTest, method testAllColumns:

@Test
public void testAllColumns() {
    KeyExtent extent = new KeyExtent(TableId.of("5"), new Text("df"), new Text("da"));
    Mutation mutation = TabletColumnFamily.createPrevRowMutation(extent);
    COMPACT_COLUMN.put(mutation, new Value("5"));
    DIRECTORY_COLUMN.put(mutation, new Value("t-0001757"));
    FLUSH_COLUMN.put(mutation, new Value("6"));
    TIME_COLUMN.put(mutation, new Value("M123456789"));
    String bf1 = "hdfs://nn1/acc/tables/1/t-0001/bf1";
    String bf2 = "hdfs://nn1/acc/tables/1/t-0001/bf2";
    mutation.at().family(BulkFileColumnFamily.NAME).qualifier(bf1).put(FateTxId.formatTid(56));
    mutation.at().family(BulkFileColumnFamily.NAME).qualifier(bf2).put(FateTxId.formatTid(59));
    mutation.at().family(ClonedColumnFamily.NAME).qualifier("").put("OK");
    DataFileValue dfv1 = new DataFileValue(555, 23);
    StoredTabletFile tf1 = new StoredTabletFile("hdfs://nn1/acc/tables/1/t-0001/df1.rf");
    StoredTabletFile tf2 = new StoredTabletFile("hdfs://nn1/acc/tables/1/t-0001/df2.rf");
    mutation.at().family(DataFileColumnFamily.NAME).qualifier(tf1.getMetaUpdateDelete()).put(dfv1.encode());
    DataFileValue dfv2 = new DataFileValue(234, 13);
    mutation.at().family(DataFileColumnFamily.NAME).qualifier(tf2.getMetaUpdateDelete()).put(dfv2.encode());
    mutation.at().family(CurrentLocationColumnFamily.NAME).qualifier("s001").put("server1:8555");
    mutation.at().family(LastLocationColumnFamily.NAME).qualifier("s000").put("server2:8555");
    LogEntry le1 = new LogEntry(extent, 55, "lf1");
    mutation.at().family(le1.getColumnFamily()).qualifier(le1.getColumnQualifier()).timestamp(le1.timestamp).put(le1.getValue());
    LogEntry le2 = new LogEntry(extent, 57, "lf2");
    mutation.at().family(le2.getColumnFamily()).qualifier(le2.getColumnQualifier()).timestamp(le2.timestamp).put(le2.getValue());
    StoredTabletFile sf1 = new StoredTabletFile("hdfs://nn1/acc/tables/1/t-0001/sf1.rf");
    StoredTabletFile sf2 = new StoredTabletFile("hdfs://nn1/acc/tables/1/t-0001/sf2.rf");
    mutation.at().family(ScanFileColumnFamily.NAME).qualifier(sf1.getMetaUpdateDelete()).put("");
    mutation.at().family(ScanFileColumnFamily.NAME).qualifier(sf2.getMetaUpdateDelete()).put("");
    SortedMap<Key, Value> rowMap = toRowMap(mutation);
    TabletMetadata tm = TabletMetadata.convertRow(rowMap.entrySet().iterator(), EnumSet.allOf(ColumnType.class), true);
    assertEquals("OK", tm.getCloned());
    assertEquals(5L, tm.getCompactId().getAsLong());
    assertEquals("t-0001757", tm.getDirName());
    assertEquals(extent.endRow(), tm.getEndRow());
    assertEquals(extent, tm.getExtent());
    assertEquals(Set.of(tf1, tf2), Set.copyOf(tm.getFiles()));
    assertEquals(Map.of(tf1, dfv1, tf2, dfv2), tm.getFilesMap());
    assertEquals(6L, tm.getFlushId().getAsLong());
    assertEquals(rowMap, tm.getKeyValues());
    assertEquals(Map.of(new StoredTabletFile(bf1), 56L, new StoredTabletFile(bf2), 59L), tm.getLoaded());
    assertEquals(HostAndPort.fromParts("server1", 8555), tm.getLocation().getHostAndPort());
    assertEquals("s001", tm.getLocation().getSession());
    assertEquals(LocationType.CURRENT, tm.getLocation().getType());
    assertTrue(tm.hasCurrent());
    assertEquals(HostAndPort.fromParts("server2", 8555), tm.getLast().getHostAndPort());
    assertEquals("s000", tm.getLast().getSession());
    assertEquals(LocationType.LAST, tm.getLast().getType());
    assertEquals(Set.of(le1.getValue() + " " + le1.timestamp, le2.getValue() + " " + le2.timestamp), tm.getLogs().stream().map(le -> le.getValue() + " " + le.timestamp).collect(toSet()));
    assertEquals(extent.prevEndRow(), tm.getPrevEndRow());
    assertEquals(extent.tableId(), tm.getTableId());
    assertTrue(tm.sawPrevEndRow());
    assertEquals("M123456789", tm.getTime().encode());
    assertEquals(Set.of(sf1, sf2), Set.copyOf(tm.getScans()));
}
Also used : ColumnType(org.apache.accumulo.core.metadata.schema.TabletMetadata.ColumnType) Value(org.apache.accumulo.core.data.Value) Text(org.apache.hadoop.io.Text) StoredTabletFile(org.apache.accumulo.core.metadata.StoredTabletFile) Mutation(org.apache.accumulo.core.data.Mutation) KeyExtent(org.apache.accumulo.core.dataImpl.KeyExtent) LogEntry(org.apache.accumulo.core.tabletserver.log.LogEntry) Key(org.apache.accumulo.core.data.Key) Test(org.junit.jupiter.api.Test)
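
As a narrower illustration of the same conversion, here is a minimal companion sketch; the test name is assumed, it reuses the toRowMap helper from the test above, and it relies only on calls already exercised in testAllColumns. It round-trips a single data file column: the qualifier written under DataFileColumnFamily is exactly StoredTabletFile.getMetaUpdateDelete(), and convertRow maps it back to an equal StoredTabletFile key.

@Test
public void testSingleFileRoundTrip() {
    // hypothetical companion test; uses only calls already shown in testAllColumns
    KeyExtent extent = new KeyExtent(TableId.of("5"), new Text("df"), new Text("da"));
    Mutation mutation = TabletColumnFamily.createPrevRowMutation(extent);
    StoredTabletFile file = new StoredTabletFile("hdfs://nn1/acc/tables/1/t-0001/df1.rf");
    DataFileValue dfv = new DataFileValue(555, 23);
    // the metadata qualifier for a data file is the StoredTabletFile's metadata entry string
    mutation.at().family(DataFileColumnFamily.NAME).qualifier(file.getMetaUpdateDelete()).put(dfv.encode());
    SortedMap<Key, Value> rowMap = toRowMap(mutation);
    TabletMetadata tm = TabletMetadata.convertRow(rowMap.entrySet().iterator(), EnumSet.allOf(ColumnType.class), true);
    // decoding yields an equal StoredTabletFile as the map key and set element
    assertEquals(Map.of(file, dfv), tm.getFilesMap());
    assertEquals(Set.of(file), Set.copyOf(tm.getFiles()));
}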

Aggregations (types used alongside StoredTabletFile in these examples, with usage counts)

StoredTabletFile (org.apache.accumulo.core.metadata.StoredTabletFile): 47
DataFileValue (org.apache.accumulo.core.metadata.schema.DataFileValue): 25
TabletFile (org.apache.accumulo.core.metadata.TabletFile): 18
IOException (java.io.IOException): 12
KeyExtent (org.apache.accumulo.core.dataImpl.KeyExtent): 11
HashMap (java.util.HashMap): 9
HashSet (java.util.HashSet): 9
Key (org.apache.accumulo.core.data.Key): 9
ArrayList (java.util.ArrayList): 8
TreeMap (java.util.TreeMap): 8
Value (org.apache.accumulo.core.data.Value): 8
Path (org.apache.hadoop.fs.Path): 7
Text (org.apache.hadoop.io.Text): 7
Pair (org.apache.accumulo.core.util.Pair): 6
MajorCompactionRequest (org.apache.accumulo.tserver.compaction.MajorCompactionRequest): 6
Test (org.junit.Test): 6
LogEntry (org.apache.accumulo.core.tabletserver.log.LogEntry): 5
UncheckedIOException (java.io.UncheckedIOException): 4
CompactionConfig (org.apache.accumulo.core.client.admin.CompactionConfig): 4
TServerInstance (org.apache.accumulo.core.metadata.TServerInstance): 4