Search in sources :

Example 36 with DataFileValue

Example usage of org.apache.accumulo.core.metadata.schema.DataFileValue from the Apache Accumulo project.

Source: the updateTabletVolumes method of the MetadataTableUtil class.

/**
 * Applies volume-related changes — write-ahead log references, data file
 * references, and optionally a new tablet directory — to a tablet's metadata.
 *
 * <p>The root tablet is special-cased: it carries no file or directory updates
 * here, so only log-entry changes are permitted for it; passing files or a new
 * directory for the root tablet raises {@link IllegalArgumentException}.
 */
public static void updateTabletVolumes(KeyExtent extent, List<LogEntry> logsToRemove, List<LogEntry> logsToAdd, List<FileRef> filesToRemove, SortedMap<FileRef, DataFileValue> filesToAdd, String newDir, ZooLock zooLock, AccumuloServerContext context) {
    if (extent.isRootTablet()) {
        if (newDir != null) {
            throw new IllegalArgumentException("newDir not expected for " + extent);
        }
        if (!filesToRemove.isEmpty() || !filesToAdd.isEmpty()) {
            throw new IllegalArgumentException("files not expected for " + extent);
        }
        // Add new log entries before removing old ones so a process death in
        // between cannot leave the root tablet without its WAL references.
        for (LogEntry newEntry : logsToAdd) {
            addRootLogEntry(context, zooLock, newEntry);
        }
        removeUnusedWALEntries(context, extent, logsToRemove, zooLock);
        return;
    }
    // Non-root tablets: fold every change into a single metadata mutation.
    Mutation mutation = new Mutation(extent.getMetadataEntry());
    for (LogEntry oldEntry : logsToRemove) {
        mutation.putDelete(oldEntry.getColumnFamily(), oldEntry.getColumnQualifier());
    }
    for (LogEntry newEntry : logsToAdd) {
        mutation.put(newEntry.getColumnFamily(), newEntry.getColumnQualifier(), newEntry.getValue());
    }
    for (FileRef oldFile : filesToRemove) {
        mutation.putDelete(DataFileColumnFamily.NAME, oldFile.meta());
    }
    for (Entry<FileRef, DataFileValue> addition : filesToAdd.entrySet()) {
        mutation.put(DataFileColumnFamily.NAME, addition.getKey().meta(), new Value(addition.getValue().encode()));
    }
    if (newDir != null) {
        ServerColumnFamily.DIRECTORY_COLUMN.put(mutation, new Value(newDir.getBytes(UTF_8)));
    }
    update(context, mutation, extent);
}
Also used : DataFileValue(org.apache.accumulo.core.metadata.schema.DataFileValue) FileRef(org.apache.accumulo.server.fs.FileRef) Value(org.apache.accumulo.core.data.Value) DataFileValue(org.apache.accumulo.core.metadata.schema.DataFileValue) Mutation(org.apache.accumulo.core.data.Mutation) LogEntry(org.apache.accumulo.core.tabletserver.log.LogEntry)

Example 37 with DataFileValue

Example usage of org.apache.accumulo.core.metadata.schema.DataFileValue from the Apache Accumulo project.

Source: the getFileAndLogEntries method of the MetadataTableUtil class.

/**
 * Collects the write-ahead log entries and data files (with their recorded
 * sizes) for the given tablet.
 *
 * <p>For the root tablet the logs come from ZooKeeper and the files from a
 * filesystem listing of the root tablet directory (sizes are reported as
 * zero). For all other tablets both are read from the appropriate metadata
 * table row; an unexpected row or column family aborts with a
 * {@link RuntimeException}.
 */
public static Pair<List<LogEntry>, SortedMap<FileRef, DataFileValue>> getFileAndLogEntries(ClientContext context, KeyExtent extent) throws KeeperException, InterruptedException, IOException {
    ArrayList<LogEntry> logs = new ArrayList<>();
    TreeMap<FileRef, DataFileValue> fileSizes = new TreeMap<>();
    VolumeManager volumeManager = VolumeManagerImpl.get();
    if (extent.isRootTablet()) {
        getRootLogEntries(logs);
        Path rootTabletDir = new Path(getRootTabletDir());
        for (FileStatus status : volumeManager.listStatus(rootTabletDir)) {
            String pathString = status.getPath().toString();
            if (pathString.endsWith("_tmp")) {
                // skip in-progress files
                continue;
            }
            // Sizes are not tracked for root tablet files; report zeros.
            fileSizes.put(new FileRef(pathString, status.getPath()), new DataFileValue(0, 0));
        }
    } else {
        Table.ID tableToScan = extent.isMeta() ? RootTable.ID : MetadataTable.ID;
        try (Scanner scanner = new ScannerImpl(context, tableToScan, Authorizations.EMPTY)) {
            scanner.fetchColumnFamily(LogColumnFamily.NAME);
            scanner.fetchColumnFamily(DataFileColumnFamily.NAME);
            scanner.setRange(extent.toMetadataRange());
            for (Entry<Key, Value> entry : scanner) {
                Key key = entry.getKey();
                if (!key.getRow().equals(extent.getMetadataEntry())) {
                    throw new RuntimeException("Unexpected row " + key.getRow() + " expected " + extent.getMetadataEntry());
                }
                if (key.getColumnFamily().equals(LogColumnFamily.NAME)) {
                    logs.add(LogEntry.fromKeyValue(key, entry.getValue()));
                } else if (key.getColumnFamily().equals(DataFileColumnFamily.NAME)) {
                    fileSizes.put(new FileRef(volumeManager, key), new DataFileValue(entry.getValue().get()));
                } else {
                    throw new RuntimeException("Unexpected col fam " + key.getColumnFamily());
                }
            }
        }
    }
    return new Pair<>(logs, fileSizes);
}
Also used : Path(org.apache.hadoop.fs.Path) VolumeManager(org.apache.accumulo.server.fs.VolumeManager) IsolatedScanner(org.apache.accumulo.core.client.IsolatedScanner) Scanner(org.apache.accumulo.core.client.Scanner) DataFileValue(org.apache.accumulo.core.metadata.schema.DataFileValue) FileStatus(org.apache.hadoop.fs.FileStatus) MetadataTable(org.apache.accumulo.core.metadata.MetadataTable) RootTable(org.apache.accumulo.core.metadata.RootTable) Table(org.apache.accumulo.core.client.impl.Table) ReplicationTable(org.apache.accumulo.core.replication.ReplicationTable) ArrayList(java.util.ArrayList) TreeMap(java.util.TreeMap) ScannerImpl(org.apache.accumulo.core.client.impl.ScannerImpl) FileRef(org.apache.accumulo.server.fs.FileRef) Value(org.apache.accumulo.core.data.Value) DataFileValue(org.apache.accumulo.core.metadata.schema.DataFileValue) LogEntry(org.apache.accumulo.core.tabletserver.log.LogEntry) Key(org.apache.accumulo.core.data.Key) PartialKey(org.apache.accumulo.core.data.PartialKey) Pair(org.apache.accumulo.core.util.Pair)

Example 38 with DataFileValue

Example usage of org.apache.accumulo.core.metadata.schema.DataFileValue from the Apache Accumulo project.

Source: the testBulkFileCheck method of the MetadataConstraintsTest class.

@Test
/**
 * Exercises MetadataConstraints' bulk-load checks. Per the cases below: a
 * bulk-load flag whose transaction id is not active, or an active flag with no
 * matching data file entry, yields violation code 8 — while mutations that
 * look like a split (directory column), a load (location column), or a flag
 * deletion pass without violations.
 */
@Test
public void testBulkFileCheck() {
    MetadataConstraints mc = new TestMetadataConstraints();
    Mutation m;

    // inactive txid
    m = newTablet();
    addLoadFlag(m, "/someFile", "12345");
    addDataFile(m, "/someFile");
    assertBulkViolation(mc.check(null, m));

    // txid that throws exception
    m = newTablet();
    addLoadFlag(m, "/someFile", "9");
    addDataFile(m, "/someFile");
    assertBulkViolation(mc.check(null, m));

    // active txid w/ file
    m = newTablet();
    addLoadFlag(m, "/someFile", "5");
    addDataFile(m, "/someFile");
    assertNull(mc.check(null, m));

    // active txid w/o file
    m = newTablet();
    addLoadFlag(m, "/someFile", "5");
    assertBulkViolation(mc.check(null, m));

    // two active txids w/ files
    m = newTablet();
    addLoadFlag(m, "/someFile", "5");
    addDataFile(m, "/someFile");
    addLoadFlag(m, "/someFile2", "7");
    addDataFile(m, "/someFile2");
    assertBulkViolation(mc.check(null, m));

    // two files w/ one active txid
    m = newTablet();
    addLoadFlag(m, "/someFile", "5");
    addDataFile(m, "/someFile");
    addLoadFlag(m, "/someFile2", "5");
    addDataFile(m, "/someFile2");
    assertNull(mc.check(null, m));

    // two loaded w/ one active txid and one file
    m = newTablet();
    addLoadFlag(m, "/someFile", "5");
    addDataFile(m, "/someFile");
    addLoadFlag(m, "/someFile2", "5");
    assertBulkViolation(mc.check(null, m));

    // active txid, mutation that looks like split
    m = newTablet();
    addLoadFlag(m, "/someFile", "5");
    TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.put(m, new Value("/t1".getBytes()));
    assertNull(mc.check(null, m));

    // inactive txid, mutation that looks like split
    m = newTablet();
    addLoadFlag(m, "/someFile", "12345");
    TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.put(m, new Value("/t1".getBytes()));
    assertNull(mc.check(null, m));

    // active txid, mutation that looks like a load
    m = newTablet();
    addLoadFlag(m, "/someFile", "5");
    m.put(TabletsSection.CurrentLocationColumnFamily.NAME, new Text("789"), new Value("127.0.0.1:9997".getBytes()));
    assertNull(mc.check(null, m));

    // inactive txid, mutation that looks like a load
    m = newTablet();
    addLoadFlag(m, "/someFile", "12345");
    m.put(TabletsSection.CurrentLocationColumnFamily.NAME, new Text("789"), new Value("127.0.0.1:9997".getBytes()));
    assertNull(mc.check(null, m));

    // deleting a load flag
    m = newTablet();
    m.putDelete(TabletsSection.BulkFileColumnFamily.NAME, new Text("/someFile"));
    assertNull(mc.check(null, m));
}

// Starts a mutation against the single tablet row ("0;foo") used by every case.
private static Mutation newTablet() {
    return new Mutation(new Text("0;foo"));
}

// Adds a bulk-load flag; the value carries the stringified transaction id.
private static void addLoadFlag(Mutation m, String file, String txid) {
    m.put(TabletsSection.BulkFileColumnFamily.NAME, new Text(file), new Value(txid.getBytes()));
}

// Adds a data file entry with a minimal size/entry count.
private static void addDataFile(Mutation m, String file) {
    m.put(DataFileColumnFamily.NAME, new Text(file), new DataFileValue(1, 1).encodeAsValue());
}

// Asserts exactly one violation carrying the bulk-load violation code (8).
private static void assertBulkViolation(List<Short> violations) {
    assertNotNull(violations);
    assertEquals(1, violations.size());
    assertEquals(Short.valueOf((short) 8), violations.get(0));
}
Also used : DataFileValue(org.apache.accumulo.core.metadata.schema.DataFileValue) DataFileValue(org.apache.accumulo.core.metadata.schema.DataFileValue) Value(org.apache.accumulo.core.data.Value) Text(org.apache.hadoop.io.Text) Mutation(org.apache.accumulo.core.data.Mutation) Test(org.junit.Test)

Example 39 with DataFileValue

Example usage of org.apache.accumulo.core.metadata.schema.DataFileValue from the Apache Accumulo project.

Source: the testBasic method of the MetadataBulkLoadFilterTest class.

@Test
/**
 * Exercises the bulk-load filter as it would run during a (non-full) major
 * compaction: entries listed below as "must drop" are removed, everything
 * else passes through unchanged.
 */
@Test
public void testBasic() throws IOException {
    TreeMap<Key, Value> input = new TreeMap<>();
    TreeMap<Key, Value> expected = new TreeMap<>();
    // Entries the filter must keep.
    put(input, "2;m", TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN, "/t1");
    put(input, "2;m", DataFileColumnFamily.NAME, "/t1/file1", new DataFileValue(1, 1).encodeAsString());
    put(input, "2;m", TabletsSection.BulkFileColumnFamily.NAME, "/t1/file1", "5");
    put(input, "2;m", TabletsSection.BulkFileColumnFamily.NAME, "/t1/file3", "7");
    put(input, "2;m", TabletsSection.BulkFileColumnFamily.NAME, "/t1/file4", "9");
    put(input, "2<", TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN, "/t2");
    put(input, "2<", DataFileColumnFamily.NAME, "/t2/file2", new DataFileValue(1, 1).encodeAsString());
    put(input, "2<", TabletsSection.BulkFileColumnFamily.NAME, "/t2/file6", "5");
    put(input, "2<", TabletsSection.BulkFileColumnFamily.NAME, "/t2/file7", "7");
    put(input, "2<", TabletsSection.BulkFileColumnFamily.NAME, "/t2/file8", "9");
    put(input, "2<", TabletsSection.BulkFileColumnFamily.NAME, "/t2/fileC", null);
    expected.putAll(input);
    // Entries the filter must drop.
    put(input, "2;m", TabletsSection.BulkFileColumnFamily.NAME, "/t1/file5", "8");
    put(input, "2<", TabletsSection.BulkFileColumnFamily.NAME, "/t2/file9", "8");
    put(input, "2<", TabletsSection.BulkFileColumnFamily.NAME, "/t2/fileA", "2");
    TestMetadataBulkLoadFilter filter = new TestMetadataBulkLoadFilter();
    // Environment reporting a non-full major compaction.
    filter.init(new SortedMapIterator(input), new HashMap<>(), new BaseIteratorEnvironment() {

        @Override
        public boolean isFullMajorCompaction() {
            return false;
        }

        @Override
        public IteratorScope getIteratorScope() {
            return IteratorScope.majc;
        }
    });
    filter.seek(new Range(), new ArrayList<>(), false);
    // Drain the filtered stream and compare against the expected survivors.
    TreeMap<Key, Value> actual = new TreeMap<>();
    for (; filter.hasTop(); filter.next()) {
        actual.put(filter.getTopKey(), filter.getTopValue());
    }
    Assert.assertEquals(expected, actual);
}
Also used : DataFileValue(org.apache.accumulo.core.metadata.schema.DataFileValue) TreeMap(java.util.TreeMap) SortedMapIterator(org.apache.accumulo.core.iterators.SortedMapIterator) Range(org.apache.accumulo.core.data.Range) BaseIteratorEnvironment(org.apache.accumulo.core.client.impl.BaseIteratorEnvironment) DataFileValue(org.apache.accumulo.core.metadata.schema.DataFileValue) Value(org.apache.accumulo.core.data.Value) IteratorScope(org.apache.accumulo.core.iterators.IteratorUtil.IteratorScope) Key(org.apache.accumulo.core.data.Key) Test(org.junit.Test)

Example 40 with DataFileValue

Example usage of org.apache.accumulo.core.metadata.schema.DataFileValue from the Apache Accumulo project.

Source: the testMerge method of the CloneIT class.

// Verifies checkClone detects a source tablet that was deleted (merged away)
// while a clone was in progress and aborts with TabletDeletedException.
@Test
public void testMerge() throws Exception {
    Connector conn = getConnector();
    String tableName = getUniqueNames(1)[0];
    conn.tableOperations().create(tableName);
    // Seed two tablets for source table "0".
    BatchWriter bw1 = conn.createBatchWriter(tableName, new BatchWriterConfig());
    bw1.addMutation(createTablet("0", "m", null, "/d1", "/d1/file1"));
    bw1.addMutation(createTablet("0", null, "m", "/d2", "/d2/file2"));
    bw1.flush();
    BatchWriter bw2 = conn.createBatchWriter(tableName, new BatchWriterConfig());
    MetadataTableUtil.initializeClone(tableName, Table.ID.of("0"), Table.ID.of("1"), conn, bw2);
    // Simulate a merge: delete the first tablet and fold its file into the
    // now-sole remaining tablet.
    bw1.addMutation(deleteTablet("0", "m", null, "/d1", "/d1/file1"));
    Mutation mut = createTablet("0", null, null, "/d2", "/d2/file2");
    mut.put(DataFileColumnFamily.NAME.toString(), "/d1/file1", new DataFileValue(10, 200).encodeAsString());
    bw1.addMutation(mut);
    bw1.flush();
    try {
        MetadataTableUtil.checkClone(tableName, Table.ID.of("0"), Table.ID.of("1"), conn, bw2);
        throw new AssertionError("expected TabletDeletedException when source tablet is merged away");
    } catch (TabletIterator.TabletDeletedException expected) {
        // expected: the clone check must notice the deleted source tablet
    }
}
Also used : Connector(org.apache.accumulo.core.client.Connector) DataFileValue(org.apache.accumulo.core.metadata.schema.DataFileValue) BatchWriterConfig(org.apache.accumulo.core.client.BatchWriterConfig) BatchWriter(org.apache.accumulo.core.client.BatchWriter) Mutation(org.apache.accumulo.core.data.Mutation) TabletIterator(org.apache.accumulo.server.util.TabletIterator) Test(org.junit.Test)

Aggregations

DataFileValue (org.apache.accumulo.core.metadata.schema.DataFileValue)44 FileRef (org.apache.accumulo.server.fs.FileRef)32 Value (org.apache.accumulo.core.data.Value)18 Text (org.apache.hadoop.io.Text)14 KeyExtent (org.apache.accumulo.core.data.impl.KeyExtent)13 Mutation (org.apache.accumulo.core.data.Mutation)12 Test (org.junit.Test)12 ArrayList (java.util.ArrayList)9 Key (org.apache.accumulo.core.data.Key)9 TreeMap (java.util.TreeMap)8 HashMap (java.util.HashMap)7 HashSet (java.util.HashSet)6 Scanner (org.apache.accumulo.core.client.Scanner)6 IOException (java.io.IOException)5 VolumeManager (org.apache.accumulo.server.fs.VolumeManager)5 ScannerImpl (org.apache.accumulo.core.client.impl.ScannerImpl)4 Pair (org.apache.accumulo.core.util.Pair)4 TServerInstance (org.apache.accumulo.server.master.state.TServerInstance)4 Entry (java.util.Map.Entry)3 CopyOnWriteArrayList (java.util.concurrent.CopyOnWriteArrayList)3