Search in sources :

Example 86 with TableId

use of org.apache.accumulo.core.data.TableId in project accumulo by apache.

From the class TableZooHelper, the method _getTableIdDetectNamespaceNotFound:

/**
 * Looks up a table ID in ZooKeeper by table name. On a cache miss, the table-list cache is
 * cleared and the lookup retried once; if the name is still unknown, the exception thrown
 * distinguishes a missing table from a missing namespace.
 *
 * @param tableName fully qualified table name to resolve
 * @return the resolved {@link TableId}
 * @throws TableNotFoundException if the namespace exists but the table does not
 * @throws NamespaceNotFoundException if the table's namespace does not exist
 */
public TableId _getTableIdDetectNamespaceNotFound(String tableName) throws NamespaceNotFoundException, TableNotFoundException {
    TableId tableId = getTableMap().getNameToIdMap().get(tableName);
    if (tableId != null) {
        return tableId;
    }
    // The table may exist even though the cached name->id map is stale;
    // refresh the cache and try the lookup exactly once more.
    clearTableListCache();
    tableId = getTableMap().getNameToIdMap().get(tableName);
    if (tableId != null) {
        return tableId;
    }
    // Still unresolved: report whether the table or its whole namespace is missing.
    String namespace = TableNameUtil.qualify(tableName).getFirst();
    if (Namespaces.getNameToIdMap(context).containsKey(namespace)) {
        throw new TableNotFoundException(null, tableName, null);
    }
    throw new NamespaceNotFoundException(null, namespace, null);
}
Also used : TableId(org.apache.accumulo.core.data.TableId) TableNotFoundException(org.apache.accumulo.core.client.TableNotFoundException) NamespaceNotFoundException(org.apache.accumulo.core.client.NamespaceNotFoundException)

Example 87 with TableId

use of org.apache.accumulo.core.data.TableId in project accumulo by apache.

From the class Merge, the method getSizeIterator:

/**
 * Streams the per-tablet data sizes for the given table over the range delimited by
 * {@code start} and {@code end}, by scanning the metadata table.
 *
 * @param client client used to resolve the table and scan metadata (must be a ClientContext)
 * @param tablename name of the table whose tablets are examined
 * @param start start row of the range of interest (exclusive end key semantics per KeyExtent)
 * @param end end row of the range of interest
 * @return an iterator of {@link Size} entries, one per tablet, each holding the tablet's
 *         extent and the sum of its data file sizes
 * @throws MergeException wrapping any failure during table-id resolution or metadata scanning
 */
protected Iterator<Size> getSizeIterator(AccumuloClient client, String tablename, Text start, Text end) throws MergeException {
    TabletsMetadata tabletsMeta;
    try {
        // Resolve the table and set up a metadata scan over the tablets in range,
        // fetching only the columns needed (file entries and previous-row markers).
        ClientContext context = (ClientContext) client;
        TableId tableId = context.getTableId(tablename);
        KeyExtent range = new KeyExtent(tableId, end, start);
        tabletsMeta = TabletsMetadata.builder(context).scanMetadataTable().overRange(range.toMetaRange()).fetch(FILES, PREV_ROW).build();
    } catch (Exception e) {
        // Boundary wrap: callers of this method only handle MergeException; the cause is kept.
        throw new MergeException(e);
    }
    return tabletsMeta.stream().map(tabletMeta -> {
        // Total bytes = sum of the sizes of every data file referenced by this tablet.
        long totalBytes = 0;
        for (DataFileValue dfv : tabletMeta.getFilesMap().values()) {
            totalBytes += dfv.getSize();
        }
        return new Size(tabletMeta.getExtent(), totalBytes);
    }).iterator();
}
Also used : TableId(org.apache.accumulo.core.data.TableId) TableId(org.apache.accumulo.core.data.TableId) DataFileValue(org.apache.accumulo.core.metadata.schema.DataFileValue) Parameter(com.beust.jcommander.Parameter) TabletsMetadata(org.apache.accumulo.core.metadata.schema.TabletsMetadata) LoggerFactory(org.slf4j.LoggerFactory) Text(org.apache.hadoop.io.Text) MetadataTable(org.apache.accumulo.core.metadata.MetadataTable) Accumulo(org.apache.accumulo.core.client.Accumulo) ArrayList(java.util.ArrayList) ConfigurationTypeHelper(org.apache.accumulo.core.conf.ConfigurationTypeHelper) IStringConverter(com.beust.jcommander.IStringConverter) PREV_ROW(org.apache.accumulo.core.metadata.schema.TabletMetadata.ColumnType.PREV_ROW) FILES(org.apache.accumulo.core.metadata.schema.TabletMetadata.ColumnType.FILES) Property(org.apache.accumulo.core.conf.Property) ClientOpts(org.apache.accumulo.core.cli.ClientOpts) Logger(org.slf4j.Logger) Iterator(java.util.Iterator) Span(io.opentelemetry.api.trace.Span) ClientContext(org.apache.accumulo.core.clientImpl.ClientContext) Scope(io.opentelemetry.context.Scope) KeyExtent(org.apache.accumulo.core.dataImpl.KeyExtent) AccumuloClient(org.apache.accumulo.core.client.AccumuloClient) AccumuloConfiguration(org.apache.accumulo.core.conf.AccumuloConfiguration) List(java.util.List) ConfigurationCopy(org.apache.accumulo.core.conf.ConfigurationCopy) TraceUtil(org.apache.accumulo.core.trace.TraceUtil) DataFileValue(org.apache.accumulo.core.metadata.schema.DataFileValue) TabletsMetadata(org.apache.accumulo.core.metadata.schema.TabletsMetadata) ClientContext(org.apache.accumulo.core.clientImpl.ClientContext) KeyExtent(org.apache.accumulo.core.dataImpl.KeyExtent)

Example 88 with TableId

use of org.apache.accumulo.core.data.TableId in project accumulo by apache.

From the class BulkSerializeTest, the method testRemap:

@Test
public void testRemap() throws Exception {
    TableId tid = TableId.of("3");
    SortedMap<KeyExtent, Bulk.Files> loadMapping = generateMapping(tid);

    // Build the expected post-rename mapping and the old->new name map:
    // every file name simply gains an "N" prefix.
    SortedMap<KeyExtent, Bulk.Files> expected = new TreeMap<>();
    Map<String, String> renames = new HashMap<>();
    for (Map.Entry<KeyExtent, Bulk.Files> entry : loadMapping.entrySet()) {
        Files renamed = new Files();
        for (FileInfo fi : entry.getValue()) {
            String newName = "N" + fi.name;
            renamed.add(new FileInfo(newName, fi.estSize, fi.estEntries));
            renames.put(fi.name, newName);
        }
        expected.put(entry.getKey(), renamed);
    }

    // Serialize both the rename map and the original load mapping into memory.
    ByteArrayOutputStream mappingBytes = new ByteArrayOutputStream();
    ByteArrayOutputStream renameBytes = new ByteArrayOutputStream();
    BulkSerialize.writeRenameMap(renames, "/some/dir", p -> renameBytes);
    BulkSerialize.writeLoadMapping(loadMapping, "/some/dir", p -> mappingBytes);

    // Serve each serialized blob back to BulkSerialize keyed on the file name it asks for.
    Input input = p -> {
        String fileName = p.getName();
        if (fileName.equals(Constants.BULK_LOAD_MAPPING)) {
            return new ByteArrayInputStream(mappingBytes.toByteArray());
        }
        if (fileName.equals(Constants.BULK_RENAME_FILE)) {
            return new ByteArrayInputStream(renameBytes.toByteArray());
        }
        throw new IllegalArgumentException("bad path " + p);
    };

    // Applying the rename map while reading must yield the renamed mapping.
    try (LoadMappingIterator lmi = BulkSerialize.getUpdatedLoadMapping("/some/dir", tid, input)) {
        SortedMap<KeyExtent, Bulk.Files> actual = new TreeMap<>();
        while (lmi.hasNext()) {
            Map.Entry<KeyExtent, Bulk.Files> e = lmi.next();
            actual.put(e.getKey(), e.getValue());
        }
        assertEquals(expected, actual);
    }
}
Also used : TableId(org.apache.accumulo.core.data.TableId) TableId(org.apache.accumulo.core.data.TableId) ByteArrayOutputStream(java.io.ByteArrayOutputStream) KeyExtent(org.apache.accumulo.core.dataImpl.KeyExtent) Text(org.apache.hadoop.io.Text) HashMap(java.util.HashMap) Constants(org.apache.accumulo.core.Constants) Test(org.junit.jupiter.api.Test) ByteArrayInputStream(java.io.ByteArrayInputStream) TreeMap(java.util.TreeMap) Map(java.util.Map) Input(org.apache.accumulo.core.clientImpl.bulk.BulkSerialize.Input) FileInfo(org.apache.accumulo.core.clientImpl.bulk.Bulk.FileInfo) Files(org.apache.accumulo.core.clientImpl.bulk.Bulk.Files) Assertions.assertEquals(org.junit.jupiter.api.Assertions.assertEquals) SortedMap(java.util.SortedMap) HashMap(java.util.HashMap) ByteArrayOutputStream(java.io.ByteArrayOutputStream) TreeMap(java.util.TreeMap) KeyExtent(org.apache.accumulo.core.dataImpl.KeyExtent) Input(org.apache.accumulo.core.clientImpl.bulk.BulkSerialize.Input) FileInfo(org.apache.accumulo.core.clientImpl.bulk.Bulk.FileInfo) ByteArrayInputStream(java.io.ByteArrayInputStream) Files(org.apache.accumulo.core.clientImpl.bulk.Bulk.Files) Test(org.junit.jupiter.api.Test)

Example 89 with TableId

use of org.apache.accumulo.core.data.TableId in project accumulo by apache.

From the class BulkSerializeTest, the method writeReadLoadMapping:

@Test
public void writeReadLoadMapping() throws Exception {
    TableId tid = TableId.of("3");
    SortedMap<KeyExtent, Bulk.Files> written = generateMapping(tid);

    // Serialize the generated mapping into an in-memory buffer...
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    BulkSerialize.writeLoadMapping(written, "/some/dir", p -> out);

    // ...then read it back; the write/read round trip must be lossless.
    ByteArrayInputStream in = new ByteArrayInputStream(out.toByteArray());
    try (LoadMappingIterator lmi = BulkSerialize.readLoadMapping("/some/dir", tid, p -> in)) {
        SortedMap<KeyExtent, Bulk.Files> read = new TreeMap<>();
        while (lmi.hasNext()) {
            Map.Entry<KeyExtent, Bulk.Files> e = lmi.next();
            read.put(e.getKey(), e.getValue());
        }
        assertEquals(written, read);
    }
}
Also used : TableId(org.apache.accumulo.core.data.TableId) ByteArrayInputStream(java.io.ByteArrayInputStream) ByteArrayOutputStream(java.io.ByteArrayOutputStream) Files(org.apache.accumulo.core.clientImpl.bulk.Bulk.Files) TreeMap(java.util.TreeMap) KeyExtent(org.apache.accumulo.core.dataImpl.KeyExtent) Test(org.junit.jupiter.api.Test)

Example 90 with TableId

use of org.apache.accumulo.core.data.TableId in project accumulo by apache.

From the class ReplicationSchemaTest, the method extractTableId:

@Test
public void extractTableId() {
    // A status-section key carries the table ID in its column qualifier;
    // StatusSection.getTableId must recover it from the key.
    TableId expected = TableId.of("1");
    Key key = new Key(new Text("foo"), StatusSection.NAME, new Text(expected.canonical()));
    assertEquals(expected, StatusSection.getTableId(key));
}
Also used : TableId(org.apache.accumulo.core.data.TableId) Text(org.apache.hadoop.io.Text) Key(org.apache.accumulo.core.data.Key) Test(org.junit.jupiter.api.Test)

Aggregations

TableId (org.apache.accumulo.core.data.TableId)169 Text (org.apache.hadoop.io.Text)64 HashMap (java.util.HashMap)55 KeyExtent (org.apache.accumulo.core.dataImpl.KeyExtent)55 ArrayList (java.util.ArrayList)45 Test (org.junit.Test)43 TableNotFoundException (org.apache.accumulo.core.client.TableNotFoundException)41 Map (java.util.Map)37 Key (org.apache.accumulo.core.data.Key)36 AccumuloClient (org.apache.accumulo.core.client.AccumuloClient)34 HashSet (java.util.HashSet)31 AccumuloSecurityException (org.apache.accumulo.core.client.AccumuloSecurityException)31 Value (org.apache.accumulo.core.data.Value)31 IOException (java.io.IOException)28 Scanner (org.apache.accumulo.core.client.Scanner)28 AccumuloException (org.apache.accumulo.core.client.AccumuloException)27 Mutation (org.apache.accumulo.core.data.Mutation)27 List (java.util.List)26 Range (org.apache.accumulo.core.data.Range)24 BatchWriter (org.apache.accumulo.core.client.BatchWriter)23