Example 46 with KeyExtent

Use of org.apache.accumulo.core.dataImpl.KeyExtent in project accumulo by apache.

From class PopulateMetadata, method call:

@Override
public Repo<Manager> call(long tid, Manager env) throws Exception {
    KeyExtent extent = new KeyExtent(tableInfo.getTableId(), null, null);
    MetadataTableUtil.addTablet(extent, ServerColumnFamily.DEFAULT_TABLET_DIR_NAME, env.getContext(), tableInfo.getTimeType(), env.getManagerLock());
    if (tableInfo.getInitialSplitSize() > 0) {
        SortedSet<Text> splits = Utils.getSortedSetFromFile(env, tableInfo.getSplitPath(), true);
        SortedSet<Text> dirs = Utils.getSortedSetFromFile(env, tableInfo.getSplitDirsPath(), false);
        Map<Text, Text> splitDirMap = createSplitDirectoryMap(splits, dirs);
        try (BatchWriter bw = env.getContext().createBatchWriter(MetadataTable.NAME)) {
            writeSplitsToMetadataTable(env.getContext(), tableInfo.getTableId(), splits, splitDirMap, tableInfo.getTimeType(), env.getManagerLock(), bw);
        }
    }
    return new FinishCreateTable(tableInfo);
}
Also used : Text(org.apache.hadoop.io.Text) BatchWriter(org.apache.accumulo.core.client.BatchWriter) KeyExtent(org.apache.accumulo.core.dataImpl.KeyExtent)
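
For context, passing null for both the end row and the previous end row yields an extent spanning the table's entire key range, i.e. the single default tablet of a table with no splits. A minimal sketch of constructing such an extent (the table id "42" is a hypothetical placeholder; toMetaRow and toMetaRange are the same KeyExtent methods used in the examples below):

import org.apache.accumulo.core.data.TableId;
import org.apache.accumulo.core.dataImpl.KeyExtent;

public class WholeTableExtentExample {
    public static void main(String[] args) {
        // null end row and null prev end row: the extent covers the whole table,
        // matching the single default tablet of a freshly created table
        KeyExtent extent = new KeyExtent(TableId.of("42"), null, null);
        System.out.println(extent.toMetaRow());   // this extent's row in the metadata table
        System.out.println(extent.toMetaRange()); // metadata range covering this extent
    }
}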

Example 47 with KeyExtent

Use of org.apache.accumulo.core.dataImpl.KeyExtent in project accumulo by apache.

From class BulkFailureIT, method runTest:

/**
 * This test verifies two things. First, it ensures that once a bulk imported file has been
 * compacted away, further import requests for it are ignored. Second, it ensures that after the
 * bulk import transaction is canceled, import requests fail. The public API for bulk import
 * cannot be used for this test; internal (non-public) RPCs are issued and ZooKeeper state is
 * manipulated directly. This is the only way to interleave compactions with multiple, duplicate
 * import RPC requests.
 */
protected void runTest(String table, long fateTxid, Loader loader) throws Exception {
    try (AccumuloClient c = Accumulo.newClient().from(getClientProps()).build()) {
        SortedMap<Key, Value> testData = createTestData();
        FileSystem fs = getCluster().getFileSystem();
        String testFile = createTestFile(fateTxid, testData, fs);
        c.tableOperations().create(table);
        String tableId = c.tableOperations().tableIdMap().get(table);
        // Table has no splits, so this extent corresponds to the table's single tablet
        KeyExtent extent = new KeyExtent(TableId.of(tableId), null, null);
        ServerContext asCtx = getServerContext();
        ZooArbitrator.start(asCtx, Constants.BULK_ARBITRATOR_TYPE, fateTxid);
        VolumeManager vm = asCtx.getVolumeManager();
        // move the file into a directory for the table and rename the file to something unique
        String bulkDir = BulkImport.prepareBulkImport(asCtx, vm, testFile, TableId.of(tableId), fateTxid);
        // determine the file's new name and path
        FileStatus status = fs.listStatus(new Path(bulkDir))[0];
        Path bulkLoadPath = fs.makeQualified(status.getPath());
        // Directly ask the tablet to load the file.
        loader.load(fateTxid, asCtx, extent, bulkLoadPath, status.getLen(), false);
        assertEquals(Set.of(bulkLoadPath), getFiles(c, extent));
        assertEquals(Set.of(bulkLoadPath), getLoaded(c, extent));
        assertEquals(testData, readTable(table, c));
        // Compact the bulk imported file. Subsequent requests to load the file should be ignored.
        c.tableOperations().compact(table, new CompactionConfig().setWait(true));
        Set<Path> tabletFiles = getFiles(c, extent);
        assertFalse(tabletFiles.contains(bulkLoadPath));
        assertEquals(1, tabletFiles.size());
        assertEquals(Set.of(bulkLoadPath), getLoaded(c, extent));
        assertEquals(testData, readTable(table, c));
        // this request should be ignored by the tablet
        loader.load(fateTxid, asCtx, extent, bulkLoadPath, status.getLen(), false);
        assertEquals(tabletFiles, getFiles(c, extent));
        assertEquals(Set.of(bulkLoadPath), getLoaded(c, extent));
        assertEquals(testData, readTable(table, c));
        // this is done to ensure the tablet reads the load flags from the metadata table when it
        // loads
        c.tableOperations().offline(table, true);
        c.tableOperations().online(table, true);
        // this request should be ignored by the tablet
        loader.load(fateTxid, asCtx, extent, bulkLoadPath, status.getLen(), false);
        assertEquals(tabletFiles, getFiles(c, extent));
        assertEquals(Set.of(bulkLoadPath), getLoaded(c, extent));
        assertEquals(testData, readTable(table, c));
        // After this, all load requests should fail.
        ZooArbitrator.stop(asCtx, Constants.BULK_ARBITRATOR_TYPE, fateTxid);
        c.securityOperations().grantTablePermission(c.whoami(), MetadataTable.NAME, TablePermission.WRITE);
        try (BatchDeleter bd = c.createBatchDeleter(MetadataTable.NAME, Authorizations.EMPTY, 1)) {
            bd.setRanges(Collections.singleton(extent.toMetaRange()));
            bd.fetchColumnFamily(BulkFileColumnFamily.NAME);
            bd.delete();
        }
        loader.load(fateTxid, asCtx, extent, bulkLoadPath, status.getLen(), true);
        assertEquals(tabletFiles, getFiles(c, extent));
        assertEquals(Set.of(), getLoaded(c, extent));
        assertEquals(testData, readTable(table, c));
    }
}
Also used : AccumuloClient(org.apache.accumulo.core.client.AccumuloClient) Path(org.apache.hadoop.fs.Path) VolumeManager(org.apache.accumulo.server.fs.VolumeManager) BatchDeleter(org.apache.accumulo.core.client.BatchDeleter) FileStatus(org.apache.hadoop.fs.FileStatus) KeyExtent(org.apache.accumulo.core.dataImpl.KeyExtent) ServerContext(org.apache.accumulo.server.ServerContext) FileSystem(org.apache.hadoop.fs.FileSystem) Value(org.apache.accumulo.core.data.Value) CompactionConfig(org.apache.accumulo.core.client.admin.CompactionConfig) Key(org.apache.accumulo.core.data.Key)
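
Loader is a test-local abstraction; its shape can be inferred from the call sites above. A hedged sketch of what such an interface might look like (parameter names are assumptions, only the argument types come from the calls shown):

import org.apache.accumulo.core.dataImpl.KeyExtent;
import org.apache.accumulo.server.ServerContext;
import org.apache.hadoop.fs.Path;

// Inferred from loader.load(fateTxid, asCtx, extent, bulkLoadPath, status.getLen(), expectFailure);
// the final flag presumably toggles whether the load attempt is expected to fail.
interface Loader {
    void load(long txid, ServerContext context, KeyExtent extent, Path path, long size,
            boolean expectFailure) throws Exception;
}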

Example 48 with KeyExtent

Use of org.apache.accumulo.core.dataImpl.KeyExtent in project accumulo by apache.

From class CollectTabletStats, method findTablets:

private static List<KeyExtent> findTablets(ClientContext context, boolean selectLocalTablets, String tableName, SortedMap<KeyExtent, String> tabletLocations) throws Exception {
    TableId tableId = context.getTableId(tableName);
    MetadataServicer.forTableId(context, tableId).getTabletLocations(tabletLocations);
    InetAddress localaddress = InetAddress.getLocalHost();
    List<KeyExtent> candidates = new ArrayList<>();
    for (Entry<KeyExtent, String> entry : tabletLocations.entrySet()) {
        String loc = entry.getValue();
        if (loc != null) {
            boolean isLocal = HostAndPort.fromString(loc).getHost().equals(localaddress.getHostName());
            if (selectLocalTablets && isLocal) {
                candidates.add(entry.getKey());
            } else if (!selectLocalTablets && !isLocal) {
                candidates.add(entry.getKey());
            }
        }
    }
    return candidates;
}
Also used : TableId(org.apache.accumulo.core.data.TableId) ArrayList(java.util.ArrayList) InetAddress(java.net.InetAddress) KeyExtent(org.apache.accumulo.core.dataImpl.KeyExtent)
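
The if/else-if pair in the loop reduces to a single equality test between a tablet's locality and the selectLocalTablets flag. A behavior-equivalent sketch using streams (localHostName is an assumed parameter standing in for the InetAddress lookup, and the HostAndPort import path is taken from Accumulo's util package):

import java.util.List;
import java.util.Map.Entry;
import java.util.SortedMap;
import java.util.stream.Collectors;
import org.apache.accumulo.core.dataImpl.KeyExtent;
import org.apache.accumulo.core.util.HostAndPort;

class TabletSelection {
    // Keep a tablet iff it has a location and its locality matches the flag.
    static List<KeyExtent> select(SortedMap<KeyExtent, String> tabletLocations,
            boolean selectLocalTablets, String localHostName) {
        return tabletLocations.entrySet().stream()
            .filter(e -> e.getValue() != null)
            .filter(e -> HostAndPort.fromString(e.getValue()).getHost().equals(localHostName)
                == selectLocalTablets)
            .map(Entry::getKey)
            .collect(Collectors.toList());
    }
}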

Example 49 with KeyExtent

Use of org.apache.accumulo.core.dataImpl.KeyExtent in project accumulo by apache.

From class InputConfigurator, method binOffline:

public static Map<String, Map<KeyExtent, List<Range>>> binOffline(TableId tableId, List<Range> ranges, ClientContext context) throws AccumuloException, TableNotFoundException {
    Map<String, Map<KeyExtent, List<Range>>> binnedRanges = new HashMap<>();
    if (context.getTableState(tableId) != TableState.OFFLINE) {
        context.clearTableListCache();
        if (context.getTableState(tableId) != TableState.OFFLINE) {
            throw new AccumuloException("Table is online tableId:" + tableId + " cannot scan table in offline mode ");
        }
    }
    for (Range range : ranges) {
        Text startRow = range.getStartKey() != null ? range.getStartKey().getRow() : new Text();
        Range metadataRange = new Range(new KeyExtent(tableId, startRow, null).toMetaRow(), true, null, false);
        Scanner scanner = context.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
        TabletColumnFamily.PREV_ROW_COLUMN.fetch(scanner);
        scanner.fetchColumnFamily(LastLocationColumnFamily.NAME);
        scanner.fetchColumnFamily(CurrentLocationColumnFamily.NAME);
        scanner.fetchColumnFamily(FutureLocationColumnFamily.NAME);
        scanner.setRange(metadataRange);
        RowIterator rowIter = new RowIterator(scanner);
        KeyExtent lastExtent = null;
        while (rowIter.hasNext()) {
            Iterator<Map.Entry<Key, Value>> row = rowIter.next();
            String last = "";
            KeyExtent extent = null;
            String location = null;
            while (row.hasNext()) {
                Map.Entry<Key, Value> entry = row.next();
                Key key = entry.getKey();
                if (key.getColumnFamily().equals(LastLocationColumnFamily.NAME)) {
                    last = entry.getValue().toString();
                }
                if (key.getColumnFamily().equals(CurrentLocationColumnFamily.NAME) || key.getColumnFamily().equals(FutureLocationColumnFamily.NAME)) {
                    location = entry.getValue().toString();
                }
                if (TabletColumnFamily.PREV_ROW_COLUMN.hasColumns(key)) {
                    extent = KeyExtent.fromMetaPrevRow(entry);
                }
            }
            if (location != null) {
                return null;
            }
            if (!extent.tableId().equals(tableId)) {
                throw new AccumuloException("Saw unexpected table Id " + tableId + " " + extent);
            }
            if (lastExtent != null && !extent.isPreviousExtent(lastExtent)) {
                throw new AccumuloException(" " + lastExtent + " is not previous extent " + extent);
            }
            binnedRanges.computeIfAbsent(last, k -> new HashMap<>()).computeIfAbsent(extent, k -> new ArrayList<>()).add(range);
            if (extent.endRow() == null || range.afterEndKey(new Key(extent.endRow()).followingKey(PartialKey.ROW))) {
                break;
            }
            lastExtent = extent;
        }
    }
    return binnedRanges;
}
Also used : TableId(org.apache.accumulo.core.data.TableId) Text(org.apache.hadoop.io.Text) AccumuloException(org.apache.accumulo.core.client.AccumuloException) TableNotFoundException(org.apache.accumulo.core.client.TableNotFoundException) TableState(org.apache.accumulo.core.manager.state.tables.TableState) ClientContext(org.apache.accumulo.core.clientImpl.ClientContext) MetadataTable(org.apache.accumulo.core.metadata.MetadataTable) Authorizations(org.apache.accumulo.core.security.Authorizations) Scanner(org.apache.accumulo.core.client.Scanner) RowIterator(org.apache.accumulo.core.client.RowIterator) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) Map(java.util.Map) List(java.util.List) Iterator(java.util.Iterator) Range(org.apache.accumulo.core.data.Range) KeyExtent(org.apache.accumulo.core.dataImpl.KeyExtent) Key(org.apache.accumulo.core.data.Key) PartialKey(org.apache.accumulo.core.data.PartialKey) Value(org.apache.accumulo.core.data.Value) TabletColumnFamily(org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.TabletColumnFamily) CurrentLocationColumnFamily(org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.CurrentLocationColumnFamily) FutureLocationColumnFamily(org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.FutureLocationColumnFamily) LastLocationColumnFamily(org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.LastLocationColumnFamily)
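
The returned structure maps a tablet's last known location to its extent and the ranges falling in that extent; a null return signals that some tablet still had a current or future location assigned, so the caller should retry once the table is fully offline. A hedged usage sketch (the InputConfigurator package path is an assumption based on the imports above, and the printing is purely illustrative):

import java.util.List;
import java.util.Map;
import org.apache.accumulo.core.client.AccumuloException;
import org.apache.accumulo.core.client.TableNotFoundException;
import org.apache.accumulo.core.clientImpl.ClientContext;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.data.TableId;
import org.apache.accumulo.core.dataImpl.KeyExtent;
import org.apache.accumulo.hadoopImpl.mapreduce.lib.InputConfigurator;

class BinOfflineUsage {
    static void printBinning(TableId tableId, List<Range> ranges, ClientContext context)
            throws AccumuloException, TableNotFoundException {
        // location -> (extent -> ranges binned to that extent)
        Map<String, Map<KeyExtent, List<Range>>> binned =
            InputConfigurator.binOffline(tableId, ranges, context);
        if (binned == null) {
            // a tablet still had a location assigned: table not fully offline yet, retry later
            return;
        }
        binned.forEach((lastLocation, extents) -> extents.forEach(
            (extent, rangeList) -> System.out.println(lastLocation + " " + extent + " " + rangeList)));
    }
}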

Example 50 with KeyExtent

Use of org.apache.accumulo.core.dataImpl.KeyExtent in project accumulo by apache.

From class BulkImporter, method processFailures:

private Set<Path> processFailures(Map<Path, List<KeyExtent>> completeFailures) {
    // we should check if map file was not assigned to any tablets, then we
    // should just move it; not currently being done?
    if (completeFailures.isEmpty()) {
        return Collections.emptySet();
    }
    log.debug("The following map files failed:");
    for (Entry<Path, List<KeyExtent>> entry : completeFailures.entrySet()) {
        for (KeyExtent keyExtent : entry.getValue()) {
            log.debug("\t{} -> {}", entry.getKey(), keyExtent);
        }
    }
    return Collections.emptySet();
}
Also used : Path(org.apache.hadoop.fs.Path) Entry(java.util.Map.Entry) List(java.util.List) ArrayList(java.util.ArrayList) TKeyExtent(org.apache.accumulo.core.dataImpl.thrift.TKeyExtent) KeyExtent(org.apache.accumulo.core.dataImpl.KeyExtent)
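
For illustration, a minimal sketch of the map processFailures consumes, pairing each failed file with the extents it could not be loaded into (the path and table id are hypothetical placeholders):

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.accumulo.core.data.TableId;
import org.apache.accumulo.core.dataImpl.KeyExtent;
import org.apache.hadoop.fs.Path;

class FailureMapExample {
    public static void main(String[] args) {
        Map<Path, List<KeyExtent>> completeFailures = new HashMap<>();
        // hypothetical failed file and a whole-table extent
        Path failedFile = new Path("/accumulo/bulk/import/f0000.rf");
        KeyExtent extent = new KeyExtent(TableId.of("42"), null, null);
        completeFailures.computeIfAbsent(failedFile, p -> new ArrayList<>()).add(extent);
        // processFailures(completeFailures) would log "<file> -> <extent>" for each pair
    }
}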

Aggregations

KeyExtent (org.apache.accumulo.core.dataImpl.KeyExtent): 239
Text (org.apache.hadoop.io.Text): 98
ArrayList (java.util.ArrayList): 72
HashMap (java.util.HashMap): 60
Value (org.apache.accumulo.core.data.Value): 57
Key (org.apache.accumulo.core.data.Key): 56
TableId (org.apache.accumulo.core.data.TableId): 53
Test (org.junit.Test): 52
Mutation (org.apache.accumulo.core.data.Mutation): 47
IOException (java.io.IOException): 40
List (java.util.List): 40
TKeyExtent (org.apache.accumulo.core.dataImpl.thrift.TKeyExtent): 39
HashSet (java.util.HashSet): 38
TreeMap (java.util.TreeMap): 38
Range (org.apache.accumulo.core.data.Range): 38
Map (java.util.Map): 33
Scanner (org.apache.accumulo.core.client.Scanner): 31
Entry (java.util.Map.Entry): 30
AccumuloClient (org.apache.accumulo.core.client.AccumuloClient): 30
Test (org.junit.jupiter.api.Test): 30