Example 1 with RowIterator

Use of org.apache.accumulo.core.client.RowIterator in project accumulo by apache.

The class MasterClientServiceHandler, method waitForFlush:

@Override
public void waitForFlush(TInfo tinfo, TCredentials c, String tableIdStr, ByteBuffer startRow, ByteBuffer endRow, long flushID, long maxLoops) throws ThriftSecurityException, ThriftTableOperationException {
    Table.ID tableId = Table.ID.of(tableIdStr);
    Namespace.ID namespaceId = getNamespaceIdFromTableId(TableOperation.FLUSH, tableId);
    master.security.canFlush(c, tableId, namespaceId);
    if (endRow != null && startRow != null && ByteBufferUtil.toText(startRow).compareTo(ByteBufferUtil.toText(endRow)) >= 0)
        throw new ThriftTableOperationException(tableId.canonicalID(), null, TableOperation.FLUSH, TableOperationExceptionType.BAD_RANGE, "start row must be less than end row");
    Set<TServerInstance> serversToFlush = new HashSet<>(master.tserverSet.getCurrentServers());
    for (long l = 0; l < maxLoops; l++) {
        for (TServerInstance instance : serversToFlush) {
            try {
                final TServerConnection server = master.tserverSet.getConnection(instance);
                if (server != null)
                    server.flush(master.masterLock, tableId, ByteBufferUtil.toBytes(startRow), ByteBufferUtil.toBytes(endRow));
            } catch (TException ex) {
                Master.log.error(ex.toString());
            }
        }
        if (l == maxLoops - 1)
            break;
        sleepUninterruptibly(50, TimeUnit.MILLISECONDS);
        serversToFlush.clear();
        try {
            Connector conn = master.getConnector();
            Scanner scanner;
            if (tableId.equals(MetadataTable.ID)) {
                scanner = new IsolatedScanner(conn.createScanner(RootTable.NAME, Authorizations.EMPTY));
                scanner.setRange(MetadataSchema.TabletsSection.getRange());
            } else {
                scanner = new IsolatedScanner(conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY));
                Range range = new KeyExtent(tableId, null, ByteBufferUtil.toText(startRow)).toMetadataRange();
                scanner.setRange(range.clip(MetadataSchema.TabletsSection.getRange()));
            }
            TabletsSection.ServerColumnFamily.FLUSH_COLUMN.fetch(scanner);
            TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.fetch(scanner);
            scanner.fetchColumnFamily(TabletsSection.CurrentLocationColumnFamily.NAME);
            scanner.fetchColumnFamily(LogColumnFamily.NAME);
            RowIterator ri = new RowIterator(scanner);
            int tabletsToWaitFor = 0;
            int tabletCount = 0;
            Text ert = ByteBufferUtil.toText(endRow);
            while (ri.hasNext()) {
                Iterator<Entry<Key, Value>> row = ri.next();
                long tabletFlushID = -1;
                int logs = 0;
                boolean online = false;
                TServerInstance server = null;
                Entry<Key, Value> entry = null;
                while (row.hasNext()) {
                    entry = row.next();
                    Key key = entry.getKey();
                    if (TabletsSection.ServerColumnFamily.FLUSH_COLUMN.equals(key.getColumnFamily(), key.getColumnQualifier())) {
                        tabletFlushID = Long.parseLong(entry.getValue().toString());
                    }
                    if (LogColumnFamily.NAME.equals(key.getColumnFamily()))
                        logs++;
                    if (TabletsSection.CurrentLocationColumnFamily.NAME.equals(key.getColumnFamily())) {
                        online = true;
                        server = new TServerInstance(entry.getValue(), key.getColumnQualifier());
                    }
                }
                // when tablet is not online and has no logs, there is no reason to wait for it
                if ((online || logs > 0) && tabletFlushID < flushID) {
                    tabletsToWaitFor++;
                    if (server != null)
                        serversToFlush.add(server);
                }
                tabletCount++;
                Text tabletEndRow = new KeyExtent(entry.getKey().getRow(), (Text) null).getEndRow();
                if (tabletEndRow == null || (ert != null && tabletEndRow.compareTo(ert) >= 0))
                    break;
            }
            if (tabletsToWaitFor == 0)
                break;
            if (tabletCount == 0 && !Tables.exists(master.getInstance(), tableId))
                throw new ThriftTableOperationException(tableId.canonicalID(), null, TableOperation.FLUSH, TableOperationExceptionType.NOTFOUND, null);
        } catch (AccumuloException | TabletDeletedException e) {
            Master.log.debug("Failed to scan {} table to wait for flush {}", MetadataTable.NAME, tableId, e);
        } catch (AccumuloSecurityException e) {
            Master.log.warn("{}", e.getMessage(), e);
            throw new ThriftSecurityException();
        } catch (TableNotFoundException e) {
            Master.log.error("{}", e.getMessage(), e);
            throw new ThriftTableOperationException();
        }
    }
}
Also used : TException(org.apache.thrift.TException) Connector(org.apache.accumulo.core.client.Connector) BatchScanner(org.apache.accumulo.core.client.BatchScanner) IsolatedScanner(org.apache.accumulo.core.client.IsolatedScanner) Scanner(org.apache.accumulo.core.client.Scanner) TKeyExtent(org.apache.accumulo.core.data.thrift.TKeyExtent) KeyExtent(org.apache.accumulo.core.data.impl.KeyExtent) TableNotFoundException(org.apache.accumulo.core.client.TableNotFoundException) Entry(java.util.Map.Entry) ThriftTableOperationException(org.apache.accumulo.core.client.impl.thrift.ThriftTableOperationException) AccumuloSecurityException(org.apache.accumulo.core.client.AccumuloSecurityException) HashSet(java.util.HashSet) AccumuloException(org.apache.accumulo.core.client.AccumuloException) MetadataTable(org.apache.accumulo.core.metadata.MetadataTable) RootTable(org.apache.accumulo.core.metadata.RootTable) Table(org.apache.accumulo.core.client.impl.Table) ReplicationTable(org.apache.accumulo.core.replication.ReplicationTable) Text(org.apache.hadoop.io.Text) Range(org.apache.accumulo.core.data.Range) TabletDeletedException(org.apache.accumulo.server.util.TabletIterator.TabletDeletedException) ThriftSecurityException(org.apache.accumulo.core.client.impl.thrift.ThriftSecurityException) Namespace(org.apache.accumulo.core.client.impl.Namespace) TServerInstance(org.apache.accumulo.server.master.state.TServerInstance) TServerConnection(org.apache.accumulo.server.master.LiveTServerSet.TServerConnection) RowIterator(org.apache.accumulo.core.client.RowIterator) Value(org.apache.accumulo.core.data.Value) IsolatedScanner(org.apache.accumulo.core.client.IsolatedScanner) Key(org.apache.accumulo.core.data.Key)
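
The core pattern here is worth isolating: wrap a Scanner in a RowIterator so each next() call hands back one row's columns as a nested iterator. Below is a minimal, self-contained sketch of that pattern on its own, assuming an existing Connector and a hypothetical table name "mytable"; it illustrates the API used above and is not code from the Accumulo source.

import java.util.Iterator;
import java.util.Map.Entry;

import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.client.RowIterator;
import org.apache.accumulo.core.client.Scanner;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.security.Authorizations;

public class RowIteratorSketch {

    // Iterates a table row by row; "mytable" is a placeholder table name.
    static void printRowSizes(Connector conn) throws Exception {
        Scanner scanner = conn.createScanner("mytable", Authorizations.EMPTY);
        RowIterator rows = new RowIterator(scanner);
        while (rows.hasNext()) {
            Iterator<Entry<Key, Value>> row = rows.next();
            Key first = null;
            int columns = 0;
            while (row.hasNext()) {
                Entry<Key, Value> entry = row.next();
                if (first == null)
                    first = entry.getKey();
                columns++;
            }
            System.out.println(first.getRow() + " has " + columns + " columns");
        }
    }
}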

Example 2 with RowIterator

Use of org.apache.accumulo.core.client.RowIterator in project accumulo by apache.

The class TableOperationsImpl, method waitForTableStateTransition:

private void waitForTableStateTransition(Table.ID tableId, TableState expectedState) throws AccumuloException, TableNotFoundException, AccumuloSecurityException {
    Text startRow = null;
    Text lastRow = null;
    while (true) {
        if (Tables.getTableState(context.getInstance(), tableId) != expectedState) {
            Tables.clearCache(context.getInstance());
            TableState currentState = Tables.getTableState(context.getInstance(), tableId);
            if (currentState != expectedState) {
                if (!Tables.exists(context.getInstance(), tableId))
                    throw new TableDeletedException(tableId.canonicalID());
                if (currentState == TableState.DELETING)
                    throw new TableNotFoundException(tableId.canonicalID(), "", "Table is being deleted.");
                throw new AccumuloException("Unexpected table state " + tableId + " " + Tables.getTableState(context.getInstance(), tableId) + " != " + expectedState);
            }
        }
        Range range;
        if (startRow == null || lastRow == null)
            range = new KeyExtent(tableId, null, null).toMetadataRange();
        else
            range = new Range(startRow, lastRow);
        String metaTable = MetadataTable.NAME;
        if (tableId.equals(MetadataTable.ID))
            metaTable = RootTable.NAME;
        Scanner scanner = createMetadataScanner(metaTable, range);
        RowIterator rowIter = new RowIterator(scanner);
        KeyExtent lastExtent = null;
        int total = 0;
        int waitFor = 0;
        int holes = 0;
        Text continueRow = null;
        MapCounter<String> serverCounts = new MapCounter<>();
        while (rowIter.hasNext()) {
            Iterator<Entry<Key, Value>> row = rowIter.next();
            total++;
            KeyExtent extent = null;
            String future = null;
            String current = null;
            while (row.hasNext()) {
                Entry<Key, Value> entry = row.next();
                Key key = entry.getKey();
                if (key.getColumnFamily().equals(TabletsSection.FutureLocationColumnFamily.NAME))
                    future = entry.getValue().toString();
                if (key.getColumnFamily().equals(TabletsSection.CurrentLocationColumnFamily.NAME))
                    current = entry.getValue().toString();
                if (TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.hasColumns(key))
                    extent = new KeyExtent(key.getRow(), entry.getValue());
            }
            if ((expectedState == TableState.ONLINE && current == null) || (expectedState == TableState.OFFLINE && (future != null || current != null))) {
                if (continueRow == null)
                    continueRow = extent.getMetadataEntry();
                waitFor++;
                lastRow = extent.getMetadataEntry();
                if (current != null)
                    serverCounts.increment(current, 1);
                if (future != null)
                    serverCounts.increment(future, 1);
            }
            if (!extent.getTableId().equals(tableId)) {
                throw new AccumuloException("Saw unexpected table Id " + tableId + " " + extent);
            }
            if (lastExtent != null && !extent.isPreviousExtent(lastExtent)) {
                holes++;
            }
            lastExtent = extent;
        }
        if (continueRow != null) {
            startRow = continueRow;
        }
        if (holes > 0 || total == 0) {
            startRow = null;
            lastRow = null;
        }
        if (waitFor > 0 || holes > 0 || total == 0) {
            long waitTime;
            long maxPerServer = 0;
            if (serverCounts.size() > 0) {
                maxPerServer = Collections.max(serverCounts.values());
                waitTime = maxPerServer * 10;
            } else
                waitTime = waitFor * 10;
            waitTime = Math.max(100, waitTime);
            waitTime = Math.min(5000, waitTime);
            log.trace("Waiting for {}({}) tablets, startRow = {} lastRow = {}, holes={} sleeping:{}ms", waitFor, maxPerServer, startRow, lastRow, holes, waitTime);
            sleepUninterruptibly(waitTime, TimeUnit.MILLISECONDS);
        } else {
            break;
        }
    }
}
Also used : AccumuloException(org.apache.accumulo.core.client.AccumuloException) IsolatedScanner(org.apache.accumulo.core.client.IsolatedScanner) Scanner(org.apache.accumulo.core.client.Scanner) Text(org.apache.hadoop.io.Text) TRowRange(org.apache.accumulo.core.data.thrift.TRowRange) Range(org.apache.accumulo.core.data.Range) KeyExtent(org.apache.accumulo.core.data.impl.KeyExtent) TableDeletedException(org.apache.accumulo.core.client.TableDeletedException) Constraint(org.apache.accumulo.core.constraints.Constraint) TableNotFoundException(org.apache.accumulo.core.client.TableNotFoundException) ZipEntry(java.util.zip.ZipEntry) Entry(java.util.Map.Entry) RowIterator(org.apache.accumulo.core.client.RowIterator) MapCounter(org.apache.accumulo.core.util.MapCounter) Value(org.apache.accumulo.core.data.Value) Key(org.apache.accumulo.core.data.Key) TableState(org.apache.accumulo.core.master.state.tables.TableState)
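
One detail that is easy to miss above is the back-off: the sleep time scales with the busiest server's tablet count (or, when no locations are known, with the number of tablets still being waited on) and is clamped between 100 ms and 5 s. A minimal sketch of just that calculation, with hypothetical inputs; the original branches on whether any server counts were recorded, which maxPerServer > 0 stands in for here.

public class StateWaitTimeSketch {

    // Mirrors the clamp in waitForTableStateTransition: scale with the busiest
    // server, but never poll faster than 100 ms or sleep longer than 5000 ms.
    static long computeWaitTime(long maxPerServer, int waitFor) {
        long waitTime = maxPerServer > 0 ? maxPerServer * 10 : waitFor * 10;
        waitTime = Math.max(100, waitTime);
        return Math.min(5000, waitTime);
    }

    public static void main(String[] args) {
        System.out.println(computeWaitTime(0, 3));    // 100 (clamped up)
        System.out.println(computeWaitTime(42, 0));   // 420
        System.out.println(computeWaitTime(900, 0));  // 5000 (clamped down)
    }
}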

Example 3 with RowIterator

Use of org.apache.accumulo.core.client.RowIterator in project accumulo by apache.

The class TabletGroupWatcher, method deleteTablets:

private void deleteTablets(MergeInfo info, Range scanRange, BatchWriter bw, Connector conn) throws TableNotFoundException, MutationsRejectedException {
    Scanner scanner;
    Mutation m;
    // Delete everything in the other tablets.
    // Group all deletes for a tablet into one mutation; this makes tablets
    // either disappear entirely or not at all, which is important for the case
    // where the process terminates in the loop below.
    scanner = conn.createScanner(info.getExtent().isMeta() ? RootTable.NAME : MetadataTable.NAME, Authorizations.EMPTY);
    Master.log.debug("Deleting range {}", scanRange);
    scanner.setRange(scanRange);
    RowIterator rowIter = new RowIterator(scanner);
    while (rowIter.hasNext()) {
        Iterator<Entry<Key, Value>> row = rowIter.next();
        m = null;
        while (row.hasNext()) {
            Entry<Key, Value> entry = row.next();
            Key key = entry.getKey();
            if (m == null)
                m = new Mutation(key.getRow());
            m.putDelete(key.getColumnFamily(), key.getColumnQualifier());
            Master.log.debug("deleting entry {}", key);
        }
        bw.addMutation(m);
    }
    bw.flush();
}
Also used : Scanner(org.apache.accumulo.core.client.Scanner) Entry(java.util.Map.Entry) RowIterator(org.apache.accumulo.core.client.RowIterator) Value(org.apache.accumulo.core.data.Value) Mutation(org.apache.accumulo.core.data.Mutation) Key(org.apache.accumulo.core.data.Key) PartialKey(org.apache.accumulo.core.data.PartialKey)
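
The same grouping idea, one Mutation carrying a putDelete for every column in a row, can be used to remove an arbitrary row atomically. A minimal sketch assuming an already-configured Scanner and BatchWriter and a hypothetical row id; not taken from the Accumulo source.

import java.util.Map.Entry;

import org.apache.accumulo.core.client.BatchWriter;
import org.apache.accumulo.core.client.MutationsRejectedException;
import org.apache.accumulo.core.client.Scanner;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Mutation;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.data.Value;

public class RowDeleteSketch {

    // Deletes every column of one row in a single mutation, so the row either
    // disappears entirely or not at all. The caller is responsible for flushing bw.
    static void deleteRow(Scanner scanner, BatchWriter bw, String row) throws MutationsRejectedException {
        scanner.setRange(Range.exact(row));
        Mutation m = new Mutation(row);
        for (Entry<Key, Value> entry : scanner) {
            Key key = entry.getKey();
            m.putDelete(key.getColumnFamily(), key.getColumnQualifier());
        }
        if (m.size() > 0)
            bw.addMutation(m);
    }
}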

Example 4 with RowIterator

Use of org.apache.accumulo.core.client.RowIterator in project accumulo by apache.

The class InputConfigurator, method binOffline:

public static Map<String, Map<KeyExtent, List<Range>>> binOffline(Table.ID tableId, List<Range> ranges, Instance instance, Connector conn) throws AccumuloException, TableNotFoundException {
    Map<String, Map<KeyExtent, List<Range>>> binnedRanges = new HashMap<>();
    if (Tables.getTableState(instance, tableId) != TableState.OFFLINE) {
        Tables.clearCache(instance);
        if (Tables.getTableState(instance, tableId) != TableState.OFFLINE) {
            throw new AccumuloException("Table is online tableId:" + tableId + " cannot scan table in offline mode ");
        }
    }
    for (Range range : ranges) {
        Text startRow;
        if (range.getStartKey() != null)
            startRow = range.getStartKey().getRow();
        else
            startRow = new Text();
        Range metadataRange = new Range(new KeyExtent(tableId, startRow, null).getMetadataEntry(), true, null, false);
        Scanner scanner = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
        MetadataSchema.TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.fetch(scanner);
        scanner.fetchColumnFamily(MetadataSchema.TabletsSection.LastLocationColumnFamily.NAME);
        scanner.fetchColumnFamily(MetadataSchema.TabletsSection.CurrentLocationColumnFamily.NAME);
        scanner.fetchColumnFamily(MetadataSchema.TabletsSection.FutureLocationColumnFamily.NAME);
        scanner.setRange(metadataRange);
        RowIterator rowIter = new RowIterator(scanner);
        KeyExtent lastExtent = null;
        while (rowIter.hasNext()) {
            Iterator<Map.Entry<Key, Value>> row = rowIter.next();
            String last = "";
            KeyExtent extent = null;
            String location = null;
            while (row.hasNext()) {
                Map.Entry<Key, Value> entry = row.next();
                Key key = entry.getKey();
                if (key.getColumnFamily().equals(MetadataSchema.TabletsSection.LastLocationColumnFamily.NAME)) {
                    last = entry.getValue().toString();
                }
                if (key.getColumnFamily().equals(MetadataSchema.TabletsSection.CurrentLocationColumnFamily.NAME) || key.getColumnFamily().equals(MetadataSchema.TabletsSection.FutureLocationColumnFamily.NAME)) {
                    location = entry.getValue().toString();
                }
                if (MetadataSchema.TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.hasColumns(key)) {
                    extent = new KeyExtent(key.getRow(), entry.getValue());
                }
            }
            if (location != null)
                return null;
            if (!extent.getTableId().equals(tableId)) {
                throw new AccumuloException("Saw unexpected table Id " + tableId + " " + extent);
            }
            if (lastExtent != null && !extent.isPreviousExtent(lastExtent)) {
                throw new AccumuloException(" " + lastExtent + " is not previous extent " + extent);
            }
            Map<KeyExtent, List<Range>> tabletRanges = binnedRanges.get(last);
            if (tabletRanges == null) {
                tabletRanges = new HashMap<>();
                binnedRanges.put(last, tabletRanges);
            }
            List<Range> rangeList = tabletRanges.get(extent);
            if (rangeList == null) {
                rangeList = new ArrayList<>();
                tabletRanges.put(extent, rangeList);
            }
            rangeList.add(range);
            if (extent.getEndRow() == null || range.afterEndKey(new Key(extent.getEndRow()).followingKey(PartialKey.ROW))) {
                break;
            }
            lastExtent = extent;
        }
    }
    return binnedRanges;
}
Also used : AccumuloException(org.apache.accumulo.core.client.AccumuloException) BatchScanner(org.apache.accumulo.core.client.BatchScanner) ClientSideIteratorScanner(org.apache.accumulo.core.client.ClientSideIteratorScanner) IsolatedScanner(org.apache.accumulo.core.client.IsolatedScanner) Scanner(org.apache.accumulo.core.client.Scanner) HashMap(java.util.HashMap) Text(org.apache.hadoop.io.Text) Range(org.apache.accumulo.core.data.Range) KeyExtent(org.apache.accumulo.core.data.impl.KeyExtent) RowIterator(org.apache.accumulo.core.client.RowIterator) Value(org.apache.accumulo.core.data.Value) List(java.util.List) ArrayList(java.util.ArrayList) Map(java.util.Map) HashMap(java.util.HashMap) Key(org.apache.accumulo.core.data.Key) PartialKey(org.apache.accumulo.core.data.PartialKey)
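
binOffline returns a nested map keyed first by last-known location, then by tablet extent, with the ranges assigned to each tablet; a null return means a tablet still had a current or future location and the caller should retry once the table is fully offline. A small sketch of walking that structure; the printing is purely illustrative.

import java.util.List;
import java.util.Map;

import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.data.impl.KeyExtent;

public class BinnedRangesSketch {

    // Walks the location -> tablet -> ranges structure returned by binOffline.
    static void print(Map<String, Map<KeyExtent, List<Range>>> binnedRanges) {
        for (Map.Entry<String, Map<KeyExtent, List<Range>>> byLocation : binnedRanges.entrySet()) {
            for (Map.Entry<KeyExtent, List<Range>> byTablet : byLocation.getValue().entrySet()) {
                System.out.println(byLocation.getKey() + " " + byTablet.getKey() + " -> "
                        + byTablet.getValue().size() + " range(s)");
            }
        }
    }
}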

Example 5 with RowIterator

Use of org.apache.accumulo.core.client.RowIterator in project accumulo by apache.

The class AccumuloRowInputFormat, method createRecordReader:

@Override
public RecordReader<Text, PeekingIterator<Entry<Key, Value>>> createRecordReader(InputSplit split, TaskAttemptContext context) throws IOException, InterruptedException {
    log.setLevel(getLogLevel(context));
    return new RecordReaderBase<Text, PeekingIterator<Entry<Key, Value>>>() {

        RowIterator rowIterator;

        @Override
        public void initialize(InputSplit inSplit, TaskAttemptContext attempt) throws IOException {
            super.initialize(inSplit, attempt);
            rowIterator = new RowIterator(scannerIterator);
            currentK = new Text();
            currentV = null;
        }

        @Override
        public boolean nextKeyValue() throws IOException, InterruptedException {
            if (!rowIterator.hasNext())
                return false;
            currentV = new PeekingIterator<>(rowIterator.next());
            numKeysRead = rowIterator.getKVCount();
            currentKey = currentV.peek().getKey();
            currentK = new Text(currentKey.getRow());
            return true;
        }
    };
}
Also used : Entry(java.util.Map.Entry) RowIterator(org.apache.accumulo.core.client.RowIterator) TaskAttemptContext(org.apache.hadoop.mapreduce.TaskAttemptContext) Text(org.apache.hadoop.io.Text) InputSplit(org.apache.hadoop.mapreduce.InputSplit)
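
For completeness, a hedged sketch of wiring AccumuloRowInputFormat into a MapReduce job with the 1.x client API that the code above targets; the mapper sees each row as a PeekingIterator over its cells. The credentials, ZooKeeper instance configuration, and table name below are placeholders, and the static setters are the standard InputFormatBase configuration calls rather than anything shown in this source.

import java.util.Map.Entry;

import org.apache.accumulo.core.client.ClientConfiguration;
import org.apache.accumulo.core.client.mapreduce.AccumuloRowInputFormat;
import org.apache.accumulo.core.client.security.tokens.PasswordToken;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.security.Authorizations;
import org.apache.accumulo.core.util.PeekingIterator;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat;

public class RowInputFormatJobSketch {

    // Each map() call receives one whole row as a PeekingIterator of its cells.
    static class RowMapper extends Mapper<Text, PeekingIterator<Entry<Key, Value>>, Text, Text> {
        @Override
        protected void map(Text row, PeekingIterator<Entry<Key, Value>> cells, Context ctx) {
            int count = 0;
            while (cells.hasNext()) {
                cells.next();
                count++;
            }
            System.out.println(row + " had " + count + " cells");
        }
    }

    public static void main(String[] args) throws Exception {
        Job job = Job.getInstance();
        job.setJarByClass(RowInputFormatJobSketch.class);
        job.setInputFormatClass(AccumuloRowInputFormat.class);
        job.setMapperClass(RowMapper.class);
        job.setNumReduceTasks(0);
        job.setOutputFormatClass(NullOutputFormat.class);
        // Placeholder credentials, ZooKeeper instance config, and table name.
        AccumuloRowInputFormat.setConnectorInfo(job, "user", new PasswordToken("pass"));
        AccumuloRowInputFormat.setZooKeeperInstance(job, ClientConfiguration.loadDefault());
        AccumuloRowInputFormat.setInputTableName(job, "mytable");
        AccumuloRowInputFormat.setScanAuthorizations(job, Authorizations.EMPTY);
        job.waitForCompletion(true);
    }
}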

Aggregations

RowIterator (org.apache.accumulo.core.client.RowIterator): 10
Entry (java.util.Map.Entry): 9
Scanner (org.apache.accumulo.core.client.Scanner): 8
Key (org.apache.accumulo.core.data.Key): 8
Value (org.apache.accumulo.core.data.Value): 8
IsolatedScanner (org.apache.accumulo.core.client.IsolatedScanner): 6
Text (org.apache.hadoop.io.Text): 6
KeyExtent (org.apache.accumulo.core.data.impl.KeyExtent): 5
AccumuloException (org.apache.accumulo.core.client.AccumuloException): 4
Range (org.apache.accumulo.core.data.Range): 4
Connector (org.apache.accumulo.core.client.Connector): 3
TableNotFoundException (org.apache.accumulo.core.client.TableNotFoundException): 3
PartialKey (org.apache.accumulo.core.data.PartialKey): 3
ArrayList (java.util.ArrayList): 2
AccumuloSecurityException (org.apache.accumulo.core.client.AccumuloSecurityException): 2
BatchScanner (org.apache.accumulo.core.client.BatchScanner): 2
TableDeletedException (org.apache.accumulo.core.client.TableDeletedException): 2
Mutation (org.apache.accumulo.core.data.Mutation): 2
IOException (java.io.IOException): 1
InvocationTargetException (java.lang.reflect.InvocationTargetException): 1