
Example 56 with KeyExtent

Use of org.apache.accumulo.core.data.impl.KeyExtent in the apache/accumulo project.

From the class GroupBalancer, the method populateMigrations:

private void populateMigrations(Set<TServerInstance> current, List<TabletMigration> migrationsOut, Moves moves) {
    if (moves.size() == 0) {
        return;
    }
    Function<KeyExtent, String> partitioner = getPartitioner();
    for (Pair<KeyExtent, Location> entry : getLocationProvider()) {
        String group = partitioner.apply(entry.getFirst());
        Location loc = entry.getSecond();
        if (loc.equals(Location.NONE) || !current.contains(loc.getTserverInstance())) {
            migrationsOut.clear();
            return;
        }
        TServerInstance dest = moves.removeMove(loc.getTserverInstance(), group);
        if (dest != null) {
            migrationsOut.add(new TabletMigration(entry.getFirst(), loc.getTserverInstance(), dest));
            if (moves.size() == 0) {
                break;
            }
        }
    }
}
Also used: TabletMigration (org.apache.accumulo.server.master.state.TabletMigration), KeyExtent (org.apache.accumulo.core.data.impl.KeyExtent), TServerInstance (org.apache.accumulo.server.master.state.TServerInstance)
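
The partitioner obtained from getPartitioner() maps each tablet's KeyExtent to a balancing-group name, and moves are then applied per group. For orientation only, a hypothetical partitioner that simply groups tablets by table ID could look like the sketch below; it is illustrative and not part of GroupBalancer (Example 57 shows the regex-based partitioner Accumulo actually ships).

import java.util.function.Function;
import org.apache.accumulo.core.data.impl.KeyExtent;

// Illustrative sketch only: place every tablet of a table in the same group,
// so each table is balanced independently of the others.
static Function<KeyExtent, String> byTableIdPartitioner() {
    return extent -> extent.getTableId().toString();
}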

Example 57 with KeyExtent

Use of org.apache.accumulo.core.data.impl.KeyExtent in the apache/accumulo project.

From the class RegexGroupBalancer, the method getPartitioner:

@Override
protected Function<KeyExtent, String> getPartitioner() {
    Map<String, String> customProps = context.getServerConfigurationFactory().getTableConfiguration(tableId).getAllPropertiesWithPrefix(Property.TABLE_ARBITRARY_PROP_PREFIX);
    String regex = customProps.get(REGEX_PROPERTY);
    final String defaultGroup = customProps.get(DEFAUT_GROUP_PROPERTY);
    final Pattern pattern = Pattern.compile(regex);
    return new Function<KeyExtent, String>() {

        @Override
        public String apply(KeyExtent input) {
            Text er = input.getEndRow();
            if (er == null) {
                return defaultGroup;
            }
            Matcher matcher = pattern.matcher(er.toString());
            if (matcher.matches() && matcher.groupCount() == 1) {
                return matcher.group(1);
            }
            return defaultGroup;
        }
    };
}
Also used: Pattern (java.util.regex.Pattern), Function (java.util.function.Function), Matcher (java.util.regex.Matcher), Text (org.apache.hadoop.io.Text), KeyExtent (org.apache.accumulo.core.data.impl.KeyExtent)
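
To use this balancer, the regex and default group are supplied through table.custom.* properties on the table. The sketch below shows one plausible way to configure it; the property keys (table.custom.balancer.group.regex.pattern and table.custom.balancer.group.regex.default), the balancer class name, and the example values are assumptions based on RegexGroupBalancer's conventions and should be verified against the Accumulo version in use.

import org.apache.accumulo.core.client.AccumuloException;
import org.apache.accumulo.core.client.AccumuloSecurityException;
import org.apache.accumulo.core.client.Connector;

// Configuration sketch; property keys, class name, and values are assumptions.
static void useRegexGroupBalancer(Connector conn, String tableName)
        throws AccumuloException, AccumuloSecurityException {
    conn.tableOperations().setProperty(tableName, "table.balancer",
            "org.apache.accumulo.server.master.balancer.RegexGroupBalancer");
    // The regex must have exactly one capturing group; the matched group becomes the group name.
    conn.tableOperations().setProperty(tableName,
            "table.custom.balancer.group.regex.pattern", "(\\d\\d).*");
    // End rows that do not match (and the default tablet) fall into this group.
    conn.tableOperations().setProperty(tableName,
            "table.custom.balancer.group.regex.default", "NONE");
}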

Example 58 with KeyExtent

Use of org.apache.accumulo.core.data.impl.KeyExtent in the apache/accumulo project.

From the class TabletStateChangeIterator, the method setMerges:

public static void setMerges(IteratorSetting cfg, Collection<MergeInfo> merges) {
    DataOutputBuffer buffer = new DataOutputBuffer();
    try {
        for (MergeInfo info : merges) {
            KeyExtent extent = info.getExtent();
            if (extent != null && !info.getState().equals(MergeState.NONE)) {
                info.write(buffer);
            }
        }
    } catch (Exception ex) {
        throw new RuntimeException(ex);
    }
    String encoded = Base64.getEncoder().encodeToString(Arrays.copyOf(buffer.getData(), buffer.getLength()));
    cfg.addOption(MERGES_OPTION, encoded);
}
Also used: DataOutputBuffer (org.apache.hadoop.io.DataOutputBuffer), KeyExtent (org.apache.accumulo.core.data.impl.KeyExtent), IOException (java.io.IOException), BadLocationStateException (org.apache.accumulo.server.master.state.TabletLocationState.BadLocationStateException)
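
A caller typically builds an IteratorSetting for TabletStateChangeIterator and passes the current merge information to setMerges before attaching the iterator to a metadata scan. The sketch below shows that wiring; the priority (1000) and iterator name ("tabletChange") are arbitrary placeholders, not values taken from the Accumulo master.

import java.util.Collection;
import org.apache.accumulo.core.client.IteratorSetting;
import org.apache.accumulo.server.master.state.MergeInfo;
import org.apache.accumulo.server.master.state.TabletStateChangeIterator;

// Wiring sketch; the priority and name are arbitrary.
static IteratorSetting tabletStateIterator(Collection<MergeInfo> merges) {
    IteratorSetting cfg = new IteratorSetting(1000, "tabletChange", TabletStateChangeIterator.class);
    TabletStateChangeIterator.setMerges(cfg, merges);
    return cfg;
}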

Example 59 with KeyExtent

Use of org.apache.accumulo.core.data.impl.KeyExtent in the apache/accumulo project.

From the class MetadataConstraints, the method check:

@Override
public List<Short> check(Environment env, Mutation mutation) {
    ArrayList<Short> violations = null;
    Collection<ColumnUpdate> colUpdates = mutation.getUpdates();
    // check the row; it should contain at least one ; or end with <
    boolean containsSemiC = false;
    byte[] row = mutation.getRow();
    // always allow rows that fall within reserved areas
    if (row.length > 0 && row[0] == '~')
        return null;
    if (row.length > 2 && row[0] == '!' && row[1] == '!' && row[2] == '~')
        return null;
    for (byte b : row) {
        if (b == ';') {
            containsSemiC = true;
        }
        if (b == ';' || b == '<')
            break;
        if (!validTableNameChars[0xff & b]) {
            violations = addIfNotPresent(violations, 4);
        }
    }
    if (!containsSemiC) {
        // see if last row char is <
        if (row.length == 0 || row[row.length - 1] != '<') {
            violations = addIfNotPresent(violations, 4);
        }
    } else {
        if (row.length == 0) {
            violations = addIfNotPresent(violations, 4);
        }
    }
    if (row.length > 0 && row[0] == '!') {
        if (row.length < 3 || row[1] != '0' || (row[2] != '<' && row[2] != ';')) {
            violations = addIfNotPresent(violations, 4);
        }
    }
    // ensure row is not less than MetadataTable.ID
    if (new Text(row).compareTo(new Text(MetadataTable.ID.getUtf8())) < 0) {
        violations = addViolation(violations, 5);
    }
    boolean checkedBulk = false;
    for (ColumnUpdate columnUpdate : colUpdates) {
        Text columnFamily = new Text(columnUpdate.getColumnFamily());
        if (columnUpdate.isDeleted()) {
            if (!isValidColumn(columnUpdate)) {
                violations = addViolation(violations, 2);
            }
            continue;
        }
        if (columnUpdate.getValue().length == 0 && !columnFamily.equals(ScanFileColumnFamily.NAME)) {
            violations = addViolation(violations, 6);
        }
        if (columnFamily.equals(DataFileColumnFamily.NAME)) {
            try {
                DataFileValue dfv = new DataFileValue(columnUpdate.getValue());
                if (dfv.getSize() < 0 || dfv.getNumEntries() < 0) {
                    violations = addViolation(violations, 1);
                }
            } catch (NumberFormatException | ArrayIndexOutOfBoundsException nfe) {
                violations = addViolation(violations, 1);
            }
        } else if (columnFamily.equals(ScanFileColumnFamily.NAME)) {
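            // scan file entries are allowed as-is; no further validation is needed here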
        } else if (columnFamily.equals(TabletsSection.BulkFileColumnFamily.NAME)) {
            if (!columnUpdate.isDeleted() && !checkedBulk) {
                // splits, which also write the time reference, are allowed to write this reference even when
                // the transaction is not running because the other half of the tablet is holding a reference
                // to the file.
                boolean isSplitMutation = false;
                // When a tablet is assigned, it re-writes the metadata. It should probably only update the location information,
                // but it writes everything. We allow it to re-write the bulk information if it is setting the location.
                // See ACCUMULO-1230.
                boolean isLocationMutation = false;
                HashSet<Text> dataFiles = new HashSet<>();
                HashSet<Text> loadedFiles = new HashSet<>();
                String tidString = new String(columnUpdate.getValue(), UTF_8);
                int otherTidCount = 0;
                for (ColumnUpdate update : mutation.getUpdates()) {
                    if (new ColumnFQ(update).equals(TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN)) {
                        isSplitMutation = true;
                    } else if (new Text(update.getColumnFamily()).equals(TabletsSection.CurrentLocationColumnFamily.NAME)) {
                        isLocationMutation = true;
                    } else if (new Text(update.getColumnFamily()).equals(DataFileColumnFamily.NAME)) {
                        dataFiles.add(new Text(update.getColumnQualifier()));
                    } else if (new Text(update.getColumnFamily()).equals(TabletsSection.BulkFileColumnFamily.NAME)) {
                        loadedFiles.add(new Text(update.getColumnQualifier()));
                        if (!new String(update.getValue(), UTF_8).equals(tidString)) {
                            otherTidCount++;
                        }
                    }
                }
                if (!isSplitMutation && !isLocationMutation) {
                    long tid = Long.parseLong(tidString);
                    try {
                        if (otherTidCount > 0 || !dataFiles.equals(loadedFiles) || !getArbitrator().transactionAlive(Constants.BULK_ARBITRATOR_TYPE, tid)) {
                            violations = addViolation(violations, 8);
                        }
                    } catch (Exception ex) {
                        violations = addViolation(violations, 8);
                    }
                }
                checkedBulk = true;
            }
        } else {
            if (!isValidColumn(columnUpdate)) {
                violations = addViolation(violations, 2);
            } else if (new ColumnFQ(columnUpdate).equals(TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN) && columnUpdate.getValue().length > 0 && (violations == null || !violations.contains((short) 4))) {
                KeyExtent ke = new KeyExtent(new Text(mutation.getRow()), (Text) null);
                Text per = KeyExtent.decodePrevEndRow(new Value(columnUpdate.getValue()));
                boolean prevEndRowLessThanEndRow = per == null || ke.getEndRow() == null || per.compareTo(ke.getEndRow()) < 0;
                if (!prevEndRowLessThanEndRow) {
                    violations = addViolation(violations, 3);
                }
            } else if (new ColumnFQ(columnUpdate).equals(TabletsSection.ServerColumnFamily.LOCK_COLUMN)) {
                if (zooCache == null) {
                    zooCache = new ZooCache();
                }
                if (zooRoot == null) {
                    zooRoot = ZooUtil.getRoot(HdfsZooInstance.getInstance());
                }
                boolean lockHeld = false;
                String lockId = new String(columnUpdate.getValue(), UTF_8);
                try {
                    lockHeld = ZooLock.isLockHeld(zooCache, new ZooUtil.LockID(zooRoot, lockId));
                } catch (Exception e) {
                    log.debug("Failed to verify lock was held {} {}", lockId, e.getMessage());
                }
                if (!lockHeld) {
                    violations = addViolation(violations, 7);
                }
            }
        }
    }
    if (violations != null) {
        log.debug("violating metadata mutation : {}", new String(mutation.getRow(), UTF_8));
        for (ColumnUpdate update : mutation.getUpdates()) {
            log.debug(" update: {}:{} value {}", new String(update.getColumnFamily(), UTF_8), new String(update.getColumnQualifier(), UTF_8), (update.isDeleted() ? "[delete]" : new String(update.getValue(), UTF_8)));
        }
    }
    return violations;
}
Also used: ColumnUpdate (org.apache.accumulo.core.data.ColumnUpdate), DataFileValue (org.apache.accumulo.core.metadata.schema.DataFileValue), Text (org.apache.hadoop.io.Text), KeyExtent (org.apache.accumulo.core.data.impl.KeyExtent), ZooCache (org.apache.accumulo.server.zookeeper.ZooCache), Constraint (org.apache.accumulo.core.constraints.Constraint), ColumnFQ (org.apache.accumulo.core.util.ColumnFQ), Value (org.apache.accumulo.core.data.Value), HashSet (java.util.HashSet)
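
Violation codes returned by a Constraint such as this one reach the writing client as ConstraintViolationSummary entries inside MutationsRejectedException. The generic sketch below shows how such violations surface to a client; the table name and writer configuration are placeholders, and this applies to any constrained table, not just the metadata table.

import org.apache.accumulo.core.client.BatchWriter;
import org.apache.accumulo.core.client.BatchWriterConfig;
import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.client.MutationsRejectedException;
import org.apache.accumulo.core.client.TableNotFoundException;
import org.apache.accumulo.core.data.ConstraintViolationSummary;
import org.apache.accumulo.core.data.Mutation;

// Generic sketch: write a mutation and report any constraint violations that were raised.
static void writeAndReportViolations(Connector conn, String table, Mutation m)
        throws TableNotFoundException {
    BatchWriter bw = conn.createBatchWriter(table, new BatchWriterConfig());
    try {
        bw.addMutation(m);
        bw.close();
    } catch (MutationsRejectedException e) {
        for (ConstraintViolationSummary cvs : e.getConstraintViolationSummaries()) {
            System.err.println(cvs);
        }
    }
}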

Example 60 with KeyExtent

Use of org.apache.accumulo.core.data.impl.KeyExtent in the apache/accumulo project.

From the class MergeInfo, the method readFields:

@Override
public void readFields(DataInput in) throws IOException {
    extent = new KeyExtent();
    extent.readFields(in);
    state = MergeState.values()[in.readInt()];
    operation = Operation.values()[in.readInt()];
}
Also used: KeyExtent (org.apache.accumulo.core.data.impl.KeyExtent)
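
readFields implies the serialization order used on the write side: the extent first, then the state and operation as enum ordinals. A matching write method would presumably look like the sketch below; it is inferred from readFields rather than copied from MergeInfo.

import java.io.DataOutput;
import java.io.IOException;

@Override
public void write(DataOutput out) throws IOException {
    // Mirror of readFields: serialize the extent, then the two enum ordinals.
    extent.write(out);
    out.writeInt(state.ordinal());
    out.writeInt(operation.ordinal());
}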

Aggregations

KeyExtent (org.apache.accumulo.core.data.impl.KeyExtent): 219 uses
Test (org.junit.Test): 84 uses
Text (org.apache.hadoop.io.Text): 82 uses
Value (org.apache.accumulo.core.data.Value): 67 uses
ArrayList (java.util.ArrayList): 63 uses
Key (org.apache.accumulo.core.data.Key): 59 uses
HashMap (java.util.HashMap): 50 uses
Mutation (org.apache.accumulo.core.data.Mutation): 40 uses
Scanner (org.apache.accumulo.core.client.Scanner): 39 uses
Range (org.apache.accumulo.core.data.Range): 39 uses
TreeMap (java.util.TreeMap): 37 uses
TServerInstance (org.apache.accumulo.server.master.state.TServerInstance): 36 uses
Table (org.apache.accumulo.core.client.impl.Table): 34 uses
HashSet (java.util.HashSet): 30 uses
List (java.util.List): 29 uses
TKeyExtent (org.apache.accumulo.core.data.thrift.TKeyExtent): 29 uses
Connector (org.apache.accumulo.core.client.Connector): 28 uses
IOException (java.io.IOException): 27 uses
MetadataTable (org.apache.accumulo.core.metadata.MetadataTable): 25 uses
DataFileValue (org.apache.accumulo.core.metadata.schema.DataFileValue): 25 uses