Search in sources :

Example 36 with CompactionConfig

use of org.apache.accumulo.core.client.admin.CompactionConfig in project accumulo by apache.

The class UserCompactionStrategyIT defines the method testDropNone.

private void testDropNone(Map<String, String> options) throws Exception {
    // Runs a user-initiated compaction with the given strategy options and
    // verifies that no rows were dropped by the strategy.
    final Connector conn = getConnector();
    final String table = getUniqueNames(1)[0];
    conn.tableOperations().create(table);
    // Write two flushed files, one row each.
    writeFlush(conn, table, "a");
    writeFlush(conn, table, "b");
    final CompactionStrategyConfig strategy = new CompactionStrategyConfig(TestCompactionStrategy.class.getName());
    strategy.setOptions(options);
    final CompactionConfig compaction = new CompactionConfig().setWait(true).setCompactionStrategy(strategy);
    conn.tableOperations().compact(table, compaction);
    // Both rows must survive the compaction.
    Assert.assertEquals(ImmutableSet.of("a", "b"), getRows(conn, table));
}
Also used : CompactionStrategyConfig(org.apache.accumulo.core.client.admin.CompactionStrategyConfig) Connector(org.apache.accumulo.core.client.Connector) CompactionConfig(org.apache.accumulo.core.client.admin.CompactionConfig)

Example 37 with CompactionConfig

use of org.apache.accumulo.core.client.admin.CompactionConfig in project accumulo by apache.

The class SummaryIT defines the method compactionTest.

@Test
public void compactionTest() throws Exception {
    // Verifies that a summary-data-driven compaction strategy leaves "foo" rows alone
    // while they are outnumbered by "bar" rows, and removes them once they outnumber bars.
    final String table = getUniqueNames(1)[0];
    Connector c = getConnector();
    NewTableConfiguration ntc = new NewTableConfiguration();
    SummarizerConfiguration sc1 = SummarizerConfiguration.builder(FooCounter.class.getName()).build();
    ntc.enableSummarization(sc1);
    c.tableOperations().create(table, ntc);
    try (BatchWriter bw = c.createBatchWriter(table, new BatchWriterConfig())) {
        write(bw, "bar1", "f1", "q1", "v1");
        write(bw, "bar2", "f1", "q1", "v2");
        write(bw, "foo1", "f1", "q1", "v3");
    }
    // Create a compaction config that will filter out foos if there are too many. Uses summary data to know if there are too many foos.
    CompactionStrategyConfig csc = new CompactionStrategyConfig(FooCS.class.getName());
    List<IteratorSetting> iterators = Collections.singletonList(new IteratorSetting(100, FooFilter.class));
    CompactionConfig compactConfig = new CompactionConfig().setFlush(true).setCompactionStrategy(csc).setIterators(iterators).setWait(true);
    // this compaction should make no changes because there are fewer foos than bars
    c.tableOperations().compact(table, compactConfig);
    Map<String, Long> counts = countRowTypes(c, table);
    Assert.assertEquals(1L, (long) counts.getOrDefault("foo", 0L));
    Assert.assertEquals(2L, (long) counts.getOrDefault("bar", 0L));
    Assert.assertEquals(2, counts.size());
    try (BatchWriter bw = c.createBatchWriter(table, new BatchWriterConfig())) {
        write(bw, "foo2", "f1", "q1", "v4");
        write(bw, "foo3", "f1", "q1", "v5");
        write(bw, "foo4", "f1", "q1", "v6");
    }
    // this compaction should remove all foos because there are more foos than bars
    c.tableOperations().compact(table, compactConfig);
    counts = countRowTypes(c, table);
    Assert.assertEquals(0L, (long) counts.getOrDefault("foo", 0L));
    Assert.assertEquals(2L, (long) counts.getOrDefault("bar", 0L));
    Assert.assertEquals(1, counts.size());
}

/**
 * Scans the table and counts rows by type, where a row's type is its row id with trailing
 * digits stripped (e.g. "foo1" and "foo2" both count toward "foo").
 *
 * @param c connector used to scan
 * @param table table to scan with empty authorizations
 * @return map from row type to the number of rows of that type
 */
private static Map<String, Long> countRowTypes(Connector c, String table) throws TableNotFoundException {
    try (Scanner scanner = c.createScanner(table, Authorizations.EMPTY)) {
        Stream<Entry<Key, Value>> stream = StreamSupport.stream(scanner.spliterator(), false);
        return stream
                // convert to row
                .map(e -> e.getKey().getRowData().toString())
                // strip numbers off row
                .map(r -> r.replaceAll("[0-9]+", ""))
                // count different row types
                .collect(groupingBy(identity(), counting()));
    }
}
Also used : Arrays(java.util.Arrays) TableOfflineException(org.apache.accumulo.core.client.TableOfflineException) SortedSet(java.util.SortedSet) Collectors.counting(java.util.stream.Collectors.counting) DELETES_IGNORED_STAT(org.apache.accumulo.core.client.summary.CountingSummarizer.DELETES_IGNORED_STAT) TOO_MANY_STAT(org.apache.accumulo.core.client.summary.CountingSummarizer.TOO_MANY_STAT) AccumuloServerException(org.apache.accumulo.core.client.impl.AccumuloServerException) Text(org.apache.hadoop.io.Text) Random(java.util.Random) Mutation(org.apache.accumulo.core.data.Mutation) TableNotFoundException(org.apache.accumulo.core.client.TableNotFoundException) MIN_TIMESTAMP_STAT(org.apache.accumulo.test.functional.BasicSummarizer.MIN_TIMESTAMP_STAT) BatchWriterConfig(org.apache.accumulo.core.client.BatchWriterConfig) Summarizer(org.apache.accumulo.core.client.summary.Summarizer) TOTAL_STAT(org.apache.accumulo.test.functional.BasicSummarizer.TOTAL_STAT) Map(java.util.Map) Value(org.apache.accumulo.core.data.Value) PatternSyntaxException(java.util.regex.PatternSyntaxException) ImmutableSet(com.google.common.collect.ImmutableSet) ImmutableMap(com.google.common.collect.ImmutableMap) TOO_LONG_STAT(org.apache.accumulo.core.client.summary.CountingSummarizer.TOO_LONG_STAT) Collection(java.util.Collection) MutationsRejectedException(org.apache.accumulo.core.client.MutationsRejectedException) Set(java.util.Set) SecurityErrorCode(org.apache.accumulo.core.client.security.SecurityErrorCode) CompactionStrategyConfig(org.apache.accumulo.core.client.admin.CompactionStrategyConfig) List(java.util.List) PasswordToken(org.apache.accumulo.core.client.security.tokens.PasswordToken) Filter(org.apache.accumulo.core.iterators.Filter) Stream(java.util.stream.Stream) FamilySummarizer(org.apache.accumulo.core.client.summary.summarizers.FamilySummarizer) DELETES_STAT(org.apache.accumulo.test.functional.BasicSummarizer.DELETES_STAT) Function.identity(java.util.function.Function.identity) 
Entry(java.util.Map.Entry) Scanner(org.apache.accumulo.core.client.Scanner) ZooKeeperInstance(org.apache.accumulo.core.client.ZooKeeperInstance) SEEN_STAT(org.apache.accumulo.core.client.summary.CountingSummarizer.SEEN_STAT) CompactionPlan(org.apache.accumulo.tserver.compaction.CompactionPlan) Iterables(com.google.common.collect.Iterables) VisibilitySummarizer(org.apache.accumulo.core.client.summary.summarizers.VisibilitySummarizer) SummarizerConfiguration(org.apache.accumulo.core.client.summary.SummarizerConfiguration) UtilWaitThread(org.apache.accumulo.fate.util.UtilWaitThread) Collectors.groupingBy(java.util.stream.Collectors.groupingBy) FileStatistics(org.apache.accumulo.core.client.summary.Summary.FileStatistics) HashMap(java.util.HashMap) CompactionConfig(org.apache.accumulo.core.client.admin.CompactionConfig) TreeSet(java.util.TreeSet) Connector(org.apache.accumulo.core.client.Connector) Builder(com.google.common.collect.ImmutableMap.Builder) NewTableConfiguration(org.apache.accumulo.core.client.admin.NewTableConfiguration) AccumuloClusterHarness(org.apache.accumulo.harness.AccumuloClusterHarness) TablePermission(org.apache.accumulo.core.security.TablePermission) Lists(com.google.common.collect.Lists) AccumuloSecurityException(org.apache.accumulo.core.client.AccumuloSecurityException) Key(org.apache.accumulo.core.data.Key) EMITTED_STAT(org.apache.accumulo.core.client.summary.CountingSummarizer.EMITTED_STAT) MAX_TIMESTAMP_STAT(org.apache.accumulo.test.functional.BasicSummarizer.MAX_TIMESTAMP_STAT) StreamSupport(java.util.stream.StreamSupport) CounterSummary(org.apache.accumulo.core.client.summary.CounterSummary) LongSummaryStatistics(java.util.LongSummaryStatistics) Summary(org.apache.accumulo.core.client.summary.Summary) MajorCompactionRequest(org.apache.accumulo.tserver.compaction.MajorCompactionRequest) IOException(java.io.IOException) Test(org.junit.Test) Authorizations(org.apache.accumulo.core.security.Authorizations) 
AccumuloException(org.apache.accumulo.core.client.AccumuloException) Range(org.apache.accumulo.core.data.Range) IteratorSetting(org.apache.accumulo.core.client.IteratorSetting) BatchWriter(org.apache.accumulo.core.client.BatchWriter) CompactionStrategy(org.apache.accumulo.tserver.compaction.CompactionStrategy) Assert(org.junit.Assert) Collections(java.util.Collections) Connector(org.apache.accumulo.core.client.Connector) Scanner(org.apache.accumulo.core.client.Scanner) CompactionStrategyConfig(org.apache.accumulo.core.client.admin.CompactionStrategyConfig) Entry(java.util.Map.Entry) IteratorSetting(org.apache.accumulo.core.client.IteratorSetting) NewTableConfiguration(org.apache.accumulo.core.client.admin.NewTableConfiguration) CompactionConfig(org.apache.accumulo.core.client.admin.CompactionConfig) BatchWriterConfig(org.apache.accumulo.core.client.BatchWriterConfig) BatchWriter(org.apache.accumulo.core.client.BatchWriter) SummarizerConfiguration(org.apache.accumulo.core.client.summary.SummarizerConfiguration) Test(org.junit.Test)

Example 38 with CompactionConfig

use of org.apache.accumulo.core.client.admin.CompactionConfig in project Gaffer by gchq.

The class GetRDDOfAllElementsHandlerIT defines the method getGraphForDirectRDDForValidationChecking.

private Graph getGraphForDirectRDDForValidationChecking(final KeyPackage keyPackage, final String tableName) throws InterruptedException, AccumuloException, AccumuloSecurityException, IOException, OperationException, StoreException, TableNotFoundException {
    // Builds a graph, loads unvalidated elements, then forces a full compaction
    // of the backing Accumulo table before returning the graph to the caller.
    final Schema schema = getSchemaForValidationChecking();
    final Graph graph = _getGraphForDirectRDD(keyPackage, tableName, schema, null);
    final AddElements addElements = new AddElements.Builder()
            .input(getElementsForValidationChecking())
            .validate(false)
            .build();
    graph.execute(addElements, USER);
    final AccumuloStore store = new AccumuloStore();
    store.initialise(tableName, schema, PROPERTIES_B);
    store.getConnection().tableOperations().compact(tableName, new CompactionConfig());
    // Brief pause before the caller inspects the table — presumably to let the
    // compaction's effects settle; TODO confirm whether a wait-based compact would suffice.
    Thread.sleep(1000L);
    return graph;
}
Also used : Graph(uk.gov.gchq.gaffer.graph.Graph) Schema(uk.gov.gchq.gaffer.store.schema.Schema) CompactionConfig(org.apache.accumulo.core.client.admin.CompactionConfig) AccumuloStore(uk.gov.gchq.gaffer.accumulostore.AccumuloStore)

Example 39 with CompactionConfig

use of org.apache.accumulo.core.client.admin.CompactionConfig in project accumulo by apache.

The class UserCompactionUtils defines the method decodeCompactionConfig.

/**
 * Deserializes a {@link CompactionConfig} from the given input. The read order must mirror
 * the corresponding encode method exactly: magic int, optional start row, optional end row,
 * iterator list, strategy config, configurer, selector, then execution hints.
 *
 * @param din input positioned at an encoded compaction config
 * @return the decoded compaction config
 * @throws IllegalArgumentException if the leading magic number does not match
 * @throws UncheckedIOException if reading from the input fails
 */
public static CompactionConfig decodeCompactionConfig(DataInput din) {
    try {
        // Sanity check that the stream really contains an encoded CompactionConfig.
        Preconditions.checkArgument(MAGIC == din.readInt());
        CompactionConfig cc = new CompactionConfig();
        // A boolean flag precedes each optional row; the row bytes follow only when true.
        if (din.readBoolean()) {
            Text startRow = new Text();
            startRow.readFields(din);
            cc.setStartRow(startRow);
        }
        if (din.readBoolean()) {
            Text endRow = new Text();
            endRow.readFields(din);
            cc.setEndRow(endRow);
        }
        // Iterator settings: a count followed by that many encoded IteratorSettings.
        int num = din.readInt();
        var iterators = new ArrayList<IteratorSetting>(num);
        for (int i = 0; i < num; i++) {
            iterators.add(new IteratorSetting(din));
        }
        cc.setIterators(iterators);
        CompactionStrategyConfigUtil.decode(cc, din);
        // Only set configurer/selector when non-default, so a decoded config compares
        // equal to one that never had them set.
        var configurer = decodeConfigurer(din);
        if (!isDefault(configurer)) {
            cc.setConfigurer(configurer);
        }
        var selector = decodeSelector(din);
        if (!isDefault(selector)) {
            cc.setSelector(selector);
        }
        var hints = decodeMap(din);
        cc.setExecutionHints(hints);
        return cc;
    } catch (IOException ioe) {
        // Callers treat this as a programming/stream error, not a checked condition.
        throw new UncheckedIOException(ioe);
    }
}
Also used : IteratorSetting(org.apache.accumulo.core.client.IteratorSetting) CompactionConfig(org.apache.accumulo.core.client.admin.CompactionConfig) ArrayList(java.util.ArrayList) Text(org.apache.hadoop.io.Text) UncheckedIOException(java.io.UncheckedIOException) IOException(java.io.IOException) UncheckedIOException(java.io.UncheckedIOException)

Example 40 with CompactionConfig

use of org.apache.accumulo.core.client.admin.CompactionConfig in project accumulo by apache.

The class FateServiceHandler defines the method executeFateOperation.

@Override
public void executeFateOperation(TInfo tinfo, TCredentials c, long opid, FateOperation op, List<ByteBuffer> arguments, Map<String, String> options, boolean autoCleanup) throws ThriftSecurityException, ThriftTableOperationException {
    authenticate(c);
    String goalMessage = op.toString() + " ";
    switch(op) {
        case NAMESPACE_CREATE:
            {
                TableOperation tableOp = TableOperation.CREATE;
                validateArgumentCount(arguments, tableOp, 1);
                String namespace = validateName(arguments.get(0), tableOp, NEW_NAMESPACE_NAME);
                if (!manager.security.canCreateNamespace(c))
                    throw new ThriftSecurityException(c.getPrincipal(), SecurityErrorCode.PERMISSION_DENIED);
                goalMessage += "Create " + namespace + " namespace.";
                manager.fate.seedTransaction(opid, new TraceRepo<>(new CreateNamespace(c.getPrincipal(), namespace, options)), autoCleanup, goalMessage);
                break;
            }
        case NAMESPACE_RENAME:
            {
                TableOperation tableOp = TableOperation.RENAME;
                validateArgumentCount(arguments, tableOp, 2);
                String oldName = validateName(arguments.get(0), tableOp, EXISTING_NAMESPACE_NAME.and(NOT_BUILTIN_NAMESPACE));
                String newName = validateName(arguments.get(1), tableOp, NEW_NAMESPACE_NAME);
                NamespaceId namespaceId = ClientServiceHandler.checkNamespaceId(manager.getContext(), oldName, tableOp);
                if (!manager.security.canRenameNamespace(c, namespaceId))
                    throw new ThriftSecurityException(c.getPrincipal(), SecurityErrorCode.PERMISSION_DENIED);
                goalMessage += "Rename " + oldName + " namespace to " + newName;
                manager.fate.seedTransaction(opid, new TraceRepo<>(new RenameNamespace(namespaceId, oldName, newName)), autoCleanup, goalMessage);
                break;
            }
        case NAMESPACE_DELETE:
            {
                TableOperation tableOp = TableOperation.DELETE;
                validateArgumentCount(arguments, tableOp, 1);
                String namespace = validateName(arguments.get(0), tableOp, EXISTING_NAMESPACE_NAME.and(NOT_BUILTIN_NAMESPACE));
                NamespaceId namespaceId = ClientServiceHandler.checkNamespaceId(manager.getContext(), namespace, tableOp);
                if (!manager.security.canDeleteNamespace(c, namespaceId))
                    throw new ThriftSecurityException(c.getPrincipal(), SecurityErrorCode.PERMISSION_DENIED);
                goalMessage += "Delete namespace Id: " + namespaceId;
                manager.fate.seedTransaction(opid, new TraceRepo<>(new DeleteNamespace(namespaceId)), autoCleanup, goalMessage);
                break;
            }
        case TABLE_CREATE:
            {
                TableOperation tableOp = TableOperation.CREATE;
                // offset where split data begins in arguments list
                int SPLIT_OFFSET = 4;
                if (arguments.size() < SPLIT_OFFSET) {
                    throw new ThriftTableOperationException(null, null, tableOp, TableOperationExceptionType.OTHER, "Expected at least " + SPLIT_OFFSET + " arguments, saw :" + arguments.size());
                }
                String tableName = validateName(arguments.get(0), tableOp, NEW_TABLE_NAME.and(NOT_BUILTIN_TABLE));
                TimeType timeType = TimeType.valueOf(ByteBufferUtil.toString(arguments.get(1)));
                InitialTableState initialTableState = InitialTableState.valueOf(ByteBufferUtil.toString(arguments.get(2)));
                int splitCount = Integer.parseInt(ByteBufferUtil.toString(arguments.get(3)));
                validateArgumentCount(arguments, tableOp, SPLIT_OFFSET + splitCount);
                Path splitsPath = null;
                Path splitsDirsPath = null;
                if (splitCount > 0) {
                    try {
                        Path tmpDir = mkTempDir(opid);
                        splitsPath = new Path(tmpDir, "splits");
                        splitsDirsPath = new Path(tmpDir, "splitsDirs");
                        writeSplitsToFile(splitsPath, arguments, splitCount, SPLIT_OFFSET);
                    } catch (IOException e) {
                        throw new ThriftTableOperationException(null, tableName, tableOp, TableOperationExceptionType.OTHER, "Exception thrown while writing splits to file system");
                    }
                }
                NamespaceId namespaceId;
                try {
                    namespaceId = Namespaces.getNamespaceId(manager.getContext(), TableNameUtil.qualify(tableName).getFirst());
                } catch (NamespaceNotFoundException e) {
                    throw new ThriftTableOperationException(null, tableName, tableOp, TableOperationExceptionType.NAMESPACE_NOTFOUND, "");
                }
                if (!manager.security.canCreateTable(c, tableName, namespaceId))
                    throw new ThriftSecurityException(c.getPrincipal(), SecurityErrorCode.PERMISSION_DENIED);
                for (Map.Entry<String, String> entry : options.entrySet()) {
                    if (!Property.isTablePropertyValid(entry.getKey(), entry.getValue())) {
                        throw new ThriftTableOperationException(null, tableName, tableOp, TableOperationExceptionType.OTHER, "Property or value not valid " + entry.getKey() + "=" + entry.getValue());
                    }
                }
                goalMessage += "Create table " + tableName + " " + initialTableState + " with " + splitCount + " splits.";
                manager.fate.seedTransaction(opid, new TraceRepo<>(new CreateTable(c.getPrincipal(), tableName, timeType, options, splitsPath, splitCount, splitsDirsPath, initialTableState, namespaceId)), autoCleanup, goalMessage);
                break;
            }
        case TABLE_RENAME:
            {
                TableOperation tableOp = TableOperation.RENAME;
                validateArgumentCount(arguments, tableOp, 2);
                String oldTableName = validateName(arguments.get(0), tableOp, EXISTING_TABLE_NAME.and(NOT_BUILTIN_TABLE));
                String newTableName = validateName(arguments.get(1), tableOp, NEW_TABLE_NAME.and(sameNamespaceAs(oldTableName)));
                TableId tableId = ClientServiceHandler.checkTableId(manager.getContext(), oldTableName, tableOp);
                NamespaceId namespaceId = getNamespaceIdFromTableId(tableOp, tableId);
                final boolean canRename;
                try {
                    canRename = manager.security.canRenameTable(c, tableId, oldTableName, newTableName, namespaceId);
                } catch (ThriftSecurityException e) {
                    throwIfTableMissingSecurityException(e, tableId, oldTableName, TableOperation.RENAME);
                    throw e;
                }
                if (!canRename)
                    throw new ThriftSecurityException(c.getPrincipal(), SecurityErrorCode.PERMISSION_DENIED);
                goalMessage += "Rename table " + oldTableName + "(" + tableId + ") to " + oldTableName;
                try {
                    manager.fate.seedTransaction(opid, new TraceRepo<>(new RenameTable(namespaceId, tableId, oldTableName, newTableName)), autoCleanup, goalMessage);
                } catch (NamespaceNotFoundException e) {
                    throw new ThriftTableOperationException(null, oldTableName, tableOp, TableOperationExceptionType.NAMESPACE_NOTFOUND, "");
                }
                break;
            }
        case TABLE_CLONE:
            {
                TableOperation tableOp = TableOperation.CLONE;
                validateArgumentCount(arguments, tableOp, 3);
                TableId srcTableId = validateTableIdArgument(arguments.get(0), tableOp, CAN_CLONE_TABLE);
                String tableName = validateName(arguments.get(1), tableOp, NEW_TABLE_NAME.and(NOT_BUILTIN_TABLE));
                boolean keepOffline = false;
                if (arguments.get(2) != null) {
                    keepOffline = Boolean.parseBoolean(ByteBufferUtil.toString(arguments.get(2)));
                }
                NamespaceId namespaceId;
                try {
                    namespaceId = Namespaces.getNamespaceId(manager.getContext(), TableNameUtil.qualify(tableName).getFirst());
                } catch (NamespaceNotFoundException e) {
                    // shouldn't happen, but possible once cloning between namespaces is supported
                    throw new ThriftTableOperationException(null, tableName, tableOp, TableOperationExceptionType.NAMESPACE_NOTFOUND, "");
                }
                final boolean canCloneTable;
                try {
                    canCloneTable = manager.security.canCloneTable(c, srcTableId, tableName, namespaceId, namespaceId);
                } catch (ThriftSecurityException e) {
                    throwIfTableMissingSecurityException(e, srcTableId, null, TableOperation.CLONE);
                    throw e;
                }
                if (!canCloneTable)
                    throw new ThriftSecurityException(c.getPrincipal(), SecurityErrorCode.PERMISSION_DENIED);
                Map<String, String> propertiesToSet = new HashMap<>();
                Set<String> propertiesToExclude = new HashSet<>();
                for (Entry<String, String> entry : options.entrySet()) {
                    if (entry.getKey().startsWith(TableOperationsImpl.CLONE_EXCLUDE_PREFIX)) {
                        propertiesToExclude.add(entry.getKey().substring(TableOperationsImpl.CLONE_EXCLUDE_PREFIX.length()));
                        continue;
                    }
                    if (!Property.isTablePropertyValid(entry.getKey(), entry.getValue())) {
                        throw new ThriftTableOperationException(null, tableName, tableOp, TableOperationExceptionType.OTHER, "Property or value not valid " + entry.getKey() + "=" + entry.getValue());
                    }
                    propertiesToSet.put(entry.getKey(), entry.getValue());
                }
                goalMessage += "Clone table " + srcTableId + " to " + tableName;
                if (keepOffline)
                    goalMessage += " and keep offline.";
                manager.fate.seedTransaction(opid, new TraceRepo<>(new CloneTable(c.getPrincipal(), namespaceId, srcTableId, tableName, propertiesToSet, propertiesToExclude, keepOffline)), autoCleanup, goalMessage);
                break;
            }
        case TABLE_DELETE:
            {
                TableOperation tableOp = TableOperation.DELETE;
                validateArgumentCount(arguments, tableOp, 1);
                String tableName = validateName(arguments.get(0), tableOp, EXISTING_TABLE_NAME.and(NOT_BUILTIN_TABLE));
                final TableId tableId = ClientServiceHandler.checkTableId(manager.getContext(), tableName, tableOp);
                NamespaceId namespaceId = getNamespaceIdFromTableId(tableOp, tableId);
                final boolean canDeleteTable;
                try {
                    canDeleteTable = manager.security.canDeleteTable(c, tableId, namespaceId);
                } catch (ThriftSecurityException e) {
                    throwIfTableMissingSecurityException(e, tableId, tableName, TableOperation.DELETE);
                    throw e;
                }
                if (!canDeleteTable)
                    throw new ThriftSecurityException(c.getPrincipal(), SecurityErrorCode.PERMISSION_DENIED);
                goalMessage += "Delete table " + tableName + "(" + tableId + ")";
                manager.fate.seedTransaction(opid, new TraceRepo<>(new PreDeleteTable(namespaceId, tableId)), autoCleanup, goalMessage);
                break;
            }
        case TABLE_ONLINE:
            {
                TableOperation tableOp = TableOperation.ONLINE;
                validateArgumentCount(arguments, tableOp, 1);
                final var tableId = validateTableIdArgument(arguments.get(0), tableOp, NOT_ROOT_TABLE_ID);
                NamespaceId namespaceId = getNamespaceIdFromTableId(tableOp, tableId);
                final boolean canOnlineOfflineTable;
                try {
                    canOnlineOfflineTable = manager.security.canOnlineOfflineTable(c, tableId, op, namespaceId);
                } catch (ThriftSecurityException e) {
                    throwIfTableMissingSecurityException(e, tableId, null, TableOperation.ONLINE);
                    throw e;
                }
                if (!canOnlineOfflineTable)
                    throw new ThriftSecurityException(c.getPrincipal(), SecurityErrorCode.PERMISSION_DENIED);
                goalMessage += "Online table " + tableId;
                manager.fate.seedTransaction(opid, new TraceRepo<>(new ChangeTableState(namespaceId, tableId, tableOp)), autoCleanup, goalMessage);
                break;
            }
        case TABLE_OFFLINE:
            {
                TableOperation tableOp = TableOperation.OFFLINE;
                validateArgumentCount(arguments, tableOp, 1);
                final var tableId = validateTableIdArgument(arguments.get(0), tableOp, NOT_ROOT_TABLE_ID);
                NamespaceId namespaceId = getNamespaceIdFromTableId(tableOp, tableId);
                final boolean canOnlineOfflineTable;
                try {
                    canOnlineOfflineTable = manager.security.canOnlineOfflineTable(c, tableId, op, namespaceId);
                } catch (ThriftSecurityException e) {
                    throwIfTableMissingSecurityException(e, tableId, null, TableOperation.OFFLINE);
                    throw e;
                }
                if (!canOnlineOfflineTable)
                    throw new ThriftSecurityException(c.getPrincipal(), SecurityErrorCode.PERMISSION_DENIED);
                goalMessage += "Offline table " + tableId;
                manager.fate.seedTransaction(opid, new TraceRepo<>(new ChangeTableState(namespaceId, tableId, tableOp)), autoCleanup, goalMessage);
                break;
            }
        case TABLE_MERGE:
            {
                TableOperation tableOp = TableOperation.MERGE;
                validateArgumentCount(arguments, tableOp, 3);
                String tableName = validateName(arguments.get(0), tableOp, EXISTING_TABLE_NAME);
                Text startRow = ByteBufferUtil.toText(arguments.get(1));
                Text endRow = ByteBufferUtil.toText(arguments.get(2));
                final TableId tableId = ClientServiceHandler.checkTableId(manager.getContext(), tableName, tableOp);
                NamespaceId namespaceId = getNamespaceIdFromTableId(tableOp, tableId);
                final boolean canMerge;
                try {
                    canMerge = manager.security.canMerge(c, tableId, namespaceId);
                } catch (ThriftSecurityException e) {
                    throwIfTableMissingSecurityException(e, tableId, tableName, TableOperation.MERGE);
                    throw e;
                }
                if (!canMerge)
                    throw new ThriftSecurityException(c.getPrincipal(), SecurityErrorCode.PERMISSION_DENIED);
                Manager.log.debug("Creating merge op: {} {} {}", tableId, startRow, endRow);
                goalMessage += "Merge table " + tableName + "(" + tableId + ") splits from " + startRow + " to " + endRow;
                manager.fate.seedTransaction(opid, new TraceRepo<>(new TableRangeOp(MergeInfo.Operation.MERGE, namespaceId, tableId, startRow, endRow)), autoCleanup, goalMessage);
                break;
            }
        case TABLE_DELETE_RANGE:
            {
                TableOperation tableOp = TableOperation.DELETE_RANGE;
                validateArgumentCount(arguments, tableOp, 3);
                String tableName = validateName(arguments.get(0), tableOp, EXISTING_TABLE_NAME.and(NOT_METADATA_TABLE));
                Text startRow = ByteBufferUtil.toText(arguments.get(1));
                Text endRow = ByteBufferUtil.toText(arguments.get(2));
                final TableId tableId = ClientServiceHandler.checkTableId(manager.getContext(), tableName, tableOp);
                NamespaceId namespaceId = getNamespaceIdFromTableId(tableOp, tableId);
                final boolean canDeleteRange;
                try {
                    canDeleteRange = manager.security.canDeleteRange(c, tableId, tableName, startRow, endRow, namespaceId);
                } catch (ThriftSecurityException e) {
                    throwIfTableMissingSecurityException(e, tableId, tableName, TableOperation.DELETE_RANGE);
                    throw e;
                }
                if (!canDeleteRange)
                    throw new ThriftSecurityException(c.getPrincipal(), SecurityErrorCode.PERMISSION_DENIED);
                goalMessage += "Delete table " + tableName + "(" + tableId + ") range " + startRow + " to " + endRow;
                manager.fate.seedTransaction(opid, new TraceRepo<>(new TableRangeOp(MergeInfo.Operation.DELETE, namespaceId, tableId, startRow, endRow)), autoCleanup, goalMessage);
                break;
            }
        case TABLE_BULK_IMPORT:
            {
                TableOperation tableOp = TableOperation.BULK_IMPORT;
                validateArgumentCount(arguments, tableOp, 4);
                String tableName = validateName(arguments.get(0), tableOp, EXISTING_TABLE_NAME.and(NOT_BUILTIN_TABLE));
                String dir = ByteBufferUtil.toString(arguments.get(1));
                String failDir = ByteBufferUtil.toString(arguments.get(2));
                boolean setTime = Boolean.parseBoolean(ByteBufferUtil.toString(arguments.get(3)));
                final TableId tableId = ClientServiceHandler.checkTableId(manager.getContext(), tableName, tableOp);
                NamespaceId namespaceId = getNamespaceIdFromTableId(tableOp, tableId);
                final boolean canBulkImport;
                try {
                    canBulkImport = manager.security.canBulkImport(c, tableId, tableName, dir, failDir, namespaceId);
                } catch (ThriftSecurityException e) {
                    throwIfTableMissingSecurityException(e, tableId, tableName, TableOperation.BULK_IMPORT);
                    throw e;
                }
                if (!canBulkImport)
                    throw new ThriftSecurityException(c.getPrincipal(), SecurityErrorCode.PERMISSION_DENIED);
                manager.updateBulkImportStatus(dir, BulkImportState.INITIAL);
                goalMessage += "Bulk import " + dir + " to " + tableName + "(" + tableId + ") failing to " + failDir;
                manager.fate.seedTransaction(opid, new TraceRepo<>(new org.apache.accumulo.manager.tableOps.bulkVer1.BulkImport(tableId, dir, failDir, setTime)), autoCleanup, goalMessage);
                break;
            }
        case TABLE_COMPACT:
            {
                // Compact a table. Arguments: [0] = table id, [1] = serialized CompactionConfig.
                TableOperation tableOp = TableOperation.COMPACT;
                validateArgumentCount(arguments, tableOp, 2);
                TableId tableId = validateTableIdArgument(arguments.get(0), tableOp, null);
                // Decode the client-supplied compaction configuration from its thrift byte form.
                CompactionConfig compactionConfig = UserCompactionUtils.decodeCompactionConfig(ByteBufferUtil.toBytes(arguments.get(1)));
                NamespaceId namespaceId = getNamespaceIdFromTableId(tableOp, tableId);
                final boolean canCompact;
                try {
                    canCompact = manager.security.canCompact(c, tableId, namespaceId);
                } catch (ThriftSecurityException e) {
                    // NOTE(review): presumably remaps the security failure to a table-not-found
                    // error when the table vanished mid-check — confirm against the helper.
                    throwIfTableMissingSecurityException(e, tableId, null, TableOperation.COMPACT);
                    throw e;
                }
                if (!canCompact)
                    throw new ThriftSecurityException(c.getPrincipal(), SecurityErrorCode.PERMISSION_DENIED);
                goalMessage += "Compact table (" + tableId + ") with config " + compactionConfig;
                // Run the compaction as a FATE transaction so it survives manager restarts.
                manager.fate.seedTransaction(opid, new TraceRepo<>(new CompactRange(namespaceId, tableId, compactionConfig)), autoCleanup, goalMessage);
                break;
            }
        case TABLE_CANCEL_COMPACT:
            {
                // Cancel any user-initiated compactions of a table. Arguments: [0] = table id.
                TableOperation tableOp = TableOperation.COMPACT_CANCEL;
                validateArgumentCount(arguments, tableOp, 1);
                TableId tableId = validateTableIdArgument(arguments.get(0), tableOp, null);
                NamespaceId namespaceId = getNamespaceIdFromTableId(tableOp, tableId);
                final boolean canCancelCompact;
                try {
                    // Cancelling reuses the same permission check as starting a compaction.
                    canCancelCompact = manager.security.canCompact(c, tableId, namespaceId);
                } catch (ThriftSecurityException e) {
                    throwIfTableMissingSecurityException(e, tableId, null, TableOperation.COMPACT_CANCEL);
                    throw e;
                }
                if (!canCancelCompact)
                    throw new ThriftSecurityException(c.getPrincipal(), SecurityErrorCode.PERMISSION_DENIED);
                goalMessage += "Cancel compaction of table (" + tableId + ")";
                // Seed the cancellation as its own FATE transaction.
                manager.fate.seedTransaction(opid, new TraceRepo<>(new CancelCompactions(namespaceId, tableId)), autoCleanup, goalMessage);
                break;
            }
        case TABLE_IMPORT:
            {
                // Import a previously exported table. Arguments: [0] = new table name,
                // [1..n] = one or more export directories.
                TableOperation tableOp = TableOperation.IMPORT;
                // offset where table list begins
                int IMPORT_DIR_OFFSET = 2;
                if (arguments.size() < IMPORT_DIR_OFFSET) {
                    // Fixed message: was "...OFFSET + \"arguments, sar :\"" which rendered as
                    // e.g. "Expected at least 2arguments, sar :1" (missing space, "sar" typo).
                    throw new ThriftTableOperationException(null, null, tableOp, TableOperationExceptionType.OTHER, "Expected at least " + IMPORT_DIR_OFFSET + " arguments, saw: " + arguments.size());
                }
                String tableName = validateName(arguments.get(0), tableOp, NEW_TABLE_NAME.and(NOT_BUILTIN_TABLE));
                // Everything after the table name is an export directory.
                List<ByteBuffer> exportDirArgs = arguments.stream().skip(1).collect(Collectors.toList());
                Set<String> exportDirs = ByteBufferUtil.toStringSet(exportDirArgs);
                NamespaceId namespaceId;
                try {
                    // Resolve the namespace from the qualified new table name.
                    namespaceId = Namespaces.getNamespaceId(manager.getContext(), TableNameUtil.qualify(tableName).getFirst());
                } catch (NamespaceNotFoundException e) {
                    throw new ThriftTableOperationException(null, tableName, tableOp, TableOperationExceptionType.NAMESPACE_NOTFOUND, "");
                }
                final boolean canImport;
                try {
                    canImport = manager.security.canImport(c, tableName, exportDirs, namespaceId);
                } catch (ThriftSecurityException e) {
                    throwIfTableMissingSecurityException(e, null, tableName, TableOperation.IMPORT);
                    throw e;
                }
                if (!canImport)
                    throw new ThriftSecurityException(c.getPrincipal(), SecurityErrorCode.PERMISSION_DENIED);
                goalMessage += "Import table with new name: " + tableName + " from " + exportDirs;
                // Run the import as a FATE transaction so it survives manager restarts.
                manager.fate.seedTransaction(opid, new TraceRepo<>(new ImportTable(c.getPrincipal(), tableName, exportDirs, namespaceId)), autoCleanup, goalMessage);
                break;
            }
        case TABLE_EXPORT:
            {
                // Export a table's metadata/file listing to a directory.
                // Arguments: [0] = table name, [1] = export directory.
                TableOperation tableOp = TableOperation.EXPORT;
                validateArgumentCount(arguments, tableOp, 2);
                String tableName = validateName(arguments.get(0), tableOp, EXISTING_TABLE_NAME.and(NOT_BUILTIN_TABLE));
                String exportDir = ByteBufferUtil.toString(arguments.get(1));
                TableId tableId = ClientServiceHandler.checkTableId(manager.getContext(), tableName, tableOp);
                NamespaceId namespaceId = getNamespaceIdFromTableId(tableOp, tableId);
                final boolean canExport;
                try {
                    canExport = manager.security.canExport(c, tableId, tableName, exportDir, namespaceId);
                } catch (ThriftSecurityException e) {
                    throwIfTableMissingSecurityException(e, tableId, tableName, TableOperation.EXPORT);
                    throw e;
                }
                if (!canExport)
                    throw new ThriftSecurityException(c.getPrincipal(), SecurityErrorCode.PERMISSION_DENIED);
                goalMessage += "Export table " + tableName + "(" + tableId + ") to " + exportDir;
                // Seed the export as a FATE transaction.
                manager.fate.seedTransaction(opid, new TraceRepo<>(new ExportTable(namespaceId, tableName, tableId, exportDir)), autoCleanup, goalMessage);
                break;
            }
        case TABLE_BULK_IMPORT2:
            {
                // Bulk import v2. Arguments: [0] = table id, [1] = source directory,
                // [2] = "true"/"false" flag for assigning the bulk-load timestamp.
                // Braces added for consistency with the other cases: every sibling case scopes
                // its locals in a block; previously these locals leaked into switch scope.
                TableOperation tableOp = TableOperation.BULK_IMPORT;
                validateArgumentCount(arguments, tableOp, 3);
                final var tableId = validateTableIdArgument(arguments.get(0), tableOp, NOT_ROOT_TABLE_ID);
                String dir = ByteBufferUtil.toString(arguments.get(1));
                boolean setTime = Boolean.parseBoolean(ByteBufferUtil.toString(arguments.get(2)));
                NamespaceId namespaceId = getNamespaceIdFromTableId(tableOp, tableId);
                final boolean canBulkImport;
                String tableName;
                try {
                    tableName = manager.getContext().getTableName(tableId);
                    // v2 has no separate failure directory, hence the null failDir argument.
                    canBulkImport = manager.security.canBulkImport(c, tableId, tableName, dir, null, namespaceId);
                } catch (ThriftSecurityException e) {
                    throwIfTableMissingSecurityException(e, tableId, "", TableOperation.BULK_IMPORT);
                    throw e;
                } catch (TableNotFoundException e) {
                    // The table was deleted between the id lookup and the permission check.
                    throw new ThriftTableOperationException(tableId.canonical(), null, TableOperation.BULK_IMPORT, TableOperationExceptionType.NOTFOUND, "Table no longer exists");
                }
                if (!canBulkImport)
                    throw new ThriftSecurityException(c.getPrincipal(), SecurityErrorCode.PERMISSION_DENIED);
                manager.updateBulkImportStatus(dir, BulkImportState.INITIAL);
                goalMessage += "Bulk import (v2)  " + dir + " to " + tableName + "(" + tableId + ")";
                // Seed the bulk import as a FATE transaction starting at the prepare step.
                manager.fate.seedTransaction(opid, new TraceRepo<>(new PrepBulkImport(tableId, dir, setTime)), autoCleanup, goalMessage);
                break;
            }
        default:
            // Any table operation not handled above is not supported by this handler.
            throw new UnsupportedOperationException();
    }
}
Also used : TableId(org.apache.accumulo.core.data.TableId) Set(java.util.Set) HashSet(java.util.HashSet) ChangeTableState(org.apache.accumulo.manager.tableOps.ChangeTableState) DeleteNamespace(org.apache.accumulo.manager.tableOps.namespace.delete.DeleteNamespace) PreDeleteTable(org.apache.accumulo.manager.tableOps.delete.PreDeleteTable) PrepBulkImport(org.apache.accumulo.manager.tableOps.bulkVer2.PrepBulkImport) InitialTableState(org.apache.accumulo.core.client.admin.InitialTableState) CreateTable(org.apache.accumulo.manager.tableOps.create.CreateTable) RenameNamespace(org.apache.accumulo.manager.tableOps.namespace.rename.RenameNamespace) TimeType(org.apache.accumulo.core.client.admin.TimeType) CancelCompactions(org.apache.accumulo.manager.tableOps.compact.cancel.CancelCompactions) TableNotFoundException(org.apache.accumulo.core.client.TableNotFoundException) Entry(java.util.Map.Entry) CompactionConfig(org.apache.accumulo.core.client.admin.CompactionConfig) ThriftTableOperationException(org.apache.accumulo.core.clientImpl.thrift.ThriftTableOperationException) List(java.util.List) ImportTable(org.apache.accumulo.manager.tableOps.tableImport.ImportTable) CloneTable(org.apache.accumulo.manager.tableOps.clone.CloneTable) Path(org.apache.hadoop.fs.Path) CompactRange(org.apache.accumulo.manager.tableOps.compact.CompactRange) Text(org.apache.hadoop.io.Text) TraceRepo(org.apache.accumulo.manager.tableOps.TraceRepo) IOException(java.io.IOException) ThriftSecurityException(org.apache.accumulo.core.clientImpl.thrift.ThriftSecurityException) NamespaceNotFoundException(org.apache.accumulo.core.client.NamespaceNotFoundException) ExportTable(org.apache.accumulo.manager.tableOps.tableExport.ExportTable) RenameTable(org.apache.accumulo.manager.tableOps.rename.RenameTable) TableOperation(org.apache.accumulo.core.clientImpl.thrift.TableOperation) TableRangeOp(org.apache.accumulo.manager.tableOps.merge.TableRangeOp) NamespaceId(org.apache.accumulo.core.data.NamespaceId) 
Map(java.util.Map) HashMap(java.util.HashMap) CreateNamespace(org.apache.accumulo.manager.tableOps.namespace.create.CreateNamespace)

Aggregations

CompactionConfig (org.apache.accumulo.core.client.admin.CompactionConfig)57 Test (org.junit.Test)36 AccumuloClient (org.apache.accumulo.core.client.AccumuloClient)32 Mutation (org.apache.accumulo.core.data.Mutation)21 BatchWriter (org.apache.accumulo.core.client.BatchWriter)20 IteratorSetting (org.apache.accumulo.core.client.IteratorSetting)17 Value (org.apache.accumulo.core.data.Value)14 PluginConfig (org.apache.accumulo.core.client.admin.PluginConfig)12 Scanner (org.apache.accumulo.core.client.Scanner)11 CompactionStrategyConfig (org.apache.accumulo.core.client.admin.CompactionStrategyConfig)11 NewTableConfiguration (org.apache.accumulo.core.client.admin.NewTableConfiguration)11 Text (org.apache.hadoop.io.Text)10 AccumuloException (org.apache.accumulo.core.client.AccumuloException)9 Connector (org.apache.accumulo.core.client.Connector)9 Key (org.apache.accumulo.core.data.Key)9 TableNotFoundException (org.apache.accumulo.core.client.TableNotFoundException)6 File (java.io.File)5 IOException (java.io.IOException)5 HashMap (java.util.HashMap)5 AccumuloSecurityException (org.apache.accumulo.core.client.AccumuloSecurityException)5