Search in sources:

Example 6 with AccumuloClient

Use of org.apache.accumulo.core.client.AccumuloClient in project accumulo by apache.

The class MergeStats, method main.

public static void main(String[] args) throws Exception {
    // Parse the standard server CLI options (client properties, server context).
    ServerUtilOpts opts = new ServerUtilOpts();
    opts.parseArgs(MergeStats.class.getName(), args);
    Span span = TraceUtil.startSpan(MergeStats.class, "main");
    // A single try-with-resources manages both the trace scope and the client;
    // resources close in reverse declaration order (client first, then scope),
    // matching the original nested-try behavior, and the span ends last.
    try (Scope scope = span.makeCurrent();
            AccumuloClient client = Accumulo.newClient().from(opts.getClientProps()).build()) {
        Map<String, String> tableIds = client.tableOperations().tableIdMap();
        ZooReaderWriter zrw = opts.getServerContext().getZooReaderWriter();
        for (Entry<String, String> tableEntry : tableIds.entrySet()) {
            final String tableName = tableEntry.getKey();
            final String tableId = tableEntry.getValue();
            // ZooKeeper node holding serialized merge state for this table, if present.
            String mergePath = ZooUtil.getRoot(client.instanceOperations().getInstanceId()) + Constants.ZTABLES + "/" + tableId + "/merge";
            MergeInfo mergeInfo = new MergeInfo();
            if (zrw.exists(mergePath)) {
                // Deserialize the Writable MergeInfo from the raw node bytes.
                byte[] serialized = zrw.getData(mergePath);
                DataInputBuffer buffer = new DataInputBuffer();
                buffer.reset(serialized, serialized.length);
                mergeInfo.readFields(buffer);
            }
            System.out.printf("%25s  %10s %10s %s%n", tableName, mergeInfo.getState(), mergeInfo.getOperation(), mergeInfo.getExtent());
        }
    } finally {
        span.end();
    }
}
Also used: AccumuloClient(org.apache.accumulo.core.client.AccumuloClient) MergeInfo(org.apache.accumulo.server.manager.state.MergeInfo) DataInputBuffer(org.apache.hadoop.io.DataInputBuffer) Scope(io.opentelemetry.context.Scope) ZooReaderWriter(org.apache.accumulo.fate.zookeeper.ZooReaderWriter) ServerUtilOpts(org.apache.accumulo.server.cli.ServerUtilOpts) Span(io.opentelemetry.api.trace.Span)

Example 7 with AccumuloClient

Use of org.apache.accumulo.core.client.AccumuloClient in project accumulo by apache.

The class BinaryStressIT, method resetConfig.

@After
public void resetConfig() throws Exception {
    // Nothing to restore if alterConfig() never captured the original values.
    if (majcDelay == null) {
        return;
    }
    try (AccumuloClient client = Accumulo.newClient().from(getClientProps()).build()) {
        InstanceOperations instanceOps = client.instanceOperations();
        // Put back the settings saved before the test modified them.
        instanceOps.setProperty(Property.TSERV_MAJC_DELAY.getKey(), majcDelay);
        instanceOps.setProperty(Property.TSERV_MAXMEM.getKey(), maxMem);
    }
    // Bounce the tablet servers so the restored properties take effect.
    getClusterControl().stopAllServers(ServerType.TABLET_SERVER);
    getClusterControl().startAllServers(ServerType.TABLET_SERVER);
}
Also used: AccumuloClient(org.apache.accumulo.core.client.AccumuloClient) InstanceOperations(org.apache.accumulo.core.client.admin.InstanceOperations) After(org.junit.After)

Example 8 with AccumuloClient

Use of org.apache.accumulo.core.client.AccumuloClient in project accumulo by apache.

The class BinaryStressIT, method alterConfig.

@Before
public void alterConfig() throws Exception {
    // Only alter configuration on non-mini clusters.
    if (getClusterType() == ClusterType.MINI) {
        return;
    }
    try (AccumuloClient client = Accumulo.newClient().from(getClientProps()).build()) {
        InstanceOperations instanceOps = client.instanceOperations();
        Map<String, String> systemConfig = instanceOps.getSystemConfiguration();
        // Remember the current values so resetConfig() can restore them afterwards.
        majcDelay = systemConfig.get(Property.TSERV_MAJC_DELAY.getKey());
        maxMem = systemConfig.get(Property.TSERV_MAXMEM.getKey());
        // Tighten compaction delay and memory for the stress test.
        instanceOps.setProperty(Property.TSERV_MAJC_DELAY.getKey(), "50ms");
        instanceOps.setProperty(Property.TSERV_MAXMEM.getKey(), "50K");
        // Bounce the tablet servers so the new properties take effect.
        getClusterControl().stopAllServers(ServerType.TABLET_SERVER);
        getClusterControl().startAllServers(ServerType.TABLET_SERVER);
    }
}
Also used: AccumuloClient(org.apache.accumulo.core.client.AccumuloClient) InstanceOperations(org.apache.accumulo.core.client.admin.InstanceOperations) Before(org.junit.Before)

Example 9 with AccumuloClient

Use of org.apache.accumulo.core.client.AccumuloClient in project accumulo by apache.

The class BulkFailureIT, method runTest.

/**
 * This test verifies two things. First it ensures that after a bulk imported file is compacted,
 * import requests for it are ignored. Second it ensures that after the bulk import transaction
 * is canceled, import requests fail. The public API for bulk import cannot be used for this
 * test. Internal (non public API) RPCs and Zookeeper state are manipulated directly. This is
 * the only way to interleave compactions with multiple, duplicate import RPC requests.
 */
protected void runTest(String table, long fateTxid, Loader loader) throws Exception {
    // NOTE: the original throws clause listed eight exception types alongside Exception,
    // all of which are subtypes of Exception; the clause is simplified to just Exception.
    try (AccumuloClient c = Accumulo.newClient().from(getClientProps()).build()) {
        SortedMap<Key, Value> testData = createTestData();
        FileSystem fs = getCluster().getFileSystem();
        String testFile = createTestFile(fateTxid, testData, fs);
        c.tableOperations().create(table);
        String tableId = c.tableOperations().tableIdMap().get(table);
        // Table has no splits, so this extent corresponds to the table's single tablet
        KeyExtent extent = new KeyExtent(TableId.of(tableId), null, null);
        ServerContext asCtx = getServerContext();
        // Start the bulk transaction arbitrator so the load requests are considered active.
        ZooArbitrator.start(asCtx, Constants.BULK_ARBITRATOR_TYPE, fateTxid);
        VolumeManager vm = asCtx.getVolumeManager();
        // move the file into a directory for the table and rename the file to something unique
        String bulkDir = BulkImport.prepareBulkImport(asCtx, vm, testFile, TableId.of(tableId), fateTxid);
        // determine the file's new name and path
        FileStatus status = fs.listStatus(new Path(bulkDir))[0];
        Path bulkLoadPath = fs.makeQualified(status.getPath());
        // Directly ask the tablet to load the file.
        loader.load(fateTxid, asCtx, extent, bulkLoadPath, status.getLen(), false);
        assertEquals(Set.of(bulkLoadPath), getFiles(c, extent));
        assertEquals(Set.of(bulkLoadPath), getLoaded(c, extent));
        assertEquals(testData, readTable(table, c));
        // Compact the bulk imported file. Subsequent requests to load the file should be ignored.
        c.tableOperations().compact(table, new CompactionConfig().setWait(true));
        Set<Path> tabletFiles = getFiles(c, extent);
        // The compaction replaced the bulk file with a single new file.
        assertFalse(tabletFiles.contains(bulkLoadPath));
        assertEquals(1, tabletFiles.size());
        assertEquals(Set.of(bulkLoadPath), getLoaded(c, extent));
        assertEquals(testData, readTable(table, c));
        // this request should be ignored by the tablet
        loader.load(fateTxid, asCtx, extent, bulkLoadPath, status.getLen(), false);
        assertEquals(tabletFiles, getFiles(c, extent));
        assertEquals(Set.of(bulkLoadPath), getLoaded(c, extent));
        assertEquals(testData, readTable(table, c));
        // this is done to ensure the tablet reads the load flags from the metadata table when it
        // loads
        c.tableOperations().offline(table, true);
        c.tableOperations().online(table, true);
        // this request should be ignored by the tablet
        loader.load(fateTxid, asCtx, extent, bulkLoadPath, status.getLen(), false);
        assertEquals(tabletFiles, getFiles(c, extent));
        assertEquals(Set.of(bulkLoadPath), getLoaded(c, extent));
        assertEquals(testData, readTable(table, c));
        // After this, all load requests should fail.
        ZooArbitrator.stop(asCtx, Constants.BULK_ARBITRATOR_TYPE, fateTxid);
        c.securityOperations().grantTablePermission(c.whoami(), MetadataTable.NAME, TablePermission.WRITE);
        // Remove the bulk-load markers from the metadata table. The BatchDeleter is an
        // AutoCloseable resource; the original code never closed it (resource leak).
        try (BatchDeleter bd = c.createBatchDeleter(MetadataTable.NAME, Authorizations.EMPTY, 1)) {
            bd.setRanges(Collections.singleton(extent.toMetaRange()));
            bd.fetchColumnFamily(BulkFileColumnFamily.NAME);
            bd.delete();
        }
        loader.load(fateTxid, asCtx, extent, bulkLoadPath, status.getLen(), true);
        assertEquals(tabletFiles, getFiles(c, extent));
        // the loaded markers were deleted above, so nothing reports as loaded
        assertEquals(Set.of(), getLoaded(c, extent));
        assertEquals(testData, readTable(table, c));
    }
}
Also used: AccumuloClient(org.apache.accumulo.core.client.AccumuloClient) Path(org.apache.hadoop.fs.Path) VolumeManager(org.apache.accumulo.server.fs.VolumeManager) BatchDeleter(org.apache.accumulo.core.client.BatchDeleter) FileStatus(org.apache.hadoop.fs.FileStatus) KeyExtent(org.apache.accumulo.core.dataImpl.KeyExtent) ServerContext(org.apache.accumulo.server.ServerContext) FileSystem(org.apache.hadoop.fs.FileSystem) Value(org.apache.accumulo.core.data.Value) CompactionConfig(org.apache.accumulo.core.client.admin.CompactionConfig) Key(org.apache.accumulo.core.data.Key)

Example 10 with AccumuloClient

Use of org.apache.accumulo.core.client.AccumuloClient in project accumulo by apache.

The class BulkNewIT, method testBadLoadPlans.

@Test
public void testBadLoadPlans() throws Exception {
    try (AccumuloClient client = Accumulo.newClient().from(getClientProps()).build()) {
        addSplits(client, tableName, "0333 0666 0999 1333 1666");
        String bulkDir = getDir("/testBulkFile-");
        // Exactly two files are written into the bulk directory.
        writeData(bulkDir + "/f1.", aconf, 0, 333);
        writeData(bulkDir + "/f2.", aconf, 0, 666);
        final var importMappingOptions = client.tableOperations().importDirectory(bulkDir).to(tableName);
        // Plan references f3.rf, which was never written: more files than exist in the dir.
        LoadPlan plan = LoadPlan.builder()
                .loadFileTo("f1.rf", RangeType.TABLE, null, row(333))
                .loadFileTo("f2.rf", RangeType.TABLE, null, row(666))
                .loadFileTo("f3.rf", RangeType.TABLE, null, row(666))
                .build();
        final var tooManyFiles = importMappingOptions.plan(plan);
        assertThrows(IllegalArgumentException.class, tooManyFiles::load);
        // Plan omits f2.rf: fewer files than exist in the dir.
        plan = LoadPlan.builder()
                .loadFileTo("f1.rf", RangeType.TABLE, null, row(333))
                .build();
        final var tooFewFiles = importMappingOptions.plan(plan);
        assertThrows(IllegalArgumentException.class, tooFewFiles::load);
        // Row 555 is not one of the split points added above: nonexistent tablet boundary.
        plan = LoadPlan.builder()
                .loadFileTo("f1.rf", RangeType.TABLE, null, row(555))
                .loadFileTo("f2.rf", RangeType.TABLE, null, row(555))
                .build();
        final var nonExistentBoundary = importMappingOptions.plan(plan);
        assertThrows(AccumuloException.class, nonExistentBoundary::load);
    }
}
Also used: AccumuloClient(org.apache.accumulo.core.client.AccumuloClient) LoadPlan(org.apache.accumulo.core.data.LoadPlan) Test(org.junit.Test)

Aggregations

AccumuloClient (org.apache.accumulo.core.client.AccumuloClient)500 Test (org.junit.Test)411 BatchWriter (org.apache.accumulo.core.client.BatchWriter)149 Text (org.apache.hadoop.io.Text)143 Mutation (org.apache.accumulo.core.data.Mutation)138 Scanner (org.apache.accumulo.core.client.Scanner)122 Value (org.apache.accumulo.core.data.Value)118 Key (org.apache.accumulo.core.data.Key)108 NewTableConfiguration (org.apache.accumulo.core.client.admin.NewTableConfiguration)91 IteratorSetting (org.apache.accumulo.core.client.IteratorSetting)64 HashMap (java.util.HashMap)61 Range (org.apache.accumulo.core.data.Range)51 TreeSet (java.util.TreeSet)50 ArrayList (java.util.ArrayList)47 Entry (java.util.Map.Entry)41 Path (org.apache.hadoop.fs.Path)39 CompactionConfig (org.apache.accumulo.core.client.admin.CompactionConfig)34 Authorizations (org.apache.accumulo.core.security.Authorizations)34 BatchScanner (org.apache.accumulo.core.client.BatchScanner)32 HashSet (java.util.HashSet)31