Example 76 with ServerContext

use of org.apache.accumulo.server.ServerContext in project accumulo by apache.

the class LogReader method execute.

@SuppressFBWarnings(value = "DM_EXIT", justification = "System.exit is fine here because it's a utility class executed by a main()")
@Override
public void execute(String[] args) throws Exception {
    Opts opts = new Opts();
    opts.parseArgs("accumulo wal-info", args);
    if (opts.files.isEmpty()) {
        System.err.println("No WAL files were given");
        System.exit(1);
    }
    var siteConfig = SiteConfiguration.auto();
    ServerContext context = new ServerContext(siteConfig);
    try (VolumeManager fs = context.getVolumeManager()) {
        Matcher rowMatcher = null;
        KeyExtent ke = null;
        Text row = null;
        if (opts.row != null) {
            row = new Text(opts.row);
        }
        if (opts.extent != null) {
            String[] sa = opts.extent.split(";");
            ke = new KeyExtent(TableId.of(sa[0]), new Text(sa[1]), new Text(sa[2]));
        }
        if (opts.regexp != null) {
            Pattern pattern = Pattern.compile(opts.regexp);
            rowMatcher = pattern.matcher("");
        }
        Set<Integer> tabletIds = new HashSet<>();
        for (String file : opts.files) {
            Path path = new Path(file);
            LogFileKey key = new LogFileKey();
            LogFileValue value = new LogFileValue();
            // ensure it's a regular non-sorted WAL file, and not a single sorted WAL in RFile format
            if (fs.getFileStatus(path).isFile()) {
                if (file.endsWith(".rf")) {
                    log.error("Unable to read from a single RFile. A non-sorted WAL file was expected. " + "To read sorted WALs, please pass in a directory containing the sorted recovery logs.");
                    continue;
                }
                try (final FSDataInputStream fsinput = fs.open(path);
                    DataInputStream input = DfsLogger.getDecryptingStream(fsinput, siteConfig)) {
                    while (true) {
                        try {
                            key.readFields(input);
                            value.readFields(input);
                        } catch (EOFException ex) {
                            break;
                        }
                        printLogEvent(key, value, row, rowMatcher, ke, tabletIds, opts.maxMutations);
                    }
                } catch (LogHeaderIncompleteException e) {
                    log.warn("Could not read header for {} . Ignoring...", path);
                    continue;
                }
            } else {
                // it's a directory of sorted recovery logs; read them with RecoveryLogsIterator
                try (var rli = new RecoveryLogsIterator(context, Collections.singletonList(path), null, null, false)) {
                    while (rli.hasNext()) {
                        Entry<LogFileKey, LogFileValue> entry = rli.next();
                        printLogEvent(entry.getKey(), entry.getValue(), row, rowMatcher, ke, tabletIds, opts.maxMutations);
                    }
                }
            }
        }
    }
}
Also used: Path (org.apache.hadoop.fs.Path), VolumeManager (org.apache.accumulo.server.fs.VolumeManager), Pattern (java.util.regex.Pattern), Matcher (java.util.regex.Matcher), Text (org.apache.hadoop.io.Text), DataInputStream (java.io.DataInputStream), FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream), KeyExtent (org.apache.accumulo.core.dataImpl.KeyExtent), LogHeaderIncompleteException (org.apache.accumulo.tserver.log.DfsLogger.LogHeaderIncompleteException), ServerContext (org.apache.accumulo.server.ServerContext), RecoveryLogsIterator (org.apache.accumulo.tserver.log.RecoveryLogsIterator), EOFException (java.io.EOFException), HashSet (java.util.HashSet), SuppressFBWarnings (edu.umd.cs.findbugs.annotations.SuppressFBWarnings)
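
The pattern this utility relies on is worth isolating: build a SiteConfiguration from the local accumulo.properties, wrap it in a ServerContext, and manage the VolumeManager with try-with-resources so the underlying filesystems are released. A minimal sketch under those assumptions (WalFileCheck is a hypothetical name, not part of Accumulo):

import org.apache.accumulo.core.conf.SiteConfiguration;
import org.apache.accumulo.server.ServerContext;
import org.apache.accumulo.server.fs.VolumeManager;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;

public class WalFileCheck {
    public static void main(String[] args) throws Exception {
        // SiteConfiguration.auto() locates accumulo.properties the same way
        // LogReader.execute does (assumption: the file is available to the process)
        var siteConfig = SiteConfiguration.auto();
        ServerContext context = new ServerContext(siteConfig);
        // VolumeManager is closed automatically, as in the example above
        try (VolumeManager fs = context.getVolumeManager()) {
            for (String file : args) {
                FileStatus status = fs.getFileStatus(new Path(file));
                System.out.println(file + " isFile=" + status.isFile());
            }
        }
    }
}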

Example 77 with ServerContext

use of org.apache.accumulo.server.ServerContext in project accumulo by apache.

the class DefaultCompactionStrategyTest method getServerContext.

public static ServerContext getServerContext() {
    ServerContext context = EasyMock.createMock(ServerContext.class);
    EasyMock.expect(context.getCryptoService()).andReturn(CryptoServiceFactory.newDefaultInstance()).anyTimes();
    EasyMock.replay(context);
    return context;
}
Also used: ServerContext (org.apache.accumulo.server.ServerContext)
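
Because the mock is replayed before it is returned, only the stubbed getters may be called on it; any other ServerContext method will fail the test. A variation on the helper, sketched under the assumption that the test only needs a configuration (DefaultConfiguration.getInstance() serves as a stand-in):

import org.apache.accumulo.core.conf.AccumuloConfiguration;
import org.apache.accumulo.core.conf.DefaultConfiguration;
import org.apache.accumulo.server.ServerContext;
import org.easymock.EasyMock;

public class MockContextExample {
    public static ServerContext getServerContextWithConfig() {
        ServerContext context = EasyMock.createMock(ServerContext.class);
        AccumuloConfiguration conf = DefaultConfiguration.getInstance();
        // stub the getter before replay(); expectations cannot be added afterwards
        EasyMock.expect(context.getConfiguration()).andReturn(conf).anyTimes();
        EasyMock.replay(context);
        return context;
    }
}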

Example 78 with ServerContext

use of org.apache.accumulo.server.ServerContext in project accumulo by apache.

the class ReplicationIT method replicationRecordsAreClosedAfterGarbageCollection.

@Test
public void replicationRecordsAreClosedAfterGarbageCollection() throws Exception {
    getCluster().getClusterControl().stop(ServerType.GARBAGE_COLLECTOR);
    final ServerContext context = getServerContext();
    try (AccumuloClient client = Accumulo.newClient().from(getClientProperties()).build()) {
        ReplicationTable.setOnline(client);
        client.securityOperations().grantTablePermission("root", ReplicationTable.NAME, TablePermission.WRITE);
        client.tableOperations().deleteRows(ReplicationTable.NAME, null, null);
        final AtomicBoolean keepRunning = new AtomicBoolean(true);
        final Set<String> metadataWals = new HashSet<>();
        Thread t = new Thread(() -> {
            // continuously record the WALs referenced by the metadata table until signaled to stop
            while (keepRunning.get()) {
                try {
                    metadataWals.addAll(getLogs(client, context).keySet());
                } catch (Exception e) {
                    log.error("Metadata table doesn't exist");
                }
            }
        });
        t.start();
        String table1 = "table1", table2 = "table2", table3 = "table3";
        Map<String, String> replicate_props = new HashMap<>();
        replicate_props.put(Property.TABLE_REPLICATION.getKey(), "true");
        replicate_props.put(Property.TABLE_REPLICATION_TARGET.getKey() + "cluster1", "1");
        try {
            client.tableOperations().create(table1, new NewTableConfiguration().setProperties(replicate_props));
            client.instanceOperations().setProperty(Property.REPLICATION_PEERS.getKey() + "cluster1", ReplicaSystemFactory.getPeerConfigurationValue(MockReplicaSystem.class, null));
            // Write some data to table1
            writeSomeData(client, table1, 200, 500);
            client.tableOperations().create(table2, new NewTableConfiguration().setProperties(replicate_props));
            writeSomeData(client, table2, 200, 500);
            client.tableOperations().create(table3, new NewTableConfiguration().setProperties(replicate_props));
            writeSomeData(client, table3, 200, 500);
            // Flush everything to try to make the replication records
            for (String table : Arrays.asList(table1, table2, table3)) {
                client.tableOperations().compact(table, null, null, true, true);
            }
        } finally {
            keepRunning.set(false);
            t.join(5000);
            assertFalse(t.isAlive());
        }
        // Kill the tserver(s) and restart them
        // to ensure that the WALs we previously observed all move to closed.
        cluster.getClusterControl().stop(ServerType.TABLET_SERVER);
        cluster.getClusterControl().start(ServerType.TABLET_SERVER);
        // Make sure we can read all the tables (recovery complete)
        for (String table : Arrays.asList(table1, table2, table3)) {
            Iterators.size(client.createScanner(table, Authorizations.EMPTY).iterator());
        }
        // Starting the gc will run CloseWriteAheadLogReferences which will first close Statuses
        // in the metadata table, and then in the replication table
        Process gc = cluster.exec(SimpleGarbageCollector.class).getProcess();
        waitForGCLock(client);
        Thread.sleep(1000);
        log.info("GC is up and should have had time to run at least once by now");
        try {
            boolean allClosed = true;
            // After they're closed, they are candidates for deletion
            for (int i = 0; i < 10; i++) {
                try (Scanner s = client.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
                    s.setRange(Range.prefix(ReplicationSection.getRowPrefix()));
                    Iterator<Entry<Key, Value>> iter = s.iterator();
                    long recordsFound = 0L;
                    while (allClosed && iter.hasNext()) {
                        Entry<Key, Value> entry = iter.next();
                        String wal = entry.getKey().getRow().toString();
                        if (metadataWals.contains(wal)) {
                            Status status = Status.parseFrom(entry.getValue().get());
                            log.info("{}={}", entry.getKey().toStringNoTruncate(), ProtobufUtil.toString(status));
                            allClosed &= status.getClosed();
                            recordsFound++;
                        }
                    }
                    log.info("Found {} records from the metadata table", recordsFound);
                    if (allClosed) {
                        break;
                    }
                    sleepUninterruptibly(2, TimeUnit.SECONDS);
                }
            }
            if (!allClosed) {
                try (Scanner s = client.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
                    s.setRange(Range.prefix(ReplicationSection.getRowPrefix()));
                    for (Entry<Key, Value> entry : s) {
                        log.info("{} {}", entry.getKey().toStringNoTruncate(), ProtobufUtil.toString(Status.parseFrom(entry.getValue().get())));
                    }
                    fail("Expected all replication records in the metadata table to be closed");
                }
            }
            for (int i = 0; i < 10; i++) {
                allClosed = true;
                try (Scanner s = ReplicationTable.getScanner(client)) {
                    Iterator<Entry<Key, Value>> iter = s.iterator();
                    long recordsFound = 0L;
                    while (allClosed && iter.hasNext()) {
                        Entry<Key, Value> entry = iter.next();
                        String wal = entry.getKey().getRow().toString();
                        if (metadataWals.contains(wal)) {
                            Status status = Status.parseFrom(entry.getValue().get());
                            log.info("{}={}", entry.getKey().toStringNoTruncate(), ProtobufUtil.toString(status));
                            allClosed &= status.getClosed();
                            recordsFound++;
                        }
                    }
                    log.info("Found {} records from the replication table", recordsFound);
                    if (allClosed) {
                        break;
                    }
                    sleepUninterruptibly(3, TimeUnit.SECONDS);
                }
            }
            if (!allClosed) {
                try (Scanner s = ReplicationTable.getScanner(client)) {
                    StatusSection.limit(s);
                    for (Entry<Key, Value> entry : s) {
                        log.info("{} {}", entry.getKey().toStringNoTruncate(), TextFormat.shortDebugString(Status.parseFrom(entry.getValue().get())));
                    }
                    fail("Expected all replication records in the replication table to be closed");
                }
            }
        } finally {
            gc.destroy();
            gc.waitFor();
        }
    }
}
Also used: AccumuloClient (org.apache.accumulo.core.client.AccumuloClient), Status (org.apache.accumulo.server.replication.proto.Replication.Status), Scanner (org.apache.accumulo.core.client.Scanner), HashMap (java.util.HashMap), SimpleGarbageCollector (org.apache.accumulo.gc.SimpleGarbageCollector), TableOfflineException (org.apache.accumulo.core.client.TableOfflineException), URISyntaxException (java.net.URISyntaxException), TableNotFoundException (org.apache.accumulo.core.client.TableNotFoundException), ReplicationTableOfflineException (org.apache.accumulo.core.replication.ReplicationTableOfflineException), AccumuloSecurityException (org.apache.accumulo.core.client.AccumuloSecurityException), NoSuchElementException (java.util.NoSuchElementException), AccumuloException (org.apache.accumulo.core.client.AccumuloException), AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean), Entry (java.util.Map.Entry), LogEntry (org.apache.accumulo.core.tabletserver.log.LogEntry), ServerContext (org.apache.accumulo.server.ServerContext), NewTableConfiguration (org.apache.accumulo.core.client.admin.NewTableConfiguration), Value (org.apache.accumulo.core.data.Value), Key (org.apache.accumulo.core.data.Key), HashSet (java.util.HashSet), Test (org.junit.Test)
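
The test polls each table up to ten times, sleeping between attempts, before declaring failure. That retry-until-true shape recurs often enough to factor out; a hypothetical helper (not part of Accumulo), sketched with java.util.function.BooleanSupplier:

import java.util.concurrent.TimeUnit;
import java.util.function.BooleanSupplier;

public final class Poll {
    // Check a condition up to `attempts` times, pausing between checks.
    // Returns true as soon as the condition holds, false if it never does.
    public static boolean await(BooleanSupplier condition, int attempts, long pauseMillis)
            throws InterruptedException {
        for (int i = 0; i < attempts; i++) {
            if (condition.getAsBoolean()) {
                return true;
            }
            TimeUnit.MILLISECONDS.sleep(pauseMillis);
        }
        return false;
    }
}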

Example 79 with ServerContext

use of org.apache.accumulo.server.ServerContext in project accumulo by apache.

the class CollectTabletStats method main.

public static void main(String[] args) throws Exception {
    final CollectOptions opts = new CollectOptions();
    opts.parseArgs(CollectTabletStats.class.getName(), args);
    String[] columnsTmp = {};
    if (opts.columns != null)
        columnsTmp = opts.columns.split(",");
    final String[] columns = columnsTmp;
    ServerContext context = opts.getServerContext();
    final VolumeManager fs = context.getVolumeManager();
    TableId tableId = context.getTableId(opts.tableName);
    if (tableId == null) {
        log.error("Unable to find table named {}", opts.tableName);
        System.exit(-1);
    }
    TreeMap<KeyExtent, String> tabletLocations = new TreeMap<>();
    List<KeyExtent> candidates = findTablets(context, !opts.selectFarTablets, opts.tableName, tabletLocations);
    if (candidates.size() < opts.numThreads) {
        System.err.println("ERROR : Unable to find " + opts.numThreads + " " + (opts.selectFarTablets ? "far" : "local") + " tablets");
        System.exit(-1);
    }
    List<KeyExtent> tabletsToTest = selectRandomTablets(opts.numThreads, candidates);
    Map<KeyExtent, List<TabletFile>> tabletFiles = new HashMap<>();
    for (KeyExtent ke : tabletsToTest) {
        List<TabletFile> files = getTabletFiles(context, ke);
        tabletFiles.put(ke, files);
    }
    System.out.println();
    System.out.println("run location      : " + InetAddress.getLocalHost().getHostName() + "/" + InetAddress.getLocalHost().getHostAddress());
    System.out.println("num threads       : " + opts.numThreads);
    System.out.println("table             : " + opts.tableName);
    System.out.println("table id          : " + tableId);
    for (KeyExtent ke : tabletsToTest) {
        System.out.println("\t *** Information about tablet " + ke.getUUID() + " *** ");
        System.out.println("\t\t# files in tablet : " + tabletFiles.get(ke).size());
        System.out.println("\t\ttablet location   : " + tabletLocations.get(ke));
        reportHdfsBlockLocations(context, tabletFiles.get(ke));
    }
    System.out.println("%n*** RUNNING TEST ***%n");
    ExecutorService threadPool = Executors.newFixedThreadPool(opts.numThreads);
    for (int i = 0; i < opts.iterations; i++) {
        ArrayList<Test> tests = new ArrayList<>();
        for (final KeyExtent ke : tabletsToTest) {
            final List<TabletFile> files = tabletFiles.get(ke);
            Test test = new Test(ke) {

                @Override
                public int runTest() throws Exception {
                    return readFiles(fs, context.getConfiguration(), files, ke, columns);
                }
            };
            tests.add(test);
        }
        runTest("read files", tests, opts.numThreads, threadPool);
    }
    for (int i = 0; i < opts.iterations; i++) {
        ArrayList<Test> tests = new ArrayList<>();
        for (final KeyExtent ke : tabletsToTest) {
            final List<TabletFile> files = tabletFiles.get(ke);
            Test test = new Test(ke) {

                @Override
                public int runTest() throws Exception {
                    return readFilesUsingIterStack(fs, context, files, opts.auths, ke, columns, false);
                }
            };
            tests.add(test);
        }
        runTest("read tablet files w/ system iter stack", tests, opts.numThreads, threadPool);
    }
    for (int i = 0; i < opts.iterations; i++) {
        ArrayList<Test> tests = new ArrayList<>();
        for (final KeyExtent ke : tabletsToTest) {
            final List<TabletFile> files = tabletFiles.get(ke);
            Test test = new Test(ke) {

                @Override
                public int runTest() throws Exception {
                    return readFilesUsingIterStack(fs, context, files, opts.auths, ke, columns, true);
                }
            };
            tests.add(test);
        }
        runTest("read tablet files w/ table iter stack", tests, opts.numThreads, threadPool);
    }
    try (AccumuloClient client = Accumulo.newClient().from(opts.getClientProps()).build()) {
        for (int i = 0; i < opts.iterations; i++) {
            ArrayList<Test> tests = new ArrayList<>();
            for (final KeyExtent ke : tabletsToTest) {
                Test test = new Test(ke) {

                    @Override
                    public int runTest() throws Exception {
                        return scanTablet(client, opts.tableName, opts.auths, ke.prevEndRow(), ke.endRow(), columns);
                    }
                };
                tests.add(test);
            }
            runTest("read tablet data through accumulo", tests, opts.numThreads, threadPool);
        }
        for (final KeyExtent ke : tabletsToTest) {
            threadPool.submit(() -> {
                try {
                    calcTabletStats(client, opts.tableName, opts.auths, ke, columns);
                } catch (Exception e) {
                    log.error("Failed to calculate tablet stats.", e);
                }
            });
        }
    }
    threadPool.shutdown();
}
Also used: TableId (org.apache.accumulo.core.data.TableId), AccumuloClient (org.apache.accumulo.core.client.AccumuloClient), VolumeManager (org.apache.accumulo.server.fs.VolumeManager), HashMap (java.util.HashMap), ArrayList (java.util.ArrayList), TreeMap (java.util.TreeMap), KeyExtent (org.apache.accumulo.core.dataImpl.KeyExtent), IOException (java.io.IOException), ServerContext (org.apache.accumulo.server.ServerContext), ExecutorService (java.util.concurrent.ExecutorService), List (java.util.List), TabletFile (org.apache.accumulo.core.metadata.TabletFile)
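
Each pass builds anonymous Test subclasses and hands them to a fixed thread pool, so the per-tablet reads run concurrently. The same fan-out can be written with plain Callables; a sketch under the assumption that each work item just returns a count (the stand-in for readFiles), with hypothetical names throughout:

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class ParallelReadSketch {
    public static void main(String[] args) throws Exception {
        ExecutorService pool = Executors.newFixedThreadPool(4);
        List<Callable<Integer>> tasks = new ArrayList<>();
        for (int i = 0; i < 4; i++) {
            final int id = i;  // captured by the lambda, so it must be effectively final
            tasks.add(() -> {
                // stand-in for the per-tablet read; returns a record count
                return id * 100;
            });
        }
        int total = 0;
        for (Future<Integer> f : pool.invokeAll(tasks)) {  // blocks until all tasks finish
            total += f.get();
        }
        System.out.println("total = " + total);
        pool.shutdown();
    }
}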

Example 80 with ServerContext

use of org.apache.accumulo.server.ServerContext in project accumulo by apache.

the class NullTserver method main.

public static void main(String[] args) throws Exception {
    Opts opts = new Opts();
    opts.parseArgs(NullTserver.class.getName(), args);
    // modify metadata
    int zkTimeOut = (int) DefaultConfiguration.getInstance().getTimeInMillis(Property.INSTANCE_ZK_TIMEOUT);
    var siteConfig = SiteConfiguration.auto();
    ServerContext context = ServerContext.override(siteConfig, opts.iname, opts.keepers, zkTimeOut);
    TransactionWatcher watcher = new TransactionWatcher(context);
    ThriftClientHandler tch = new ThriftClientHandler(context, watcher);
    Processor<Iface> processor = new Processor<>(tch);
    TServerUtils.startTServer(context.getConfiguration(), ThriftServerType.CUSTOM_HS_HA, processor, "NullTServer", "null tserver", 2, ThreadPools.DEFAULT_TIMEOUT_MILLISECS, 1000, 10 * 1024 * 1024, null, null, -1, HostAndPort.fromParts("0.0.0.0", opts.port));
    HostAndPort addr = HostAndPort.fromParts(InetAddress.getLocalHost().getHostName(), opts.port);
    TableId tableId = context.getTableId(opts.tableName);
    // read the locations for the table
    Range tableRange = new KeyExtent(tableId, null, null).toMetaRange();
    List<Assignment> assignments = new ArrayList<>();
    try (var s = new MetaDataTableScanner(context, tableRange, MetadataTable.NAME)) {
        long randomSessionID = opts.port;
        TServerInstance instance = new TServerInstance(addr, randomSessionID);
        while (s.hasNext()) {
            TabletLocationState next = s.next();
            assignments.add(new Assignment(next.extent, instance));
        }
    }
    // point them to this server
    TabletStateStore store = TabletStateStore.getStoreForLevel(DataLevel.USER, context);
    store.setLocations(assignments);
    while (true) {
        sleepUninterruptibly(10, TimeUnit.SECONDS);
    }
}
Also used: TableId (org.apache.accumulo.core.data.TableId), Processor (org.apache.accumulo.core.tabletserver.thrift.TabletClientService.Processor), ArrayList (java.util.ArrayList), TabletStateStore (org.apache.accumulo.server.manager.state.TabletStateStore), TRange (org.apache.accumulo.core.dataImpl.thrift.TRange), TRowRange (org.apache.accumulo.core.dataImpl.thrift.TRowRange), Range (org.apache.accumulo.core.data.Range), TKeyExtent (org.apache.accumulo.core.dataImpl.thrift.TKeyExtent), KeyExtent (org.apache.accumulo.core.dataImpl.KeyExtent), TServerInstance (org.apache.accumulo.core.metadata.TServerInstance), Assignment (org.apache.accumulo.server.manager.state.Assignment), HostAndPort (org.apache.accumulo.core.util.HostAndPort), Iface (org.apache.accumulo.core.tabletserver.thrift.TabletClientService.Iface), TransactionWatcher (org.apache.accumulo.server.zookeeper.TransactionWatcher), ServerContext (org.apache.accumulo.server.ServerContext), MetaDataTableScanner (org.apache.accumulo.server.manager.state.MetaDataTableScanner), TabletLocationState (org.apache.accumulo.core.metadata.TabletLocationState)
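
The metadata scan in the middle of main() is itself a reusable idiom: convert a table id into its metadata range, then iterate TabletLocationState entries. A minimal sketch extracted from the code above, assuming a live ServerContext (ListTabletExtents is a hypothetical name):

import java.util.ArrayList;
import java.util.List;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.data.TableId;
import org.apache.accumulo.core.dataImpl.KeyExtent;
import org.apache.accumulo.core.metadata.MetadataTable;
import org.apache.accumulo.core.metadata.TabletLocationState;
import org.apache.accumulo.server.ServerContext;
import org.apache.accumulo.server.manager.state.MetaDataTableScanner;

public class ListTabletExtents {
    public static List<KeyExtent> extentsOf(ServerContext context, TableId tableId) {
        // a KeyExtent with null end rows covers the whole table; toMetaRange()
        // maps it to the corresponding rows of the metadata table
        Range tableRange = new KeyExtent(tableId, null, null).toMetaRange();
        List<KeyExtent> extents = new ArrayList<>();
        try (var s = new MetaDataTableScanner(context, tableRange, MetadataTable.NAME)) {
            while (s.hasNext()) {
                TabletLocationState tls = s.next();
                extents.add(tls.extent);
            }
        }
        return extents;
    }
}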

Aggregations

ServerContext (org.apache.accumulo.server.ServerContext): 87
Test (org.junit.Test): 41
ZooReaderWriter (org.apache.accumulo.fate.zookeeper.ZooReaderWriter): 18
AccumuloClient (org.apache.accumulo.core.client.AccumuloClient): 15
AccumuloConfiguration (org.apache.accumulo.core.conf.AccumuloConfiguration): 15
TServerInstance (org.apache.accumulo.core.metadata.TServerInstance): 15
HostAndPort (org.apache.accumulo.core.util.HostAndPort): 15
Path (org.apache.hadoop.fs.Path): 15
ArrayList (java.util.ArrayList): 14
KeyExtent (org.apache.accumulo.core.dataImpl.KeyExtent): 14
VolumeManager (org.apache.accumulo.server.fs.VolumeManager): 13
KeeperException (org.apache.zookeeper.KeeperException): 13
ServerAddress (org.apache.accumulo.server.rpc.ServerAddress): 12
TableId (org.apache.accumulo.core.data.TableId): 11
LiveTServerSet (org.apache.accumulo.server.manager.LiveTServerSet): 11
Value (org.apache.accumulo.core.data.Value): 10
IOException (java.io.IOException): 9
UUID (java.util.UUID): 9
ConfigurationCopy (org.apache.accumulo.core.conf.ConfigurationCopy): 9
Client (org.apache.accumulo.core.tabletserver.thrift.TabletClientService.Client): 9