Example 16 with AccumuloServerContext

Use of org.apache.accumulo.server.AccumuloServerContext in project accumulo by apache.

From class GetMasterStats, method main.

public static void main(String[] args) throws Exception {
    MasterClientService.Iface client = null;
    MasterMonitorInfo stats = null;
    Instance instance = HdfsZooInstance.getInstance();
    AccumuloServerContext context = new AccumuloServerContext(instance, new ServerConfigurationFactory(instance));
    // Keep retrying until an active master responds; each attempt opens a fresh
    // connection, and the finally block closes it before the next try.
    while (true) {
        try {
            client = MasterClient.getConnectionWithRetry(context);
            stats = client.getMasterStats(Tracer.traceInfo(), context.rpcCreds());
            break;
        } catch (ThriftNotActiveServiceException e) {
            // Let it loop, fetching a new location
            sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
        } finally {
            if (client != null)
                MasterClient.close(client);
        }
    }
    out(0, "State: " + stats.state.name());
    out(0, "Goal State: " + stats.goalState.name());
    if (stats.serversShuttingDown != null && stats.serversShuttingDown.size() > 0) {
        out(0, "Servers to shutdown");
        for (String server : stats.serversShuttingDown) {
            out(1, "%s", server);
        }
    }
    out(0, "Unassigned tablets: %d", stats.unassignedTablets);
    if (stats.badTServers != null && stats.badTServers.size() > 0) {
        out(0, "Bad servers");
        for (Entry<String, Byte> entry : stats.badTServers.entrySet()) {
            out(1, "%s: %d", entry.getKey(), (int) entry.getValue());
        }
    }
    out(0, "Dead tablet servers count: %s", stats.deadTabletServers.size());
    for (DeadServer dead : stats.deadTabletServers) {
        out(1, "Dead tablet server: %s", dead.server);
        out(2, "Last report: %s", new SimpleDateFormat().format(new Date(dead.lastStatus)));
        out(2, "Cause: %s", dead.status);
    }
    out(0, "Bulk imports: %s", stats.bulkImports.size());
    for (BulkImportStatus bulk : stats.bulkImports) {
        out(1, "Import directory: %s", bulk.filename);
        out(2, "Bulk state %s", bulk.state);
        out(2, "Bulk start %s", bulk.startTime);
    }
    if (stats.tableMap != null && stats.tableMap.size() > 0) {
        out(0, "Tables");
        for (Entry<String, TableInfo> entry : stats.tableMap.entrySet()) {
            TableInfo v = entry.getValue();
            out(1, "%s", entry.getKey());
            out(2, "Records: %d", v.recs);
            out(2, "Records in Memory: %d", v.recsInMemory);
            out(2, "Tablets: %d", v.tablets);
            out(2, "Online Tablets: %d", v.onlineTablets);
            out(2, "Ingest Rate: %.2f", v.ingestRate);
            out(2, "Query Rate: %.2f", v.queryRate);
        }
    }
    if (stats.tServerInfo != null && stats.tServerInfo.size() > 0) {
        out(0, "Tablet Servers");
        long now = System.currentTimeMillis();
        for (TabletServerStatus server : stats.tServerInfo) {
            TableInfo summary = TableInfoUtil.summarizeTableStats(server);
            out(1, "Name: %s", server.name);
            out(2, "Ingest: %.2f", summary.ingestRate);
            out(2, "Last Contact: %s", server.lastContact);
            out(2, "OS Load Average: %.2f", server.osLoad);
            out(2, "Queries: %.2f", summary.queryRate);
            out(2, "Time Difference: %.1f", ((now - server.lastContact) / 1000.));
            out(2, "Total Records: %d", summary.recs);
            out(2, "Lookups: %d", server.lookups);
            if (server.holdTime > 0)
                out(2, "Hold Time: %d", server.holdTime);
            if (server.tableMap != null && server.tableMap.size() > 0) {
                out(2, "Tables");
                for (Entry<String, TableInfo> status : server.tableMap.entrySet()) {
                    TableInfo info = status.getValue();
                    out(3, "Table: %s", status.getKey());
                    out(4, "Tablets: %d", info.onlineTablets);
                    out(4, "Records: %d", info.recs);
                    out(4, "Records in Memory: %d", info.recsInMemory);
                    out(4, "Ingest: %.2f", info.ingestRate);
                    out(4, "Queries: %.2f", info.queryRate);
                    out(4, "Major Compacting: %d", info.majors == null ? 0 : info.majors.running);
                    out(4, "Queued for Major Compaction: %d", info.majors == null ? 0 : info.majors.queued);
                    out(4, "Minor Compacting: %d", info.minors == null ? 0 : info.minors.running);
                    out(4, "Queued for Minor Compaction: %d", info.minors == null ? 0 : info.minors.queued);
                }
            }
            out(2, "Recoveries: %d", server.logSorts.size());
            for (RecoveryStatus sort : server.logSorts) {
                out(3, "File: %s", sort.name);
                out(3, "Progress: %.2f%%", sort.progress * 100);
                out(3, "Time running: %s", sort.runtime / 1000.);
            }
            out(3, "Bulk imports: %s", stats.bulkImports.size());
            for (BulkImportStatus bulk : stats.bulkImports) {
                out(4, "Import file: %s", bulk.filename);
                out(5, "Bulk state %s", bulk.state);
                out(5, "Bulk start %s", bulk.startTime);
            }
        }
    }
}
Also used : MasterMonitorInfo(org.apache.accumulo.core.master.thrift.MasterMonitorInfo) AccumuloServerContext(org.apache.accumulo.server.AccumuloServerContext) ThriftNotActiveServiceException(org.apache.accumulo.core.client.impl.thrift.ThriftNotActiveServiceException) Instance(org.apache.accumulo.core.client.Instance) HdfsZooInstance(org.apache.accumulo.server.client.HdfsZooInstance) ServerConfigurationFactory(org.apache.accumulo.server.conf.ServerConfigurationFactory) DeadServer(org.apache.accumulo.core.master.thrift.DeadServer) Date(java.util.Date) BulkImportStatus(org.apache.accumulo.core.master.thrift.BulkImportStatus) MasterClientService(org.apache.accumulo.core.master.thrift.MasterClientService) TableInfo(org.apache.accumulo.core.master.thrift.TableInfo) RecoveryStatus(org.apache.accumulo.core.master.thrift.RecoveryStatus) SimpleDateFormat(java.text.SimpleDateFormat) TabletServerStatus(org.apache.accumulo.core.master.thrift.TabletServerStatus)
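
The out(...) helper called throughout main is not part of the snippet above. A minimal sketch consistent with how it is used here (an indent level followed by a printf-style format string and arguments) could look like the following; the exact indentation and output of GetMasterStats may differ, so treat this as an assumption:

private static void out(int indent, String fmt, Object... args) {
    // Indent the line to reflect the nesting level the callers pass in.
    for (int i = 0; i < indent; i++) {
        System.out.print("  ");
    }
    // Format and print, e.g. out(2, "Tablets: %d", 5) prints "    Tablets: 5".
    System.out.println(String.format(fmt, args));
}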

Example 17 with AccumuloServerContext

Use of org.apache.accumulo.server.AccumuloServerContext in project accumulo by apache.

From class GarbageCollectWriteAheadLogsTest, method replicationDelaysFileCollection.

@Test
public void replicationDelaysFileCollection() throws Exception {
    AccumuloServerContext context = EasyMock.createMock(AccumuloServerContext.class);
    VolumeManager fs = EasyMock.createMock(VolumeManager.class);
    WalStateManager marker = EasyMock.createMock(WalStateManager.class);
    LiveTServerSet tserverSet = EasyMock.createMock(LiveTServerSet.class);
    Connector conn = EasyMock.createMock(Connector.class);
    Scanner mscanner = EasyMock.createMock(Scanner.class);
    Scanner rscanner = EasyMock.createMock(Scanner.class);
    String row = MetadataSchema.ReplicationSection.getRowPrefix() + path.toString();
    String colf = MetadataSchema.ReplicationSection.COLF.toString();
    String colq = "1";
    // A pending replication entry for this WAL; its presence should delay collection of the file.
    Map<Key, Value> replicationWork = Collections.singletonMap(new Key(row, colf, colq), new Value(new byte[0]));
    GCStatus status = new GCStatus(null, null, null, new GcCycleStats());
    EasyMock.expect(tserverSet.getCurrentServers()).andReturn(Collections.singleton(server1));
    EasyMock.expect(marker.getAllMarkers()).andReturn(markers).once();
    EasyMock.expect(marker.state(server1, id)).andReturn(new Pair<>(WalState.UNREFERENCED, path));
    EasyMock.expect(context.getConnector()).andReturn(conn);
    EasyMock.expect(conn.createScanner(ReplicationTable.NAME, Authorizations.EMPTY)).andReturn(rscanner);
    rscanner.fetchColumnFamily(ReplicationSchema.StatusSection.NAME);
    EasyMock.expectLastCall().once();
    EasyMock.expect(rscanner.iterator()).andReturn(emptyKV);
    EasyMock.expect(conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY)).andReturn(mscanner);
    mscanner.fetchColumnFamily(MetadataSchema.ReplicationSection.COLF);
    EasyMock.expectLastCall().once();
    mscanner.setRange(MetadataSchema.ReplicationSection.getRange());
    EasyMock.expectLastCall().once();
    EasyMock.expect(mscanner.iterator()).andReturn(replicationWork.entrySet().iterator());
    EasyMock.replay(context, fs, marker, tserverSet, conn, rscanner, mscanner);
    // With replication still outstanding, collect() must not delete the UNREFERENCED WAL;
    // the VolumeManager mock would fail verification on any unexpected delete call.
    GarbageCollectWriteAheadLogs gc = new GarbageCollectWriteAheadLogs(context, fs, false, tserverSet, marker, tabletOnServer1List);
    gc.collect(status);
    EasyMock.verify(context, fs, marker, tserverSet, conn, rscanner, mscanner);
}
Also used : VolumeManager(org.apache.accumulo.server.fs.VolumeManager) Connector(org.apache.accumulo.core.client.Connector) Scanner(org.apache.accumulo.core.client.Scanner) AccumuloServerContext(org.apache.accumulo.server.AccumuloServerContext) GcCycleStats(org.apache.accumulo.core.gc.thrift.GcCycleStats) GCStatus(org.apache.accumulo.core.gc.thrift.GCStatus) LiveTServerSet(org.apache.accumulo.server.master.LiveTServerSet) WalStateManager(org.apache.accumulo.server.log.WalStateManager) Value(org.apache.accumulo.core.data.Value) Key(org.apache.accumulo.core.data.Key) Test(org.junit.Test)
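
This test and the two GarbageCollectWriteAheadLogsTest examples that follow rely on fixture fields declared on the test class (server1, server2, id, path, markers, markers2, emptyKV, tabletOnServer1List, tabletOnServer2List) that the snippets do not show. A rough sketch of what such fields could look like is given here; the constructor forms and literal values are assumptions chosen to match the usages above, not the project's actual fixture:

// Hypothetical fixture sketch; constructor forms and values are assumed, not copied from the real test.
private final TServerInstance server1 = new TServerInstance(HostAndPort.fromParts("localhost", 1234), "session1"); // in the live set
private final TServerInstance server2 = new TServerInstance(HostAndPort.fromParts("localhost", 2345), "session2"); // not in the live set
private final UUID id = UUID.randomUUID();                                  // id of the WAL under test
private final Path path = new Path("/accumulo/wal/localhost+1234/" + id);   // WAL path returned by marker.state(...)
// One WAL marker per server, keyed by the tablet server that wrote the log.
private final Map<TServerInstance, List<UUID>> markers = Collections.singletonMap(server1, Collections.singletonList(id));
private final Map<TServerInstance, List<UUID>> markers2 = Collections.singletonMap(server2, Collections.singletonList(id));
// Empty scan result handed to mocked Scanners that should return nothing.
private final Iterator<Map.Entry<Key, Value>> emptyKV = Collections.<Map.Entry<Key, Value>>emptyList().iterator();
// tabletOnServer1List / tabletOnServer2List each hold a single TabletLocationState assigned to the matching server (construction omitted).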

Example 18 with AccumuloServerContext

Use of org.apache.accumulo.server.AccumuloServerContext in project accumulo by apache.

From class GarbageCollectWriteAheadLogsTest, method ignoreReferenceLogOnDeadServer.

@Test
public void ignoreReferenceLogOnDeadServer() throws Exception {
    AccumuloServerContext context = EasyMock.createMock(AccumuloServerContext.class);
    VolumeManager fs = EasyMock.createMock(VolumeManager.class);
    WalStateManager marker = EasyMock.createMock(WalStateManager.class);
    LiveTServerSet tserverSet = EasyMock.createMock(LiveTServerSet.class);
    Connector conn = EasyMock.createMock(Connector.class);
    Scanner mscanner = EasyMock.createMock(Scanner.class);
    Scanner rscanner = EasyMock.createMock(Scanner.class);
    GCStatus status = new GCStatus(null, null, null, new GcCycleStats());
    EasyMock.expect(tserverSet.getCurrentServers()).andReturn(Collections.singleton(server1));
    EasyMock.expect(marker.getAllMarkers()).andReturn(markers2).once();
    // The WAL is OPEN on server2, which is absent from the live server set above, i.e. a dead server.
    EasyMock.expect(marker.state(server2, id)).andReturn(new Pair<>(WalState.OPEN, path));
    EasyMock.expect(context.getConnector()).andReturn(conn);
    EasyMock.expect(conn.createScanner(ReplicationTable.NAME, Authorizations.EMPTY)).andReturn(rscanner);
    rscanner.fetchColumnFamily(ReplicationSchema.StatusSection.NAME);
    EasyMock.expectLastCall().once();
    EasyMock.expect(rscanner.iterator()).andReturn(emptyKV);
    EasyMock.expect(conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY)).andReturn(mscanner);
    mscanner.fetchColumnFamily(MetadataSchema.ReplicationSection.COLF);
    EasyMock.expectLastCall().once();
    mscanner.setRange(MetadataSchema.ReplicationSection.getRange());
    EasyMock.expectLastCall().once();
    EasyMock.expect(mscanner.iterator()).andReturn(emptyKV);
    EasyMock.replay(context, fs, marker, tserverSet, conn, rscanner, mscanner);
    GarbageCollectWriteAheadLogs gc = new GarbageCollectWriteAheadLogs(context, fs, false, tserverSet, marker, tabletOnServer2List);
    gc.collect(status);
    EasyMock.verify(context, fs, marker, tserverSet, conn, rscanner, mscanner);
}
Also used : VolumeManager(org.apache.accumulo.server.fs.VolumeManager) Connector(org.apache.accumulo.core.client.Connector) Scanner(org.apache.accumulo.core.client.Scanner) AccumuloServerContext(org.apache.accumulo.server.AccumuloServerContext) WalStateManager(org.apache.accumulo.server.log.WalStateManager) GcCycleStats(org.apache.accumulo.core.gc.thrift.GcCycleStats) GCStatus(org.apache.accumulo.core.gc.thrift.GCStatus) LiveTServerSet(org.apache.accumulo.server.master.LiveTServerSet) Test(org.junit.Test)

Example 19 with AccumuloServerContext

Use of org.apache.accumulo.server.AccumuloServerContext in project accumulo by apache.

From class GarbageCollectWriteAheadLogsTest, method testKeepClosedLog.

@Test
public void testKeepClosedLog() throws Exception {
    AccumuloServerContext context = EasyMock.createMock(AccumuloServerContext.class);
    VolumeManager fs = EasyMock.createMock(VolumeManager.class);
    WalStateManager marker = EasyMock.createMock(WalStateManager.class);
    LiveTServerSet tserverSet = EasyMock.createMock(LiveTServerSet.class);
    GCStatus status = new GCStatus(null, null, null, new GcCycleStats());
    EasyMock.expect(tserverSet.getCurrentServers()).andReturn(Collections.singleton(server1));
    EasyMock.expect(marker.getAllMarkers()).andReturn(markers).once();
    EasyMock.expect(marker.state(server1, id)).andReturn(new Pair<>(WalState.CLOSED, path));
    EasyMock.replay(context, marker, tserverSet, fs);
    // The closed WAL should be kept: no delete is expected on the VolumeManager mock.
    GarbageCollectWriteAheadLogs gc = new GarbageCollectWriteAheadLogs(context, fs, false, tserverSet, marker, tabletOnServer1List) {

        @Override
        protected int removeReplicationEntries(Map<UUID, TServerInstance> candidates) throws IOException, KeeperException, InterruptedException {
            // Stubbed out so the test needs no connector or replication-table lookups.
            return 0;
        }
    };
    gc.collect(status);
    EasyMock.verify(context, marker, tserverSet, fs);
}
Also used : VolumeManager(org.apache.accumulo.server.fs.VolumeManager) AccumuloServerContext(org.apache.accumulo.server.AccumuloServerContext) WalStateManager(org.apache.accumulo.server.log.WalStateManager) GcCycleStats(org.apache.accumulo.core.gc.thrift.GcCycleStats) GCStatus(org.apache.accumulo.core.gc.thrift.GCStatus) Map(java.util.Map) LiveTServerSet(org.apache.accumulo.server.master.LiveTServerSet) Test(org.junit.Test)

Example 20 with AccumuloServerContext

Use of org.apache.accumulo.server.AccumuloServerContext in project accumulo by apache.

From class HostRegexTableLoadBalancerTest, method testBalanceWithTooManyOutstandingMigrations.

@Test
public void testBalanceWithTooManyOutstandingMigrations() {
    List<TabletMigration> migrationsOut = new ArrayList<>();
    init(new AccumuloServerContext(instance, factory));
    // Let's say we already have migrations ongoing for the FOO and BAR table extents (5 of each) for a total of 10.
    Set<KeyExtent> migrations = new HashSet<>();
    migrations.addAll(tableExtents.get(FOO.getTableName()));
    migrations.addAll(tableExtents.get(BAR.getTableName()));
    long wait = this.balance(Collections.unmodifiableSortedMap(createCurrent(15)), migrations, migrationsOut);
    Assert.assertEquals(20000, wait);
    // No migrations should have occurred, since 10 already equals maxOutstandingMigrations.
    Assert.assertEquals(0, migrationsOut.size());
}
Also used : AccumuloServerContext(org.apache.accumulo.server.AccumuloServerContext) TabletMigration(org.apache.accumulo.server.master.state.TabletMigration) ArrayList(java.util.ArrayList) TKeyExtent(org.apache.accumulo.core.data.thrift.TKeyExtent) KeyExtent(org.apache.accumulo.core.data.impl.KeyExtent) HashSet(java.util.HashSet) Test(org.junit.Test)
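
The expected outcome follows from an early-exit rule in the balancer: FOO and BAR contribute five extents each, so the ten in-flight migrations already meet the configured maximum, and the balancer adds nothing to migrationsOut and simply reports its wait interval (20,000 ms here). A standalone sketch of that rule, using assumed names rather than the production implementation, is:

import java.util.List;
import java.util.Set;

// Hypothetical sketch of the outstanding-migrations guard this test exercises;
// names and the 20,000 ms interval mirror the test, not HostRegexTableLoadBalancer itself.
final class OutstandingMigrationsGuard {
    static final long WAIT_MS = 20_000L;

    /** Returns the time to wait before balancing is attempted again. */
    static <E, M> long balanceOrWait(Set<E> inFlightMigrations, int maxOutstandingMigrations, List<M> migrationsOut) {
        if (inFlightMigrations.size() >= maxOutstandingMigrations) {
            // Too many migrations already running: leave migrationsOut untouched and just wait.
            return WAIT_MS;
        }
        // ... a real balancer would compute new migrations and add them to migrationsOut here ...
        return WAIT_MS;
    }
}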

Aggregations

AccumuloServerContext (org.apache.accumulo.server.AccumuloServerContext): 38
Test (org.junit.Test): 20
ServerConfigurationFactory (org.apache.accumulo.server.conf.ServerConfigurationFactory): 17
Instance (org.apache.accumulo.core.client.Instance): 16
KeyExtent (org.apache.accumulo.core.data.impl.KeyExtent): 12
TServerInstance (org.apache.accumulo.server.master.state.TServerInstance): 12
HashMap (java.util.HashMap): 9
TKeyExtent (org.apache.accumulo.core.data.thrift.TKeyExtent): 8
HdfsZooInstance (org.apache.accumulo.server.client.HdfsZooInstance): 8
ArrayList (java.util.ArrayList): 7
VolumeManager (org.apache.accumulo.server.fs.VolumeManager): 7
Connector (org.apache.accumulo.core.client.Connector): 6
GCStatus (org.apache.accumulo.core.gc.thrift.GCStatus): 6
Map (java.util.Map): 5
AccumuloSecurityException (org.apache.accumulo.core.client.AccumuloSecurityException): 5
GcCycleStats (org.apache.accumulo.core.gc.thrift.GcCycleStats): 5
TabletServerStatus (org.apache.accumulo.core.master.thrift.TabletServerStatus): 5
WalStateManager (org.apache.accumulo.server.log.WalStateManager): 5
LiveTServerSet (org.apache.accumulo.server.master.LiveTServerSet): 5
TabletMigration (org.apache.accumulo.server.master.state.TabletMigration): 5