Example 36 with Span

Use of org.apache.accumulo.core.trace.Span in project accumulo by apache.

The class TracerTest, method testThrift. The test starts an in-process Thrift server whose handler is wrapped with TraceWrap.service, wraps the matching client with TraceWrap.client, and verifies that the spans produced on both sides of the RPC reach the registered TestReceiver.

@Test
public void testThrift() throws Exception {
    TestReceiver tracer = new TestReceiver();
    org.apache.htrace.Trace.addReceiver(tracer);
    ServerSocket socket = new ServerSocket(0);
    TServerSocket transport = new TServerSocket(socket);
    transport.listen();
    TThreadPoolServer.Args args = new TThreadPoolServer.Args(transport);
    args.processor(new Processor<Iface>(TraceWrap.service(new Service())));
    final TServer tserver = new TThreadPoolServer(args);
    Thread t = new Thread() {

        @Override
        public void run() {
            tserver.serve();
        }
    };
    t.start();
    TTransport clientTransport = new TSocket(new Socket("localhost", socket.getLocalPort()));
    TestService.Iface client = new TestService.Client(new TBinaryProtocol(clientTransport), new TBinaryProtocol(clientTransport));
    client = TraceWrap.client(client);
    assertFalse(client.checkTrace(null, "test"));
    Span start = Trace.on("start");
    assertTrue(client.checkTrace(null, "my test"));
    start.stop();
    assertNotNull(tracer.traces.get(start.traceId()));
    String[] traces = { "my test", "checkTrace", "client:checkTrace", "start" };
    assertTrue(tracer.traces.get(start.traceId()).size() == traces.length);
    for (int i = 0; i < traces.length; i++) assertEquals(traces[i], tracer.traces.get(start.traceId()).get(i).description);
    tserver.stop();
    t.join(100);
}
Also used : TServer(org.apache.thrift.server.TServer) TestService(org.apache.accumulo.tracer.thrift.TestService) ServerSocket(java.net.ServerSocket) TServerSocket(org.apache.thrift.transport.TServerSocket) Span(org.apache.accumulo.core.trace.Span) Iface(org.apache.accumulo.tracer.thrift.TestService.Iface) TBinaryProtocol(org.apache.thrift.protocol.TBinaryProtocol) TTransport(org.apache.thrift.transport.TTransport) TThreadPoolServer(org.apache.thrift.server.TThreadPoolServer) Socket(java.net.Socket) TSocket(org.apache.thrift.transport.TSocket) Test(org.junit.Test)
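
The essential client-side pattern this test exercises can be reduced to a short sketch: register a receiver, turn tracing on, do the traced work, and stop the root span. The sketch below is illustrative only; doTracedWork stands in for the Thrift client call in the test and is not part of the Accumulo API.

import org.apache.accumulo.core.trace.Span;
import org.apache.accumulo.core.trace.Trace;

// Minimal sketch of the Span lifecycle shown above. Assumes a SpanReceiver
// (such as the test's TestReceiver) has already been registered.
public class SpanLifecycleSketch {

    public static void main(String[] args) {
        // Begin a new trace on this thread; the returned Span is the root.
        Span root = Trace.on("start");
        try {
            doTracedWork(); // hypothetical placeholder, e.g. a call through TraceWrap.client(...)
        } finally {
            // Close the root span; registered receivers see the completed spans.
            root.stop();
        }
        // The trace id ties all spans of one trace together, as the assertions above rely on.
        System.out.println("trace id: " + root.traceId());
    }

    private static void doTracedWork() {
        // placeholder for traced work
    }
}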

Example 37 with Span

Use of org.apache.accumulo.core.trace.Span in project accumulo by apache.

The class TracerTest, method testTrace. The test covers the basic Span lifecycle: Trace.start is a no-op while no trace is active, Trace.on begins a trace, child spans are reported with their descriptions and timings, and Trace.wrap carries the trace into another thread.

@SuppressWarnings("deprecation")
@Test
public void testTrace() throws Exception {
    TestReceiver tracer = new TestReceiver();
    org.apache.htrace.Trace.addReceiver(tracer);
    assertFalse(Trace.isTracing());
    Trace.start("nop").stop();
    assertTrue(tracer.traces.size() == 0);
    assertFalse(Trace.isTracing());
    Trace.on("nop").stop();
    assertTrue(tracer.traces.size() == 1);
    assertFalse(Trace.isTracing());
    Span start = Trace.on("testing");
    assertEquals(Trace.currentTrace().getSpan(), start.getScope().getSpan());
    assertTrue(Trace.isTracing());
    Span span = Trace.start("shortest trace ever");
    span.stop();
    long traceId = Trace.currentTraceId();
    assertNotNull(tracer.traces.get(traceId));
    assertTrue(tracer.traces.get(traceId).size() == 1);
    assertEquals("shortest trace ever", tracer.traces.get(traceId).get(0).description);
    Span pause = Trace.start("pause");
    Thread.sleep(100);
    pause.stop();
    assertTrue(tracer.traces.get(traceId).size() == 2);
    assertTrue(tracer.traces.get(traceId).get(1).millis() >= 100);
    Thread t = new Thread(Trace.wrap(new Runnable() {

        @Override
        public void run() {
            assertTrue(Trace.isTracing());
        }
    }), "My Task");
    t.start();
    t.join();
    assertTrue(tracer.traces.get(traceId).size() == 3);
    assertEquals("My Task", tracer.traces.get(traceId).get(2).description);
    Trace.off();
    assertFalse(Trace.isTracing());
}
Also used : Span(org.apache.accumulo.core.trace.Span) Test(org.junit.Test)
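
The cross-thread step in the middle of this test is the part most worth lifting out: Trace.wrap carries the current trace into a Runnable executed on another thread. A hedged sketch of just that pattern, assuming only that a receiver is already registered:

import org.apache.accumulo.core.trace.Span;
import org.apache.accumulo.core.trace.Trace;

// Sketch: propagate the active trace into a worker thread with Trace.wrap.
public class TraceWrapSketch {

    public static void main(String[] args) throws InterruptedException {
        Span root = Trace.on("parent");
        Thread worker = new Thread(Trace.wrap(new Runnable() {
            @Override
            public void run() {
                // The parent's trace is active here, so Trace.isTracing() is true
                // and, as the test shows, a span named after this thread is
                // recorded under the same trace id.
            }
        }), "worker");
        worker.start();
        worker.join();
        root.stop();
    }
}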

Example 38 with Span

Use of org.apache.accumulo.core.trace.Span in project accumulo by apache.

The class ConditionalWriterIT, method testTrace. The test enables DistributedTrace against a MiniAccumuloCluster, performs a conditional write under a root span, and then scans the trace table until the expected span descriptions appear in order.

@Test
public void testTrace() throws Exception {
    // Need to add a getClientConfig() to AccumuloCluster
    Assume.assumeTrue(getClusterType() == ClusterType.MINI);
    Process tracer = null;
    Connector conn = getConnector();
    AccumuloCluster cluster = getCluster();
    MiniAccumuloClusterImpl mac = (MiniAccumuloClusterImpl) cluster;
    if (!conn.tableOperations().exists("trace")) {
        tracer = mac.exec(TraceServer.class);
        while (!conn.tableOperations().exists("trace")) {
            sleepUninterruptibly(1, TimeUnit.SECONDS);
        }
    }
    String tableName = getUniqueNames(1)[0];
    conn.tableOperations().create(tableName);
    DistributedTrace.enable("localhost", "testTrace", mac.getClientConfig());
    sleepUninterruptibly(1, TimeUnit.SECONDS);
    Span root = Trace.on("traceTest");
    try (ConditionalWriter cw = conn.createConditionalWriter(tableName, new ConditionalWriterConfig())) {
        // mutation conditional on column tx:seq not existing
        ConditionalMutation cm0 = new ConditionalMutation("99006", new Condition("tx", "seq"));
        cm0.put("name", "last", "doe");
        cm0.put("name", "first", "john");
        cm0.put("tx", "seq", "1");
        Assert.assertEquals(Status.ACCEPTED, cw.write(cm0).getStatus());
        root.stop();
    }
    try (Scanner scanner = conn.createScanner("trace", Authorizations.EMPTY)) {
        scanner.setRange(new Range(new Text(Long.toHexString(root.traceId()))));
        loop: while (true) {
            final StringBuilder finalBuffer = new StringBuilder();
            int traceCount = TraceDump.printTrace(scanner, new Printer() {

                @Override
                public void print(final String line) {
                    try {
                        finalBuffer.append(line).append("\n");
                    } catch (Exception ex) {
                        throw new RuntimeException(ex);
                    }
                }
            });
            String traceOutput = finalBuffer.toString();
            log.info("Trace output:" + traceOutput);
            if (traceCount > 0) {
                int lastPos = 0;
                for (String part : "traceTest, startScan,startConditionalUpdate,conditionalUpdate,Check conditions,apply conditional mutations".split(",")) {
                    log.info("Looking in trace output for '" + part + "'");
                    int pos = traceOutput.indexOf(part);
                    if (-1 == pos) {
                        log.info("Trace output doesn't contain '" + part + "'");
                        Thread.sleep(1000);
                        break loop;
                    }
                    assertTrue("Did not find '" + part + "' in output", pos > 0);
                    assertTrue("'" + part + "' occurred earlier than the previous element unexpectedly", pos > lastPos);
                    lastPos = pos;
                }
                break;
            } else {
                log.info("Ignoring trace output as traceCount not greater than zero: " + traceCount);
                Thread.sleep(1000);
            }
        }
        if (tracer != null) {
            tracer.destroy();
        }
    }
}
Also used : Condition(org.apache.accumulo.core.data.Condition) Connector(org.apache.accumulo.core.client.Connector) IsolatedScanner(org.apache.accumulo.core.client.IsolatedScanner) Scanner(org.apache.accumulo.core.client.Scanner) AccumuloCluster(org.apache.accumulo.cluster.AccumuloCluster) Text(org.apache.hadoop.io.Text) Range(org.apache.accumulo.core.data.Range) Printer(org.apache.accumulo.tracer.TraceDump.Printer) Span(org.apache.accumulo.core.trace.Span) TableOfflineException(org.apache.accumulo.core.client.TableOfflineException) TableNotFoundException(org.apache.accumulo.core.client.TableNotFoundException) TableExistsException(org.apache.accumulo.core.client.TableExistsException) TableDeletedException(org.apache.accumulo.core.client.TableDeletedException) AccumuloSecurityException(org.apache.accumulo.core.client.AccumuloSecurityException) IOException(java.io.IOException) AccumuloException(org.apache.accumulo.core.client.AccumuloException) ConditionalWriter(org.apache.accumulo.core.client.ConditionalWriter) ConditionalMutation(org.apache.accumulo.core.data.ConditionalMutation) TraceServer(org.apache.accumulo.tracer.TraceServer) ConditionalWriterConfig(org.apache.accumulo.core.client.ConditionalWriterConfig) MiniAccumuloClusterImpl(org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl) Test(org.junit.Test)
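
The read-back half of the test is a reusable pattern of its own: scan the trace table by hex trace id and format the spans with TraceDump.printTrace. A hedged sketch follows; the Connector and Span are assumed to exist already, and the table name "trace" matches the one used by the test and the TraceServer above.

import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.client.Scanner;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.security.Authorizations;
import org.apache.accumulo.core.trace.Span;
import org.apache.accumulo.tracer.TraceDump;
import org.apache.hadoop.io.Text;

// Sketch: dump the spans recorded for one trace id from the trace table.
public class TraceDumpSketch {

    static String dump(Connector conn, Span root) throws Exception {
        final StringBuilder out = new StringBuilder();
        try (Scanner scanner = conn.createScanner("trace", Authorizations.EMPTY)) {
            // Rows in the trace table are keyed by the hex-encoded trace id.
            scanner.setRange(new Range(new Text(Long.toHexString(root.traceId()))));
            int count = TraceDump.printTrace(scanner, new TraceDump.Printer() {
                @Override
                public void print(String line) {
                    out.append(line).append("\n");
                }
            });
            // An empty result usually means the spans have not been flushed yet,
            // which is why the test above retries in a loop.
            return count > 0 ? out.toString() : "";
        }
    }
}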

Example 39 with Span

Use of org.apache.accumulo.core.trace.Span in project accumulo by apache.

The class GarbageCollectWriteAheadLogs, method collect. Each phase of write-ahead-log collection (candidate scan, removal of entries still in use, removal of replication entries, file removal, and marker removal) runs under its own span, so the cost of each step shows up in the trace.

public void collect(GCStatus status) {
    Span span = Trace.start("getCandidates");
    try {
        status.currentLog.started = System.currentTimeMillis();
        Map<TServerInstance, Set<UUID>> logsByServer = new HashMap<>();
        Map<UUID, Pair<WalState, Path>> logsState = new HashMap<>();
        // Scan for log file info first: the order is important
        // Consider:
        // * get live servers
        // * new server gets a lock, creates a log
        // * get logs
        // * the log appears to belong to a dead server
        long count = getCurrent(logsByServer, logsState);
        long fileScanStop = System.currentTimeMillis();
        log.info(String.format("Fetched %d files for %d servers in %.2f seconds", count, logsByServer.size(), (fileScanStop - status.currentLog.started) / 1000.));
        status.currentLog.candidates = count;
        span.stop();
        // now it's safe to get the liveServers
        Set<TServerInstance> currentServers = liveServers.getCurrentServers();
        Map<UUID, TServerInstance> uuidToTServer;
        span = Trace.start("removeEntriesInUse");
        try {
            uuidToTServer = removeEntriesInUse(logsByServer, currentServers, logsState);
            count = uuidToTServer.size();
        } catch (Exception ex) {
            log.error("Unable to scan metadata table", ex);
            return;
        } finally {
            span.stop();
        }
        long logEntryScanStop = System.currentTimeMillis();
        log.info(String.format("%d log entries scanned in %.2f seconds", count, (logEntryScanStop - fileScanStop) / 1000.));
        span = Trace.start("removeReplicationEntries");
        try {
            count = removeReplicationEntries(uuidToTServer);
        } catch (Exception ex) {
            log.error("Unable to scan replication table", ex);
            return;
        } finally {
            span.stop();
        }
        long replicationEntryScanStop = System.currentTimeMillis();
        log.info(String.format("%d replication entries scanned in %.2f seconds", count, (replicationEntryScanStop - logEntryScanStop) / 1000.));
        span = Trace.start("removeFiles");
        logsState.keySet().retainAll(uuidToTServer.keySet());
        count = removeFiles(logsState.values(), status);
        long removeStop = System.currentTimeMillis();
        log.info(String.format("%d total logs removed from %d servers in %.2f seconds", count, logsByServer.size(), (removeStop - logEntryScanStop) / 1000.));
        span.stop();
        span = Trace.start("removeMarkers");
        count = removeTabletServerMarkers(uuidToTServer, logsByServer, currentServers);
        long removeMarkersStop = System.currentTimeMillis();
        log.info(String.format("%d markers removed in %.2f seconds", count, (removeMarkersStop - removeStop) / 1000.));
        span.stop();
        status.currentLog.finished = removeStop;
        status.lastLog = status.currentLog;
        status.currentLog = new GcCycleStats();
    } catch (Exception e) {
        log.error("exception occured while garbage collecting write ahead logs", e);
    } finally {
        span.stop();
    }
}
Also used : LiveTServerSet(org.apache.accumulo.server.master.LiveTServerSet) Set(java.util.Set) HashSet(java.util.HashSet) HashMap(java.util.HashMap) GcCycleStats(org.apache.accumulo.core.gc.thrift.GcCycleStats) Span(org.apache.accumulo.core.trace.Span) TServerInstance(org.apache.accumulo.server.master.state.TServerInstance) TableNotFoundException(org.apache.accumulo.core.client.TableNotFoundException) FileNotFoundException(java.io.FileNotFoundException) ReplicationTableOfflineException(org.apache.accumulo.core.replication.ReplicationTableOfflineException) WalMarkerException(org.apache.accumulo.server.log.WalStateManager.WalMarkerException) AccumuloSecurityException(org.apache.accumulo.core.client.AccumuloSecurityException) KeeperException(org.apache.zookeeper.KeeperException) IOException(java.io.IOException) AccumuloException(org.apache.accumulo.core.client.AccumuloException) UUID(java.util.UUID) Pair(org.apache.accumulo.core.util.Pair)
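
The reassigned-span structure above generalizes to any job with sequential phases: start a child span per phase, do the work, and stop the span in a finally block so a failure still closes it. A compact, hedged sketch of that pattern; the Phase interface and the phase names are illustrative placeholders, not Accumulo API.

import org.apache.accumulo.core.trace.Span;
import org.apache.accumulo.core.trace.Trace;

// Sketch: time consecutive phases of a job, one child span per phase.
public class PhasedSpansSketch {

    interface Phase {
        void run() throws Exception;
    }

    static void runPhases(String[] names, Phase[] phases) throws Exception {
        for (int i = 0; i < phases.length; i++) {
            // Child span of whatever trace is currently active (a no-op span if none is).
            Span span = Trace.start(names[i]);
            try {
                phases[i].run();
            } finally {
                span.stop();
            }
        }
    }
}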

Example 40 with Span

Use of org.apache.accumulo.core.trace.Span in project accumulo by apache.

The class SimpleGarbageCollector, method run. The collector's main loop starts a sampled trace per cycle with Trace.on("gc", sampler), wraps the cycle in a "loop" span, opens separate spans for the replication close and write-ahead-log phases, and turns tracing off before sleeping until the next cycle.

private void run() {
    long tStart, tStop;
    // Sleep for an initial period, giving the master time to start up and
    // old data files to be unused
    log.info("Trying to acquire ZooKeeper lock for garbage collector");
    try {
        getZooLock(startStatsService());
    } catch (Exception ex) {
        log.error("{}", ex.getMessage(), ex);
        System.exit(1);
    }
    try {
        long delay = getStartDelay();
        log.debug("Sleeping for {} milliseconds before beginning garbage collection cycles", delay);
        Thread.sleep(delay);
    } catch (InterruptedException e) {
        log.warn("{}", e.getMessage(), e);
        return;
    }
    ProbabilitySampler sampler = new ProbabilitySampler(getConfiguration().getFraction(Property.GC_TRACE_PERCENT));
    while (true) {
        Trace.on("gc", sampler);
        Span gcSpan = Trace.start("loop");
        tStart = System.currentTimeMillis();
        try {
            // make room
            System.gc();
            status.current.started = System.currentTimeMillis();
            new GarbageCollectionAlgorithm().collect(new GCEnv(RootTable.NAME));
            new GarbageCollectionAlgorithm().collect(new GCEnv(MetadataTable.NAME));
            log.info("Number of data file candidates for deletion: {}", status.current.candidates);
            log.info("Number of data file candidates still in use: {}", status.current.inUse);
            log.info("Number of successfully deleted data files: {}", status.current.deleted);
            log.info("Number of data files delete failures: {}", status.current.errors);
            status.current.finished = System.currentTimeMillis();
            status.last = status.current;
            status.current = new GcCycleStats();
        } catch (Exception e) {
            log.error("{}", e.getMessage(), e);
        }
        tStop = System.currentTimeMillis();
        log.info(String.format("Collect cycle took %.2f seconds", ((tStop - tStart) / 1000.0)));
        // We want to prune references to fully-replicated WALs from the replication table which are no longer referenced in the metadata table
        // before running GarbageCollectWriteAheadLogs to ensure we delete as many files as possible.
        Span replSpan = Trace.start("replicationClose");
        try {
            CloseWriteAheadLogReferences closeWals = new CloseWriteAheadLogReferences(this);
            closeWals.run();
        } catch (Exception e) {
            log.error("Error trying to close write-ahead logs for replication table", e);
        } finally {
            replSpan.stop();
        }
        // Clean up any unused write-ahead logs
        Span waLogs = Trace.start("walogs");
        try {
            GarbageCollectWriteAheadLogs walogCollector = new GarbageCollectWriteAheadLogs(this, fs, isUsingTrash());
            log.info("Beginning garbage collection of write-ahead logs");
            walogCollector.collect(status);
        } catch (Exception e) {
            log.error("{}", e.getMessage(), e);
        } finally {
            waLogs.stop();
        }
        gcSpan.stop();
        // we just made a lot of metadata changes: flush them out
        try {
            Connector connector = getConnector();
            connector.tableOperations().compact(MetadataTable.NAME, null, null, true, true);
            connector.tableOperations().compact(RootTable.NAME, null, null, true, true);
        } catch (Exception e) {
            log.warn("{}", e.getMessage(), e);
        }
        Trace.off();
        try {
            long gcDelay = getConfiguration().getTimeInMillis(Property.GC_CYCLE_DELAY);
            log.debug("Sleeping for {} milliseconds", gcDelay);
            Thread.sleep(gcDelay);
        } catch (InterruptedException e) {
            log.warn("{}", e.getMessage(), e);
            return;
        }
    }
}
Also used : ProbabilitySampler(org.apache.accumulo.core.trace.ProbabilitySampler) CloseWriteAheadLogReferences(org.apache.accumulo.gc.replication.CloseWriteAheadLogReferences) Connector(org.apache.accumulo.core.client.Connector) GcCycleStats(org.apache.accumulo.core.gc.thrift.GcCycleStats) Span(org.apache.accumulo.core.trace.Span) TableNotFoundException(org.apache.accumulo.core.client.TableNotFoundException) InvalidProtocolBufferException(com.google.protobuf.InvalidProtocolBufferException) MutationsRejectedException(org.apache.accumulo.core.client.MutationsRejectedException) FileNotFoundException(java.io.FileNotFoundException) ReplicationTableOfflineException(org.apache.accumulo.core.replication.ReplicationTableOfflineException) AccumuloSecurityException(org.apache.accumulo.core.client.AccumuloSecurityException) KeeperException(org.apache.zookeeper.KeeperException) IOException(java.io.IOException) UnknownHostException(java.net.UnknownHostException) AccumuloException(org.apache.accumulo.core.client.AccumuloException)
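
The sampling setup at the top of the loop is the piece most likely to be reused elsewhere: build a ProbabilitySampler from a configured fraction and hand it to Trace.on so only a fraction of cycles are traced at all. A hedged sketch; the traceFraction parameter is illustrative and not tied to any Accumulo default.

import org.apache.accumulo.core.trace.ProbabilitySampler;
import org.apache.accumulo.core.trace.Span;
import org.apache.accumulo.core.trace.Trace;

// Sketch: trace only a sampled fraction of loop iterations.
public class SampledCycleSketch {

    public static void runOneCycle(double traceFraction) {
        ProbabilitySampler sampler = new ProbabilitySampler(traceFraction);
        // Starts a trace for this cycle only if the sampler selects it.
        Trace.on("gc", sampler);
        Span cycle = Trace.start("loop");
        try {
            // ... one cycle of work, e.g. the collection phases shown above ...
        } finally {
            cycle.stop();
            // End the (possibly sampled) trace so the next cycle starts fresh.
            Trace.off();
        }
    }
}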

Aggregations

Span (org.apache.accumulo.core.trace.Span) 56
Key (org.apache.accumulo.core.data.Key) 12
Value (org.apache.accumulo.core.data.Value) 12
IOException (java.io.IOException) 11
ColumnVisibility (org.apache.accumulo.core.security.ColumnVisibility) 10
Text (org.apache.hadoop.io.Text) 9
StreamingPropertyValue (org.vertexium.property.StreamingPropertyValue) 8
AccumuloException (org.apache.accumulo.core.client.AccumuloException) 7
AccumuloSecurityException (org.apache.accumulo.core.client.AccumuloSecurityException) 7
TableNotFoundException (org.apache.accumulo.core.client.TableNotFoundException) 6
PartialKey (org.apache.accumulo.core.data.PartialKey) 6
Mutation (org.apache.accumulo.core.data.Mutation) 5
IndexHint (org.vertexium.search.IndexHint) 5
InvalidProtocolBufferException (com.google.protobuf.InvalidProtocolBufferException) 4
Connector (org.apache.accumulo.core.client.Connector) 4
Scanner (org.apache.accumulo.core.client.Scanner) 4
ReplicationTableOfflineException (org.apache.accumulo.core.replication.ReplicationTableOfflineException) 4
Status (org.apache.accumulo.server.replication.proto.Replication.Status) 4
Test (org.junit.Test) 4
FileNotFoundException (java.io.FileNotFoundException) 3