Search in sources:

Example 1 with PartitionInfo

Use of ddlwindowing.WindowingApp.PartitionInfo in the voltdb project by VoltDB.

From the class Reporter, the run() method:

@Override
public void run() {
    boolean success = true;
    Map<Integer, Long> averagesForWindows = new TreeMap<Integer, Long>();
    // See ddl.sql for the actual SQL being run by the 'Average' procedure.
    for (int seconds : new int[] { 1, 5, 10, 30 }) {
        try {
            // SQL BEING RUN:
            //  SELECT SUM(sum_values) / SUM(count_values)
            //  FROM agg_by_second
            //  WHERE second_ts >= TO_TIMESTAMP(SECOND, SINCE_EPOCH(SECOND, NOW) - ?);
            ClientResponse cr = app.client.callProcedure("Average", -seconds);
            VoltTable result = cr.getResults()[0];
            long average = result.asScalarLong();
            if (!result.wasNull()) {
                averagesForWindows.put(seconds, average);
            } else {
                // If there are no rows in the selected time window (for example
                // if we stop the client and then start it up again), then the
                // average will be NULL.
                averagesForWindows.put(seconds, null);
            }
        } catch (IOException | ProcCallException e) {
            // Note any failure for reporting later.
            success = false;
        }
    }
    // Lock on the app object so that other (well-behaved) printing is not
    // interleaved with this report printing.
    synchronized (app) {
        long now = System.currentTimeMillis();
        long time = Math.round((now - app.startTS) / 1000.0);
        // Print out how long the processing has been running
        System.out.printf("%02d:%02d:%02d Report:\n", time / 3600, (time / 60) % 60, time % 60);
        // If possible, print out the averages over several time windows.
        if (success) {
            System.out.println("  Average values over time windows:");
            for (Entry<Integer, Long> e : averagesForWindows.entrySet()) {
                System.out.printf("    Average for past %2ds: %d\n", e.getKey(), e.getValue());
            }
        } else {
            System.out.println("  Unable to retrieve average values at this time.");
        }
        System.out.println("  Partition statistics:");
        for (Entry<Long, PartitionInfo> e : app.getPartitionData().entrySet()) {
            PartitionInfo pinfo = e.getValue();
            System.out.printf("    Partition %2d: %9d tuples, youngest: %6.3fs, oldest: %6.3fs\n", e.getKey(), pinfo.tupleCount, pinfo.youngestTupleAge / 1000.0, pinfo.oldestTupleAge / 1000.0);
        }
        // Let the inserter process print a one line report.
        app.inserter.printReport();
        //
        // FAILURE REPORTING FOR PERIODIC OPERATIONS
        //
        long partitionTrackerFailures = app.partitionTracker.failureCount.getAndSet(0);
        if (partitionTrackerFailures > 0) {
            System.out.printf("  Partition Tracker failed %d times since last report.\n", partitionTrackerFailures);
        }
        long maxTrackerFailures = app.maxTracker.failureCount.getAndSet(0);
        if (maxTrackerFailures > 0) {
            System.out.printf("  Max Tracker failed %d times since last report.\n", maxTrackerFailures);
        }
        System.out.println();
        System.out.flush();
    }
}
Also used: ClientResponse (org.voltdb.client.ClientResponse), IOException (java.io.IOException), TreeMap (java.util.TreeMap), VoltTable (org.voltdb.VoltTable), PartitionInfo (ddlwindowing.WindowingApp.PartitionInfo), ProcCallException (org.voltdb.client.ProcCallException)
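
Both examples read and write the fields of PartitionInfo, a small data holder nested inside WindowingApp. The class itself is not shown on this page; the sketch below is reconstructed from how the fields are used in the two run() methods (the names come from the code, the field types and comments are assumptions).

// Minimal sketch of the nested holder class, reconstructed from usage;
// not the original WindowingApp source.
public static class PartitionInfo {
    String partitionKey;    // a sample key that routes to this partition
    long tupleCount;        // rows of the "timedata" table on this partition
    long oldestTupleAge;    // age in milliseconds, from the AgeOfOldest procedure
    long youngestTupleAge;  // age in milliseconds, from the AgeOfYoungest procedure
}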

Example 2 with PartitionInfo

Use of ddlwindowing.WindowingApp.PartitionInfo in the voltdb project by VoltDB.

From the class PartitionDataTracker, the run() method:

@Override
public void run() {
    Map<Long, PartitionInfo> partitionData = new HashMap<Long, PartitionInfo>();
    VoltTable partitionKeys = null, tableStats = null;
    try {
        tableStats = app.client.callProcedure("@Statistics", "TABLE").getResults()[0];
        partitionKeys = app.client.callProcedure("@GetPartitionKeys", "STRING").getResults()[0];
    } catch (IOException | ProcCallException e) {
        // Track failures in a simplistic way.
        failureCount.incrementAndGet();
        // No worries. Will be scheduled again soon.
        return;
    }
    while (tableStats.advanceRow()) {
        if (!tableStats.getString("TABLE_NAME").equalsIgnoreCase("timedata")) {
            continue;
        }
        PartitionInfo pinfo = new PartitionInfo();
        long partitionId = tableStats.getLong("PARTITION_ID");
        pinfo.tupleCount = tableStats.getLong("TUPLE_COUNT");
        pinfo.partitionKey = null;
        // If redundancy (k-safety) is enabled, this put() runs k+1 times per
        // partition, but the tuple count is the same for every replica, so the
        // final value is still correct.
        partitionData.put(partitionId, pinfo);
    }
    while (partitionKeys.advanceRow()) {
        long partitionId = partitionKeys.getLong("PARTITION_ID");
        PartitionInfo pinfo = partitionData.get(partitionId);
        if (pinfo == null) {
            // Table stats and partition keys don't line up, most likely because
            // the cluster topology is changing. Skip this report cycle and try
            // again later, when things will have settled down.
            return;
        }
        pinfo.partitionKey = partitionKeys.getString("PARTITION_KEY");
        try {
            // Find the age of the oldest and youngest tuples in this partition to
            // demonstrate that we're both accepting new tuples and aging out
            // old tuples at the appropriate time.
            ClientResponse cr = app.client.callProcedure("AgeOfOldest", pinfo.partitionKey);
            pinfo.oldestTupleAge = cr.getResults()[0].asScalarLong();
            cr = app.client.callProcedure("AgeOfYoungest", pinfo.partitionKey);
            pinfo.youngestTupleAge = cr.getResults()[0].asScalarLong();
        } catch (IOException | ProcCallException e) {
            failureCount.incrementAndGet();
            return;
        }
    }
    // Sanity check: make sure that every partition reported by @Statistics
    // also received a partition key.
    boolean allMatched = true;
    for (PartitionInfo pinfo : partitionData.values()) {
        // This partition has a tuple count, but no matching partition key.
        if (pinfo.partitionKey == null) {
            allMatched = false;
        }
    }
    if (!allMatched) {
        // The tracking data is incomplete; skip this report cycle and try again
        // later, when the cluster will have settled down.
        return;
    }
    // Atomically swap in the new map for the old one.
    app.updatePartitionInfo(partitionData);
}
Also used: ClientResponse (org.voltdb.client.ClientResponse), HashMap (java.util.HashMap), AtomicLong (java.util.concurrent.atomic.AtomicLong), PartitionInfo (ddlwindowing.WindowingApp.PartitionInfo), IOException (java.io.IOException), VoltTable (org.voltdb.VoltTable), ProcCallException (org.voltdb.client.ProcCallException)
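
The two run() methods also rely on WindowingApp plumbing that this page does not show: getPartitionData() read by the Reporter, updatePartitionInfo() called by the PartitionDataTracker, and some scheduler that fires both runnables periodically. Below is a hypothetical sketch of that wiring, consistent with the synchronized (app) block in Example 1; the method names match the calls above, while the class name and the scheduling intervals are assumptions.

import java.util.Collections;
import java.util.Map;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

// Hypothetical sketch, not the original WindowingApp source.
class WindowingAppSketch {
    private Map<Long, PartitionInfo> partitionData = Collections.emptyMap();

    // Read by the Reporter; locking on the app instance is consistent with
    // the synchronized (app) block in Example 1.
    synchronized Map<Long, PartitionInfo> getPartitionData() {
        return partitionData;
    }

    // Called by the PartitionDataTracker to swap in a freshly built map.
    synchronized void updatePartitionInfo(Map<Long, PartitionInfo> newData) {
        partitionData = newData;
    }

    // Assumed scheduling: refresh partition data every second, report every 5 s.
    void schedulePeriodicTasks(Runnable partitionTracker, Runnable reporter) {
        ScheduledExecutorService scheduler = Executors.newScheduledThreadPool(2);
        scheduler.scheduleAtFixedRate(partitionTracker, 0, 1, TimeUnit.SECONDS);
        scheduler.scheduleAtFixedRate(reporter, 5, 5, TimeUnit.SECONDS);
    }
}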

Aggregations

PartitionInfo (ddlwindowing.WindowingApp.PartitionInfo) ×2, IOException (java.io.IOException) ×2, VoltTable (org.voltdb.VoltTable) ×2, ClientResponse (org.voltdb.client.ClientResponse) ×2, ProcCallException (org.voltdb.client.ProcCallException) ×2, HashMap (java.util.HashMap) ×1, TreeMap (java.util.TreeMap) ×1, AtomicLong (java.util.concurrent.atomic.AtomicLong) ×1