Example 51 with BBContainer

use of org.voltcore.utils.DBBPool.BBContainer in project voltdb by VoltDB.

the class ParameterSet method flattenToBuffer.

public void flattenToBuffer(ByteBuffer buf) throws IOException {
    buf.putShort((short) m_params.length);
    for (int i = 0; i < m_params.length; i++) {
        Object obj = m_params[i];
        if ((obj == null) || (obj == JSONObject.NULL)) {
            VoltType type = VoltType.NULL;
            buf.put(type.getValue());
            continue;
        }
        Class<?> cls = obj.getClass();
        if (cls.isArray()) {
            // Special case byte[] arrays as the VARBINARY type.
            if (obj instanceof byte[]) {
                final byte[] b = (byte[]) obj;
                // commented out this bit... presumably the EE will do this check upon receipt
                /*if (b.length > VoltType.MAX_VALUE_LENGTH) {
                        throw new IOException("Value of byte[] larger than allowed max string or varbinary " + VoltType.MAX_VALUE_LENGTH_STR);
                    }*/
                buf.put(VoltType.VARBINARY.getValue());
                buf.putInt(b.length);
                buf.put(b);
                continue;
            }
            // Same as above, but the value arrives as an unmanaged ByteBuffer inside a BBContainer
            if (obj instanceof BBContainer) {
                final BBContainer cont = (BBContainer) obj;
                final ByteBuffer paramBuf = cont.b();
                buf.put(VoltType.VARBINARY.getValue());
                buf.putInt(paramBuf.remaining());
                buf.put(paramBuf);
                continue;
            }
            buf.put(ARRAY);
            VoltType type;
            try {
                type = VoltType.typeFromClass(cls.getComponentType());
            } catch (VoltTypeException e) {
                obj = getAKosherArray((Object[]) obj);
                cls = obj.getClass();
                type = VoltType.typeFromClass(cls.getComponentType());
            }
            buf.put(type.getValue());
            switch(type) {
                case SMALLINT:
                    SerializationHelper.writeArray((short[]) obj, buf);
                    break;
                case INTEGER:
                    SerializationHelper.writeArray((int[]) obj, buf);
                    break;
                case BIGINT:
                    SerializationHelper.writeArray((long[]) obj, buf);
                    break;
                case FLOAT:
                    SerializationHelper.writeArray((double[]) obj, buf);
                    break;
                case STRING:
                    if (m_encodedStringArrays[i] == null) {
                        // should not happen
                        throw new IOException("String array not encoded");
                    }
                    // This check used to be done by FastSerializer.writeArray(), but that code path has changed
                    if (m_encodedStringArrays[i].length > Short.MAX_VALUE) {
                        throw new IOException("Array exceeds maximum length of " + Short.MAX_VALUE + " bytes");
                    }
                    buf.putShort((short) m_encodedStringArrays[i].length);
                    for (int zz = 0; zz < m_encodedStringArrays[i].length; zz++) {
                        SerializationHelper.writeVarbinary(m_encodedStringArrays[i][zz], buf);
                    }
                    break;
                case TIMESTAMP:
                    SerializationHelper.writeArray((TimestampType[]) obj, buf);
                    break;
                case DECIMAL:
                    // converted to a 128-bit fixed-point value by the serializer API
                    SerializationHelper.writeArray((BigDecimal[]) obj, buf);
                    break;
                case VOLTTABLE:
                    SerializationHelper.writeArray((VoltTable[]) obj, buf);
                    break;
                case VARBINARY:
                    SerializationHelper.writeArray((byte[][]) obj, buf);
                    break;
                case GEOGRAPHY_POINT:
                    SerializationHelper.writeArray((GeographyPointValue[]) obj, buf);
                    break;
                case GEOGRAPHY:
                    SerializationHelper.writeArray((GeographyValue[]) obj, buf);
                    break;
                default:
                    throw new RuntimeException("FIXME: Unsupported type " + type);
            }
            continue;
        }
        // Handle NULL mappings not encoded by type.min_value convention
        if (obj == VoltType.NULL_TIMESTAMP) {
            buf.put(VoltType.TIMESTAMP.getValue());
            // corresponds to EE value.h isNull()
            buf.putLong(VoltType.NULL_BIGINT);
            continue;
        } else if (obj == VoltType.NULL_STRING_OR_VARBINARY) {
            buf.put(VoltType.STRING.getValue());
            buf.putInt(VoltType.NULL_STRING_LENGTH);
            continue;
        } else if (obj == VoltType.NULL_DECIMAL) {
            buf.put(VoltType.DECIMAL.getValue());
            VoltDecimalHelper.serializeNull(buf);
            continue;
        } else if (obj == VoltType.NULL_POINT) {
            buf.put(VoltType.GEOGRAPHY_POINT.getValue());
            GeographyPointValue.serializeNull(buf);
            continue;
        } else if (obj == VoltType.NULL_GEOGRAPHY) {
            buf.put(VoltType.GEOGRAPHY.getValue());
            buf.putInt(VoltType.NULL_STRING_LENGTH);
            continue;
        } else if (obj instanceof BBContainer) {
            final BBContainer cont = (BBContainer) obj;
            final ByteBuffer paramBuf = cont.b();
            buf.put(VoltType.VARBINARY.getValue());
            buf.putInt(paramBuf.remaining());
            buf.put(paramBuf);
            continue;
        }
        VoltType type = VoltType.typeFromClass(cls);
        buf.put(type.getValue());
        switch(type) {
            case TINYINT:
                buf.put((Byte) obj);
                break;
            case SMALLINT:
                buf.putShort((Short) obj);
                break;
            case INTEGER:
                buf.putInt((Integer) obj);
                break;
            case BIGINT:
                buf.putLong((Long) obj);
                break;
            case FLOAT:
                if (cls == Float.class)
                    buf.putDouble(((Float) obj).doubleValue());
                else if (cls == Double.class)
                    buf.putDouble(((Double) obj).doubleValue());
                else
                    throw new RuntimeException("Can't cast parameter type to Double");
                break;
            case STRING:
                if (m_encodedStrings[i] == null) {
                    // should not happen
                    throw new IOException("String not encoded: " + (String) obj);
                }
                SerializationHelper.writeVarbinary(m_encodedStrings[i], buf);
                break;
            case TIMESTAMP:
                long micros = timestampToMicroseconds(obj);
                buf.putLong(micros);
                break;
            case DECIMAL:
                VoltDecimalHelper.serializeBigDecimal((BigDecimal) obj, buf);
                break;
            case VOLTTABLE:
                ((VoltTable) obj).flattenToBuffer(buf);
                break;
            case GEOGRAPHY_POINT:
                ((GeographyPointValue) obj).flattenToBuffer(buf);
                break;
            case GEOGRAPHY:
                GeographyValue gv = (GeographyValue) obj;
                buf.putInt(gv.getLengthInBytes());
                gv.flattenToBuffer(buf);
                break;
            default:
                throw new RuntimeException("FIXME: Unsupported type " + type);
        }
    }
}
Also used : GeographyValue(org.voltdb.types.GeographyValue) IOException(java.io.IOException) JSONString(org.json_voltpatches.JSONString) ByteBuffer(java.nio.ByteBuffer) BigDecimal(java.math.BigDecimal) BBContainer(org.voltcore.utils.DBBPool.BBContainer) TimestampType(org.voltdb.types.TimestampType) JSONObject(org.json_voltpatches.JSONObject) GeographyPointValue(org.voltdb.types.GeographyPointValue)
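
For context, here is a minimal sketch of driving this serialization path from a caller. It assumes ParameterSet.fromArrayNoCopy(...) and getSerializedSize() are available as in the VoltDB source these examples are drawn from; treat the exact factory name as an assumption rather than a guarantee.

import java.nio.ByteBuffer;

import org.voltdb.ParameterSet;

public class FlattenToBufferDemo {
    public static void main(String[] args) throws Exception {
        // Mixed scalar, string and byte[] parameters; byte[] serializes as VARBINARY.
        ParameterSet params = ParameterSet.fromArrayNoCopy(42, "hello", new byte[] { 1, 2, 3 });
        // Size the buffer first, then flatten: a 2-byte count, then a type byte plus payload per parameter.
        ByteBuffer buf = ByteBuffer.allocate(params.getSerializedSize());
        params.flattenToBuffer(buf);
        buf.flip();
        System.out.println("serialized " + buf.remaining() + " bytes");
    }
}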

Example 52 with BBContainer

use of org.voltcore.utils.DBBPool.BBContainer in project voltdb by VoltDB.

the class PartitionProjectionSnapshotFilter method filter.

@Override
public Callable<BBContainer> filter(final Callable<BBContainer> input) {
    return new Callable<BBContainer>() {

        @Override
        public BBContainer call() throws Exception {
            final BBContainer cont = input.call();
            final int partitionId = cont.b().getInt(m_partitionIdOffset);
            boolean hasPartition = false;
            for (int acceptedPartitionId : m_partitions) {
                if (partitionId == acceptedPartitionId) {
                    hasPartition = true;
                }
            }
            if (hasPartition) {
                return cont;
            } else {
                cont.discard();
                return null;
            }
        }
    };
}
Also used : BBContainer(org.voltcore.utils.DBBPool.BBContainer) Callable(java.util.concurrent.Callable)
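
A hedged sketch of the ownership rule this filter enforces: a rejected container is discarded inside call(), while an accepted one becomes the caller's responsibility. The upstream producer below is hypothetical, and the identity wrapper stands in for filter.filter(source), since constructing the real filter requires its partition metadata.

import java.util.concurrent.Callable;

import org.voltcore.utils.DBBPool;
import org.voltcore.utils.DBBPool.BBContainer;

public class FilterChainDemo {
    public static void main(String[] args) throws Exception {
        // Hypothetical upstream producer of snapshot blocks.
        Callable<BBContainer> source = () -> DBBPool.allocateDirect(64);
        // Stand-in for filter.filter(source) from the example above.
        Callable<BBContainer> filtered = source;
        BBContainer block = filtered.call();
        if (block != null) {
            try {
                System.out.println("block capacity: " + block.b().capacity());
            } finally {
                block.discard(); // accepted blocks are the caller's to discard
            }
        } // a null return means the filter already discarded the rejected block
    }
}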

Example 53 with BBContainer

use of org.voltcore.utils.DBBPool.BBContainer in project voltdb by VoltDB.

the class SnapshotSiteProcessor method doSnapshotWork.

/*
     * noSchedule means don't try to schedule snapshot work, because this is a blocking
     * task from completeSnapshotWork. This avoids creating thousands of task objects.
     */
public Future<?> doSnapshotWork(SystemProcedureExecutionContext context, boolean noSchedule) {
    ListenableFuture<?> retval = null;
    /*
         * This thread will null out the reference to m_snapshotTableTasks when
         * a snapshot is finished. If the snapshot buffer is loaned out that means
         * it is pending I/O somewhere so there is no work to do until it comes back.
         */
    if (m_snapshotTableTasks == null) {
        return retval;
    }
    if (m_snapshotTargets == null) {
        return null;
    }
    /*
         * Try to serialize a block from a table, if the table is finished,
         * remove the tasks from the task map and move on to the next table. If a block is
         * successfully serialized, break out of the loop and release the site thread for more
         * transaction work.
         */
    Iterator<Map.Entry<Integer, Collection<SnapshotTableTask>>> taskIter = m_snapshotTableTasks.asMap().entrySet().iterator();
    while (taskIter.hasNext()) {
        Map.Entry<Integer, Collection<SnapshotTableTask>> taskEntry = taskIter.next();
        final int tableId = taskEntry.getKey();
        final Collection<SnapshotTableTask> tableTasks = taskEntry.getValue();
        final List<BBContainer> outputBuffers = getOutputBuffers(tableTasks, noSchedule);
        if (outputBuffers == null) {
            // Not enough buffers available
            if (!noSchedule) {
                rescheduleSnapshotWork();
            }
            break;
        }
        // Stream more and add a listener to handle any failures
        Pair<ListenableFuture, Boolean> streamResult = m_streamers.get(tableId).streamMore(context, outputBuffers, null);
        if (streamResult.getFirst() != null) {
            final ListenableFuture writeFutures = streamResult.getFirst();
            writeFutures.addListener(new Runnable() {

                @Override
                public void run() {
                    try {
                        writeFutures.get();
                    } catch (Throwable t) {
                        if (m_perSiteLastSnapshotSucceded) {
                            if (t instanceof StreamSnapshotTimeoutException || t.getCause() instanceof StreamSnapshotTimeoutException) {
                            // This error is already logged by the watchdog when it generates the exception
                            } else {
                                SNAP_LOG.error("Error while attempting to write snapshot data", t);
                            }
                            m_perSiteLastSnapshotSucceded = false;
                        }
                    }
                }
            }, CoreUtils.SAMETHREADEXECUTOR);
        }
        /**
             * The table streamer will return false when there is no more data left to pull from that table. The
             * enclosing loop ensures that the next table is then addressed.
             */
        if (!streamResult.getSecond()) {
            asyncTerminateReplicatedTableTasks(tableTasks);
            // XXX: Guava's multimap will clear the tableTasks collection when the entry is
            // removed from the containing map, so don't use the collection after removal!
            taskIter.remove();
            SNAP_LOG.debug("Finished snapshot tasks for table " + tableId + ": " + tableTasks);
        } else {
            break;
        }
    }
    /**
         * If there are no more tasks then this particular EE is finished doing snapshot work.
         * Check the set of currently snapshotting sites to find out if this is the last one.
         */
    if (m_snapshotTableTasks.isEmpty()) {
        SNAP_LOG.debug("Finished with tasks");
        // In case this is a non-blocking snapshot, do the post-snapshot tasks here.
        runPostSnapshotTasks(context);
        final ArrayList<SnapshotDataTarget> snapshotTargets = m_snapshotTargets;
        m_snapshotTargets = null;
        m_snapshotTableTasks = null;
        boolean IamLast = false;
        synchronized (ExecutionSitesCurrentlySnapshotting) {
            if (!ExecutionSitesCurrentlySnapshotting.contains(this)) {
                VoltDB.crashLocalVoltDB("Currently snapshotting site didn't find itself in set of snapshotting sites", true, null);
            }
            IamLast = ExecutionSitesCurrentlySnapshotting.size() == 1;
            if (!IamLast) {
                ExecutionSitesCurrentlySnapshotting.remove(this);
            }
        }
        /**
             * If this is the last one then this EE must close all the SnapshotDataTargets.
             * Done in a separate thread so the EE can go and do other work. It will
             * sync every file descriptor and that may block for a while.
             */
        if (IamLast) {
            SNAP_LOG.debug("I AM LAST!");
            final long txnId = m_lastSnapshotTxnId;
            final ExtensibleSnapshotDigestData snapshotDataForZookeeper = m_extraSnapshotData;
            m_extraSnapshotData = null;
            final Thread terminatorThread = new Thread("Snapshot terminator") {

                @Override
                public void run() {
                    boolean snapshotSucceeded = true;
                    try {
                        /*
                             * Be absolutely sure the snapshot is finished
                             * and synced to disk before another is started
                             */
                        for (Thread t : m_snapshotTargetTerminators) {
                            if (t == this) {
                                continue;
                            }
                            try {
                                t.join();
                            } catch (InterruptedException e) {
                                return;
                            }
                        }
                        for (final SnapshotDataTarget t : snapshotTargets) {
                            try {
                                t.close();
                            } catch (IOException e) {
                                snapshotSucceeded = false;
                                throw new RuntimeException(e);
                            } catch (InterruptedException e) {
                                snapshotSucceeded = false;
                                throw new RuntimeException(e);
                            }
                        }
                        Runnable r = null;
                        while ((r = m_tasksOnSnapshotCompletion.poll()) != null) {
                            try {
                                r.run();
                            } catch (Exception e) {
                                SNAP_LOG.error("Error running snapshot completion task", e);
                            }
                        }
                    } finally {
                        try {
                            VoltDB.instance().getHostMessenger().getZK().delete(VoltZK.nodes_currently_snapshotting + "/" + VoltDB.instance().getHostMessenger().getHostId(), -1);
                        } catch (NoNodeException e) {
                            SNAP_LOG.warn("Expect the snapshot node to already exist during deletion", e);
                        } catch (Exception e) {
                            VoltDB.crashLocalVoltDB(e.getMessage(), true, e);
                        } finally {
                            /**
                                 * Remove this last site from the set here after the terminator has run
                                 * so that new snapshots won't start until
                                 * everything is on disk for the previous snapshot. This prevents a really long
                                 * snapshot initiation procedure from occurring because it has to contend for
                                 * filesystem resources
                                 *
                                 * Do this before logSnapshotCompleteToZK() because the ZK operations are slow,
                                 * and they can trigger snapshot completion interests to fire before this site
                                 * removes itself from the set. The next snapshot request may come in and see
                                 * this snapshot is still in progress.
                                 */
                            ExecutionSitesCurrentlySnapshotting.remove(SnapshotSiteProcessor.this);
                        }
                        logSnapshotCompleteToZK(txnId, snapshotSucceeded, snapshotDataForZookeeper);
                    }
                }
            };
            m_snapshotTargetTerminators.add(terminatorThread);
            terminatorThread.start();
        }
    }
    return retval;
}
Also used : NoNodeException(org.apache.zookeeper_voltpatches.KeeperException.NoNodeException) StreamSnapshotTimeoutException(org.voltdb.rejoin.StreamSnapshotDataTarget.StreamSnapshotTimeoutException) KeeperException(org.apache.zookeeper_voltpatches.KeeperException) IOException(java.io.IOException) ExecutionException(java.util.concurrent.ExecutionException) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) Collection(java.util.Collection) BBContainer(org.voltcore.utils.DBBPool.BBContainer) ListenableFuture(com.google_voltpatches.common.util.concurrent.ListenableFuture) HashMap(java.util.HashMap) Map(java.util.Map) ImmutableMap(com.google_voltpatches.common.collect.ImmutableMap)
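
The failure listener above follows a compact pattern: attach a Runnable that calls get() on the completed future so any failure rethrows, and run it on CoreUtils.SAMETHREADEXECUTOR to avoid an extra thread. A minimal sketch, assuming SettableFuture is available in VoltDB's repackaged Guava (google_voltpatches) as it is in stock Guava:

import com.google_voltpatches.common.util.concurrent.SettableFuture;

import org.voltcore.utils.CoreUtils;

public class WriteListenerDemo {
    public static void main(String[] args) {
        final SettableFuture<Void> writeFuture = SettableFuture.create();
        writeFuture.addListener(new Runnable() {
            @Override
            public void run() {
                try {
                    writeFuture.get(); // future is complete here, so get() rethrows any failure
                } catch (Throwable t) {
                    System.err.println("Error while attempting to write snapshot data: " + t);
                }
            }
        }, CoreUtils.SAMETHREADEXECUTOR);
        // Simulate a failed snapshot write; the listener fires on this thread.
        writeFuture.setException(new java.io.IOException("disk full"));
    }
}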

Example 54 with BBContainer

use of org.voltcore.utils.DBBPool.BBContainer in project voltdb by VoltDB.

the class SnapshotUtil method OutputBuffersToBytes.

public static byte[] OutputBuffersToBytes(Collection<BBContainer> outputContainers) {
    ByteBuffer buf = ByteBuffer.allocate(4 + // buffer count
            (8 + 4 + 4) * outputContainers.size()); // per-buffer info: address, position, remaining
    buf.putInt(outputContainers.size());
    for (DBBPool.BBContainer container : outputContainers) {
        buf.putLong(container.address());
        buf.putInt(container.b().position());
        buf.putInt(container.b().remaining());
    }
    return buf.array();
}
Also used : BBContainer(org.voltcore.utils.DBBPool.BBContainer) ByteBuffer(java.nio.ByteBuffer) DBBPool(org.voltcore.utils.DBBPool)
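
The resulting layout is a 4-byte count followed by a 16-byte record per container: an 8-byte native address, a 4-byte position, and a 4-byte remaining length. A self-contained sketch that decodes that layout (class and method names here are illustrative only):

import java.nio.ByteBuffer;

public class OutputBufferDecoder {
    public static void decode(byte[] bytes) {
        ByteBuffer buf = ByteBuffer.wrap(bytes);
        int count = buf.getInt(); // buffer count
        for (int i = 0; i < count; i++) {
            long address = buf.getLong(); // native address of the direct buffer
            int position = buf.getInt();
            int remaining = buf.getInt();
            System.out.printf("buffer %d: address=%d position=%d remaining=%d%n",
                    i, address, position, remaining);
        }
    }

    public static void main(String[] args) {
        // One fabricated record, matching the writer's layout above.
        ByteBuffer sample = ByteBuffer.allocate(4 + 16);
        sample.putInt(1).putLong(0xDEADBEEFL).putInt(0).putInt(4096);
        decode(sample.array());
    }
}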

Example 55 with BBContainer

use of org.voltcore.utils.DBBPool.BBContainer in project voltdb by VoltDB.

the class NIOWriteStreamBase method shutdown.

/**
     * Free the pool resources that are held by this WriteStream. The pool itself is thread local
     * and will be freed when the thread terminates.
     */
void shutdown() {
    int bytesReleased = 0;
    m_isShutdown = true;
    BBContainer c = null;
    if (m_currentWriteBuffer != null) {
        bytesReleased += m_currentWriteBuffer.b().remaining();
        m_currentWriteBuffer.discard();
    }
    while ((c = m_queuedBuffers.poll()) != null) {
        // Buffer is not flipped after being written to in swap and serialize, so flip it here before counting the bytes released
        c.b().flip();
        bytesReleased += c.b().remaining();
        c.discard();
    }
    updateQueued(-bytesReleased, false);
}
Also used : BBContainer(org.voltcore.utils.DBBPool.BBContainer)
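
The flip-then-discard dance is easier to see in isolation. A minimal sketch of the BBContainer contract all of these examples rely on, assuming voltcore's DBBPool.allocateDirect(int): whoever ends up holding a container must call discard() exactly once so the pooled memory is returned.

import org.voltcore.utils.DBBPool;
import org.voltcore.utils.DBBPool.BBContainer;

public class ContainerLifecycleDemo {
    public static void main(String[] args) {
        BBContainer c = DBBPool.allocateDirect(4096); // pooled direct buffer
        try {
            c.b().putInt(0xCAFE); // write through the wrapped ByteBuffer
            c.b().flip();         // flip before handing off, as shutdown() must do
            System.out.println("queued bytes: " + c.b().remaining());
        } finally {
            c.discard(); // returns the memory; a double discard or a leak is a bug
        }
    }
}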

Aggregations

BBContainer (org.voltcore.utils.DBBPool.BBContainer) 57
Test (org.junit.Test) 22
ByteBuffer (java.nio.ByteBuffer) 21
BinaryDequeReader (org.voltdb.utils.BinaryDeque.BinaryDequeReader) 18
IOException (java.io.IOException) 15
File (java.io.File) 10
JSONObject (org.json_voltpatches.JSONObject) 4
BinaryDequeTruncator (org.voltdb.utils.BinaryDeque.BinaryDequeTruncator) 4
TruncatorResponse (org.voltdb.utils.BinaryDeque.TruncatorResponse) 4
ArrayList (java.util.ArrayList) 3
ExecutionException (java.util.concurrent.ExecutionException) 3
FileInputStream (java.io.FileInputStream) 2
Collection (java.util.Collection) 2
Random (java.util.Random) 2
Callable (java.util.concurrent.Callable) 2
AtomicReference (java.util.concurrent.atomic.AtomicReference) 2
PureJavaCrc32C (org.apache.hadoop_voltpatches.util.PureJavaCrc32C) 2
JSONException (org.json_voltpatches.JSONException) 2
JSONString (org.json_voltpatches.JSONString) 2
HashinatorConfig (org.voltdb.TheHashinator.HashinatorConfig) 2