
Example 1 with PureJavaCrc32C

Use of org.apache.hadoop_voltpatches.util.PureJavaCrc32C in project voltdb by VoltDB.

The class TheHashinator, method computeConfigurationSignature.

/**
 * Computes a signature from the given configuration bytes.
 * @param config configuration byte array
 * @return signature computed from the given configuration bytes
 */
public static long computeConfigurationSignature(byte[] config) {
    PureJavaCrc32C crc = new PureJavaCrc32C();
    crc.update(config);
    return crc.getValue();
}
Also used: PureJavaCrc32C (org.apache.hadoop_voltpatches.util.PureJavaCrc32C)
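
For context, a minimal sketch of how such a signature could be compared across nodes, assuming only the behavior shown above (the class name and byte arrays are illustrative, not real hashinator configurations):

import org.apache.hadoop_voltpatches.util.PureJavaCrc32C;

public class SignatureSketch {

    // Same logic as computeConfigurationSignature above: CRC32C over the raw bytes.
    static long signature(byte[] config) {
        PureJavaCrc32C crc = new PureJavaCrc32C();
        crc.update(config);
        return crc.getValue();
    }

    public static void main(String[] args) {
        byte[] configA = { 0, 1, 2, 3 };
        byte[] configB = { 0, 1, 2, 3 };
        // Identical bytes yield identical signatures, so two nodes can compare
        // configurations with a cheap long comparison instead of byte-by-byte.
        System.out.println(signature(configA) == signature(configB)); // true
    }
}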

Example 2 with PureJavaCrc32C

Use of org.apache.hadoop_voltpatches.util.PureJavaCrc32C in project voltdb by VoltDB.

The class RealVoltDB, method printDiagnosticInformation.

public static void printDiagnosticInformation(CatalogContext context, String procName, LoadedProcedureSet procSet) {
    StringBuilder sb = new StringBuilder();
    final CatalogMap<Procedure> catalogProcedures = context.database.getProcedures();
    PureJavaCrc32C crc = new PureJavaCrc32C();
    sb.append("Statements within " + procName + ": ").append("\n");
    for (final Procedure proc : catalogProcedures) {
        if (proc.getTypeName().equals(procName)) {
            for (Statement stmt : proc.getStatements()) {
                // compute hash for determinism check
                crc.reset();
                String sqlText = stmt.getSqltext();
                crc.update(sqlText.getBytes(Constants.UTF8ENCODING));
                int hash = (int) crc.getValue();
                sb.append("Statement Hash: ").append(hash);
                sb.append(", Statement SQL: ").append(sqlText);
                for (PlanFragment frag : stmt.getFragments()) {
                    byte[] planHash = Encoder.hexDecode(frag.getPlanhash());
                    long planId = ActivePlanRepository.getFragmentIdForPlanHash(planHash);
                    String stmtText = ActivePlanRepository.getStmtTextForPlanHash(planHash);
                    byte[] jsonPlan = ActivePlanRepository.planForFragmentId(planId);
                    sb.append(", Plan Fragment Id:").append(planId);
                    sb.append(", Plan Stmt Text:").append(stmtText);
                    sb.append(", Json Plan:").append(new String(jsonPlan));
                }
                sb.append("\n");
            }
        }
    }
    sb.append("Default CRUD Procedures: ").append("\n");
    for (Entry<String, Procedure> pair : context.m_defaultProcs.m_defaultProcMap.entrySet()) {
        crc.reset();
        String sqlText = DefaultProcedureManager.sqlForDefaultProc(pair.getValue());
        crc.update(sqlText.getBytes(Constants.UTF8ENCODING));
        int hash = (int) crc.getValue();
        sb.append("Statement Hash: ").append(hash);
        sb.append(", Statement SQL: ").append(sqlText);
        ProcedureRunner runner = procSet.getProcByName(pair.getValue().getTypeName());
        for (Statement stmt : runner.getCatalogProcedure().getStatements()) {
            for (PlanFragment frag : stmt.getFragments()) {
                byte[] planHash = Encoder.hexDecode(frag.getPlanhash());
                long planId = ActivePlanRepository.getFragmentIdForPlanHash(planHash);
                String stmtText = ActivePlanRepository.getStmtTextForPlanHash(planHash);
                byte[] jsonPlan = ActivePlanRepository.planForFragmentId(planId);
                sb.append(", Plan Fragment Id:").append(planId);
                sb.append(", Plan Stmt Text:").append(stmtText);
                sb.append(", Json Plan:").append(new String(jsonPlan));
            }
        }
        sb.append("\n");
    }
    hostLog.error(sb.toString());
}
Also used: PureJavaCrc32C (org.apache.hadoop_voltpatches.util.PureJavaCrc32C), Statement (org.voltdb.catalog.Statement), Procedure (org.voltdb.catalog.Procedure), PlanFragment (org.voltdb.catalog.PlanFragment)
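
The determinism hash above is nothing more than a CRC32C of the statement's SQL text in UTF-8. A standalone sketch of just that step, with java.nio.charset.StandardCharsets.UTF_8 standing in for VoltDB's Constants.UTF8ENCODING and an illustrative SQL string:

import java.nio.charset.StandardCharsets;

import org.apache.hadoop_voltpatches.util.PureJavaCrc32C;

public class StatementHashSketch {

    public static void main(String[] args) {
        PureJavaCrc32C crc = new PureJavaCrc32C();
        String sqlText = "SELECT COUNT(*) FROM warehouse;"; // illustrative SQL
        crc.reset(); // a single instance can be reused across statements, as above
        crc.update(sqlText.getBytes(StandardCharsets.UTF_8));
        int hash = (int) crc.getValue(); // truncated to int, matching the log output
        System.out.println("Statement Hash: " + hash);
    }
}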

Example 3 with PureJavaCrc32C

Use of org.apache.hadoop_voltpatches.util.PureJavaCrc32C in project voltdb by VoltDB.

The class TestDBBPool, method testChecksum.

@Test
public void testChecksum() {
    EELibraryLoader.loadExecutionEngineLibrary(true);
    final long seed = System.currentTimeMillis();
    Random r = new Random(seed);
    System.out.println("Seed is " + seed);
    for (int ii = 0; ii < 10000; ii++) {
        int nextLength = r.nextInt(4096);
        byte[] bytes = new byte[nextLength];
        r.nextBytes(bytes);
        PureJavaCrc32C checksum = new PureJavaCrc32C();
        checksum.update(bytes);
        int javaSum = (int) checksum.getValue();
        BBContainer cont = DBBPool.allocateDirect(nextLength);
        cont.b().put(bytes);
        int cSum = DBBPool.getCRC32C(cont.address(), 0, nextLength);
        cont.discard();
        assertEquals(javaSum, cSum);
    }
}
Also used: Random (java.util.Random), PureJavaCrc32C (org.apache.hadoop_voltpatches.util.PureJavaCrc32C), BBContainer (org.voltcore.utils.DBBPool.BBContainer), Test (org.junit.Test)
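
PureJavaCrc32C implements the CRC-32C (Castagnoli) polynomial, which is also what java.util.zip.CRC32C computes on Java 9 and later. Assuming that equivalence holds, a quick pure-Java cross-check sketch (no native library needed):

import java.util.Random;
import java.util.zip.CRC32C;

import org.apache.hadoop_voltpatches.util.PureJavaCrc32C;

public class Crc32cCrossCheckSketch {

    public static void main(String[] args) {
        byte[] bytes = new byte[4096];
        new Random(42).nextBytes(bytes);

        PureJavaCrc32C patched = new PureJavaCrc32C();
        patched.update(bytes);

        CRC32C jdk = new CRC32C(); // built into the JDK since Java 9
        jdk.update(bytes);

        // Both should agree if they implement the same Castagnoli polynomial.
        System.out.println(patched.getValue() == jdk.getValue()); // expected: true
    }
}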

Example 4 with PureJavaCrc32C

Use of org.apache.hadoop_voltpatches.util.PureJavaCrc32C in project voltdb by VoltDB.

The class DefaultSnapshotDataTarget, method write.

/*
 * Prepending the length is effectively synonymous with writing actual tuple
 * data rather than the header.
 */
private ListenableFuture<?> write(final Callable<BBContainer> tupleDataC, final boolean prependLength) {
    /*
     * Unwrap the data to be written. For the traditional
     * snapshot data target this should be a no-op.
     */
    BBContainer tupleDataTemp;
    try {
        tupleDataTemp = tupleDataC.call();
        /*
         * Can be null if the dedupe filter nulled out the buffer.
         */
        if (tupleDataTemp == null) {
            return Futures.immediateFuture(null);
        }
    } catch (Throwable t) {
        return Futures.immediateFailedFuture(t);
    }
    final BBContainer tupleDataCont = tupleDataTemp;
    if (m_writeFailed) {
        tupleDataCont.discard();
        return null;
    }
    ByteBuffer tupleData = tupleDataCont.b();
    m_outstandingWriteTasks.incrementAndGet();
    Future<BBContainer> compressionTask = null;
    if (prependLength) {
        BBContainer cont = DBBPool.allocateDirectAndPool(SnapshotSiteProcessor.m_snapshotBufferCompressedLen);
        // Skip 4 bytes so the partition ID is not compressed.
        // That way, if we detect corruption, we know which partition is bad.
        tupleData.position(tupleData.position() + 4);
        /*
         * Leave 12 bytes for a 4-byte length prefix, a 4-byte partition id,
         * and a 4-byte CRC32C of just the header bytes. With the compressed
         * payload's own CRC that makes 16 bytes of metadata, but the payload
         * CRC (4 bytes) is written by CompressionService.
         */
        cont.b().position(12);
        compressionTask = CompressionService.compressAndCRC32cBufferAsync(tupleData, cont);
    }
    final Future<BBContainer> compressionTaskFinal = compressionTask;
    ListenableFuture<?> writeTask = m_es.submit(new Callable<Object>() {

        @Override
        public Object call() throws Exception {
            try {
                if (m_acceptOneWrite) {
                    m_acceptOneWrite = false;
                } else {
                    if (m_simulateBlockedWrite != null) {
                        m_simulateBlockedWrite.await();
                    }
                    if (m_simulateFullDiskWritingChunk) {
                        //Make sure to consume the result of the compression
                        compressionTaskFinal.get().discard();
                        throw new IOException("Disk full");
                    }
                }
                final ByteBuffer tupleData = tupleDataCont.b();
                int totalWritten = 0;
                if (prependLength) {
                    BBContainer payloadContainer = compressionTaskFinal.get();
                    try {
                        final ByteBuffer payloadBuffer = payloadContainer.b();
                        payloadBuffer.position(0);
                        ByteBuffer lengthPrefix = ByteBuffer.allocate(12);
                        m_bytesAllowedBeforeSync.acquire(payloadBuffer.remaining());
                        // The length prefix counts only the compressed payload that
                        // follows, not the four 4-byte header items.
                        lengthPrefix.putInt(payloadBuffer.remaining() - 16);
                        // partitionId
                        lengthPrefix.putInt(tupleData.getInt(0));
                        /*
                         * Checksum the header and put it in the payload buffer.
                         */
                        PureJavaCrc32C crc = new PureJavaCrc32C();
                        crc.update(lengthPrefix.array(), 0, 8);
                        lengthPrefix.putInt((int) crc.getValue());
                        lengthPrefix.flip();
                        payloadBuffer.put(lengthPrefix);
                        payloadBuffer.position(0);
                        enforceSnapshotRateLimit(payloadBuffer.remaining());
                        /*
                         * Write payload to file.
                         */
                        while (payloadBuffer.hasRemaining()) {
                            totalWritten += m_channel.write(payloadBuffer);
                        }
                    } finally {
                        payloadContainer.discard();
                    }
                } else {
                    while (tupleData.hasRemaining()) {
                        totalWritten += m_channel.write(tupleData);
                    }
                }
                m_bytesWritten += totalWritten;
                m_bytesWrittenSinceLastSync.addAndGet(totalWritten);
            } catch (IOException e) {
                m_writeException = e;
                SNAP_LOG.error("Error while attempting to write snapshot data to file " + m_file, e);
                m_writeFailed = true;
                throw e;
            } finally {
                try {
                    tupleDataCont.discard();
                } finally {
                    m_outstandingWriteTasksLock.lock();
                    try {
                        if (m_outstandingWriteTasks.decrementAndGet() == 0) {
                            m_noMoreOutstandingWriteTasksCondition.signalAll();
                        }
                    } finally {
                        m_outstandingWriteTasksLock.unlock();
                    }
                }
            }
            return null;
        }
    });
    return writeTask;
}
Also used: PureJavaCrc32C (org.apache.hadoop_voltpatches.util.PureJavaCrc32C), BBContainer (org.voltcore.utils.DBBPool.BBContainer), JSONObject (org.json_voltpatches.JSONObject), IOException (java.io.IOException), ByteBuffer (java.nio.ByteBuffer), ExecutionException (java.util.concurrent.ExecutionException)
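
The 12-byte header assembled above is a 4-byte payload length, a 4-byte partition id, and a CRC32C over those first 8 bytes. A minimal sketch of just the header construction, with an illustrative class name, length, and partition id:

import java.nio.ByteBuffer;

import org.apache.hadoop_voltpatches.util.PureJavaCrc32C;

public class SnapshotHeaderSketch {

    // Build the 12-byte block header: length prefix, partition id,
    // and a CRC32C of those first 8 bytes, as in the write() method above.
    static ByteBuffer buildHeader(int payloadLength, int partitionId) {
        ByteBuffer lengthPrefix = ByteBuffer.allocate(12);
        lengthPrefix.putInt(payloadLength); // length prefix
        lengthPrefix.putInt(partitionId); // partition id
        PureJavaCrc32C crc = new PureJavaCrc32C();
        crc.update(lengthPrefix.array(), 0, 8); // checksum only the first 8 bytes
        lengthPrefix.putInt((int) crc.getValue());
        lengthPrefix.flip();
        return lengthPrefix;
    }

    public static void main(String[] args) {
        ByteBuffer header = buildHeader(1024, 7); // illustrative values
        System.out.println("header bytes: " + header.remaining()); // 12
    }
}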

Example 5 with PureJavaCrc32C

Use of org.apache.hadoop_voltpatches.util.PureJavaCrc32C in project voltdb by VoltDB.

The class TestParameterSet, method testGetCRCWithoutCrash.

public void testGetCRCWithoutCrash() throws IOException {
    ParameterSet pset;
    PureJavaCrc32C crc;
    ByteBuffer buf;
    Object[] psetObjs = new Object[] {
        // null values
        null, VoltType.NULL_INTEGER, VoltType.NULL_DECIMAL,
        // numbers
        (byte) 1, (short) 2, (int) 3, (long) 4, 1.2f, 3.6d,
        // strings
        "This is spinal tap", "",
        // binary
        "ABCDF012", new byte[] { 1, 3, 5 }, new byte[0],
        // decimal
        new BigDecimal(5.5),
        // timestamp
        new TimestampType(new Date())
    };
    pset = ParameterSet.fromArrayNoCopy(psetObjs);
    crc = new PureJavaCrc32C();
    buf = ByteBuffer.allocate(pset.getSerializedSize());
    pset.flattenToBuffer(buf);
    crc.update(buf.array());
    long crc1 = crc.getValue();
    ArrayUtils.reverse(psetObjs);
    pset = ParameterSet.fromArrayNoCopy(psetObjs);
    crc = new PureJavaCrc32C();
    buf = ByteBuffer.allocate(pset.getSerializedSize());
    pset.flattenToBuffer(buf);
    crc.update(buf.array());
    long crc2 = crc.getValue();
    pset = ParameterSet.fromArrayNoCopy(new Object[0]);
    crc = new PureJavaCrc32C();
    buf = ByteBuffer.allocate(pset.getSerializedSize());
    pset.flattenToBuffer(buf);
    crc.update(buf.array());
    long crc3 = crc.getValue();
    pset = ParameterSet.fromArrayNoCopy(new Object[] { 1 });
    crc = new PureJavaCrc32C();
    buf = ByteBuffer.allocate(pset.getSerializedSize());
    pset.flattenToBuffer(buf);
    crc.update(buf.array());
    long crc4 = crc.getValue();
    // Note: assertNotSame checks object identity; autoboxed longs would make
    // that vacuously true, so compare the CRC values directly.
    assertFalse(crc1 == crc2);
    assertFalse(crc1 == crc3);
    assertFalse(crc1 == crc4);
    assertFalse(crc2 == crc3);
    assertFalse(crc2 == crc4);
    assertFalse(crc3 == crc4);
}
Also used: PureJavaCrc32C (org.apache.hadoop_voltpatches.util.PureJavaCrc32C), TimestampType (org.voltdb.types.TimestampType), ByteBuffer (java.nio.ByteBuffer), BigDecimal (java.math.BigDecimal), Date (java.util.Date)
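
The assertions depend on the serialized parameter set being order-sensitive, so reversing the array changes the CRC. A toy sketch of that property over raw bytes (not the actual ParameterSet wire format; class and method names are illustrative):

import org.apache.hadoop_voltpatches.util.PureJavaCrc32C;

public class OrderSensitivitySketch {

    static long crcOf(byte[] data) {
        PureJavaCrc32C crc = new PureJavaCrc32C();
        crc.update(data);
        return crc.getValue();
    }

    public static void main(String[] args) {
        byte[] forward = { 1, 2, 3, 4 };
        byte[] reversed = { 4, 3, 2, 1 };
        // A CRC covers byte order, so reordering the input changes the value.
        System.out.println(crcOf(forward) != crcOf(reversed)); // true
    }
}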

Aggregations

PureJavaCrc32C (org.apache.hadoop_voltpatches.util.PureJavaCrc32C): 5 uses
ByteBuffer (java.nio.ByteBuffer): 2 uses
BBContainer (org.voltcore.utils.DBBPool.BBContainer): 2 uses
IOException (java.io.IOException): 1 use
BigDecimal (java.math.BigDecimal): 1 use
Date (java.util.Date): 1 use
Random (java.util.Random): 1 use
ExecutionException (java.util.concurrent.ExecutionException): 1 use
JSONObject (org.json_voltpatches.JSONObject): 1 use
Test (org.junit.Test): 1 use
PlanFragment (org.voltdb.catalog.PlanFragment): 1 use
Procedure (org.voltdb.catalog.Procedure): 1 use
Statement (org.voltdb.catalog.Statement): 1 use
TimestampType (org.voltdb.types.TimestampType): 1 use