Example 1 with InstanceId

Use of org.voltcore.utils.InstanceId in project voltdb by VoltDB.

In the class TestTheHashinator, the method testSaveRestoreRaw:

@Test
public void testSaveRestoreRaw() throws Exception {
    if (hashinatorType == HashinatorType.LEGACY)
        return;
    ElasticHashinator h1 = new ElasticHashinator(ElasticHashinator.getConfigureBytes(3, ElasticHashinator.DEFAULT_TOTAL_TOKENS), false);
    byte[] bytes = h1.getConfigBytes();
    HashinatorSnapshotData d1 = new HashinatorSnapshotData(bytes, 1234);
    InstanceId iid1 = new InstanceId(111, 222);
    ByteBuffer b1 = d1.saveToBuffer(iid1);
    ByteBuffer b2 = ByteBuffer.wrap(b1.array());
    HashinatorSnapshotData d2 = new HashinatorSnapshotData();
    InstanceId iid2 = d2.restoreFromBuffer(b2);
    assertEquals(iid1, iid2);
    assertTrue(Arrays.equals(d1.m_serData, d2.m_serData));
    ElasticHashinator h2 = new ElasticHashinator(d2.m_serData, false);
    assertEquals(h1.getTokens(), h2.getTokens());
}
Also used: HashinatorSnapshotData (org.voltdb.sysprocs.saverestore.HashinatorSnapshotData), InstanceId (org.voltcore.utils.InstanceId), ByteBuffer (java.nio.ByteBuffer), Test (org.junit.Test)
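
An InstanceId can also travel as JSON rather than raw bytes; Examples 2 and 4 below rebuild one with the InstanceId(JSONObject) constructor. A minimal sketch of that round trip, assuming InstanceId exposes a serializeToJSONObject() helper producing the shape the JSON constructor consumes (an assumption, since the helper is not shown in these examples):

// Round-trip an InstanceId through JSON; serializeToJSONObject() is assumed
// to emit the same fields the InstanceId(JSONObject) constructor reads.
InstanceId original = new InstanceId(111, 222);
JSONObject json = original.serializeToJSONObject();
InstanceId copy = new InstanceId(json);
assert original.equals(copy);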

Example 2 with InstanceId

Use of org.voltcore.utils.InstanceId in project voltdb by VoltDB.

In the class SnapshotUtil, the method retrieveSnapshotFilesInternal:

private static void retrieveSnapshotFilesInternal(File directory, NamedSnapshots namedSnapshots, FileFilter filter, boolean validate, SnapshotPathType stype, VoltLogger logger, int recursion) {
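    // Hard cap on directory recursion depth.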
    if (recursion == 32) {
        return;
    }
    if (!directory.exists()) {
        System.err.println("Error: Directory " + directory.getPath() + " doesn't exist");
        return;
    }
    if (!directory.canRead()) {
        System.err.println("Error: Directory " + directory.getPath() + " is not readable");
        return;
    }
    if (!directory.canExecute()) {
        System.err.println("Error: Directory " + directory.getPath() + " is not executable");
        return;
    }
    for (File f : directory.listFiles(filter)) {
        if (f.isDirectory()) {
            if (!f.canRead() || !f.canExecute()) {
                System.err.println("Warning: Skipping directory " + f.getPath() + " due to lack of read or execute permission");
            } else {
                // Advance the depth counter so the recursion cap at the top of this method can take effect.
                retrieveSnapshotFilesInternal(f, namedSnapshots, filter, validate, stype, logger, recursion + 1);
            }
            continue;
        }
        if (!f.canRead()) {
            System.err.println("Warning: " + f.getPath() + " is not readable");
            continue;
        }
        FileInputStream fis = null;
        try {
            fis = new FileInputStream(f);
        } catch (FileNotFoundException e1) {
            System.err.println(e1.getMessage());
            continue;
        }
        try {
            if (f.getName().endsWith(".digest")) {
                JSONObject digest = CRCCheck(f, logger);
                if (digest == null)
                    continue;
                Long snapshotTxnId = digest.getLong("txnId");
                String nonce = parseNonceFromSnapshotFilename(f.getName());
                Snapshot named_s = namedSnapshots.get(nonce);
                named_s.setTxnId(snapshotTxnId);
                InstanceId iid = new InstanceId(0, 0);
                if (digest.has("instanceId")) {
                    iid = new InstanceId(digest.getJSONObject("instanceId"));
                }
                named_s.setInstanceId(iid);
                TreeSet<String> tableSet = new TreeSet<String>();
                JSONArray tables = digest.getJSONArray("tables");
                for (int ii = 0; ii < tables.length(); ii++) {
                    tableSet.add(tables.getString(ii));
                }
                named_s.m_digestTables.add(tableSet);
                named_s.m_digests.add(f);
            } else if (f.getName().endsWith(".jar")) {
                String nonce = parseNonceFromSnapshotFilename(f.getName());
                Snapshot named_s = namedSnapshots.get(nonce);
                named_s.m_catalogFile = f;
            } else if (f.getName().endsWith(HASH_EXTENSION)) {
                String nonce = parseNonceFromSnapshotFilename(f.getName());
                Snapshot named_s = namedSnapshots.get(nonce);
                if (validate) {
                    try {
                        // Retrieve hashinator config data for validation only.
                        // Throws IOException when the CRC check fails.
                        HashinatorSnapshotData hashData = new HashinatorSnapshotData();
                        hashData.restoreFromFile(f);
                        named_s.m_hashConfig = f;
                    } catch (IOException e) {
                        logger.warn(String.format("Skipping bad hashinator snapshot file '%s'", f.getPath()));
                        // Skip bad hashinator files.
                        continue;
                    }
                }
            } else {
                HashSet<Integer> partitionIds = new HashSet<Integer>();
                TableSaveFile saveFile = new TableSaveFile(fis, 1, null, true);
                try {
                    for (Integer partitionId : saveFile.getPartitionIds()) {
                        partitionIds.add(partitionId);
                    }
                    if (validate && saveFile.getCompleted()) {
                        while (saveFile.hasMoreChunks()) {
                            BBContainer cont = saveFile.getNextChunk();
                            if (cont != null) {
                                cont.discard();
                            }
                        }
                    }
                    partitionIds.removeAll(saveFile.getCorruptedPartitionIds());
                    String nonce = parseNonceFromSnapshotFilename(f.getName());
                    Snapshot named_s = namedSnapshots.get(nonce);
                    named_s.setTxnId(saveFile.getTxnId());
                    TableFiles namedTableFiles = named_s.m_tableFiles.get(saveFile.getTableName());
                    if (namedTableFiles == null) {
                        namedTableFiles = new TableFiles(saveFile.isReplicated());
                        named_s.m_tableFiles.put(saveFile.getTableName(), namedTableFiles);
                    }
                    namedTableFiles.m_files.add(f);
                    namedTableFiles.m_completed.add(saveFile.getCompleted());
                    namedTableFiles.m_validPartitionIds.add(partitionIds);
                    namedTableFiles.m_corruptParititionIds.add(saveFile.getCorruptedPartitionIds());
                    namedTableFiles.m_totalPartitionCounts.add(saveFile.getTotalPartitions());
                } finally {
                    saveFile.close();
                }
            }
        } catch (IOException e) {
            System.err.println(e.getMessage());
            System.err.println("Error: Unable to process " + f.getPath());
        } catch (JSONException e) {
            System.err.println(e.getMessage());
            System.err.println("Error: Unable to process " + f.getPath());
        } finally {
            try {
                if (fis != null) {
                    fis.close();
                }
            } catch (IOException e) {
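                // Ignore failures while closing the stream.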
            }
        }
    }
}
Also used: InstanceId (org.voltcore.utils.InstanceId), FileNotFoundException (java.io.FileNotFoundException), JSONArray (org.json_voltpatches.JSONArray), JSONException (org.json_voltpatches.JSONException), IOException (java.io.IOException), FileInputStream (java.io.FileInputStream), JSONObject (org.json_voltpatches.JSONObject), TreeSet (java.util.TreeSet), BBContainer (org.voltcore.utils.DBBPool.BBContainer), VoltFile (org.voltdb.utils.VoltFile), File (java.io.File), HashSet (java.util.HashSet)
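
The depth counter passed on each recursive call caps how far the scan descends (32 levels), which also bounds the damage from symlinked directory cycles. A standalone sketch of the same guard pattern, with hypothetical names:

import java.io.File;

public class DepthLimitedWalker {
    private static final int MAX_DEPTH = 32;

    // Visit every readable file under dir, refusing to descend more than
    // MAX_DEPTH directory levels below the starting point.
    static void walk(File dir, int depth) {
        if (depth == MAX_DEPTH || !dir.canRead() || !dir.canExecute()) {
            return;
        }
        File[] children = dir.listFiles();
        if (children == null) {
            return;
        }
        for (File f : children) {
            if (f.isDirectory()) {
                walk(f, depth + 1); // pass an incremented depth on every descent
            } else if (f.canRead()) {
                System.out.println(f.getPath());
            }
        }
    }

    public static void main(String[] args) {
        walk(new File(args[0]), 0);
    }
}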

Example 3 with InstanceId

Use of org.voltcore.utils.InstanceId in project voltdb by VoltDB.

In the class HashinatorSnapshotData, the method restoreFromBuffer:

/**
     * Restore and check hashinator config data.
     * @param buf input buffer
     * @return instance ID read from buffer
     * @throws IOException on failure
     */
public InstanceId restoreFromBuffer(ByteBuffer buf) throws IOException {
    buf.rewind();
    // Assumes config data is the last field.
    int dataSize = buf.remaining() - OFFSET_DATA;
    if (dataSize <= 0) {
        throw new IOException("Hashinator snapshot data is too small.");
    }
    // Get the CRC, zero out its buffer field, and compare to calculated CRC.
    long crcHeader = buf.getLong(OFFSET_CRC);
    buf.putLong(OFFSET_CRC, 0);
    final PureJavaCrc32 crcBuffer = new PureJavaCrc32();
    assert (buf.hasArray());
    crcBuffer.update(buf.array());
    if (crcHeader != crcBuffer.getValue()) {
        throw new IOException("Hashinator snapshot data CRC mismatch.");
    }
    // Slurp the data.
    int coord = buf.getInt(OFFSET_INSTID_COORD);
    long timestamp = buf.getLong(OFFSET_INSTID_TIMESTAMP);
    InstanceId instId = new InstanceId(coord, timestamp);
    m_version = buf.getLong(OFFSET_VERSION);
    m_serData = new byte[dataSize];
    buf.position(OFFSET_DATA);
    buf.get(m_serData);
    return instId;
}
Also used: PureJavaCrc32 (org.apache.hadoop_voltpatches.util.PureJavaCrc32), InstanceId (org.voltcore.utils.InstanceId), IOException (java.io.IOException)
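
The reads above imply the buffer layout: a CRC (computed with its own field zeroed), the InstanceId coordinator and timestamp, a version, and the serialized config running to the end of the buffer. A sketch of plausible OFFSET_* values, assuming the header fields are packed contiguously in that order (the real constants are not shown here):

// Hypothetical offsets; assumes a contiguous header with no padding.
static final int OFFSET_CRC = 0;               // long: CRC over the buffer with this field zeroed
static final int OFFSET_INSTID_COORD = 8;      // int:  InstanceId coordinator
static final int OFFSET_INSTID_TIMESTAMP = 12; // long: InstanceId timestamp
static final int OFFSET_VERSION = 20;          // long: hashinator version
static final int OFFSET_DATA = 28;             // start of serialized config bytes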

Example 4 with InstanceId

Use of org.voltcore.utils.InstanceId in project voltdb by VoltDB.

In the class RestoreAgent, the method checkSnapshotIsComplete:

private SnapshotInfo checkSnapshotIsComplete(Long key, Snapshot s) {
    int partitionCount = -1;
    for (TableFiles tf : s.m_tableFiles.values()) {
        // Check if the snapshot is complete
        if (tf.m_completed.stream().anyMatch(b -> !b)) {
            m_snapshotErrLogStr.append("\nRejected snapshot ").append(s.getNonce()).append(" because it was not completed.");
            return null;
        }
        // Replicated table doesn't check partition count
        if (tf.m_isReplicated) {
            continue;
        }
        // Everyone has to agree on the total partition count
        for (int count : tf.m_totalPartitionCounts) {
            if (partitionCount == -1) {
                partitionCount = count;
            } else if (count != partitionCount) {
                m_snapshotErrLogStr.append("\nRejected snapshot ").append(s.getNonce()).append(" because it had the wrong partition count ").append(count).append(", expecting ").append(partitionCount);
                return null;
            }
        }
    }
    if (s.m_digests.isEmpty()) {
        m_snapshotErrLogStr.append("\nRejected snapshot ").append(s.getNonce()).append(" because it had no valid digest file.");
        return null;
    }
    File digest = s.m_digests.get(0);
    Long catalog_crc = null;
    Map<Integer, Long> pidToTxnMap = new TreeMap<Integer, Long>();
    Set<String> digestTableNames = new HashSet<String>();
    // Create a valid but meaningless InstanceId to support snapshots from versions that predate InstanceId checking.
    InstanceId instanceId = new InstanceId(0, 0);
    int newPartitionCount = -1;
    try {
        JSONObject digest_detail = SnapshotUtil.CRCCheck(digest, LOG);
        if (digest_detail == null)
            throw new IOException("CRC check failed for digest file " + digest.getPath());
        catalog_crc = digest_detail.getLong("catalogCRC");
        if (digest_detail.has("partitionTransactionIds")) {
            JSONObject pidToTxnId = digest_detail.getJSONObject("partitionTransactionIds");
            Iterator<String> it = pidToTxnId.keys();
            while (it.hasNext()) {
                String pidkey = it.next();
                Long txnidval = pidToTxnId.getLong(pidkey);
                pidToTxnMap.put(Integer.valueOf(pidkey), txnidval);
            }
        }
        if (digest_detail.has("instanceId")) {
            instanceId = new InstanceId(digest_detail.getJSONObject("instanceId"));
        }
        if (digest_detail.has("newPartitionCount")) {
            newPartitionCount = digest_detail.getInt("newPartitionCount");
        }
        if (digest_detail.has("tables")) {
            JSONArray tableObj = digest_detail.getJSONArray("tables");
            for (int i = 0; i < tableObj.length(); i++) {
                digestTableNames.add(tableObj.getString(i));
            }
        }
    } catch (IOException ioe) {
        m_snapshotErrLogStr.append("\nUnable to read digest file: ").append(digest.getAbsolutePath()).append(" due to: ").append(ioe.getMessage());
        return null;
    } catch (JSONException je) {
        m_snapshotErrLogStr.append("\nUnable to extract catalog CRC from digest: ").append(digest.getAbsolutePath()).append(" due to: ").append(je.getMessage());
        return null;
    }
    if (s.m_catalogFile == null) {
        m_snapshotErrLogStr.append("\nRejected snapshot ").append(s.getNonce()).append(" because it had no catalog.");
        return null;
    }
    try {
        byte[] bytes = MiscUtils.fileToBytes(s.m_catalogFile);
        InMemoryJarfile jarfile = CatalogUtil.loadInMemoryJarFile(bytes);
        if (jarfile.getCRC() != catalog_crc) {
            m_snapshotErrLogStr.append("\nRejected snapshot ").append(s.getNonce()).append(" because catalog CRC did not match digest.");
            return null;
        }
        // Make sure this is not a partial snapshot.
        // Compare digestTableNames with all normal table names in catalog file.
        // A normal table is one that's NOT a materialized view, nor an export table.
        Set<String> catalogNormalTableNames = CatalogUtil.getNormalTableNamesFromInMemoryJar(jarfile);
        if (!catalogNormalTableNames.equals(digestTableNames)) {
            m_snapshotErrLogStr.append("\nRejected snapshot ").append(s.getNonce()).append(" because this is a partial snapshot.");
            return null;
        }
    } catch (IOException ioe) {
        m_snapshotErrLogStr.append("\nRejected snapshot ").append(s.getNonce()).append(" because catalog file could not be validated");
        return null;
    }
    SnapshotInfo info = new SnapshotInfo(key, digest.getParent(), SnapshotUtil.parseNonceFromDigestFilename(digest.getName()), partitionCount, newPartitionCount, catalog_crc, m_hostId, instanceId, digestTableNames, s.m_stype);
    // populate table to partition map.
    for (Entry<String, TableFiles> te : s.m_tableFiles.entrySet()) {
        TableFiles tableFile = te.getValue();
        HashSet<Integer> ids = new HashSet<Integer>();
        for (Set<Integer> idSet : tableFile.m_validPartitionIds) {
            ids.addAll(idSet);
        }
        if (!tableFile.m_isReplicated) {
            info.partitions.put(te.getKey(), ids);
        }
        // keep track of tables for which we've seen files while we're here
        info.fileTables.add(te.getKey());
    }
    info.setPidToTxnIdMap(pidToTxnMap);
    return info;
}
Also used: InstanceId (org.voltcore.utils.InstanceId), JSONArray (org.json_voltpatches.JSONArray), TableFiles (org.voltdb.sysprocs.saverestore.SnapshotUtil.TableFiles), JSONException (org.json_voltpatches.JSONException), IOException (java.io.IOException), TreeMap (java.util.TreeMap), JSONObject (org.json_voltpatches.JSONObject), InMemoryJarfile (org.voltdb.utils.InMemoryJarfile), File (java.io.File), HashSet (java.util.HashSet)
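
Two readers consume the same digest JSON: retrieveSnapshotFilesInternal() in Example 2 and checkSnapshotIsComplete() above. Collecting the keys they probe gives a minimal sketch of a digest both would accept (illustrative values only; the "coord"/"timestamp" keys are an assumption about the InstanceId JSON constructor, and real digests may carry more fields):

// Minimal digest covering every key the readers above look for.
static JSONObject exampleDigest() throws JSONException {
    JSONObject digest = new JSONObject();
    digest.put("txnId", 12345L);
    digest.put("catalogCRC", 987654321L);
    digest.put("instanceId", new JSONObject().put("coord", 0).put("timestamp", 0L));
    digest.put("newPartitionCount", 8);
    digest.put("partitionTransactionIds", new JSONObject().put("0", 12345L));
    digest.put("tables", new JSONArray().put("CUSTOMERS").put("ORDERS"));
    return digest;
}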

Example 5 with InstanceId

Use of org.voltcore.utils.InstanceId in project voltdb by VoltDB.

In the class NativeSnapshotWritePlan, the method createFileBasedCompletionTasks:

static void createFileBasedCompletionTasks(String file_path, String pathType, String file_nonce, long txnId, Map<Integer, Long> partitionTransactionIds, SystemProcedureExecutionContext context, ExtensibleSnapshotDigestData extraSnapshotData, HashinatorSnapshotData hashinatorData, long timestamp, int newPartitionCount, Table[] tables) throws IOException {
    InstanceId instId = VoltDB.instance().getHostMessenger().getInstanceId();
    Runnable completionTask = SnapshotUtil.writeSnapshotDigest(txnId, context.getCatalogCRC(), file_path, pathType, file_nonce, Arrays.asList(tables), context.getHostId(), partitionTransactionIds, extraSnapshotData, instId, timestamp, newPartitionCount, context.getClusterId());
    if (completionTask != null) {
        SnapshotSiteProcessor.m_tasksOnSnapshotCompletion.offer(completionTask);
    }
    if (hashinatorData != null) {
        completionTask = SnapshotUtil.writeHashinatorConfig(instId, file_path, file_nonce, context.getHostId(), hashinatorData);
        if (completionTask != null) {
            SnapshotSiteProcessor.m_tasksOnSnapshotCompletion.offer(completionTask);
        }
    }
    completionTask = SnapshotUtil.writeSnapshotCatalog(file_path, file_nonce);
    if (completionTask != null) {
        SnapshotSiteProcessor.m_tasksOnSnapshotCompletion.offer(completionTask);
    }
    completionTask = SnapshotUtil.writeSnapshotCompletion(file_path, file_nonce, context.getHostId(), SNAP_LOG);
    if (completionTask != null) {
        SnapshotSiteProcessor.m_tasksOnSnapshotCompletion.offer(completionTask);
    }
    if (extraSnapshotData.getTerminus() != 0L) {
        completionTask = SnapshotUtil.writeTerminusMarker(file_nonce, context.getPaths(), SNAP_LOG);
        SnapshotSiteProcessor.m_tasksOnSnapshotCompletion.offer(completionTask);
    }
}
Also used: InstanceId (org.voltcore.utils.InstanceId)
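
Note the pattern: every write step returns a Runnable that is deferred onto SnapshotSiteProcessor.m_tasksOnSnapshotCompletion rather than being run inline. A minimal sketch of that defer-then-drain queue, with hypothetical names:

import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;

public class CompletionTasks {
    // Tasks deferred during snapshot writing and run once the snapshot completes.
    static final Queue<Runnable> tasksOnCompletion = new ConcurrentLinkedQueue<>();

    static void enqueue(Runnable task) {
        if (task != null) {
            tasksOnCompletion.offer(task);
        }
    }

    // Drain and run every deferred task when the snapshot finishes.
    static void onSnapshotCompleted() {
        Runnable task;
        while ((task = tasksOnCompletion.poll()) != null) {
            task.run();
        }
    }
}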

Aggregations

InstanceId (org.voltcore.utils.InstanceId): 6
IOException (java.io.IOException): 4
JSONException (org.json_voltpatches.JSONException): 3
JSONObject (org.json_voltpatches.JSONObject): 3
File (java.io.File): 2
HashSet (java.util.HashSet): 2
JSONArray (org.json_voltpatches.JSONArray): 2
FileInputStream (java.io.FileInputStream): 1
FileNotFoundException (java.io.FileNotFoundException): 1
SocketException (java.net.SocketException): 1
ByteBuffer (java.nio.ByteBuffer): 1
TreeMap (java.util.TreeMap): 1
TreeSet (java.util.TreeSet): 1
ExecutionException (java.util.concurrent.ExecutionException): 1
PureJavaCrc32 (org.apache.hadoop_voltpatches.util.PureJavaCrc32): 1
KeeperException (org.apache.zookeeper_voltpatches.KeeperException): 1
Test (org.junit.Test): 1
BBContainer (org.voltcore.utils.DBBPool.BBContainer): 1
HashinatorSnapshotData (org.voltdb.sysprocs.saverestore.HashinatorSnapshotData): 1
TableFiles (org.voltdb.sysprocs.saverestore.SnapshotUtil.TableFiles): 1