Example 11 with TableNotFoundException

Use of org.apache.accumulo.core.client.TableNotFoundException in project accumulo by apache.

From the class TableChangeStateIT, method findFate.

/**
 * Checks the FATE transactions in ZooKeeper, looking for one associated with a compaction. This double-checks that the test is valid, because the running
 * compaction holds a FATE transaction lock.
 *
 * @return true if a corresponding fate transaction is found, false otherwise
 */
private boolean findFate(final String tableName) {
    Instance instance = connector.getInstance();
    AdminUtil<String> admin = new AdminUtil<>(false);
    try {
        Table.ID tableId = Tables.getTableId(instance, tableName);
        log.trace("tid: {}", tableId);
        String secret = cluster.getSiteConfiguration().get(Property.INSTANCE_SECRET);
        IZooReaderWriter zk = new ZooReaderWriterFactory().getZooReaderWriter(instance.getZooKeepers(), instance.getZooKeepersSessionTimeOut(), secret);
        ZooStore<String> zs = new ZooStore<>(ZooUtil.getRoot(instance) + Constants.ZFATE, zk);
        AdminUtil.FateStatus fateStatus = admin.getStatus(zs, zk, ZooUtil.getRoot(instance) + Constants.ZTABLE_LOCKS + "/" + tableId, null, null);
        for (AdminUtil.TransactionStatus tx : fateStatus.getTransactions()) {
            if (tx.getTop().contains("CompactionDriver") && tx.getDebug().contains("CompactRange")) {
                return true;
            }
        }
    } catch (KeeperException | TableNotFoundException | InterruptedException ex) {
        throw new IllegalStateException(ex);
    }
    // did not find an appropriate fate transaction for the compaction.
    return false;
}
Also used : Table(org.apache.accumulo.core.client.impl.Table) Instance(org.apache.accumulo.core.client.Instance) ZooStore(org.apache.accumulo.fate.ZooStore) ZooReaderWriterFactory(org.apache.accumulo.server.zookeeper.ZooReaderWriterFactory) AdminUtil(org.apache.accumulo.fate.AdminUtil) TableNotFoundException(org.apache.accumulo.core.client.TableNotFoundException) IZooReaderWriter(org.apache.accumulo.fate.zookeeper.IZooReaderWriter) KeeperException(org.apache.zookeeper.KeeperException)
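
The helper above only reports whether a matching FATE transaction exists at the instant it runs. Below is a minimal sketch of how a test might poll findFate until the compaction's transaction appears; the method name, timeout, and poll interval are illustrative assumptions, not part of the original test.

// Hypothetical polling wrapper around findFate(); maxWaitMs and the 250 ms
// interval are placeholder values.
private void waitForCompactionFate(final String tableName) throws InterruptedException {
    final long maxWaitMs = 30_000;
    final long deadline = System.currentTimeMillis() + maxWaitMs;
    while (!findFate(tableName)) {
        if (System.currentTimeMillis() > deadline) {
            throw new IllegalStateException("no compaction fate transaction found for " + tableName);
        }
        Thread.sleep(250);
    }
}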

Example 12 with TableNotFoundException

Use of org.apache.accumulo.core.client.TableNotFoundException in project accumulo by apache.

From the class TableChangeStateIT, method createData.

/**
 * Creates the provided table and populates it with some data using a batch writer. The table is then scanned to verify it was populated as expected.
 *
 * @param tableName
 *          the name of the table
 */
private void createData(final String tableName) {
    try {
        // create table.
        connector.tableOperations().create(tableName);
        BatchWriter bw = connector.createBatchWriter(tableName, new BatchWriterConfig());
        // populate
        for (int i = 0; i < NUM_ROWS; i++) {
            Mutation m = new Mutation(new Text(String.format("%05d", i)));
            m.put(new Text("col" + Integer.toString((i % 3) + 1)), new Text("qual"), new Value("junk".getBytes(UTF_8)));
            bw.addMutation(m);
        }
        bw.close();
        long startTimestamp = System.nanoTime();
        try (Scanner scanner = connector.createScanner(tableName, Authorizations.EMPTY)) {
            int count = 0;
            for (Map.Entry<Key, Value> elt : scanner) {
                String expected = String.format("%05d", count);
                assert (elt.getKey().getRow().toString().equals(expected));
                count++;
            }
            log.trace("Scan time for {} rows {} ms", NUM_ROWS, TimeUnit.MILLISECONDS.convert((System.nanoTime() - startTimestamp), TimeUnit.NANOSECONDS));
            if (count != NUM_ROWS) {
                throw new IllegalStateException(String.format("Number of rows %1$d does not match expected %2$d", count, NUM_ROWS));
            }
        }
    } catch (AccumuloException | AccumuloSecurityException | TableNotFoundException | TableExistsException ex) {
        throw new IllegalStateException("Create data failed with exception", ex);
    }
}
Also used : Scanner(org.apache.accumulo.core.client.Scanner) AccumuloException(org.apache.accumulo.core.client.AccumuloException) Text(org.apache.hadoop.io.Text) TableNotFoundException(org.apache.accumulo.core.client.TableNotFoundException) Value(org.apache.accumulo.core.data.Value) TableExistsException(org.apache.accumulo.core.client.TableExistsException) BatchWriterConfig(org.apache.accumulo.core.client.BatchWriterConfig) AccumuloSecurityException(org.apache.accumulo.core.client.AccumuloSecurityException) BatchWriter(org.apache.accumulo.core.client.BatchWriter) Mutation(org.apache.accumulo.core.data.Mutation) Map(java.util.Map) Key(org.apache.accumulo.core.data.Key)
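
One caveat about the verification above: it uses the Java assert keyword, which is silently skipped unless the JVM runs with -ea. Here is a hedged variant of the same scan using JUnit assertions, which always run (it assumes org.junit.Assert is available, as in the surrounding tests):

// Same scan as above, but failures surface regardless of JVM flags.
try (Scanner scanner = connector.createScanner(tableName, Authorizations.EMPTY)) {
    int count = 0;
    for (Map.Entry<Key, Value> entry : scanner) {
        org.junit.Assert.assertEquals(String.format("%05d", count), entry.getKey().getRow().toString());
        count++;
    }
    org.junit.Assert.assertEquals(NUM_ROWS, count);
}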

Example 13 with TableNotFoundException

Use of org.apache.accumulo.core.client.TableNotFoundException in project accumulo by apache.

From the class ScanIdIT, method testScanId.

/**
 * @throws Exception
 *           any exception is a test failure.
 */
@Test
public void testScanId() throws Exception {
    final String tableName = getUniqueNames(1)[0];
    Connector conn = getConnector();
    conn.tableOperations().create(tableName);
    addSplits(conn, tableName);
    log.info("Splits added");
    generateSampleData(conn, tableName);
    log.info("Generated data for {}", tableName);
    attachSlowIterator(conn, tableName);
    CountDownLatch latch = new CountDownLatch(NUM_SCANNERS);
    for (int scannerIndex = 0; scannerIndex < NUM_SCANNERS; scannerIndex++) {
        ScannerThread st = new ScannerThread(conn, scannerIndex, tableName, latch);
        pool.submit(st);
    }
    // wait for scanners to report a result.
    while (testInProgress.get()) {
        if (resultsByWorker.size() < NUM_SCANNERS) {
            log.trace("Results reported {}", resultsByWorker.size());
            sleepUninterruptibly(750, TimeUnit.MILLISECONDS);
        } else {
            // each worker has reported at least one result.
            testInProgress.set(false);
            log.debug("Final result count {}", resultsByWorker.size());
            // delay to allow scanners to react to end of test and cleanly close.
            sleepUninterruptibly(1, TimeUnit.SECONDS);
        }
    }
    // all scanners have reported at least 1 result, so check for unique scan ids.
    Set<Long> scanIds = new HashSet<>();
    List<String> tservers = conn.instanceOperations().getTabletServers();
    log.debug("tablet servers {}", tservers.toString());
    for (String tserver : tservers) {
        List<ActiveScan> activeScans = null;
        for (int i = 0; i < 10; i++) {
            try {
                activeScans = conn.instanceOperations().getActiveScans(tserver);
                break;
            } catch (AccumuloException e) {
                if (e.getCause() instanceof TableNotFoundException) {
                    log.debug("Got TableNotFoundException, will retry");
                    Thread.sleep(200);
                    continue;
                }
                throw e;
            }
        }
        assertNotNull("Repeatedly got exception trying to active scans", activeScans);
        log.debug("TServer {} has {} active scans", tserver, activeScans.size());
        for (ActiveScan scan : activeScans) {
            log.debug("Tserver {} scan id {}", tserver, scan.getScanid());
            scanIds.add(scan.getScanid());
        }
    }
    assertTrue("Expected at least " + NUM_SCANNERS + " scanIds, but saw " + scanIds.size(), NUM_SCANNERS <= scanIds.size());
}
Also used : Connector(org.apache.accumulo.core.client.Connector) AccumuloException(org.apache.accumulo.core.client.AccumuloException) ActiveScan(org.apache.accumulo.core.client.admin.ActiveScan) CountDownLatch(java.util.concurrent.CountDownLatch) TableNotFoundException(org.apache.accumulo.core.client.TableNotFoundException) HashSet(java.util.HashSet) Test(org.junit.Test)
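
The retry loop in testScanId handles a transient race: getActiveScans can throw an AccumuloException whose cause is a TableNotFoundException before the tablet server has registered the new table. A generic sketch of the same pattern extracted into a helper; the method name, attempt count, and sleep are assumptions (imports match the surrounding example, plus java.util.List and AccumuloSecurityException):

// Hypothetical helper: retry while the cause is a transient
// TableNotFoundException, rethrowing anything else.
private static List<ActiveScan> getActiveScansWithRetry(Connector conn, String tserver)
        throws AccumuloException, AccumuloSecurityException, InterruptedException {
    final int maxAttempts = 10;
    for (int attempt = 0; attempt < maxAttempts; attempt++) {
        try {
            return conn.instanceOperations().getActiveScans(tserver);
        } catch (AccumuloException e) {
            if (!(e.getCause() instanceof TableNotFoundException)) {
                throw e;
            }
            Thread.sleep(200); // transient: the table may not be registered yet
        }
    }
    throw new IllegalStateException("no active scans from " + tserver + " after " + maxAttempts + " attempts");
}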

Example 14 with TableNotFoundException

Use of org.apache.accumulo.core.client.TableNotFoundException in project accumulo by apache.

From the class ContinuousIngest, method main.

public static void main(String[] args) throws Exception {
    ContinuousOpts opts = new ContinuousOpts();
    BatchWriterOpts bwOpts = new BatchWriterOpts();
    ClientOnDefaultTable clientOpts = new ClientOnDefaultTable("ci");
    clientOpts.parseArgs(ContinuousIngest.class.getName(), args, bwOpts, opts);
    initVisibilities(opts);
    if (opts.min < 0 || opts.max < 0 || opts.max <= opts.min) {
        throw new IllegalArgumentException("bad min and max");
    }
    Connector conn = clientOpts.getConnector();
    if (!conn.tableOperations().exists(clientOpts.getTableName())) {
        throw new TableNotFoundException(null, clientOpts.getTableName(), "Consult the README and create the table before starting ingest.");
    }
    BatchWriter bw = conn.createBatchWriter(clientOpts.getTableName(), bwOpts.getBatchWriterConfig());
    bw = Trace.wrapAll(bw, new CountSampler(1024));
    Random r = new Random();
    byte[] ingestInstanceId = UUID.randomUUID().toString().getBytes(UTF_8);
    System.out.printf("UUID %d %s%n", System.currentTimeMillis(), new String(ingestInstanceId, UTF_8));
    long count = 0;
    final int flushInterval = 1000000;
    final int maxDepth = 25;
    // We always want to point back to flushed data, so the previous item should
    // always exist in Accumulo when verifying data. To do this, make insert N point
    // back to the row from insert (N - flushInterval). The arrays below keep track
    // of this.
    long[] prevRows = new long[flushInterval];
    long[] firstRows = new long[flushInterval];
    int[] firstColFams = new int[flushInterval];
    int[] firstColQuals = new int[flushInterval];
    long lastFlushTime = System.currentTimeMillis();
    out: while (true) {
        // generate first set of nodes
        ColumnVisibility cv = getVisibility(r);
        for (int index = 0; index < flushInterval; index++) {
            long rowLong = genLong(opts.min, opts.max, r);
            prevRows[index] = rowLong;
            firstRows[index] = rowLong;
            int cf = r.nextInt(opts.maxColF);
            int cq = r.nextInt(opts.maxColQ);
            firstColFams[index] = cf;
            firstColQuals[index] = cq;
            Mutation m = genMutation(rowLong, cf, cq, cv, ingestInstanceId, count, null, r, opts.checksum);
            count++;
            bw.addMutation(m);
        }
        lastFlushTime = flush(bw, count, flushInterval, lastFlushTime);
        if (count >= opts.num)
            break out;
        // generate subsequent sets of nodes that link to previous set of nodes
        for (int depth = 1; depth < maxDepth; depth++) {
            for (int index = 0; index < flushInterval; index++) {
                long rowLong = genLong(opts.min, opts.max, r);
                byte[] prevRow = genRow(prevRows[index]);
                prevRows[index] = rowLong;
                Mutation m = genMutation(rowLong, r.nextInt(opts.maxColF), r.nextInt(opts.maxColQ), cv, ingestInstanceId, count, prevRow, r, opts.checksum);
                count++;
                bw.addMutation(m);
            }
            lastFlushTime = flush(bw, count, flushInterval, lastFlushTime);
            if (count >= opts.num)
                break out;
        }
        // create one big linked list; this makes all the first inserts point to something
        for (int index = 0; index < flushInterval - 1; index++) {
            Mutation m = genMutation(firstRows[index], firstColFams[index], firstColQuals[index], cv, ingestInstanceId, count, genRow(prevRows[index + 1]), r, opts.checksum);
            count++;
            bw.addMutation(m);
        }
        lastFlushTime = flush(bw, count, flushInterval, lastFlushTime);
        if (count >= opts.num)
            break out;
    }
    bw.close();
    clientOpts.stopTracing();
}
Also used : Connector(org.apache.accumulo.core.client.Connector) ClientOnDefaultTable(org.apache.accumulo.core.cli.ClientOnDefaultTable) TableNotFoundException(org.apache.accumulo.core.client.TableNotFoundException) CountSampler(org.apache.accumulo.core.trace.CountSampler) Random(java.util.Random) BatchWriterOpts(org.apache.accumulo.core.cli.BatchWriterOpts) BatchWriter(org.apache.accumulo.core.client.BatchWriter) ColumnVisibility(org.apache.accumulo.core.security.ColumnVisibility) Mutation(org.apache.accumulo.core.data.Mutation)
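
The flush helper called throughout main is not shown in this excerpt. A plausible reconstruction based on how it is invoked; the timing printout format is an assumption:

// Hypothetical flush(): push buffered mutations to the tablet servers,
// report timing, and return the new last-flush timestamp.
private static long flush(BatchWriter bw, long count, int flushInterval, long lastFlushTime)
        throws MutationsRejectedException {
    long t1 = System.currentTimeMillis();
    bw.flush();
    long t2 = System.currentTimeMillis();
    System.out.printf("FLUSH %d %d %d %d %d%n", t2, (t2 - lastFlushTime), (t2 - t1), count, flushInterval);
    return t2;
}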

Example 15 with TableNotFoundException

Use of org.apache.accumulo.core.client.TableNotFoundException in project incubator-rya by apache.

From the class AccumuloIndexSetProvider, method getIndices.

@Override
protected List<ExternalTupleSet> getIndices() throws PcjIndexSetException {
    requireNonNull(conf);
    try {
        final String tablePrefix = requireNonNull(conf.get(RdfCloudTripleStoreConfiguration.CONF_TBL_PREFIX));
        final Connector conn = requireNonNull(ConfigUtils.getConnector(conf));
        List<String> tables = null;
        if (conf instanceof RdfCloudTripleStoreConfiguration) {
            tables = ((RdfCloudTripleStoreConfiguration) conf).getPcjTables();
        }
        // this map associates each PCJ table name with its PCJ SPARQL query
        final Map<String, String> indexTables = Maps.newLinkedHashMap();
        try (final PrecomputedJoinStorage storage = new AccumuloPcjStorage(conn, tablePrefix)) {
            final PcjTableNameFactory pcjFactory = new PcjTableNameFactory();
            final boolean tablesProvided = tables != null && !tables.isEmpty();
            if (tablesProvided) {
                // if tables provided, associate table name with sparql
                for (final String table : tables) {
                    indexTables.put(table, storage.getPcjMetadata(pcjFactory.getPcjId(table)).getSparql());
                }
            } else if (hasRyaDetails(tablePrefix, conn)) {
                // If this is a newer install of Rya, and it has PCJ Details, then
                // use those.
                final List<String> ids = storage.listPcjs();
                for (final String id : ids) {
                    indexTables.put(pcjFactory.makeTableName(tablePrefix, id), storage.getPcjMetadata(id).getSparql());
                }
            } else {
                // Otherwise figure it out by scanning tables.
                final PcjTables pcjTables = new PcjTables();
                for (final String table : conn.tableOperations().list()) {
                    if (table.startsWith(tablePrefix + "INDEX")) {
                        indexTables.put(table, pcjTables.getPcjMetadata(conn, table).getSparql());
                    }
                }
            }
        }
        // use the table-name-to-SPARQL map (indexTables) to create AccumuloIndexSets
        final List<ExternalTupleSet> index = Lists.newArrayList();
        if (indexTables.isEmpty()) {
            log.info("No Index found");
        } else {
            for (final String table : indexTables.keySet()) {
                final String indexSparqlString = indexTables.get(table);
                index.add(new AccumuloIndexSet(indexSparqlString, conf, table));
            }
        }
        return index;
    } catch (final PCJStorageException | AccumuloException | AccumuloSecurityException | MalformedQueryException | SailException | QueryEvaluationException | TableNotFoundException e) {
        throw new PcjIndexSetException("Failed to retrieve the indices.", e);
    }
}
Also used : Connector(org.apache.accumulo.core.client.Connector) AccumuloException(org.apache.accumulo.core.client.AccumuloException) AccumuloPcjStorage(org.apache.rya.indexing.pcj.storage.accumulo.AccumuloPcjStorage) AccumuloIndexSet(org.apache.rya.indexing.external.tupleSet.AccumuloIndexSet) PcjTableNameFactory(org.apache.rya.indexing.pcj.storage.accumulo.PcjTableNameFactory) SailException(org.openrdf.sail.SailException) ExternalTupleSet(org.apache.rya.indexing.external.tupleSet.ExternalTupleSet) TableNotFoundException(org.apache.accumulo.core.client.TableNotFoundException) QueryEvaluationException(org.openrdf.query.QueryEvaluationException) PrecomputedJoinStorage(org.apache.rya.indexing.pcj.storage.PrecomputedJoinStorage) MalformedQueryException(org.openrdf.query.MalformedQueryException) List(java.util.List) AccumuloSecurityException(org.apache.accumulo.core.client.AccumuloSecurityException) PcjTables(org.apache.rya.indexing.pcj.storage.accumulo.PcjTables) PCJStorageException(org.apache.rya.indexing.pcj.storage.PrecomputedJoinStorage.PCJStorageException) RdfCloudTripleStoreConfiguration(org.apache.rya.api.RdfCloudTripleStoreConfiguration)
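
The hasRyaDetails helper used above is not shown in this excerpt. A minimal sketch of the check it could perform, assuming Rya instance details live in a well-known table under the instance prefix; the "instance_details" suffix is an assumption, and the real implementation may instead go through Rya's details repository API:

// Hypothetical check for a newer Rya install that stores instance details
// in a dedicated Accumulo table (the table name here is assumed).
private static boolean hasRyaDetails(final String tablePrefix, final Connector conn) {
    return conn.tableOperations().exists(tablePrefix + "instance_details");
}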

Aggregations

TableNotFoundException (org.apache.accumulo.core.client.TableNotFoundException): 160
AccumuloException (org.apache.accumulo.core.client.AccumuloException): 100
AccumuloSecurityException (org.apache.accumulo.core.client.AccumuloSecurityException): 89
Text (org.apache.hadoop.io.Text): 51
Value (org.apache.accumulo.core.data.Value): 46
Key (org.apache.accumulo.core.data.Key): 42
Scanner (org.apache.accumulo.core.client.Scanner): 36
IOException (java.io.IOException): 34
BatchWriter (org.apache.accumulo.core.client.BatchWriter): 31
BatchWriterConfig (org.apache.accumulo.core.client.BatchWriterConfig): 31
Connector (org.apache.accumulo.core.client.Connector): 30
Mutation (org.apache.accumulo.core.data.Mutation): 29
Range (org.apache.accumulo.core.data.Range): 28
Authorizations (org.apache.accumulo.core.security.Authorizations): 26
ArrayList (java.util.ArrayList): 25
Entry (java.util.Map.Entry): 25
TableExistsException (org.apache.accumulo.core.client.TableExistsException): 25
IteratorSetting (org.apache.accumulo.core.client.IteratorSetting): 23
MutationsRejectedException (org.apache.accumulo.core.client.MutationsRejectedException): 23
HashMap (java.util.HashMap): 19