Example 41 with BatchWriterConfig

Use of org.apache.accumulo.core.client.BatchWriterConfig in project accumulo by apache.

From the class UnorderedWorkAssignerReplicationIT, method dataWasReplicatedToThePeer:

@Test
public void dataWasReplicatedToThePeer() throws Exception {
    MiniAccumuloConfigImpl peerCfg = new MiniAccumuloConfigImpl(createTestDir(this.getClass().getName() + "_" + this.testName.getMethodName() + "_peer"), ROOT_PASSWORD);
    peerCfg.setNumTservers(1);
    peerCfg.setInstanceName("peer");
    updatePeerConfigFromPrimary(getCluster().getConfig(), peerCfg);
    peerCfg.setProperty(Property.REPLICATION_NAME, "peer");
    MiniAccumuloClusterImpl peerCluster = new MiniAccumuloClusterImpl(peerCfg);
    peerCluster.start();
    try {
        final Connector connMaster = getConnector();
        final Connector connPeer = peerCluster.getConnector("root", new PasswordToken(ROOT_PASSWORD));
        ReplicationTable.setOnline(connMaster);
        String peerUserName = "peer", peerPassword = "foo";
        String peerClusterName = "peer";
        connPeer.securityOperations().createLocalUser(peerUserName, new PasswordToken(peerPassword));
        connMaster.instanceOperations().setProperty(Property.REPLICATION_PEER_USER.getKey() + peerClusterName, peerUserName);
        connMaster.instanceOperations().setProperty(Property.REPLICATION_PEER_PASSWORD.getKey() + peerClusterName, peerPassword);
        // ...peer = AccumuloReplicaSystem,instanceName,zookeepers
        connMaster.instanceOperations().setProperty(Property.REPLICATION_PEERS.getKey() + peerClusterName, ReplicaSystemFactory.getPeerConfigurationValue(AccumuloReplicaSystem.class, AccumuloReplicaSystem.buildConfiguration(peerCluster.getInstanceName(), peerCluster.getZooKeepers())));
        final String masterTable = "master", peerTable = "peer";
        connMaster.tableOperations().create(masterTable);
        String masterTableId = connMaster.tableOperations().tableIdMap().get(masterTable);
        Assert.assertNotNull(masterTableId);
        connPeer.tableOperations().create(peerTable);
        String peerTableId = connPeer.tableOperations().tableIdMap().get(peerTable);
        Assert.assertNotNull(peerTableId);
        connPeer.securityOperations().grantTablePermission(peerUserName, peerTable, TablePermission.WRITE);
        // Replicate this table to the peerClusterName in a table with the peerTableId table id
        connMaster.tableOperations().setProperty(masterTable, Property.TABLE_REPLICATION.getKey(), "true");
        connMaster.tableOperations().setProperty(masterTable, Property.TABLE_REPLICATION_TARGET.getKey() + peerClusterName, peerTableId);
        // Wait for zookeeper updates (configuration) to propagate
        sleepUninterruptibly(3, TimeUnit.SECONDS);
        // Write some data to the master table
        BatchWriter bw = connMaster.createBatchWriter(masterTable, new BatchWriterConfig());
        for (int rows = 0; rows < 5000; rows++) {
            Mutation m = new Mutation(Integer.toString(rows));
            for (int cols = 0; cols < 100; cols++) {
                String value = Integer.toString(cols);
                m.put(value, "", value);
            }
            bw.addMutation(m);
        }
        bw.close();
        log.info("Wrote all data to master cluster");
        final Set<String> filesNeedingReplication = connMaster.replicationOperations().referencedFiles(masterTable);
        for (ProcessReference proc : cluster.getProcesses().get(ServerType.TABLET_SERVER)) {
            cluster.killProcess(ServerType.TABLET_SERVER, proc);
        }
        cluster.exec(TabletServer.class);
        log.info("TabletServer restarted");
        // Scanning the replication table blocks until the tablet server is back online
        Iterators.size(ReplicationTable.getScanner(connMaster).iterator());
        log.info("TabletServer is online");
        log.info("");
        log.info("Fetching metadata records:");
        for (Entry<Key, Value> kv : connMaster.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
            if (ReplicationSection.COLF.equals(kv.getKey().getColumnFamily())) {
                log.info("{} {}", kv.getKey().toStringNoTruncate(), ProtobufUtil.toString(Status.parseFrom(kv.getValue().get())));
            } else {
                log.info("{} {}", kv.getKey().toStringNoTruncate(), kv.getValue());
            }
        }
        log.info("");
        log.info("Fetching replication records:");
        for (Entry<Key, Value> kv : ReplicationTable.getScanner(connMaster)) {
            log.info("{} {}", kv.getKey().toStringNoTruncate(), ProtobufUtil.toString(Status.parseFrom(kv.getValue().get())));
        }
        Future<Boolean> future = executor.submit(new Callable<Boolean>() {

            @Override
            public Boolean call() throws Exception {
                connMaster.replicationOperations().drain(masterTable, filesNeedingReplication);
                log.info("Drain completed");
                return true;
            }
        });
        long timeoutSeconds = timeoutFactor * 30;
        try {
            future.get(timeoutSeconds, TimeUnit.SECONDS);
        } catch (TimeoutException e) {
            future.cancel(true);
            Assert.fail("Drain did not finish within " + timeoutSeconds + " seconds");
        }
        log.info("drain completed");
        log.info("");
        log.info("Fetching metadata records:");
        for (Entry<Key, Value> kv : connMaster.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
            if (ReplicationSection.COLF.equals(kv.getKey().getColumnFamily())) {
                log.info("{} {}", kv.getKey().toStringNoTruncate(), ProtobufUtil.toString(Status.parseFrom(kv.getValue().get())));
            } else {
                log.info("{} {}", kv.getKey().toStringNoTruncate(), kv.getValue());
            }
        }
        log.info("");
        log.info("Fetching replication records:");
        for (Entry<Key, Value> kv : ReplicationTable.getScanner(connMaster)) {
            log.info("{} {}", kv.getKey().toStringNoTruncate(), ProtobufUtil.toString(Status.parseFrom(kv.getValue().get())));
        }
        try (Scanner master = connMaster.createScanner(masterTable, Authorizations.EMPTY);
            Scanner peer = connPeer.createScanner(peerTable, Authorizations.EMPTY)) {
            Iterator<Entry<Key, Value>> masterIter = master.iterator(), peerIter = peer.iterator();
            Entry<Key, Value> masterEntry = null, peerEntry = null;
            while (masterIter.hasNext() && peerIter.hasNext()) {
                masterEntry = masterIter.next();
                peerEntry = peerIter.next();
                Assert.assertEquals(masterEntry.getKey() + " was not equal to " + peerEntry.getKey(), 0, masterEntry.getKey().compareTo(peerEntry.getKey(), PartialKey.ROW_COLFAM_COLQUAL_COLVIS));
                Assert.assertEquals(masterEntry.getValue(), peerEntry.getValue());
            }
            log.info("Last master entry: {}", masterEntry);
            log.info("Last peer entry: {}", peerEntry);
            Assert.assertFalse("Had more data to read from the master", masterIter.hasNext());
            Assert.assertFalse("Had more data to read from the peer", peerIter.hasNext());
        }
    } finally {
        peerCluster.stop();
    }
}
Also used : Connector(org.apache.accumulo.core.client.Connector) Scanner(org.apache.accumulo.core.client.Scanner) ProcessReference(org.apache.accumulo.minicluster.impl.ProcessReference) PasswordToken(org.apache.accumulo.core.client.security.tokens.PasswordToken) Entry(java.util.Map.Entry) AccumuloReplicaSystem(org.apache.accumulo.tserver.replication.AccumuloReplicaSystem) BatchWriterConfig(org.apache.accumulo.core.client.BatchWriterConfig) MiniAccumuloClusterImpl(org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl) TimeoutException(java.util.concurrent.TimeoutException) MiniAccumuloConfigImpl(org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl) Value(org.apache.accumulo.core.data.Value) BatchWriter(org.apache.accumulo.core.client.BatchWriter) Mutation(org.apache.accumulo.core.data.Mutation) Key(org.apache.accumulo.core.data.Key) PartialKey(org.apache.accumulo.core.data.PartialKey) Test(org.junit.Test)
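
This test writes with a default BatchWriterConfig. A minimal sketch of tuning the writer instead, reusing connMaster and masterTable from the test above; the buffer size, latency, thread count, and timeout are illustrative values, not settings taken from the test:

BatchWriterConfig tuned = new BatchWriterConfig();
// Buffer up to 10 MB of mutations on the client before flushing (illustrative value)
tuned.setMaxMemory(10 * 1024 * 1024);
// Flush buffered mutations at least every 30 seconds
tuned.setMaxLatency(30, TimeUnit.SECONDS);
// Send mutations to tablet servers with 4 background threads
tuned.setMaxWriteThreads(4);
// Fail mutations that a server does not acknowledge within 2 minutes
tuned.setTimeout(2, TimeUnit.MINUTES);
try (BatchWriter writer = connMaster.createBatchWriter(masterTable, tuned)) {
    Mutation m = new Mutation("row");
    m.put("cf", "cq", "value");
    writer.addMutation(m);
}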

Example 42 with BatchWriterConfig

Use of org.apache.accumulo.core.client.BatchWriterConfig in project accumulo by apache.

From the class ProxyServer, method getWriter:

BatchWriterPlusProblem getWriter(ByteBuffer login, String tableName, WriterOptions opts) throws Exception {
    BatchWriterConfig cfg = new BatchWriterConfig();
    if (opts != null) {
        if (opts.maxMemory != 0)
            cfg.setMaxMemory(opts.maxMemory);
        if (opts.threads != 0)
            cfg.setMaxWriteThreads(opts.threads);
        if (opts.timeoutMs != 0)
            cfg.setTimeout(opts.timeoutMs, TimeUnit.MILLISECONDS);
        if (opts.latencyMs != 0)
            cfg.setMaxLatency(opts.latencyMs, TimeUnit.MILLISECONDS);
        if (opts.isSetDurability() && opts.durability != null) {
            cfg.setDurability(getDurability(opts.getDurability()));
        }
    }
    BatchWriterPlusProblem result = new BatchWriterPlusProblem();
    result.writer = getConnector(login).createBatchWriter(tableName, cfg);
    return result;
}
Also used : BatchWriterConfig(org.apache.accumulo.core.client.BatchWriterConfig)
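
In getWriter, a zero-valued option in WriterOptions falls through to the corresponding BatchWriterConfig default. A short sketch of inspecting those defaults; the values in the comments are the usual Accumulo 1.x defaults and are worth verifying against your client version:

BatchWriterConfig defaults = new BatchWriterConfig();
long maxMemory = defaults.getMaxMemory();                           // client-side buffer, ~50 MB by default
long maxLatencyMs = defaults.getMaxLatency(TimeUnit.MILLISECONDS);  // max buffering delay, ~120,000 ms by default
int writeThreads = defaults.getMaxWriteThreads();                   // background send threads, 3 by default
long timeoutMs = defaults.getTimeout(TimeUnit.MILLISECONDS);        // Long.MAX_VALUE by default, i.e. no timeout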

Example 43 with BatchWriterConfig

Use of org.apache.accumulo.core.client.BatchWriterConfig in project accumulo by apache.

From the class PluginIT, method checkIterator:

@Test
public void checkIterator() throws IOException, AccumuloException, AccumuloSecurityException, TableExistsException, TableNotFoundException {
    String tableName = "checkIterator";
    connector.tableOperations().create(tableName);
    BatchWriter bw = connector.createBatchWriter(tableName, new BatchWriterConfig());
    Mutation m = new Mutation("ROW1");
    m.put("allowed", "CQ1", "V1");
    m.put("denied", "CQ2", "V2");
    m.put("allowed", "CQ3", "V3");
    bw.addMutation(m);
    m = new Mutation("ROW2");
    m.put("allowed", "CQ1", "V1");
    m.put("denied", "CQ2", "V2");
    m.put("allowed", "CQ3", "V3");
    bw.addMutation(m);
    bw.close();
    // check filter
    Scanner scanner = connector.createScanner(tableName, Authorizations.EMPTY);
    IteratorSetting is = new IteratorSetting(5, CustomFilter.class);
    scanner.addScanIterator(is);
    int count = 0;
    for (Entry<Key, Value> entry : scanner) {
        count++;
        assertEquals("allowed", entry.getKey().getColumnFamily().toString());
    }
    assertEquals(4, count);
    // check filter negated
    scanner.clearScanIterators();
    CustomFilter.setNegate(is, true);
    scanner.addScanIterator(is);
    count = 0;
    for (Entry<Key, Value> entry : scanner) {
        count++;
        assertEquals("denied", entry.getKey().getColumnFamily().toString());
    }
    assertEquals(2, count);
    assertTrue(new File("target/accumulo-maven-plugin/" + instance.getInstanceName() + "/testCheckIteratorPassed").createNewFile());
}
Also used : Scanner(org.apache.accumulo.core.client.Scanner) IteratorSetting(org.apache.accumulo.core.client.IteratorSetting) Value(org.apache.accumulo.core.data.Value) BatchWriterConfig(org.apache.accumulo.core.client.BatchWriterConfig) BatchWriter(org.apache.accumulo.core.client.BatchWriter) Mutation(org.apache.accumulo.core.data.Mutation) File(java.io.File) Key(org.apache.accumulo.core.data.Key) Test(org.junit.Test)
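
CustomFilter is a plugin class specific to this test and is not shown here. A minimal filter of the same shape, assuming (as the assertions above suggest) that it accepts entries whose column family is "allowed"; this sketch extends org.apache.accumulo.core.iterators.Filter:

// Keep only entries in the "allowed" column family. Filter provides a
// built-in negate option (Filter.setNegate) that flips accept/reject,
// which is presumably what CustomFilter.setNegate above delegates to.
public static class AllowedFamilyFilter extends Filter {

    private static final Text ALLOWED = new Text("allowed");

    @Override
    public boolean accept(Key k, Value v) {
        return ALLOWED.equals(k.getColumnFamily());
    }
}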

Example 44 with BatchWriterConfig

Use of org.apache.accumulo.core.client.BatchWriterConfig in project accumulo by apache.

From the class CloseWriteAheadLogReferences, method updateReplicationEntries:

/**
 * Given the set of WALs that are closed or unreferenced in zookeeper, close any replication status messages that reference those WALs.
 *
 * @param conn
 *          Connector
 * @param closedWals
 *          {@link Set} of paths to WALs that are marked as closed or unreferenced in zookeeper
 * @return the number of replication status entries that were closed
 */
protected long updateReplicationEntries(Connector conn, Set<String> closedWals) {
    BatchScanner bs = null;
    BatchWriter bw = null;
    long recordsClosed = 0;
    try {
        bw = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
        bs = conn.createBatchScanner(MetadataTable.NAME, Authorizations.EMPTY, 4);
        bs.setRanges(Collections.singleton(Range.prefix(ReplicationSection.getRowPrefix())));
        bs.fetchColumnFamily(ReplicationSection.COLF);
        Text replFileText = new Text();
        for (Entry<Key, Value> entry : bs) {
            Status status;
            try {
                status = Status.parseFrom(entry.getValue().get());
            } catch (InvalidProtocolBufferException e) {
                log.error("Could not parse Status protobuf for {}", entry.getKey(), e);
                continue;
            }
            // Ignore things that aren't completely replicated, as we can't delete those anyway
            MetadataSchema.ReplicationSection.getFile(entry.getKey(), replFileText);
            String replFile = replFileText.toString();
            boolean isClosed = closedWals.contains(replFile);
            // Only clean up WAL files (not RFiles), and only when the WAL is closed in
            // zookeeper but its replication Status is not yet marked closed
            if (!status.getClosed() && !replFile.endsWith(RFILE_SUFFIX) && isClosed) {
                try {
                    closeWal(bw, entry.getKey());
                    recordsClosed++;
                } catch (MutationsRejectedException e) {
                    log.error("Failed to submit delete mutation for {}", entry.getKey());
                    continue;
                }
            }
        }
    } catch (TableNotFoundException e) {
        log.error("Replication table was deleted", e);
    } finally {
        if (null != bs) {
            bs.close();
        }
        if (null != bw) {
            try {
                bw.close();
            } catch (MutationsRejectedException e) {
                log.error("Failed to write delete mutations for replication table", e);
            }
        }
    }
    return recordsClosed;
}
Also used : Status(org.apache.accumulo.server.replication.proto.Replication.Status) BatchScanner(org.apache.accumulo.core.client.BatchScanner) InvalidProtocolBufferException(com.google.protobuf.InvalidProtocolBufferException) Text(org.apache.hadoop.io.Text) TableNotFoundException(org.apache.accumulo.core.client.TableNotFoundException) Value(org.apache.accumulo.core.data.Value) BatchWriterConfig(org.apache.accumulo.core.client.BatchWriterConfig) BatchWriter(org.apache.accumulo.core.client.BatchWriter) Key(org.apache.accumulo.core.data.Key) MutationsRejectedException(org.apache.accumulo.core.client.MutationsRejectedException)
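
The closeWal helper called above is not shown. A plausible sketch of what closing a status entry involves, assuming the cell is rewritten with its closed flag set; the actual helper may differ:

// Rewrite the replication status cell with closed = true so that the
// garbage collector can eventually reclaim the WAL (a sketch, not the
// real closeWal implementation).
static void markClosed(BatchWriter bw, Key k, Status current) throws MutationsRejectedException {
    Status closed = Status.newBuilder(current).setClosed(true).build();
    Mutation m = new Mutation(k.getRow());
    m.put(k.getColumnFamily(), k.getColumnQualifier(), new Value(closed.toByteArray()));
    bw.addMutation(m);
}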

Example 45 with BatchWriterConfig

Use of org.apache.accumulo.core.client.BatchWriterConfig in project accumulo by apache.

From the class MockConnectorTest, method testAggregation:

@Test
public void testAggregation() throws Exception {
    MockInstance mockInstance = new MockInstance();
    Connector c = mockInstance.getConnector("root", new PasswordToken(""));
    String table = "perDayCounts";
    c.tableOperations().create(table);
    IteratorSetting is = new IteratorSetting(10, "String Summation", SummingCombiner.class);
    Combiner.setColumns(is, Collections.singletonList(new IteratorSetting.Column("day")));
    SummingCombiner.setEncodingType(is, SummingCombiner.Type.STRING);
    c.tableOperations().attachIterator(table, is);
    String[][] keys = { { "foo", "day", "20080101" }, { "foo", "day", "20080101" }, { "foo", "day", "20080103" }, { "bar", "day", "20080101" }, { "bar", "day", "20080101" } };
    BatchWriter bw = c.createBatchWriter("perDayCounts", new BatchWriterConfig());
    for (String[] elt : keys) {
        Mutation m = new Mutation(new Text(elt[0]));
        m.put(new Text(elt[1]), new Text(elt[2]), new Value("1".getBytes()));
        bw.addMutation(m);
    }
    bw.close();
    Scanner s = c.createScanner("perDayCounts", Authorizations.EMPTY);
    Iterator<Entry<Key, Value>> iterator = s.iterator();
    assertTrue(iterator.hasNext());
    checkEntry(iterator.next(), "bar", "day", "20080101", "2");
    assertTrue(iterator.hasNext());
    checkEntry(iterator.next(), "foo", "day", "20080101", "2");
    assertTrue(iterator.hasNext());
    checkEntry(iterator.next(), "foo", "day", "20080103", "1");
    assertFalse(iterator.hasNext());
}
Also used : Connector(org.apache.accumulo.core.client.Connector) BatchScanner(org.apache.accumulo.core.client.BatchScanner) Scanner(org.apache.accumulo.core.client.Scanner) Text(org.apache.hadoop.io.Text) PasswordToken(org.apache.accumulo.core.client.security.tokens.PasswordToken) Entry(java.util.Map.Entry) IteratorSetting(org.apache.accumulo.core.client.IteratorSetting) Value(org.apache.accumulo.core.data.Value) BatchWriterConfig(org.apache.accumulo.core.client.BatchWriterConfig) MultiTableBatchWriter(org.apache.accumulo.core.client.MultiTableBatchWriter) BatchWriter(org.apache.accumulo.core.client.BatchWriter) Mutation(org.apache.accumulo.core.data.Mutation) Test(org.junit.Test)
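
The combiner in this test is attached to the table, so it applies to every subsequent scan and compaction. A minimal sketch of applying the same summation at scan time only, which aggregates results for one scanner while leaving the stored entries untouched:

// Same summing combiner, configured per-scanner instead of per-table
IteratorSetting scanTime = new IteratorSetting(10, "scanSum", SummingCombiner.class);
Combiner.setColumns(scanTime, Collections.singletonList(new IteratorSetting.Column("day")));
SummingCombiner.setEncodingType(scanTime, SummingCombiner.Type.STRING);
Scanner scan = c.createScanner(table, Authorizations.EMPTY);
scan.addScanIterator(scanTime);
// Entries read through this scanner are now summed per day; the table data is unchanged.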

Aggregations

BatchWriterConfig (org.apache.accumulo.core.client.BatchWriterConfig): 282
BatchWriter (org.apache.accumulo.core.client.BatchWriter): 246
Mutation (org.apache.accumulo.core.data.Mutation): 224
Test (org.junit.Test): 171
Value (org.apache.accumulo.core.data.Value): 166
Connector (org.apache.accumulo.core.client.Connector): 142
Scanner (org.apache.accumulo.core.client.Scanner): 121
Key (org.apache.accumulo.core.data.Key): 121
Text (org.apache.hadoop.io.Text): 119
IteratorSetting (org.apache.accumulo.core.client.IteratorSetting): 50
Entry (java.util.Map.Entry): 42
Range (org.apache.accumulo.core.data.Range): 42
TableNotFoundException (org.apache.accumulo.core.client.TableNotFoundException): 41
BatchScanner (org.apache.accumulo.core.client.BatchScanner): 36
Authorizations (org.apache.accumulo.core.security.Authorizations): 36
AccumuloException (org.apache.accumulo.core.client.AccumuloException): 35
PasswordToken (org.apache.accumulo.core.client.security.tokens.PasswordToken): 32
AccumuloSecurityException (org.apache.accumulo.core.client.AccumuloSecurityException): 29
MutationsRejectedException (org.apache.accumulo.core.client.MutationsRejectedException): 29
HashMap (java.util.HashMap): 24