Example 46 with Mutation

Use of org.apache.accumulo.core.data.Mutation in project accumulo by apache.

The class BatchWriterIT, method test:

@Test
public void test() throws Exception {
    // create a BatchWriter whose buffer size is zero, forcing a flush on every addMutation
    String table = getUniqueNames(1)[0];
    Connector c = getConnector();
    c.tableOperations().create(table);
    BatchWriterConfig config = new BatchWriterConfig();
    config.setMaxMemory(0);
    BatchWriter writer = c.createBatchWriter(table, config);
    Mutation m = new Mutation("row");
    m.put("cf", "cq", new Value("value".getBytes()));
    writer.addMutation(m);
    writer.close();
}
Also used : Connector(org.apache.accumulo.core.client.Connector) Value(org.apache.accumulo.core.data.Value) BatchWriterConfig(org.apache.accumulo.core.client.BatchWriterConfig) BatchWriter(org.apache.accumulo.core.client.BatchWriter) Mutation(org.apache.accumulo.core.data.Mutation) Test(org.junit.Test)
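
Setting maxMemory to zero, as above, forces a flush on every addMutation, which makes the test deterministic but is slow for real ingest. For contrast, here is a minimal sketch of the usual buffered pattern; the buffer size, latency, row contents, and the reuse of c and table from the example are illustrative assumptions (java.util.concurrent.TimeUnit is also required):

// Sketch only: typical buffered writes. Mutations are sent when the buffer
// fills, when maxLatency elapses, or when the writer is closed.
BatchWriterConfig config = new BatchWriterConfig()
    .setMaxMemory(10 * 1024 * 1024)          // 10 MB client-side buffer (illustrative)
    .setMaxLatency(30, TimeUnit.SECONDS);    // flush at least every 30 seconds
try (BatchWriter writer = c.createBatchWriter(table, config)) {
    for (int i = 0; i < 1000; i++) {
        Mutation m = new Mutation("row_" + i);    // hypothetical rows
        m.put("cf", "cq", "value_" + i);
        writer.addMutation(m);
    }
} // close() flushes any buffered mutations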

Example 47 with Mutation

Use of org.apache.accumulo.core.data.Mutation in project accumulo by apache.

The class BatchWriterInTabletServerIT, method test:

private void test(String t1, String t2, Connector c, IteratorSetting itset, int numEntriesToWritePerEntry) throws Exception {
    // Write an entry to t1
    c.tableOperations().create(t1);
    Key k = new Key(new Text("row"), new Text("cf"), new Text("cq"));
    Value v = new Value("1".getBytes());
    {
        BatchWriterConfig config = new BatchWriterConfig();
        config.setMaxMemory(0);
        BatchWriter writer = c.createBatchWriter(t1, config);
        Mutation m = new Mutation(k.getRow());
        m.put(k.getColumnFamily(), k.getColumnQualifier(), v);
        writer.addMutation(m);
        writer.close();
    }
    // Create t2 with a combiner to count entries written to it
    c.tableOperations().create(t2);
    IteratorSetting summer = new IteratorSetting(2, "summer", SummingCombiner.class);
    LongCombiner.setEncodingType(summer, LongCombiner.Type.STRING);
    LongCombiner.setCombineAllColumns(summer, true);
    c.tableOperations().attachIterator(t2, summer);
    Map.Entry<Key, Value> actual;
    try (Scanner scanner = c.createScanner(t1, Authorizations.EMPTY)) {
        // Scan t1 with an iterator that writes to table t2
        scanner.addScanIterator(itset);
        actual = Iterators.getOnlyElement(scanner.iterator());
        Assert.assertTrue(actual.getKey().equals(k, PartialKey.ROW_COLFAM_COLQUAL));
        Assert.assertEquals(BatchWriterIterator.SUCCESS_VALUE, actual.getValue());
    }
    try (Scanner scanner = c.createScanner(t2, Authorizations.EMPTY)) {
        // ensure entries were correctly written to table t2
        actual = Iterators.getOnlyElement(scanner.iterator());
        log.debug("t2 entry is " + actual.getKey().toStringNoTime() + " -> " + actual.getValue());
        Assert.assertTrue(actual.getKey().equals(k, PartialKey.ROW_COLFAM_COLQUAL));
        Assert.assertEquals(numEntriesToWritePerEntry, Integer.parseInt(actual.getValue().toString()));
    }
    c.tableOperations().delete(t1);
    c.tableOperations().delete(t2);
}
Also used : Scanner(org.apache.accumulo.core.client.Scanner) IteratorSetting(org.apache.accumulo.core.client.IteratorSetting) Value(org.apache.accumulo.core.data.Value) BatchWriterConfig(org.apache.accumulo.core.client.BatchWriterConfig) Text(org.apache.hadoop.io.Text) BatchWriter(org.apache.accumulo.core.client.BatchWriter) Mutation(org.apache.accumulo.core.data.Mutation) Map(java.util.Map) Key(org.apache.accumulo.core.data.Key) PartialKey(org.apache.accumulo.core.data.PartialKey)
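
The combiner above is configured with setCombineAllColumns; a common variant limits it to named columns via Combiner.setColumns and writes increments that reads see as a running total. A sketch of that variant on a hypothetical "counts" table with a hypothetical "count" column, reusing connector c; it assumes the imports already shown plus org.apache.accumulo.core.iterators.Combiner and java.util.Collections:

// Sketch only: sum a single "count" column instead of combining all columns.
String counts = "counts";    // hypothetical table
c.tableOperations().create(counts);
IteratorSetting counting = new IteratorSetting(2, "countSummer", SummingCombiner.class);
LongCombiner.setEncodingType(counting, LongCombiner.Type.STRING);
Combiner.setColumns(counting, Collections.singletonList(new IteratorSetting.Column("count")));
c.tableOperations().attachIterator(counts, counting);
// Each write below is a +1 increment; a scan then sees a single summed value of 3.
try (BatchWriter bw = c.createBatchWriter(counts, new BatchWriterConfig())) {
    for (int i = 0; i < 3; i++) {
        Mutation m = new Mutation("counter_row");    // hypothetical row
        m.put("count", "", "1");
        bw.addMutation(m);
    }
}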

Example 48 with Mutation

Use of org.apache.accumulo.core.data.Mutation in project accumulo by apache.

The class BatchWriterFlushIT, method runLatencyTest:

private void runLatencyTest(String tableName) throws Exception {
    // with maxLatency set to 1000 ms, the writer should automatically flush after about one second
    try (BatchWriter bw = getConnector().createBatchWriter(tableName, new BatchWriterConfig().setMaxLatency(1000, TimeUnit.MILLISECONDS));
        Scanner scanner = getConnector().createScanner(tableName, Authorizations.EMPTY)) {
        Mutation m = new Mutation(new Text(String.format("r_%10d", 1)));
        m.put(new Text("cf"), new Text("cq"), new Value("1".getBytes(UTF_8)));
        bw.addMutation(m);
        sleepUninterruptibly(500, TimeUnit.MILLISECONDS);
        int count = Iterators.size(scanner.iterator());
        if (count != 0) {
            throw new Exception("Flushed too soon");
        }
        sleepUninterruptibly(1500, TimeUnit.MILLISECONDS);
        count = Iterators.size(scanner.iterator());
        if (count != 1) {
            throw new Exception("Did not flush");
        }
    }
}
Also used : Scanner(org.apache.accumulo.core.client.Scanner) Value(org.apache.accumulo.core.data.Value) BatchWriterConfig(org.apache.accumulo.core.client.BatchWriterConfig) Text(org.apache.hadoop.io.Text) BatchWriter(org.apache.accumulo.core.client.BatchWriter) Mutation(org.apache.accumulo.core.data.Mutation) TableNotFoundException(org.apache.accumulo.core.client.TableNotFoundException) AccumuloSecurityException(org.apache.accumulo.core.client.AccumuloSecurityException) MutationsRejectedException(org.apache.accumulo.core.client.MutationsRejectedException) AccumuloException(org.apache.accumulo.core.client.AccumuloException)
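
The latency test above exercises one BatchWriterConfig knob; the main settings, chained fluently with illustrative (not default) values, are sketched below. Any setting left untouched keeps the client's default.

// Sketch only: the main BatchWriterConfig settings.
BatchWriterConfig config = new BatchWriterConfig()
    .setMaxMemory(1024 * 1024)               // bytes buffered before a send is forced
    .setMaxLatency(1, TimeUnit.SECONDS)      // longest a mutation may wait in the buffer
    .setMaxWriteThreads(4)                   // client threads sending to tablet servers
    .setTimeout(30, TimeUnit.SECONDS);       // give up on writes that make no progress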

Example 49 with Mutation

Use of org.apache.accumulo.core.data.Mutation in project accumulo by apache.

The class MultiInstanceReplicationIT, method dataWasReplicatedToThePeer:

@Test(timeout = 10 * 60 * 1000)
public void dataWasReplicatedToThePeer() throws Exception {
    MiniAccumuloConfigImpl peerCfg = new MiniAccumuloConfigImpl(createTestDir(this.getClass().getName() + "_" + this.testName.getMethodName() + "_peer"), ROOT_PASSWORD);
    peerCfg.setNumTservers(1);
    peerCfg.setInstanceName("peer");
    peerCfg.setProperty(Property.REPLICATION_NAME, "peer");
    updatePeerConfigFromPrimary(getCluster().getConfig(), peerCfg);
    MiniAccumuloClusterImpl peerCluster = new MiniAccumuloClusterImpl(peerCfg);
    peerCluster.start();
    try {
        final Connector connMaster = getConnector();
        final Connector connPeer = peerCluster.getConnector("root", new PasswordToken(ROOT_PASSWORD));
        ReplicationTable.setOnline(connMaster);
        String peerUserName = "peer", peerPassword = "foo";
        String peerClusterName = "peer";
        connPeer.securityOperations().createLocalUser(peerUserName, new PasswordToken(peerPassword));
        connMaster.instanceOperations().setProperty(Property.REPLICATION_PEER_USER.getKey() + peerClusterName, peerUserName);
        connMaster.instanceOperations().setProperty(Property.REPLICATION_PEER_PASSWORD.getKey() + peerClusterName, peerPassword);
        // ...peer = AccumuloReplicaSystem,instanceName,zookeepers
        connMaster.instanceOperations().setProperty(Property.REPLICATION_PEERS.getKey() + peerClusterName, ReplicaSystemFactory.getPeerConfigurationValue(AccumuloReplicaSystem.class, AccumuloReplicaSystem.buildConfiguration(peerCluster.getInstanceName(), peerCluster.getZooKeepers())));
        final String masterTable = "master", peerTable = "peer";
        connMaster.tableOperations().create(masterTable);
        String masterTableId = connMaster.tableOperations().tableIdMap().get(masterTable);
        Assert.assertNotNull(masterTableId);
        connPeer.tableOperations().create(peerTable);
        String peerTableId = connPeer.tableOperations().tableIdMap().get(peerTable);
        Assert.assertNotNull(peerTableId);
        connPeer.securityOperations().grantTablePermission(peerUserName, peerTable, TablePermission.WRITE);
        // Replicate this table to the peerClusterName in a table with the peerTableId table id
        connMaster.tableOperations().setProperty(masterTable, Property.TABLE_REPLICATION.getKey(), "true");
        connMaster.tableOperations().setProperty(masterTable, Property.TABLE_REPLICATION_TARGET.getKey() + peerClusterName, peerTableId);
        // Write some data to the master table
        BatchWriter bw = connMaster.createBatchWriter(masterTable, new BatchWriterConfig());
        for (int rows = 0; rows < 5000; rows++) {
            Mutation m = new Mutation(Integer.toString(rows));
            for (int cols = 0; cols < 100; cols++) {
                String value = Integer.toString(cols);
                m.put(value, "", value);
            }
            bw.addMutation(m);
        }
        bw.close();
        log.info("Wrote all data to master cluster");
        final Set<String> filesNeedingReplication = connMaster.replicationOperations().referencedFiles(masterTable);
        log.info("Files to replicate: " + filesNeedingReplication);
        for (ProcessReference proc : cluster.getProcesses().get(ServerType.TABLET_SERVER)) {
            cluster.killProcess(ServerType.TABLET_SERVER, proc);
        }
        cluster.exec(TabletServer.class);
        log.info("TabletServer restarted");
        Iterators.size(ReplicationTable.getScanner(connMaster).iterator());
        log.info("TabletServer is online");
        while (!ReplicationTable.isOnline(connMaster)) {
            log.info("Replication table still offline, waiting");
            Thread.sleep(5000);
        }
        log.info("");
        log.info("Fetching metadata records:");
        for (Entry<Key, Value> kv : connMaster.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
            if (ReplicationSection.COLF.equals(kv.getKey().getColumnFamily())) {
                log.info("{} {}", kv.getKey().toStringNoTruncate(), ProtobufUtil.toString(Status.parseFrom(kv.getValue().get())));
            } else {
                log.info("{} {}", kv.getKey().toStringNoTruncate(), kv.getValue());
            }
        }
        log.info("");
        log.info("Fetching replication records:");
        for (Entry<Key, Value> kv : ReplicationTable.getScanner(connMaster)) {
            log.info("{} {}", kv.getKey().toStringNoTruncate(), ProtobufUtil.toString(Status.parseFrom(kv.getValue().get())));
        }
        Future<Boolean> future = executor.submit(new Callable<Boolean>() {

            @Override
            public Boolean call() throws Exception {
                long then = System.currentTimeMillis();
                connMaster.replicationOperations().drain(masterTable, filesNeedingReplication);
                long now = System.currentTimeMillis();
                log.info("Drain completed in " + (now - then) + "ms");
                return true;
            }
        });
        try {
            future.get(60, TimeUnit.SECONDS);
        } catch (TimeoutException e) {
            future.cancel(true);
            Assert.fail("Drain did not finish within 60 seconds");
        } finally {
            executor.shutdownNow();
        }
        log.info("drain completed");
        log.info("");
        log.info("Fetching metadata records:");
        for (Entry<Key, Value> kv : connMaster.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
            if (ReplicationSection.COLF.equals(kv.getKey().getColumnFamily())) {
                log.info("{} {}", kv.getKey().toStringNoTruncate(), ProtobufUtil.toString(Status.parseFrom(kv.getValue().get())));
            } else {
                log.info("{} {}", kv.getKey().toStringNoTruncate(), kv.getValue());
            }
        }
        log.info("");
        log.info("Fetching replication records:");
        for (Entry<Key, Value> kv : ReplicationTable.getScanner(connMaster)) {
            log.info("{} {}", kv.getKey().toStringNoTruncate(), ProtobufUtil.toString(Status.parseFrom(kv.getValue().get())));
        }
        try (Scanner master = connMaster.createScanner(masterTable, Authorizations.EMPTY);
            Scanner peer = connPeer.createScanner(peerTable, Authorizations.EMPTY)) {
            Iterator<Entry<Key, Value>> masterIter = master.iterator(), peerIter = peer.iterator();
            Entry<Key, Value> masterEntry = null, peerEntry = null;
            while (masterIter.hasNext() && peerIter.hasNext()) {
                masterEntry = masterIter.next();
                peerEntry = peerIter.next();
                Assert.assertEquals(masterEntry.getKey() + " was not equal to " + peerEntry.getKey(), 0, masterEntry.getKey().compareTo(peerEntry.getKey(), PartialKey.ROW_COLFAM_COLQUAL_COLVIS));
                Assert.assertEquals(masterEntry.getValue(), peerEntry.getValue());
            }
            log.info("Last master entry: {}", masterEntry);
            log.info("Last peer entry: {}", peerEntry);
            Assert.assertFalse("Had more data to read from the master", masterIter.hasNext());
            Assert.assertFalse("Had more data to read from the peer", peerIter.hasNext());
        }
    } finally {
        peerCluster.stop();
    }
}
Also used : Connector(org.apache.accumulo.core.client.Connector) Scanner(org.apache.accumulo.core.client.Scanner) ProcessReference(org.apache.accumulo.minicluster.impl.ProcessReference) MiniAccumuloConfigImpl(org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl) TimeoutException(java.util.concurrent.TimeoutException) PasswordToken(org.apache.accumulo.core.client.security.tokens.PasswordToken) Entry(java.util.Map.Entry) Value(org.apache.accumulo.core.data.Value) AccumuloReplicaSystem(org.apache.accumulo.tserver.replication.AccumuloReplicaSystem) BatchWriterConfig(org.apache.accumulo.core.client.BatchWriterConfig) BatchWriter(org.apache.accumulo.core.client.BatchWriter) Mutation(org.apache.accumulo.core.data.Mutation) MiniAccumuloClusterImpl(org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl) Key(org.apache.accumulo.core.data.Key) PartialKey(org.apache.accumulo.core.data.PartialKey) TimeoutException(java.util.concurrent.TimeoutException) Test(org.junit.Test)
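
The anonymous Callable above is a general way to put a deadline on drain(), which otherwise blocks until every referenced file has replicated to the peer. A condensed sketch of the same pattern using a Java 8 lambda; the single-thread executor and the 60-second bound are illustrative, and the java.util.concurrent imports are assumed:

// Sketch only: bound a blocking drain() call with a Future deadline.
ExecutorService executor = Executors.newSingleThreadExecutor();
Future<Void> drain = executor.submit(() -> {
    connMaster.replicationOperations().drain(masterTable, filesNeedingReplication);
    return null;    // Callable<Void>
});
try {
    drain.get(60, TimeUnit.SECONDS);    // wait at most 60 seconds
} catch (TimeoutException e) {
    drain.cancel(true);                 // interrupt the stuck drain
    Assert.fail("Drain did not finish within 60 seconds");
} finally {
    executor.shutdownNow();
}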

Example 50 with Mutation

Use of org.apache.accumulo.core.data.Mutation in project accumulo by apache.

The class MultiInstanceReplicationIT, method dataReplicatedToCorrectTableWithoutDrain:

@Test
public void dataReplicatedToCorrectTableWithoutDrain() throws Exception {
    MiniAccumuloConfigImpl peerCfg = new MiniAccumuloConfigImpl(createTestDir(this.getClass().getName() + "_" + this.testName.getMethodName() + "_peer"), ROOT_PASSWORD);
    peerCfg.setNumTservers(1);
    peerCfg.setInstanceName("peer");
    peerCfg.setProperty(Property.REPLICATION_NAME, "peer");
    updatePeerConfigFromPrimary(getCluster().getConfig(), peerCfg);
    MiniAccumuloClusterImpl peer1Cluster = new MiniAccumuloClusterImpl(peerCfg);
    peer1Cluster.start();
    try {
        Connector connMaster = getConnector();
        Connector connPeer = peer1Cluster.getConnector("root", new PasswordToken(ROOT_PASSWORD));
        String peerClusterName = "peer";
        String peerUserName = "repl";
        String peerPassword = "passwd";
        // Create a user on the peer for replication to use
        connPeer.securityOperations().createLocalUser(peerUserName, new PasswordToken(peerPassword));
        // Configure the credentials we should use to authenticate ourselves to the peer for replication
        connMaster.instanceOperations().setProperty(Property.REPLICATION_PEER_USER.getKey() + peerClusterName, peerUserName);
        connMaster.instanceOperations().setProperty(Property.REPLICATION_PEER_PASSWORD.getKey() + peerClusterName, peerPassword);
        // ...peer = AccumuloReplicaSystem,instanceName,zookeepers
        connMaster.instanceOperations().setProperty(Property.REPLICATION_PEERS.getKey() + peerClusterName, ReplicaSystemFactory.getPeerConfigurationValue(AccumuloReplicaSystem.class, AccumuloReplicaSystem.buildConfiguration(peer1Cluster.getInstanceName(), peer1Cluster.getZooKeepers())));
        String masterTable1 = "master1", peerTable1 = "peer1", masterTable2 = "master2", peerTable2 = "peer2";
        connMaster.tableOperations().create(masterTable1);
        String masterTableId1 = connMaster.tableOperations().tableIdMap().get(masterTable1);
        Assert.assertNotNull(masterTableId1);
        connMaster.tableOperations().create(masterTable2);
        String masterTableId2 = connMaster.tableOperations().tableIdMap().get(masterTable2);
        Assert.assertNotNull(masterTableId2);
        connPeer.tableOperations().create(peerTable1);
        String peerTableId1 = connPeer.tableOperations().tableIdMap().get(peerTable1);
        Assert.assertNotNull(peerTableId1);
        connPeer.tableOperations().create(peerTable2);
        String peerTableId2 = connPeer.tableOperations().tableIdMap().get(peerTable2);
        Assert.assertNotNull(peerTableId2);
        // Give our replication user the ability to write to the tables
        connPeer.securityOperations().grantTablePermission(peerUserName, peerTable1, TablePermission.WRITE);
        connPeer.securityOperations().grantTablePermission(peerUserName, peerTable2, TablePermission.WRITE);
        // Replicate this table to the peerClusterName in a table with the peerTableId table id
        connMaster.tableOperations().setProperty(masterTable1, Property.TABLE_REPLICATION.getKey(), "true");
        connMaster.tableOperations().setProperty(masterTable1, Property.TABLE_REPLICATION_TARGET.getKey() + peerClusterName, peerTableId1);
        connMaster.tableOperations().setProperty(masterTable2, Property.TABLE_REPLICATION.getKey(), "true");
        connMaster.tableOperations().setProperty(masterTable2, Property.TABLE_REPLICATION_TARGET.getKey() + peerClusterName, peerTableId2);
        // Write some data to table1
        BatchWriter bw = connMaster.createBatchWriter(masterTable1, new BatchWriterConfig());
        for (int rows = 0; rows < 2500; rows++) {
            Mutation m = new Mutation(masterTable1 + rows);
            for (int cols = 0; cols < 100; cols++) {
                String value = Integer.toString(cols);
                m.put(value, "", value);
            }
            bw.addMutation(m);
        }
        bw.close();
        // Write some data to table2
        bw = connMaster.createBatchWriter(masterTable2, new BatchWriterConfig());
        for (int rows = 0; rows < 2500; rows++) {
            Mutation m = new Mutation(masterTable2 + rows);
            for (int cols = 0; cols < 100; cols++) {
                String value = Integer.toString(cols);
                m.put(value, "", value);
            }
            bw.addMutation(m);
        }
        bw.close();
        log.info("Wrote all data to master cluster");
        for (ProcessReference proc : cluster.getProcesses().get(ServerType.TABLET_SERVER)) {
            cluster.killProcess(ServerType.TABLET_SERVER, proc);
        }
        cluster.exec(TabletServer.class);
        while (!ReplicationTable.isOnline(connMaster)) {
            log.info("Replication table still offline, waiting");
            Thread.sleep(5000);
        }
        // Wait until we fully replicated something
        boolean fullyReplicated = false;
        for (int i = 0; i < 10 && !fullyReplicated; i++) {
            sleepUninterruptibly(2, TimeUnit.SECONDS);
            try (Scanner s = ReplicationTable.getScanner(connMaster)) {
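                // limit the scan to the work section of the replication table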
                WorkSection.limit(s);
                for (Entry<Key, Value> entry : s) {
                    Status status = Status.parseFrom(entry.getValue().get());
                    if (StatusUtil.isFullyReplicated(status)) {
                        fullyReplicated = true;
                    }
                }
            }
        }
        Assert.assertTrue("Did not see any fully replicated status entries", fullyReplicated);
        // We have to wait for the master to assign the replication work, a local tserver to process it, and then the remote tserver to replay it
        // Be cautious in how quickly we assert that the data is present on the peer
        long countTable = 0L;
        for (int i = 0; i < 10; i++) {
            for (Entry<Key, Value> entry : connPeer.createScanner(peerTable1, Authorizations.EMPTY)) {
                countTable++;
                Assert.assertTrue("Found unexpected key-value" + entry.getKey().toStringNoTruncate() + " " + entry.getValue(), entry.getKey().getRow().toString().startsWith(masterTable1));
            }
            log.info("Found {} records in {}", countTable, peerTable1);
            if (countTable == 0L) {
                Thread.sleep(5000);
            } else {
                break;
            }
        }
        Assert.assertTrue("Found no records in " + peerTable1 + " in the peer cluster", countTable > 0);
        // Be cautious in how quickly we assert that the data is present on the peer
        for (int i = 0; i < 10; i++) {
            countTable = 0L;
            for (Entry<Key, Value> entry : connPeer.createScanner(peerTable2, Authorizations.EMPTY)) {
                countTable++;
                Assert.assertTrue("Found unexpected key-value" + entry.getKey().toStringNoTruncate() + " " + entry.getValue(), entry.getKey().getRow().toString().startsWith(masterTable2));
            }
            log.info("Found {} records in {}", countTable, peerTable2);
            if (countTable == 0L) {
                Thread.sleep(5000);
            } else {
                break;
            }
        }
        Assert.assertTrue("Found no records in " + peerTable2 + " in the peer cluster", countTable > 0);
    } finally {
        peer1Cluster.stop();
    }
}
Also used : Status(org.apache.accumulo.server.replication.proto.Replication.Status) Connector(org.apache.accumulo.core.client.Connector) Scanner(org.apache.accumulo.core.client.Scanner) ProcessReference(org.apache.accumulo.minicluster.impl.ProcessReference) MiniAccumuloConfigImpl(org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl) PasswordToken(org.apache.accumulo.core.client.security.tokens.PasswordToken) Value(org.apache.accumulo.core.data.Value) AccumuloReplicaSystem(org.apache.accumulo.tserver.replication.AccumuloReplicaSystem) BatchWriterConfig(org.apache.accumulo.core.client.BatchWriterConfig) BatchWriter(org.apache.accumulo.core.client.BatchWriter) Mutation(org.apache.accumulo.core.data.Mutation) MiniAccumuloClusterImpl(org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl) Key(org.apache.accumulo.core.data.Key) PartialKey(org.apache.accumulo.core.data.PartialKey) Test(org.junit.Test)
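
Both assertion loops above poll the peer tables because replication lag means an immediate scan may legitimately find nothing. The retry logic factors naturally into a small helper; the method name, retry budget, and sleep interval below are hypothetical:

// Sketch only: poll a table until it has entries or the retry budget runs out.
private long countWithRetries(Connector conn, String table, int attempts, long sleepMs)
        throws Exception {
    for (int i = 0; i < attempts; i++) {
        long count;
        try (Scanner s = conn.createScanner(table, Authorizations.EMPTY)) {
            count = Iterators.size(s.iterator());    // Guava's Iterators, as in the tests
        }
        if (count > 0) {
            return count;
        }
        Thread.sleep(sleepMs);    // back off before rescanning
    }
    return 0;
}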

Aggregations

Mutation (org.apache.accumulo.core.data.Mutation): 601
BatchWriter (org.apache.accumulo.core.client.BatchWriter): 358
Value (org.apache.accumulo.core.data.Value): 341
Test (org.junit.Test): 311
Text (org.apache.hadoop.io.Text): 303
BatchWriterConfig (org.apache.accumulo.core.client.BatchWriterConfig): 223
Key (org.apache.accumulo.core.data.Key): 197
Scanner (org.apache.accumulo.core.client.Scanner): 161
Connector (org.apache.accumulo.core.client.Connector): 150
IteratorSetting (org.apache.accumulo.core.client.IteratorSetting): 77
Authorizations (org.apache.accumulo.core.security.Authorizations): 70
Range (org.apache.accumulo.core.data.Range): 61
ArrayList (java.util.ArrayList): 60
ColumnVisibility (org.apache.accumulo.core.security.ColumnVisibility): 59
Entry (java.util.Map.Entry): 57
MutationsRejectedException (org.apache.accumulo.core.client.MutationsRejectedException): 55
Map (java.util.Map): 53
HashMap (java.util.HashMap): 44
TableNotFoundException (org.apache.accumulo.core.client.TableNotFoundException): 43
BatchScanner (org.apache.accumulo.core.client.BatchScanner): 41