Example 16 with ServerMutation

Use of org.apache.accumulo.server.data.ServerMutation in project accumulo by apache.

The class TabletTimeTest, method testSetSystemTimes.

@Test
public void testSetSystemTimes() {
    ServerMutation m = createMock(ServerMutation.class);
    long lastCommitTime = 1234L;
    m.setSystemTimestamp(lastCommitTime);
    replay(m);
    mtime.setSystemTimes(m, lastCommitTime);
    verify(m);
}
Also used : ServerMutation(org.apache.accumulo.server.data.ServerMutation) Test(org.junit.Test)
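The test above only verifies the interaction with the mock, so the production logic stays implicit. Below is a minimal sketch of the kind of setSystemTimes implementation such a test would exercise; the class name SystemTimeSketch and its field are hypothetical, not the actual TabletTime subclass under test.

import org.apache.accumulo.server.data.ServerMutation;

// Hypothetical sketch, not the real TabletTime implementation: it simply stamps
// the mutation with the last commit time, which is the interaction the mock expects.
public class SystemTimeSketch {
    private long lastTime = Long.MIN_VALUE;

    public void setSystemTimes(ServerMutation mutation, long lastCommitTime) {
        mutation.setSystemTimestamp(lastCommitTime);
        lastTime = Math.max(lastTime, lastCommitTime);
    }
}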

Example 17 with ServerMutation

Use of org.apache.accumulo.server.data.ServerMutation in project accumulo by apache.

The class BatchWriterReplicationReplayer, method replicateLog.

@Override
public long replicateLog(ClientContext context, String tableName, WalEdits data) throws RemoteReplicationException, AccumuloException, AccumuloSecurityException {
    final LogFileKey key = new LogFileKey();
    final LogFileValue value = new LogFileValue();
    final long memoryInBytes = context.getConfiguration().getAsBytes(Property.TSERV_REPLICATION_BW_REPLAYER_MEMORY);
    BatchWriter bw = null;
    long mutationsApplied = 0L;
    try {
        for (ByteBuffer edit : data.getEdits()) {
            DataInputStream dis = new DataInputStream(ByteBufferUtil.toByteArrayInputStream(edit));
            try {
                key.readFields(dis);
                // TODO this is brittle because AccumuloReplicaSystem isn't actually calling LogFileValue.write, but we're expecting
                // what we receive to be readable by the LogFileValue.
                value.readFields(dis);
            } catch (IOException e) {
                log.error("Could not deserialize edit from stream", e);
                throw new RemoteReplicationException(RemoteReplicationErrorCode.COULD_NOT_DESERIALIZE, "Could not deserialize edit from stream");
            }
            // Create the BatchWriter if we don't already have one.
            if (null == bw) {
                BatchWriterConfig bwConfig = new BatchWriterConfig();
                bwConfig.setMaxMemory(memoryInBytes);
                try {
                    bw = context.getConnector().createBatchWriter(tableName, bwConfig);
                } catch (TableNotFoundException e) {
                    throw new RemoteReplicationException(RemoteReplicationErrorCode.TABLE_DOES_NOT_EXIST, "Table " + tableName + " does not exist");
                }
            }
            log.info("Applying {} mutations to table {} as part of batch", value.mutations.size(), tableName);
            // If we got a ServerMutation, we have to make sure that we preserve the systemTimestamp otherwise
            // the local system will assign a new timestamp.
            List<Mutation> mutationsCopy = new ArrayList<>(value.mutations.size());
            long mutationsCopied = 0L;
            for (Mutation orig : value.mutations) {
                if (orig instanceof ServerMutation) {
                    mutationsCopied++;
                    ServerMutation origServer = (ServerMutation) orig;
                    Mutation copy = new Mutation(orig.getRow());
                    for (ColumnUpdate update : orig.getUpdates()) {
                        long timestamp;
                        // If the update doesn't have a timestamp, pull it from the ServerMutation
                        if (!update.hasTimestamp()) {
                            timestamp = origServer.getSystemTimestamp();
                        } else {
                            timestamp = update.getTimestamp();
                        }
                        // TODO ACCUMULO-2937 cache the CVs
                        if (update.isDeleted()) {
                            copy.putDelete(update.getColumnFamily(), update.getColumnQualifier(), new ColumnVisibility(update.getColumnVisibility()), timestamp);
                        } else {
                            copy.put(update.getColumnFamily(), update.getColumnQualifier(), new ColumnVisibility(update.getColumnVisibility()), timestamp, update.getValue());
                        }
                    }
                    // We also need to preserve the replicationSource information to prevent cycles
                    Set<String> replicationSources = orig.getReplicationSources();
                    if (null != replicationSources && !replicationSources.isEmpty()) {
                        for (String replicationSource : replicationSources) {
                            copy.addReplicationSource(replicationSource);
                        }
                    }
                    mutationsCopy.add(copy);
                } else {
                    mutationsCopy.add(orig);
                }
            }
            log.debug("Copied {} mutations to ensure server-assigned timestamps are propagated", mutationsCopied);
            try {
                bw.addMutations(mutationsCopy);
            } catch (MutationsRejectedException e) {
                log.error("Could not apply mutations to {}", tableName);
                throw new RemoteReplicationException(RemoteReplicationErrorCode.COULD_NOT_APPLY, "Could not apply mutations to " + tableName);
            }
            log.debug("{} mutations added to the BatchScanner", mutationsCopy.size());
            mutationsApplied += mutationsCopy.size();
        }
    } finally {
        if (null != bw) {
            try {
                bw.close();
            } catch (MutationsRejectedException e) {
                log.error("Could not apply mutations to {}", tableName);
                throw new RemoteReplicationException(RemoteReplicationErrorCode.COULD_NOT_APPLY, "Could not apply mutations to " + tableName);
            }
        }
    }
    log.info("Applied {} mutations in total to {}", mutationsApplied, tableName);
    return mutationsApplied;
}
Also used : ColumnUpdate(org.apache.accumulo.core.data.ColumnUpdate) ArrayList(java.util.ArrayList) RemoteReplicationException(org.apache.accumulo.core.replication.thrift.RemoteReplicationException) ServerMutation(org.apache.accumulo.server.data.ServerMutation) LogFileKey(org.apache.accumulo.tserver.logger.LogFileKey) IOException(java.io.IOException) DataInputStream(java.io.DataInputStream) ByteBuffer(java.nio.ByteBuffer) TableNotFoundException(org.apache.accumulo.core.client.TableNotFoundException) LogFileValue(org.apache.accumulo.tserver.logger.LogFileValue) BatchWriterConfig(org.apache.accumulo.core.client.BatchWriterConfig) BatchWriter(org.apache.accumulo.core.client.BatchWriter) Mutation(org.apache.accumulo.core.data.Mutation) ColumnVisibility(org.apache.accumulo.core.security.ColumnVisibility) MutationsRejectedException(org.apache.accumulo.core.client.MutationsRejectedException)
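The heart of the loop above is copying each ServerMutation into a plain Mutation while pinning the server-assigned timestamp, so the receiving cluster does not stamp the data with a new time. The following standalone sketch reduces that idea to its essentials; the row, family, qualifier, and timestamp values are made up for illustration.

import org.apache.accumulo.core.data.ColumnUpdate;
import org.apache.accumulo.core.data.Mutation;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.security.ColumnVisibility;
import org.apache.accumulo.server.data.ServerMutation;
import org.apache.hadoop.io.Text;

public class TimestampPreservingCopy {
    public static void main(String[] args) {
        // A mutation as it might arrive from the source cluster, carrying a system timestamp.
        ServerMutation orig = new ServerMutation(new Text("row1"));
        orig.put(new Text("cf"), new Text("cq"), new Value("v1".getBytes()));
        orig.setSystemTimestamp(42L);

        // Re-put every update with an explicit timestamp so the receiving cluster
        // does not assign a fresh one when the mutation is applied.
        Mutation copy = new Mutation(orig.getRow());
        for (ColumnUpdate update : orig.getUpdates()) {
            long ts = update.hasTimestamp() ? update.getTimestamp() : orig.getSystemTimestamp();
            if (update.isDeleted()) {
                copy.putDelete(update.getColumnFamily(), update.getColumnQualifier(),
                        new ColumnVisibility(update.getColumnVisibility()), ts);
            } else {
                copy.put(update.getColumnFamily(), update.getColumnQualifier(),
                        new ColumnVisibility(update.getColumnVisibility()), ts, update.getValue());
            }
        }
        // The copied update keeps the original system timestamp (42).
        System.out.println(copy.getUpdates().get(0).getTimestamp());
    }
}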

Example 18 with ServerMutation

Use of org.apache.accumulo.server.data.ServerMutation in project accumulo by apache.

The class LogFileValue, method readFields.

@Override
public void readFields(DataInput in) throws IOException {
    int count = in.readInt();
    mutations = new ArrayList<>(count);
    for (int i = 0; i < count; i++) {
        ServerMutation mutation = new ServerMutation();
        mutation.readFields(in);
        mutations.add(mutation);
    }
}
Also used : ServerMutation(org.apache.accumulo.server.data.ServerMutation)
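readFields is the read half of a Writable pair, so it only makes sense next to the matching write path. Below is a small round-trip sketch, assuming the standard Hadoop Writable contract that ServerMutation follows (write serializes the mutation, including its system timestamp, and readFields rebuilds it); it is an illustration, not the LogFileValue write code itself.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

import org.apache.accumulo.server.data.ServerMutation;
import org.apache.hadoop.io.Text;

public class ServerMutationRoundTrip {
    public static void main(String[] args) throws IOException {
        ServerMutation written = new ServerMutation(new Text("row1"));
        written.put("cf", "cq", "value");
        written.setSystemTimestamp(1234L);

        // Serialize one mutation, as the write side of the log would.
        ByteArrayOutputStream buffer = new ByteArrayOutputStream();
        written.write(new DataOutputStream(buffer));

        // Rebuild it the way readFields above does for each entry in the log.
        ServerMutation read = new ServerMutation();
        read.readFields(new DataInputStream(new ByteArrayInputStream(buffer.toByteArray())));
        System.out.println(read.getSystemTimestamp()); // expected: 1234
    }
}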

Example 19 with ServerMutation

Use of org.apache.accumulo.server.data.ServerMutation in project accumulo by apache.

The class SortedLogRecoveryTest, method testMultipleTabletDefinition.

@Test
public void testMultipleTabletDefinition() throws Exception {
    // test for a tablet defined multiple times in a log file
    // there was a bug where the oldest tablet id was used instead
    // of the newest
    Mutation ignored = new ServerMutation(new Text("row1"));
    ignored.put("foo", "bar", "v1");
    Mutation m = new ServerMutation(new Text("row1"));
    m.put("foo", "bar", "v1");
    KeyValue[] entries = new KeyValue[] {
        createKeyValue(OPEN, 0, -1, "1"),
        createKeyValue(DEFINE_TABLET, 1, 1, extent),
        createKeyValue(DEFINE_TABLET, 1, 2, extent),
        createKeyValue(MUTATION, 2, 2, ignored),
        createKeyValue(COMPACTION_START, 3, 2, "/t1/f1"),
        createKeyValue(MUTATION, 4, 2, m),
        createKeyValue(COMPACTION_FINISH, 6, 2, null) };
    Arrays.sort(entries);
    Map<String, KeyValue[]> logs = new TreeMap<>();
    logs.put("entries", entries);
    List<Mutation> mutations = recover(logs, extent);
    Assert.assertEquals(1, mutations.size());
    Assert.assertEquals(m, mutations.get(0));
}
Also used : ServerMutation(org.apache.accumulo.server.data.ServerMutation) Text(org.apache.hadoop.io.Text) Mutation(org.apache.accumulo.core.data.Mutation) TreeMap(java.util.TreeMap) Test(org.junit.Test)
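The comment in the test explains the bug it guards against: when the same extent is redefined in a log, recovery must follow the newest log-local tablet id, because later mutations are recorded under that id. A toy reduction of that rule, purely illustrative and not the SortedLogRecovery implementation:

import java.util.Arrays;
import java.util.List;

public class NewestTabletIdSketch {
    // The same extent is defined twice in the log (tid 1, then tid 2 above);
    // the mutations the test expects to recover were logged under the newer id.
    static int tabletIdToRecover(List<Integer> defineTabletIdsInLogOrder) {
        return defineTabletIdsInLogOrder.get(defineTabletIdsInLogOrder.size() - 1);
    }

    public static void main(String[] args) {
        System.out.println(tabletIdToRecover(Arrays.asList(1, 2))); // 2, not the oldest id 1
    }
}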

Example 20 with ServerMutation

Use of org.apache.accumulo.server.data.ServerMutation in project accumulo by apache.

The class SortedLogRecoveryTest, method testGetMutationsAfterCompactionStart.

@Test
public void testGetMutationsAfterCompactionStart() throws IOException {
    // Create a test log
    Mutation ignored = new ServerMutation(new Text("ignored"));
    ignored.put(cf, cq, value);
    Mutation m = new ServerMutation(new Text("row1"));
    m.put(cf, cq, value);
    Mutation m2 = new ServerMutation(new Text("row2"));
    m2.put(cf, cq, new Value("123".getBytes()));
    KeyValue[] entries = new KeyValue[] {
        createKeyValue(OPEN, 0, -1, "1"),
        createKeyValue(DEFINE_TABLET, 1, 1, extent),
        createKeyValue(COMPACTION_START, 3, 1, "/t1/f1"),
        createKeyValue(MUTATION, 2, 1, ignored),
        createKeyValue(MUTATION, 4, 1, m) };
    KeyValue[] entries2 = new KeyValue[] {
        createKeyValue(OPEN, 5, -1, "1"),
        createKeyValue(DEFINE_TABLET, 6, 1, extent),
        createKeyValue(COMPACTION_FINISH, 7, 1, null),
        createKeyValue(MUTATION, 8, 1, m2) };
    Map<String, KeyValue[]> logs = new TreeMap<>();
    logs.put("entries", entries);
    logs.put("entries2", entries2);
    // Recover
    List<Mutation> mutations = recover(logs, extent);
    // Verify recovered data
    Assert.assertEquals(2, mutations.size());
    Assert.assertEquals(m, mutations.get(0));
    Assert.assertEquals(m2, mutations.get(1));
}
Also used : LogFileValue(org.apache.accumulo.tserver.logger.LogFileValue) Value(org.apache.accumulo.core.data.Value) ServerMutation(org.apache.accumulo.server.data.ServerMutation) Text(org.apache.hadoop.io.Text) Mutation(org.apache.accumulo.core.data.Mutation) TreeMap(java.util.TreeMap) Test(org.junit.Test)
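Both SortedLogRecoveryTest cases encode the same recovery rule: mutations logged before the start of a compaction that later finished are already in minor-compacted files, so only later mutations are replayed. The sketch below reduces that rule to a sequence-number filter using the sequence numbers from the test above; it is illustrative only and not the SortedLogRecovery implementation (in particular, the behavior at exactly the compaction-start sequence is not shown by these tests).

public class RecoveryFilterSketch {
    // Only mutations logged after the start of a finished compaction need replay.
    static boolean replayed(long mutationSeq, long finishedCompactionStartSeq) {
        return mutationSeq > finishedCompactionStartSeq;
    }

    public static void main(String[] args) {
        long compactionStart = 3; // COMPACTION_START at seq 3, finished at seq 7
        System.out.println(replayed(2, compactionStart)); // false: the "ignored" mutation
        System.out.println(replayed(4, compactionStart)); // true: m
        System.out.println(replayed(8, compactionStart)); // true: m2 from the second log
    }
}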

Aggregations

ServerMutation (org.apache.accumulo.server.data.ServerMutation) 30
Test (org.junit.Test) 26
Mutation (org.apache.accumulo.core.data.Mutation) 24
Text (org.apache.hadoop.io.Text) 22
TreeMap (java.util.TreeMap) 17
LogFileKey (org.apache.accumulo.tserver.logger.LogFileKey) 7
LogFileValue (org.apache.accumulo.tserver.logger.LogFileValue) 7
DataOutputStream (java.io.DataOutputStream) 6
ByteArrayOutputStream (java.io.ByteArrayOutputStream) 5
KeyExtent (org.apache.accumulo.core.data.impl.KeyExtent) 5
DataInputStream (java.io.DataInputStream) 4
BatchWriterConfig (org.apache.accumulo.core.client.BatchWriterConfig) 4
Value (org.apache.accumulo.core.data.Value) 4
Status (org.apache.accumulo.server.replication.proto.Replication.Status) 4
Path (org.apache.hadoop.fs.Path) 4
ByteArrayInputStream (java.io.ByteArrayInputStream) 3
HashMap (java.util.HashMap) 3
AccumuloConfiguration (org.apache.accumulo.core.conf.AccumuloConfiguration) 3
ConfigurationCopy (org.apache.accumulo.core.conf.ConfigurationCopy) 3
ReplicationTarget (org.apache.accumulo.core.replication.ReplicationTarget) 3