
Example 66 with AccumuloConfiguration

Use of org.apache.accumulo.core.conf.AccumuloConfiguration in project accumulo by apache.

From class ZooCachePropertyAccessorTest, method testGetProperties_NoChildren.

@Test
public void testGetProperties_NoChildren() {
    Map<String, String> props = new java.util.HashMap<>();
    AccumuloConfiguration parent = createMock(AccumuloConfiguration.class);
    Predicate<String> filter = createMock(Predicate.class);
    parent.getProperties(props, filter);
    replay(parent);
    expect(zc.getChildren(PATH)).andReturn(null);
    replay(zc);
    a.getProperties(props, PATH, filter, parent, null);
    assertEquals(0, props.size());
}
Also used: AccumuloConfiguration (org.apache.accumulo.core.conf.AccumuloConfiguration), Test (org.junit.Test)

Example 67 with AccumuloConfiguration

Use of org.apache.accumulo.core.conf.AccumuloConfiguration in project accumulo by apache.

From class ZooCachePropertyAccessorTest, method testGetProperties_ParentFilter.

@Test
public void testGetProperties_ParentFilter() {
    Map<String, String> props = new java.util.HashMap<>();
    AccumuloConfiguration parent = createMock(AccumuloConfiguration.class);
    Predicate<String> filter = createMock(Predicate.class);
    Predicate<String> parentFilter = createMock(Predicate.class);
    parent.getProperties(props, parentFilter);
    replay(parent);
    expect(zc.getChildren(PATH)).andReturn(null);
    replay(zc);
    a.getProperties(props, PATH, filter, parent, parentFilter);
    verify(parent);
}
Also used: AccumuloConfiguration (org.apache.accumulo.core.conf.AccumuloConfiguration), Test (org.junit.Test)
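A minimal sketch of the fixture these two ZooCachePropertyAccessorTest methods assume (the wiring below is a hypothetical reconstruction for illustration, not the project's code): zc is a mocked ZooCache, a wraps it, and PATH is the ZooKeeper node the tests read from.

// Assumed fixture for the two tests above; the names zc, a, and PATH come from the
// snippets, but the setup itself is a hypothetical reconstruction.
private static final String PATH = "/root/path"; // hypothetical property path in ZooKeeper
private ZooCache zc;
private ZooCachePropertyAccessor a;

@Before
public void setUp() {
    zc = createMock(ZooCache.class);
    a = new ZooCachePropertyAccessor(zc);
}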

Example 68 with AccumuloConfiguration

Use of org.apache.accumulo.core.conf.AccumuloConfiguration in project accumulo by apache.

From class SequentialWorkAssignerTest, method init.

@Before
public void init() {
    AccumuloConfiguration conf = createMock(AccumuloConfiguration.class);
    client = createMock(AccumuloClient.class);
    assigner = new SequentialWorkAssigner(conf, client);
}
Also used: AccumuloClient (org.apache.accumulo.core.client.AccumuloClient), AccumuloConfiguration (org.apache.accumulo.core.conf.AccumuloConfiguration), Before (org.junit.Before)

Example 69 with AccumuloConfiguration

Use of org.apache.accumulo.core.conf.AccumuloConfiguration in project accumulo by apache.

From class AccumuloReplicaSystemTest, method restartInFileKnowsAboutPreviousTableDefines.

@Test
public void restartInFileKnowsAboutPreviousTableDefines() throws Exception {
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    DataOutputStream dos = new DataOutputStream(baos);
    LogFileKey key = new LogFileKey();
    LogFileValue value = new LogFileValue();
    // What is seq used for?
    key.seq = 1L;
    /*
     * Disclaimer: the following series of LogFileKey and LogFileValue pairs have *no* bearing
     * whatsoever in reality regarding what these entries would actually look like in a WAL. They
     * are solely for testing that each LogEvents value is handled; order is not important.
     */
    key.event = LogEvents.DEFINE_TABLET;
    key.tablet = new KeyExtent(TableId.of("1"), null, null);
    key.tabletId = 1;
    key.write(dos);
    value.write(dos);
    key.tablet = null;
    key.event = LogEvents.MUTATION;
    key.filename = "/accumulo/wals/tserver+port/" + UUID.randomUUID();
    value.mutations = Arrays.asList(new ServerMutation(new Text("row")));
    key.write(dos);
    value.write(dos);
    key.tablet = null;
    key.event = LogEvents.MUTATION;
    key.tabletId = 1;
    key.filename = "/accumulo/wals/tserver+port/" + UUID.randomUUID();
    value.mutations = Arrays.asList(new ServerMutation(new Text("row")));
    key.write(dos);
    value.write(dos);
    dos.close();
    Map<String, String> confMap = new HashMap<>();
    confMap.put(Property.REPLICATION_NAME.getKey(), "source");
    AccumuloConfiguration conf = new ConfigurationCopy(confMap);
    AccumuloReplicaSystem ars = new AccumuloReplicaSystem();
    ars.setConf(conf);
    Status status = Status.newBuilder().setBegin(0).setEnd(0).setInfiniteEnd(true).setClosed(false).build();
    DataInputStream dis = new DataInputStream(new ByteArrayInputStream(baos.toByteArray()));
    HashSet<Integer> tids = new HashSet<>();
    // Only consume the first mutation, not the second
    WalReplication repl = ars.getWalEdits(new ReplicationTarget("peer", "1", TableId.of("1")), dis, new Path("/accumulo/wals/tserver+port/wal"), status, 1L, tids);
    // We stopped because we got to the end of the file
    assertEquals(2, repl.entriesConsumed);
    assertEquals(1, repl.walEdits.getEditsSize());
    assertEquals(1, repl.sizeInRecords);
    assertNotEquals(0, repl.sizeInBytes);
    status = Status.newBuilder(status).setBegin(2).build();
    // Consume the rest of the mutations
    repl = ars.getWalEdits(new ReplicationTarget("peer", "1", TableId.of("1")), dis, new Path("/accumulo/wals/tserver+port/wal"), status, 1L, tids);
    // We stopped because we got to the end of the file
    assertEquals(1, repl.entriesConsumed);
    assertEquals(1, repl.walEdits.getEditsSize());
    assertEquals(1, repl.sizeInRecords);
    assertNotEquals(0, repl.sizeInBytes);
}
Also used: Status (org.apache.accumulo.server.replication.proto.Replication.Status), Path (org.apache.hadoop.fs.Path), ConfigurationCopy (org.apache.accumulo.core.conf.ConfigurationCopy), HashMap (java.util.HashMap), DataOutputStream (java.io.DataOutputStream), ServerMutation (org.apache.accumulo.server.data.ServerMutation), Text (org.apache.hadoop.io.Text), ByteArrayOutputStream (java.io.ByteArrayOutputStream), LogFileKey (org.apache.accumulo.tserver.logger.LogFileKey), DataInputStream (java.io.DataInputStream), KeyExtent (org.apache.accumulo.core.dataImpl.KeyExtent), ReplicationTarget (org.apache.accumulo.core.replication.ReplicationTarget), ByteArrayInputStream (java.io.ByteArrayInputStream), LogFileValue (org.apache.accumulo.tserver.logger.LogFileValue), AccumuloConfiguration (org.apache.accumulo.core.conf.AccumuloConfiguration), HashSet (java.util.HashSet), Test (org.junit.Test)
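The Status offsets drive the two getWalEdits calls above. As a hedged restatement of the assertions (assumed semantics, not additional project code): the first pass starts at begin 0 and consumes two entries, the DEFINE_TABLET record plus the first MUTATION, yielding one edit; advancing begin past those two entries leaves only the final MUTATION for the second pass.

// Illustration of the offset bookkeeping, mirroring the test above (assumed semantics).
Status first = Status.newBuilder().setBegin(0).setEnd(0).setInfiniteEnd(true).setClosed(false).build();
// getWalEdits(..., first, ...) -> entriesConsumed == 2 (DEFINE_TABLET + first MUTATION), 1 edit
Status advanced = Status.newBuilder(first).setBegin(first.getBegin() + 2).build();
// getWalEdits(..., advanced, ...) -> entriesConsumed == 1 (the remaining MUTATION), 1 edit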

Example 70 with AccumuloConfiguration

Use of org.apache.accumulo.core.conf.AccumuloConfiguration in project accumulo by apache.

From class AccumuloReplicaSystemTest, method endOfFileExceptionOnOpenWalImpliesMoreReplication.

@Test
public void endOfFileExceptionOnOpenWalImpliesMoreReplication() throws Exception {
    Map<String, String> confMap = new HashMap<>();
    confMap.put(Property.REPLICATION_NAME.getKey(), "source");
    AccumuloConfiguration conf = new ConfigurationCopy(confMap);
    AccumuloReplicaSystem ars = new AccumuloReplicaSystem();
    ars.setConf(conf);
    // The file is still open (closed = false) with an infinite end, so hitting end-of-file
    // does not mean the data is fully replicated; more data could still be appended that we
    // need to process
    Status status = Status.newBuilder().setBegin(100).setEnd(0).setInfiniteEnd(true).setClosed(false).build();
    DataInputStream dis = new DataInputStream(new ByteArrayInputStream(new byte[0]));
    WalReplication repl = ars.getWalEdits(new ReplicationTarget("peer", "1", TableId.of("1")), dis, new Path("/accumulo/wals/tserver+port/wal"), status, Long.MAX_VALUE, new HashSet<>());
    // We stopped because we got to the end of the file
    assertEquals(0, repl.entriesConsumed);
    assertEquals(0, repl.walEdits.getEditsSize());
    assertEquals(0, repl.sizeInRecords);
    assertEquals(0, repl.sizeInBytes);
}
Also used: Status (org.apache.accumulo.server.replication.proto.Replication.Status), Path (org.apache.hadoop.fs.Path), ConfigurationCopy (org.apache.accumulo.core.conf.ConfigurationCopy), HashMap (java.util.HashMap), DataInputStream (java.io.DataInputStream), ReplicationTarget (org.apache.accumulo.core.replication.ReplicationTarget), ByteArrayInputStream (java.io.ByteArrayInputStream), AccumuloConfiguration (org.apache.accumulo.core.conf.AccumuloConfiguration), Test (org.junit.Test)
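A hedged sketch of how a caller might interpret this zeroed result (the check below is an assumption for illustration, not the project's API): end-of-file on a WAL that is still open with an infinite end means nothing has been replicated yet but more data may arrive, so the Status is left unchanged and the file is revisited later.

// Hedged sketch, not project code: interpreting a zeroed WalReplication after EOF.
if (repl.entriesConsumed == 0 && status.getInfiniteEnd() && !status.getClosed()) {
    // The WAL is still open: keep the Status as-is and retry this file later,
    // since more mutations may still be appended to it.
}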

Aggregations

AccumuloConfiguration (org.apache.accumulo.core.conf.AccumuloConfiguration): 164
Test (org.junit.Test): 51
Path (org.apache.hadoop.fs.Path): 44
IOException (java.io.IOException): 31
ConfigurationCopy (org.apache.accumulo.core.conf.ConfigurationCopy): 31
Configuration (org.apache.hadoop.conf.Configuration): 27
HashMap (java.util.HashMap): 24
ArrayList (java.util.ArrayList): 23
Key (org.apache.accumulo.core.data.Key): 23
FileSystem (org.apache.hadoop.fs.FileSystem): 22
Value (org.apache.accumulo.core.data.Value): 21
ServerContext (org.apache.accumulo.server.ServerContext): 18
Property (org.apache.accumulo.core.conf.Property): 16
DefaultConfiguration (org.apache.accumulo.core.conf.DefaultConfiguration): 15
HostAndPort (org.apache.accumulo.core.util.HostAndPort): 15
Map (java.util.Map): 12
ByteArrayOutputStream (java.io.ByteArrayOutputStream): 11
DataInputStream (java.io.DataInputStream): 11
SamplerConfiguration (org.apache.accumulo.core.client.sample.SamplerConfiguration): 11
SamplerConfigurationImpl (org.apache.accumulo.core.sample.impl.SamplerConfigurationImpl): 11