Use of org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl in project accumulo (by apache), in the class ExistingMacIT, method testExistingRunningInstance.
@Test
public void testExistingRunningInstance() throws Exception {
  final String table = getUniqueNames(1)[0];
  Connector conn = getConnector();
  // Ensure that a master and tserver are up so the existing instance check won't fail.
  conn.tableOperations().create(table);
  // try-with-resources so the writer is closed (flushing the mutation) even if
  // addMutation throws; the original leaked the writer on failure.
  try (BatchWriter bw = conn.createBatchWriter(table, new BatchWriterConfig())) {
    Mutation m = new Mutation("foo");
    m.put("cf", "cq", "value");
    bw.addMutation(m);
  }
  File hadoopConfDir = createTestDir(ExistingMacIT.class.getSimpleName() + "_hadoop_conf_2");
  FileUtils.deleteQuietly(hadoopConfDir);
  assertTrue(hadoopConfDir.mkdirs());
  createEmptyConfig(new File(hadoopConfDir, "core-site.xml"));
  createEmptyConfig(new File(hadoopConfDir, "hdfs-site.xml"));
  File testDir2 = createTestDir(ExistingMacIT.class.getSimpleName() + "_3");
  FileUtils.deleteQuietly(testDir2);
  MiniAccumuloConfigImpl macConfig2 = new MiniAccumuloConfigImpl(testDir2, "notused");
  // Point the second MAC at the first MAC's site config so it tries to reuse the
  // already-running instance.
  macConfig2.useExistingInstance(new File(getCluster().getConfig().getConfDir(), "accumulo-site.xml"), hadoopConfDir);
  System.out.println("conf " + new File(getCluster().getConfig().getConfDir(), "accumulo-site.xml"));
  MiniAccumuloClusterImpl accumulo2 = new MiniAccumuloClusterImpl(macConfig2);
  try {
    accumulo2.start();
    Assert.fail("A 2nd MAC instance should not be able to start over an existing MAC instance");
  } catch (RuntimeException e) {
    // Expected: starting over a live instance must fail.
    // TODO check message or throw more explicit exception
  }
}
Use of org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl in project presto (by prestodb), in the class MiniAccumuloConfigUtil, method getConfigImpl.
/**
 * Extracts the package-private {@code MiniAccumuloConfigImpl} wrapped by the public
 * {@link MiniAccumuloConfig} via reflection on its {@code impl} field.
 */
private static MiniAccumuloConfigImpl getConfigImpl(MiniAccumuloConfig config) {
  try {
    Field implField = MiniAccumuloConfig.class.getDeclaredField("impl");
    implField.setAccessible(true);
    Object impl = implField.get(config);
    return (MiniAccumuloConfigImpl) impl;
  } catch (ReflectiveOperationException cause) {
    // The field is an implementation detail; if it vanishes, fail loudly.
    throw new AssertionError(cause);
  }
}
Use of org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl in project accumulo (by apache), in the class ConfigurableMacBase, method createMiniAccumulo.
/**
 * Builds and configures (but does not start) the MiniAccumuloCluster for this test,
 * merging any subclass-supplied core-site.xml overrides into the cluster's conf dir.
 */
private void createMiniAccumulo() throws Exception {
  // createTestDir will give us an empty directory, we don't need to clean it up ourselves
  File baseDir = createTestDir(this.getClass().getName() + "_" + this.testName.getMethodName());
  MiniAccumuloConfigImpl cfg = new MiniAccumuloConfigImpl(baseDir, ROOT_PASSWORD);
  String nativePathInDevTree = NativeMapIT.nativeMapLocation().getAbsolutePath();
  String nativePathInMapReduce = new File(System.getProperty("user.dir")).toString();
  cfg.setNativeLibPaths(nativePathInDevTree, nativePathInMapReduce);
  cfg.setProperty(Property.GC_FILE_ARCHIVE, Boolean.TRUE.toString());
  Configuration coreSite = new Configuration(false);
  cfg.setProperty(Property.TSERV_NATIVEMAP_ENABLED, Boolean.TRUE.toString());
  // Let the concrete test class customize both the MAC config and core-site entries.
  configure(cfg, coreSite);
  configureForEnvironment(cfg, getClass(), getSslDir(baseDir));
  cluster = new MiniAccumuloClusterImpl(cfg);
  if (coreSite.size() > 0) {
    File csFile = new File(cluster.getConfig().getConfDir(), "core-site.xml");
    if (csFile.exists()) {
      // Merge with the core-site.xml MAC already wrote so its settings aren't lost.
      coreSite.addResource(new Path(csFile.getAbsolutePath()));
    }
    // Write to a temp file then rename, so a half-written config is never picked up.
    File tmp = new File(csFile.getAbsolutePath() + ".tmp");
    // try-with-resources: the original leaked the stream if writeXml threw.
    try (OutputStream out = new BufferedOutputStream(new FileOutputStream(tmp))) {
      coreSite.writeXml(out);
    }
    assertTrue(tmp.renameTo(csFile));
  }
  beforeClusterStart(cfg);
}
Use of org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl in project accumulo (by apache), in the class MultiInstanceReplicationIT, method dataReplicatedToCorrectTable.
@Test
public void dataReplicatedToCorrectTable() throws Exception {
  // Stand up a single-tserver peer MAC that shares this cluster's primary config.
  MiniAccumuloConfigImpl peerCfg = new MiniAccumuloConfigImpl(createTestDir(this.getClass().getName() + "_" + this.testName.getMethodName() + "_peer"), ROOT_PASSWORD);
  peerCfg.setNumTservers(1);
  peerCfg.setInstanceName("peer");
  peerCfg.setProperty(Property.REPLICATION_NAME, "peer");
  updatePeerConfigFromPrimary(getCluster().getConfig(), peerCfg);
  MiniAccumuloClusterImpl peer1Cluster = new MiniAccumuloClusterImpl(peerCfg);
  peer1Cluster.start();
  try {
    Connector connMaster = getConnector();
    Connector connPeer = peer1Cluster.getConnector("root", new PasswordToken(ROOT_PASSWORD));
    String peerClusterName = "peer";
    String peerUserName = "peer", peerPassword = "foo";
    // Create local user
    connPeer.securityOperations().createLocalUser(peerUserName, new PasswordToken(peerPassword));
    connMaster.instanceOperations().setProperty(Property.REPLICATION_PEER_USER.getKey() + peerClusterName, peerUserName);
    connMaster.instanceOperations().setProperty(Property.REPLICATION_PEER_PASSWORD.getKey() + peerClusterName, peerPassword);
    // ...peer = AccumuloReplicaSystem,instanceName,zookeepers
    connMaster.instanceOperations().setProperty(Property.REPLICATION_PEERS.getKey() + peerClusterName, ReplicaSystemFactory.getPeerConfigurationValue(AccumuloReplicaSystem.class, AccumuloReplicaSystem.buildConfiguration(peer1Cluster.getInstanceName(), peer1Cluster.getZooKeepers())));
    String masterTable1 = "master1", peerTable1 = "peer1", masterTable2 = "master2", peerTable2 = "peer2";
    // Create tables
    connMaster.tableOperations().create(masterTable1);
    String masterTableId1 = connMaster.tableOperations().tableIdMap().get(masterTable1);
    Assert.assertNotNull(masterTableId1);
    connMaster.tableOperations().create(masterTable2);
    String masterTableId2 = connMaster.tableOperations().tableIdMap().get(masterTable2);
    Assert.assertNotNull(masterTableId2);
    connPeer.tableOperations().create(peerTable1);
    String peerTableId1 = connPeer.tableOperations().tableIdMap().get(peerTable1);
    Assert.assertNotNull(peerTableId1);
    connPeer.tableOperations().create(peerTable2);
    String peerTableId2 = connPeer.tableOperations().tableIdMap().get(peerTable2);
    Assert.assertNotNull(peerTableId2);
    // Grant write permission
    connPeer.securityOperations().grantTablePermission(peerUserName, peerTable1, TablePermission.WRITE);
    connPeer.securityOperations().grantTablePermission(peerUserName, peerTable2, TablePermission.WRITE);
    // Replicate this table to the peerClusterName in a table with the peerTableId table id
    connMaster.tableOperations().setProperty(masterTable1, Property.TABLE_REPLICATION.getKey(), "true");
    connMaster.tableOperations().setProperty(masterTable1, Property.TABLE_REPLICATION_TARGET.getKey() + peerClusterName, peerTableId1);
    connMaster.tableOperations().setProperty(masterTable2, Property.TABLE_REPLICATION.getKey(), "true");
    connMaster.tableOperations().setProperty(masterTable2, Property.TABLE_REPLICATION_TARGET.getKey() + peerClusterName, peerTableId2);
    // Write some data to table1 (try-with-resources guarantees the writer flushes/closes)
    long masterTable1Records = 0L;
    try (BatchWriter bw = connMaster.createBatchWriter(masterTable1, new BatchWriterConfig())) {
      for (int rows = 0; rows < 2500; rows++) {
        Mutation m = new Mutation(masterTable1 + rows);
        for (int cols = 0; cols < 100; cols++) {
          String value = Integer.toString(cols);
          m.put(value, "", value);
          masterTable1Records++;
        }
        bw.addMutation(m);
      }
    }
    // Write some data to table2
    long masterTable2Records = 0L;
    try (BatchWriter bw = connMaster.createBatchWriter(masterTable2, new BatchWriterConfig())) {
      for (int rows = 0; rows < 2500; rows++) {
        Mutation m = new Mutation(masterTable2 + rows);
        for (int cols = 0; cols < 100; cols++) {
          String value = Integer.toString(cols);
          m.put(value, "", value);
          masterTable2Records++;
        }
        bw.addMutation(m);
      }
    }
    log.info("Wrote all data to master cluster");
    Set<String> filesFor1 = connMaster.replicationOperations().referencedFiles(masterTable1), filesFor2 = connMaster.replicationOperations().referencedFiles(masterTable2);
    log.info("Files to replicate for table1: " + filesFor1);
    log.info("Files to replicate for table2: " + filesFor2);
    // Restart the tserver to force a close on the WAL
    for (ProcessReference proc : cluster.getProcesses().get(ServerType.TABLET_SERVER)) {
      cluster.killProcess(ServerType.TABLET_SERVER, proc);
    }
    cluster.exec(TabletServer.class);
    log.info("Restarted the tserver");
    // Read the data -- the tserver is back up and running
    Iterators.size(connMaster.createScanner(masterTable1, Authorizations.EMPTY).iterator());
    while (!ReplicationTable.isOnline(connMaster)) {
      log.info("Replication table still offline, waiting");
      Thread.sleep(5000);
    }
    // Wait for both tables to be replicated
    log.info("Waiting for {} for {}", filesFor1, masterTable1);
    connMaster.replicationOperations().drain(masterTable1, filesFor1);
    log.info("Waiting for {} for {}", filesFor2, masterTable2);
    connMaster.replicationOperations().drain(masterTable2, filesFor2);
    // Every record on the peer must have come from its matching master table.
    long countTable = 0L;
    for (Entry<Key, Value> entry : connPeer.createScanner(peerTable1, Authorizations.EMPTY)) {
      countTable++;
      Assert.assertTrue("Found unexpected key-value" + entry.getKey().toStringNoTruncate() + " " + entry.getValue(), entry.getKey().getRow().toString().startsWith(masterTable1));
    }
    log.info("Found {} records in {}", countTable, peerTable1);
    Assert.assertEquals(masterTable1Records, countTable);
    countTable = 0L;
    for (Entry<Key, Value> entry : connPeer.createScanner(peerTable2, Authorizations.EMPTY)) {
      countTable++;
      Assert.assertTrue("Found unexpected key-value" + entry.getKey().toStringNoTruncate() + " " + entry.getValue(), entry.getKey().getRow().toString().startsWith(masterTable2));
    }
    log.info("Found {} records in {}", countTable, peerTable2);
    Assert.assertEquals(masterTable2Records, countTable);
  } finally {
    // Always tear down the peer MAC, pass or fail.
    peer1Cluster.stop();
  }
}
Use of org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl in project accumulo (by apache), in the class MultiInstanceReplicationIT, method dataWasReplicatedToThePeerWithoutDrain.
@Test
public void dataWasReplicatedToThePeerWithoutDrain() throws Exception {
  // Stand up a single-tserver peer MAC that shares this cluster's primary config.
  MiniAccumuloConfigImpl peerCfg = new MiniAccumuloConfigImpl(createTestDir(this.getClass().getName() + "_" + this.testName.getMethodName() + "_peer"), ROOT_PASSWORD);
  peerCfg.setNumTservers(1);
  peerCfg.setInstanceName("peer");
  peerCfg.setProperty(Property.REPLICATION_NAME, "peer");
  updatePeerConfigFromPrimary(getCluster().getConfig(), peerCfg);
  MiniAccumuloClusterImpl peerCluster = new MiniAccumuloClusterImpl(peerCfg);
  peerCluster.start();
  // try/finally so the peer MAC is stopped even when the test fails midway
  // (matches dataReplicatedToCorrectTable; the original leaked the cluster on failure).
  try {
    Connector connMaster = getConnector();
    Connector connPeer = peerCluster.getConnector("root", new PasswordToken(ROOT_PASSWORD));
    String peerUserName = "repl";
    String peerPassword = "passwd";
    // Create a user on the peer for replication to use
    connPeer.securityOperations().createLocalUser(peerUserName, new PasswordToken(peerPassword));
    String peerClusterName = "peer";
    // ...peer = AccumuloReplicaSystem,instanceName,zookeepers
    connMaster.instanceOperations().setProperty(Property.REPLICATION_PEERS.getKey() + peerClusterName, ReplicaSystemFactory.getPeerConfigurationValue(AccumuloReplicaSystem.class, AccumuloReplicaSystem.buildConfiguration(peerCluster.getInstanceName(), peerCluster.getZooKeepers())));
    // Configure the credentials we should use to authenticate ourselves to the peer for replication
    connMaster.instanceOperations().setProperty(Property.REPLICATION_PEER_USER.getKey() + peerClusterName, peerUserName);
    connMaster.instanceOperations().setProperty(Property.REPLICATION_PEER_PASSWORD.getKey() + peerClusterName, peerPassword);
    String masterTable = "master", peerTable = "peer";
    connMaster.tableOperations().create(masterTable);
    String masterTableId = connMaster.tableOperations().tableIdMap().get(masterTable);
    Assert.assertNotNull(masterTableId);
    connPeer.tableOperations().create(peerTable);
    String peerTableId = connPeer.tableOperations().tableIdMap().get(peerTable);
    Assert.assertNotNull(peerTableId);
    // Give our replication user the ability to write to the table
    connPeer.securityOperations().grantTablePermission(peerUserName, peerTable, TablePermission.WRITE);
    // Replicate this table to the peerClusterName in a table with the peerTableId table id
    connMaster.tableOperations().setProperty(masterTable, Property.TABLE_REPLICATION.getKey(), "true");
    connMaster.tableOperations().setProperty(masterTable, Property.TABLE_REPLICATION_TARGET.getKey() + peerClusterName, peerTableId);
    // Write some data to table1 (try-with-resources guarantees the writer flushes/closes)
    try (BatchWriter bw = connMaster.createBatchWriter(masterTable, new BatchWriterConfig())) {
      for (int rows = 0; rows < 5000; rows++) {
        Mutation m = new Mutation(Integer.toString(rows));
        for (int cols = 0; cols < 100; cols++) {
          String value = Integer.toString(cols);
          m.put(value, "", value);
        }
        bw.addMutation(m);
      }
    }
    log.info("Wrote all data to master cluster");
    Set<String> files = connMaster.replicationOperations().referencedFiles(masterTable);
    log.info("Files to replicate:" + files);
    // Kill and restart the tserver to force the WAL closed, making it replicable.
    for (ProcessReference proc : cluster.getProcesses().get(ServerType.TABLET_SERVER)) {
      cluster.killProcess(ServerType.TABLET_SERVER, proc);
    }
    cluster.exec(TabletServer.class);
    while (!ReplicationTable.isOnline(connMaster)) {
      log.info("Replication table still offline, waiting");
      Thread.sleep(5000);
    }
    // Read the data -- confirms the tserver is back up and running
    Iterators.size(connMaster.createScanner(masterTable, Authorizations.EMPTY).iterator());
    for (Entry<Key, Value> kv : ReplicationTable.getScanner(connMaster)) {
      log.debug("{} {}", kv.getKey().toStringNoTruncate(), ProtobufUtil.toString(Status.parseFrom(kv.getValue().get())));
    }
    connMaster.replicationOperations().drain(masterTable, files);
    // Compare master and peer entry-by-entry; they must match exactly.
    try (Scanner master = connMaster.createScanner(masterTable, Authorizations.EMPTY);
        Scanner peer = connPeer.createScanner(peerTable, Authorizations.EMPTY)) {
      Iterator<Entry<Key, Value>> masterIter = master.iterator(), peerIter = peer.iterator();
      while (masterIter.hasNext() && peerIter.hasNext()) {
        Entry<Key, Value> masterEntry = masterIter.next(), peerEntry = peerIter.next();
        // Fixed message: the original compared peerEntry's key to itself in the text.
        Assert.assertEquals(masterEntry.getKey() + " was not equal to " + peerEntry.getKey(), 0, masterEntry.getKey().compareTo(peerEntry.getKey(), PartialKey.ROW_COLFAM_COLQUAL_COLVIS));
        Assert.assertEquals(masterEntry.getValue(), peerEntry.getValue());
      }
      Assert.assertFalse("Had more data to read from the master", masterIter.hasNext());
      Assert.assertFalse("Had more data to read from the peer", peerIter.hasNext());
    }
  } finally {
    peerCluster.stop();
  }
}
Aggregations