Example usage of org.apache.accumulo.core.client.AccumuloClient in the Apache Accumulo project: class MergeStats, method main.
/**
 * Command-line entry point: prints the merge state recorded in ZooKeeper for every table
 * in the instance, one row per table (name, state, operation, extent).
 */
public static void main(String[] args) throws Exception {
    ServerUtilOpts opts = new ServerUtilOpts();
    opts.parseArgs(MergeStats.class.getName(), args);
    Span mainSpan = TraceUtil.startSpan(MergeStats.class, "main");
    try (Scope ignored = mainSpan.makeCurrent()) {
        try (AccumuloClient client = Accumulo.newClient().from(opts.getClientProps()).build()) {
            ZooReaderWriter zrw = opts.getServerContext().getZooReaderWriter();
            Map<String, String> idsByName = client.tableOperations().tableIdMap();
            for (Entry<String, String> tableEntry : idsByName.entrySet()) {
                final String tableName = tableEntry.getKey();
                final String tableId = tableEntry.getValue();
                // Merge metadata, if any, lives under the table's ZK node.
                String mergePath = ZooUtil.getRoot(client.instanceOperations().getInstanceId())
                    + Constants.ZTABLES + "/" + tableId + "/merge";
                MergeInfo mergeInfo = new MergeInfo();
                if (zrw.exists(mergePath)) {
                    // Deserialize the stored MergeInfo from the raw ZK bytes.
                    byte[] serialized = zrw.getData(mergePath);
                    DataInputBuffer buffer = new DataInputBuffer();
                    buffer.reset(serialized, serialized.length);
                    mergeInfo.readFields(buffer);
                }
                // Tables with no merge node print a default (empty) MergeInfo.
                System.out.printf("%25s %10s %10s %s%n", tableName, mergeInfo.getState(),
                    mergeInfo.getOperation(), mergeInfo.getExtent());
            }
        }
    } finally {
        mainSpan.end();
    }
}
Example usage of org.apache.accumulo.core.client.AccumuloClient in the Apache Accumulo project: class BinaryStressIT, method resetConfig.
@After
public void resetConfig() throws Exception {
    // majcDelay is only captured by alterConfig on non-MINI clusters; nothing to restore otherwise.
    if (majcDelay == null) {
        return;
    }
    try (AccumuloClient client = Accumulo.newClient().from(getClientProps()).build()) {
        InstanceOperations instanceOps = client.instanceOperations();
        // Restore the tserver settings saved before the test ran.
        instanceOps.setProperty(Property.TSERV_MAJC_DELAY.getKey(), majcDelay);
        instanceOps.setProperty(Property.TSERV_MAXMEM.getKey(), maxMem);
    }
    // Bounce the tablet servers so the restored properties take effect.
    getClusterControl().stopAllServers(ServerType.TABLET_SERVER);
    getClusterControl().startAllServers(ServerType.TABLET_SERVER);
}
Example usage of org.apache.accumulo.core.client.AccumuloClient in the Apache Accumulo project: class BinaryStressIT, method alterConfig.
@Before
public void alterConfig() throws Exception {
    // MINI clusters are configured elsewhere; skip the live reconfiguration.
    if (getClusterType() == ClusterType.MINI) {
        return;
    }
    try (AccumuloClient client = Accumulo.newClient().from(getClientProps()).build()) {
        InstanceOperations instanceOps = client.instanceOperations();
        Map<String, String> sysConfig = instanceOps.getSystemConfiguration();
        // Remember the current values so resetConfig can restore them after the test.
        majcDelay = sysConfig.get(Property.TSERV_MAJC_DELAY.getKey());
        maxMem = sysConfig.get(Property.TSERV_MAXMEM.getKey());
        // Shrink compaction delay and in-memory map size to stress the binary data path.
        instanceOps.setProperty(Property.TSERV_MAJC_DELAY.getKey(), "50ms");
        instanceOps.setProperty(Property.TSERV_MAXMEM.getKey(), "50K");
        // Bounce the tablet servers so the new properties take effect.
        getClusterControl().stopAllServers(ServerType.TABLET_SERVER);
        getClusterControl().startAllServers(ServerType.TABLET_SERVER);
    }
}
Example usage of org.apache.accumulo.core.client.AccumuloClient in the Apache Accumulo project: class BulkFailureIT, method runTest.
/**
 * This test verifies two things. First it ensures that after a bulk imported file is compacted
 * that import request are ignored. Second it ensures that after the bulk import transaction is
 * canceled that import request fail. The public API for bulk import can not be used for this
 * test. Internal (non public API) RPCs and Zookeeper state is manipulated directly. This is the
 * only way to interleave compactions with multiple, duplicate import RPC request.
 *
 * @param table name of the table to create and bulk import into
 * @param fateTxid fate transaction id used for the bulk import arbitrator and load flags
 * @param loader strategy object that issues the (internal) tablet load RPC
 * @throws Exception on any client, RPC, ZooKeeper, or filesystem failure
 */
// Fix 1: the original throws clause listed nine exception types, all subsumed by Exception.
// Fix 2: the BatchDeleter below was never closed (resource leak); it is now in
// try-with-resources.
protected void runTest(String table, long fateTxid, Loader loader) throws Exception {
    try (AccumuloClient c = Accumulo.newClient().from(getClientProps()).build()) {
        SortedMap<Key, Value> testData = createTestData();
        FileSystem fs = getCluster().getFileSystem();
        String testFile = createTestFile(fateTxid, testData, fs);
        c.tableOperations().create(table);
        String tableId = c.tableOperations().tableIdMap().get(table);
        // Table has no splits, so this extent corresponds to the tables single tablet
        KeyExtent extent = new KeyExtent(TableId.of(tableId), null, null);
        ServerContext asCtx = getServerContext();
        // Register the bulk transaction with the arbitrator so the tablet accepts loads for it.
        ZooArbitrator.start(asCtx, Constants.BULK_ARBITRATOR_TYPE, fateTxid);
        VolumeManager vm = asCtx.getVolumeManager();
        // move the file into a directory for the table and rename the file to something unique
        String bulkDir =
            BulkImport.prepareBulkImport(asCtx, vm, testFile, TableId.of(tableId), fateTxid);
        // determine the files new name and path
        FileStatus status = fs.listStatus(new Path(bulkDir))[0];
        Path bulkLoadPath = fs.makeQualified(status.getPath());
        // Directly ask the tablet to load the file.
        loader.load(fateTxid, asCtx, extent, bulkLoadPath, status.getLen(), false);
        assertEquals(Set.of(bulkLoadPath), getFiles(c, extent));
        assertEquals(Set.of(bulkLoadPath), getLoaded(c, extent));
        assertEquals(testData, readTable(table, c));
        // Compact the bulk imported file. Subsequent request to load the file should be ignored.
        c.tableOperations().compact(table, new CompactionConfig().setWait(true));
        Set<Path> tabletFiles = getFiles(c, extent);
        // Compaction replaced the imported file with a new one, but the load flag must remain.
        assertFalse(tabletFiles.contains(bulkLoadPath));
        assertEquals(1, tabletFiles.size());
        assertEquals(Set.of(bulkLoadPath), getLoaded(c, extent));
        assertEquals(testData, readTable(table, c));
        // this request should be ignored by the tablet
        loader.load(fateTxid, asCtx, extent, bulkLoadPath, status.getLen(), false);
        assertEquals(tabletFiles, getFiles(c, extent));
        assertEquals(Set.of(bulkLoadPath), getLoaded(c, extent));
        assertEquals(testData, readTable(table, c));
        // this is done to ensure the tablet reads the load flags from the metadata table when it
        // loads
        c.tableOperations().offline(table, true);
        c.tableOperations().online(table, true);
        // this request should be ignored by the tablet
        loader.load(fateTxid, asCtx, extent, bulkLoadPath, status.getLen(), false);
        assertEquals(tabletFiles, getFiles(c, extent));
        assertEquals(Set.of(bulkLoadPath), getLoaded(c, extent));
        assertEquals(testData, readTable(table, c));
        // After this, all load request should fail.
        ZooArbitrator.stop(asCtx, Constants.BULK_ARBITRATOR_TYPE, fateTxid);
        c.securityOperations().grantTablePermission(c.whoami(), MetadataTable.NAME,
            TablePermission.WRITE);
        // Remove the load flags from the metadata table; close the deleter to release its
        // underlying scanner/writer resources (was leaked in the original).
        try (BatchDeleter bd = c.createBatchDeleter(MetadataTable.NAME, Authorizations.EMPTY, 1)) {
            bd.setRanges(Collections.singleton(extent.toMetaRange()));
            bd.fetchColumnFamily(BulkFileColumnFamily.NAME);
            bd.delete();
        }
        // With the arbitrator stopped and flags gone, this load is expected to fail.
        loader.load(fateTxid, asCtx, extent, bulkLoadPath, status.getLen(), true);
        assertEquals(tabletFiles, getFiles(c, extent));
        assertEquals(Set.of(), getLoaded(c, extent));
        assertEquals(testData, readTable(table, c));
    }
}
Example usage of org.apache.accumulo.core.client.AccumuloClient in the Apache Accumulo project: class BulkNewIT, method testBadLoadPlans.
@Test
public void testBadLoadPlans() throws Exception {
    try (AccumuloClient c = Accumulo.newClient().from(getClientProps()).build()) {
        addSplits(c, tableName, "0333 0666 0999 1333 1666");
        String dir = getDir("/testBulkFile-");
        // Two real files on disk; the plans below deliberately disagree with them.
        writeData(dir + "/f1.", aconf, 0, 333);
        writeData(dir + "/f2.", aconf, 0, 666);
        final var importMappingOptions = c.tableOperations().importDirectory(dir).to(tableName);
        // Plan names a third file (f3.rf) that was never written to dir.
        LoadPlan extraFilePlan = LoadPlan.builder()
            .loadFileTo("f1.rf", RangeType.TABLE, null, row(333))
            .loadFileTo("f2.rf", RangeType.TABLE, null, row(666))
            .loadFileTo("f3.rf", RangeType.TABLE, null, row(666)).build();
        final var tooManyFiles = importMappingOptions.plan(extraFilePlan);
        assertThrows(IllegalArgumentException.class, tooManyFiles::load);
        // Plan omits f2.rf, which does exist in dir.
        LoadPlan missingFilePlan =
            LoadPlan.builder().loadFileTo("f1.rf", RangeType.TABLE, null, row(333)).build();
        final var tooFewFiles = importMappingOptions.plan(missingFilePlan);
        assertThrows(IllegalArgumentException.class, tooFewFiles::load);
        // Plan targets row 555, which is not one of the split points added above.
        LoadPlan badBoundaryPlan = LoadPlan.builder()
            .loadFileTo("f1.rf", RangeType.TABLE, null, row(555))
            .loadFileTo("f2.rf", RangeType.TABLE, null, row(555)).build();
        final var nonExistentBoundary = importMappingOptions.plan(badBoundaryPlan);
        assertThrows(AccumuloException.class, nonExistentBoundary::load);
    }
}
Aggregations