Use of org.apache.accumulo.core.client.BatchWriterConfig in project accumulo by apache.
The class BatchWriterOpts, method getBatchWriterConfig.
public BatchWriterConfig getBatchWriterConfig() {
  BatchWriterConfig config = new BatchWriterConfig();
  config.setMaxWriteThreads(this.batchThreads);
  config.setMaxLatency(this.batchLatency, TimeUnit.MILLISECONDS);
  config.setMaxMemory(this.batchMemory);
  config.setTimeout(this.batchTimeout, TimeUnit.MILLISECONDS);
  return config;
}
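Each setter on BatchWriterConfig returns the config itself, so the same setup can be written as a single fluent chain (the MetadataTableUtil snippet below does exactly that). A minimal sketch with illustrative values, not taken from BatchWriterOpts:

BatchWriterConfig config = new BatchWriterConfig()
    .setMaxWriteThreads(4)                              // concurrent writer threads
    .setMaxLatency(2, TimeUnit.SECONDS)                 // flush buffered mutations at least this often
    .setMaxMemory(64 * 1024 * 1024)                     // buffer up to 64 MB before forcing a flush
    .setTimeout(Long.MAX_VALUE, TimeUnit.MILLISECONDS); // never time out waiting on a server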
Use of org.apache.accumulo.core.client.BatchWriterConfig in project accumulo by apache.
The class MetadataTableUtil, method deleteTable.
public static void deleteTable(Table.ID tableId, boolean insertDeletes, ClientContext context, ZooLock lock) throws AccumuloException, IOException {
  try (Scanner ms = new ScannerImpl(context, MetadataTable.ID, Authorizations.EMPTY);
      BatchWriter bw = new BatchWriterImpl(context, MetadataTable.ID,
          new BatchWriterConfig().setMaxMemory(1000000).setMaxLatency(120000L, TimeUnit.MILLISECONDS).setMaxWriteThreads(2))) {
    // scan metadata for our table and delete everything we find
    Mutation m = null;
    ms.setRange(new KeyExtent(tableId, null, null).toMetadataRange());
    // insert deletes before deleting data from metadata... this makes the code fault tolerant
    if (insertDeletes) {
      ms.fetchColumnFamily(DataFileColumnFamily.NAME);
      TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.fetch(ms);
      for (Entry<Key, Value> cell : ms) {
        Key key = cell.getKey();
        if (key.getColumnFamily().equals(DataFileColumnFamily.NAME)) {
          FileRef ref = new FileRef(VolumeManagerImpl.get(), key);
          bw.addMutation(createDeleteMutation(tableId, ref.meta().toString()));
        }
        if (TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.hasColumns(key)) {
          bw.addMutation(createDeleteMutation(tableId, cell.getValue().toString()));
        }
      }
      bw.flush();
      ms.clearColumns();
    }
    for (Entry<Key, Value> cell : ms) {
      Key key = cell.getKey();
      if (m == null) {
        m = new Mutation(key.getRow());
        if (lock != null)
          putLockID(lock, m);
      }
      if (key.getRow().compareTo(m.getRow(), 0, m.getRow().length) != 0) {
        bw.addMutation(m);
        m = new Mutation(key.getRow());
        if (lock != null)
          putLockID(lock, m);
      }
      m.putDelete(key.getColumnFamily(), key.getColumnQualifier());
    }
    if (m != null)
      bw.addMutation(m);
  }
}
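The second loop in deleteTable batches deletes per row: the scanner returns cells in sorted order, so a new Mutation is started whenever the row changes and the completed one is handed to the BatchWriter. A standalone sketch of the same pattern, assuming an already-configured scanner and writer:

Mutation current = null;
for (Entry<Key, Value> cell : scanner) {
  Key key = cell.getKey();
  if (current != null && !key.getRow().equals(new Text(current.getRow()))) {
    writer.addMutation(current); // row changed; ship the previous row's deletes
    current = null;
  }
  if (current == null)
    current = new Mutation(key.getRow());
  current.putDelete(key.getColumnFamily(), key.getColumnQualifier());
}
if (current != null)
  writer.addMutation(current); // don't drop the final row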
Use of org.apache.accumulo.core.client.BatchWriterConfig in project accumulo by apache.
The class ConfigurableMajorCompactionIT, method writeFile.
private void writeFile(Connector conn, String tableName) throws Exception {
  BatchWriter bw = conn.createBatchWriter(tableName, new BatchWriterConfig());
  Mutation m = new Mutation("row");
  m.put("cf", "cq", "value");
  bw.addMutation(m);
  bw.close();
  conn.tableOperations().flush(tableName, null, null, true);
}
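BatchWriter implements AutoCloseable, so the same write can be expressed with try-with-resources, which flushes and closes the writer even if addMutation throws. A minimal sketch under the same assumptions as writeFile:

try (BatchWriter writer = conn.createBatchWriter(tableName, new BatchWriterConfig())) {
  Mutation m = new Mutation("row");
  m.put("cf", "cq", "value"); // family, qualifier, value
  writer.addMutation(m);
} // close() flushes any buffered mutations
conn.tableOperations().flush(tableName, null, null, true); // then force the minor compaction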
Use of org.apache.accumulo.core.client.BatchWriterConfig in project accumulo by apache.
The class FileArchiveIT, method testDeletedTableIsArchived.
@Test
public void testDeletedTableIsArchived() throws Exception {
  final Connector conn = getConnector();
  final String tableName = getUniqueNames(1)[0];
  conn.tableOperations().create(tableName);
  final Table.ID tableId = Table.ID.of(conn.tableOperations().tableIdMap().get(tableName));
  Assert.assertNotNull("Could not get table ID", tableId);
  BatchWriter bw = conn.createBatchWriter(tableName, new BatchWriterConfig());
  Mutation m = new Mutation("row");
  m.put("", "", "value");
  bw.addMutation(m);
  bw.close();
  // Compact memory to disk
  conn.tableOperations().compact(tableName, null, null, true, true);
  try (Scanner s = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
    s.setRange(MetadataSchema.TabletsSection.getRange(tableId));
    s.fetchColumnFamily(MetadataSchema.TabletsSection.DataFileColumnFamily.NAME);
    Entry<Key, Value> entry = Iterables.getOnlyElement(s);
    final String file = entry.getKey().getColumnQualifier().toString();
    final Path p = new Path(file);
    conn.tableOperations().delete(tableName);
    log.info("File for table: {}", file);
    FileSystem fs = getCluster().getFileSystem();
    int i = 0;
    while (fs.exists(p)) {
      i++;
      Thread.sleep(1000);
      if (0 == i % 10) {
        log.info("Waited {} iterations, file still exists", i);
      }
    }
    log.info("File was removed");
    String filePath = p.toUri().getPath().substring(getCluster().getConfig().getAccumuloDir().toString().length());
    log.info("File relative to accumulo dir: {}", filePath);
    Path fileArchiveDir = new Path(getCluster().getConfig().getAccumuloDir().toString(), ServerConstants.FILE_ARCHIVE_DIR);
    Assert.assertTrue("File archive directory didn't exist", fs.exists(fileArchiveDir));
    // Remove the leading '/' to make sure Path treats the 2nd arg as a child.
    Path archivedFile = new Path(fileArchiveDir, filePath.substring(1));
    Assert.assertTrue("File doesn't exist in archive directory: " + archivedFile, fs.exists(archivedFile));
  }
}
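Both FileArchiveIT tests poll the filesystem with the same sleep loop. A hypothetical helper, not in the original test class, could factor that out; log is assumed to be the class's Logger:

// Hypothetical helper; not part of the original FileArchiveIT.
private void waitUntilRemoved(FileSystem fs, Path p) throws IOException, InterruptedException {
  int i = 0;
  while (fs.exists(p)) { // wait for the garbage collector to archive the file
    i++;
    Thread.sleep(1000);
    if (i % 10 == 0)
      log.info("Waited {} iterations, file still exists", i);
  }
  log.info("File was removed");
}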
Use of org.apache.accumulo.core.client.BatchWriterConfig in project accumulo by apache.
The class FileArchiveIT, method testUnusuedFilesAndDeletedTable.
@Test
public void testUnusuedFilesAndDeletedTable() throws Exception {
  final Connector conn = getConnector();
  final String tableName = getUniqueNames(1)[0];
  conn.tableOperations().create(tableName);
  final Table.ID tableId = Table.ID.of(conn.tableOperations().tableIdMap().get(tableName));
  Assert.assertNotNull("Could not get table ID", tableId);
  BatchWriter bw = conn.createBatchWriter(tableName, new BatchWriterConfig());
  Mutation m = new Mutation("row");
  m.put("", "", "value");
  bw.addMutation(m);
  bw.close();
  // Compact memory to disk
  conn.tableOperations().compact(tableName, null, null, true, true);
  Entry<Key, Value> entry;
  Path fileArchiveDir;
  FileSystem fs;
  int i = 0;
  try (Scanner s = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
    s.setRange(MetadataSchema.TabletsSection.getRange(tableId));
    s.fetchColumnFamily(MetadataSchema.TabletsSection.DataFileColumnFamily.NAME);
    entry = Iterables.getOnlyElement(s);
    final String file = entry.getKey().getColumnQualifier().toString();
    final Path p = new Path(file);
    // Then force another to make an unreferenced file
    conn.tableOperations().compact(tableName, null, null, true, true);
    log.info("File for table: {}", file);
    fs = getCluster().getFileSystem();
    while (fs.exists(p)) {
      i++;
      Thread.sleep(1000);
      if (0 == i % 10) {
        log.info("Waited {} iterations, file still exists", i);
      }
    }
    log.info("File was removed");
    String filePath = p.toUri().getPath().substring(getCluster().getConfig().getAccumuloDir().toString().length());
    log.info("File relative to accumulo dir: {}", filePath);
    fileArchiveDir = new Path(getCluster().getConfig().getAccumuloDir().toString(), ServerConstants.FILE_ARCHIVE_DIR);
    Assert.assertTrue("File archive directory didn't exist", fs.exists(fileArchiveDir));
    // Remove the leading '/' to make sure Path treats the 2nd arg as a child.
    Path archivedFile = new Path(fileArchiveDir, filePath.substring(1));
    Assert.assertTrue("File doesn't exist in archive directory: " + archivedFile, fs.exists(archivedFile));
    // Offline the table so we can be sure there is a single file
    conn.tableOperations().offline(tableName, true);
  }
  // Check the file currently referenced in metadata
  try (Scanner s = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
    s.setRange(MetadataSchema.TabletsSection.getRange(tableId));
    s.fetchColumnFamily(MetadataSchema.TabletsSection.DataFileColumnFamily.NAME);
    entry = Iterables.getOnlyElement(s);
    final String finalFile = entry.getKey().getColumnQualifier().toString();
    final Path finalPath = new Path(finalFile);
    conn.tableOperations().delete(tableName);
    log.info("File for table: {}", finalPath);
    i = 0;
    while (fs.exists(finalPath)) {
      i++;
      Thread.sleep(1000);
      if (0 == i % 10) {
        log.info("Waited {} iterations, file still exists", i);
      }
    }
    log.info("File was removed");
    String finalFilePath = finalPath.toUri().getPath().substring(getCluster().getConfig().getAccumuloDir().toString().length());
    log.info("File relative to accumulo dir: {}", finalFilePath);
    Assert.assertTrue("File archive directory didn't exist", fs.exists(fileArchiveDir));
    // Remove the leading '/' to make sure Path treats the 2nd arg as a child.
    Path finalArchivedFile = new Path(fileArchiveDir, finalFilePath.substring(1));
    Assert.assertTrue("File doesn't exist in archive directory: " + finalArchivedFile, fs.exists(finalArchivedFile));
  }
}