Example 51 with BatchWriterConfig

use of org.apache.accumulo.core.client.BatchWriterConfig in project accumulo by apache.

the class BatchWriterOpts method getBatchWriterConfig.

public BatchWriterConfig getBatchWriterConfig() {
    BatchWriterConfig config = new BatchWriterConfig();
    // maximum number of threads used to send mutations to tablet servers
    config.setMaxWriteThreads(this.batchThreads);
    // maximum time to hold mutations in memory before sending them
    config.setMaxLatency(this.batchLatency, TimeUnit.MILLISECONDS);
    // maximum bytes to buffer client-side before a write is forced
    config.setMaxMemory(this.batchMemory);
    // maximum time to keep retrying an unresponsive tablet server
    config.setTimeout(this.batchTimeout, TimeUnit.MILLISECONDS);
    return config;
}
Also used : BatchWriterConfig(org.apache.accumulo.core.client.BatchWriterConfig)
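
A config built this way is typically handed to Connector.createBatchWriter, as the later examples do. Below is a minimal, self-contained sketch of that hand-off; the tuning values are illustrative, not taken from BatchWriterOpts, and the connector and table are assumed to exist already.

import java.util.concurrent.TimeUnit;

import org.apache.accumulo.core.client.BatchWriter;
import org.apache.accumulo.core.client.BatchWriterConfig;
import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.data.Mutation;

public class TunedWriteSketch {
    // Write a single mutation through a writer with explicit tuning.
    static void writeWithTunedConfig(Connector conn, String tableName) throws Exception {
        BatchWriterConfig config = new BatchWriterConfig()
            // illustrative values; appropriate settings depend on the workload
            .setMaxWriteThreads(4)
            .setMaxLatency(30, TimeUnit.SECONDS)
            .setMaxMemory(10 * 1024 * 1024);
        try (BatchWriter bw = conn.createBatchWriter(tableName, config)) {
            Mutation m = new Mutation("row");
            m.put("cf", "cq", "value");
            bw.addMutation(m);
        } // close() flushes any mutations still buffered client-side
    }
}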

Example 52 with BatchWriterConfig

use of org.apache.accumulo.core.client.BatchWriterConfig in project accumulo by apache.

the class MetadataTableUtil method deleteTable.

public static void deleteTable(Table.ID tableId, boolean insertDeletes, ClientContext context, ZooLock lock) throws AccumuloException, IOException {
    try (Scanner ms = new ScannerImpl(context, MetadataTable.ID, Authorizations.EMPTY);
        BatchWriter bw = new BatchWriterImpl(context, MetadataTable.ID,
            new BatchWriterConfig().setMaxMemory(1000000)
                .setMaxLatency(120000L, TimeUnit.MILLISECONDS).setMaxWriteThreads(2))) {
        // scan metadata for our table and delete everything we find
        Mutation m = null;
        ms.setRange(new KeyExtent(tableId, null, null).toMetadataRange());
        // insert deletes before deleting data from metadata... this makes the code fault tolerant
        if (insertDeletes) {
            ms.fetchColumnFamily(DataFileColumnFamily.NAME);
            TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.fetch(ms);
            for (Entry<Key, Value> cell : ms) {
                Key key = cell.getKey();
                if (key.getColumnFamily().equals(DataFileColumnFamily.NAME)) {
                    FileRef ref = new FileRef(VolumeManagerImpl.get(), key);
                    bw.addMutation(createDeleteMutation(tableId, ref.meta().toString()));
                }
                if (TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.hasColumns(key)) {
                    bw.addMutation(createDeleteMutation(tableId, cell.getValue().toString()));
                }
            }
            bw.flush();
            ms.clearColumns();
        }
        for (Entry<Key, Value> cell : ms) {
            Key key = cell.getKey();
            if (m == null) {
                m = new Mutation(key.getRow());
                if (lock != null)
                    putLockID(lock, m);
            }
            if (key.getRow().compareTo(m.getRow(), 0, m.getRow().length) != 0) {
                bw.addMutation(m);
                m = new Mutation(key.getRow());
                if (lock != null)
                    putLockID(lock, m);
            }
            m.putDelete(key.getColumnFamily(), key.getColumnQualifier());
        }
        if (m != null)
            bw.addMutation(m);
    }
}
Also used : IsolatedScanner(org.apache.accumulo.core.client.IsolatedScanner) Scanner(org.apache.accumulo.core.client.Scanner) ScannerImpl(org.apache.accumulo.core.client.impl.ScannerImpl) FileRef(org.apache.accumulo.server.fs.FileRef) BatchWriterImpl(org.apache.accumulo.core.client.impl.BatchWriterImpl) Value(org.apache.accumulo.core.data.Value) DataFileValue(org.apache.accumulo.core.metadata.schema.DataFileValue) BatchWriterConfig(org.apache.accumulo.core.client.BatchWriterConfig) BatchWriter(org.apache.accumulo.core.client.BatchWriter) Mutation(org.apache.accumulo.core.data.Mutation) KeyExtent(org.apache.accumulo.core.data.impl.KeyExtent) Key(org.apache.accumulo.core.data.Key) PartialKey(org.apache.accumulo.core.data.PartialKey)
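
The loop above uses a row-boundary batching pattern: deletes for the same row are grouped into a single Mutation, which is handed to the BatchWriter whenever the scan crosses into a new row. A minimal sketch of just that pattern, assuming a Scanner already positioned over the entries to delete:

import java.util.Map.Entry;

import org.apache.accumulo.core.client.BatchWriter;
import org.apache.accumulo.core.client.MutationsRejectedException;
import org.apache.accumulo.core.client.Scanner;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Mutation;
import org.apache.accumulo.core.data.Value;
import org.apache.hadoop.io.Text;

public class RowBatchedDeletes {
    static void deleteAll(Scanner scanner, BatchWriter bw) throws MutationsRejectedException {
        Mutation m = null;
        Text currentRow = null;
        for (Entry<Key, Value> cell : scanner) {
            Key key = cell.getKey();
            // start a new mutation whenever the scan crosses a row boundary
            if (m == null || !key.getRow().equals(currentRow)) {
                if (m != null)
                    bw.addMutation(m);
                currentRow = key.getRow();
                m = new Mutation(currentRow);
            }
            m.putDelete(key.getColumnFamily(), key.getColumnQualifier());
        }
        if (m != null)
            bw.addMutation(m);
    }
}

Grouping per-row deletes into one Mutation keeps each row's changes atomic and reduces the number of objects the writer has to buffer.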

Example 53 with BatchWriterConfig

use of org.apache.accumulo.core.client.BatchWriterConfig in project accumulo by apache.

the class ConfigurableMajorCompactionIT method writeFile.

private void writeFile(Connector conn, String tableName) throws Exception {
    BatchWriter bw = conn.createBatchWriter(tableName, new BatchWriterConfig());
    Mutation m = new Mutation("row");
    m.put("cf", "cq", "value");
    bw.addMutation(m);
    bw.close();
    conn.tableOperations().flush(tableName, null, null, true);
}
Also used : BatchWriterConfig(org.apache.accumulo.core.client.BatchWriterConfig) BatchWriter(org.apache.accumulo.core.client.BatchWriter) Mutation(org.apache.accumulo.core.data.Mutation)
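
A hypothetical read-back companion to writeFile, useful for sanity-checking the flush in a test. The name readFile is illustrative, and Authorizations.EMPTY assumes the data carries no visibility labels:

import java.util.Map.Entry;

import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.client.Scanner;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.security.Authorizations;

public class ReadBackSketch {
    // Print every entry in the table written by writeFile.
    static void readFile(Connector conn, String tableName) throws Exception {
        try (Scanner s = conn.createScanner(tableName, Authorizations.EMPTY)) {
            for (Entry<Key, Value> e : s) {
                System.out.println(e.getKey() + " -> " + e.getValue());
            }
        }
    }
}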

Example 54 with BatchWriterConfig

use of org.apache.accumulo.core.client.BatchWriterConfig in project accumulo by apache.

the class FileArchiveIT method testDeletedTableIsArchived.

@Test
public void testDeletedTableIsArchived() throws Exception {
    final Connector conn = getConnector();
    final String tableName = getUniqueNames(1)[0];
    conn.tableOperations().create(tableName);
    final Table.ID tableId = Table.ID.of(conn.tableOperations().tableIdMap().get(tableName));
    Assert.assertNotNull("Could not get table ID", tableId);
    BatchWriter bw = conn.createBatchWriter(tableName, new BatchWriterConfig());
    Mutation m = new Mutation("row");
    m.put("", "", "value");
    bw.addMutation(m);
    bw.close();
    // Compact memory to disk
    conn.tableOperations().compact(tableName, null, null, true, true);
    try (Scanner s = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
        s.setRange(MetadataSchema.TabletsSection.getRange(tableId));
        s.fetchColumnFamily(MetadataSchema.TabletsSection.DataFileColumnFamily.NAME);
        Entry<Key, Value> entry = Iterables.getOnlyElement(s);
        final String file = entry.getKey().getColumnQualifier().toString();
        final Path p = new Path(file);
        conn.tableOperations().delete(tableName);
        log.info("File for table: {}", file);
        FileSystem fs = getCluster().getFileSystem();
        int i = 0;
        while (fs.exists(p)) {
            i++;
            Thread.sleep(1000);
            if (0 == i % 10) {
                log.info("Waited {} iterations, file still exists", i);
            }
        }
        log.info("File was removed");
        String filePath = p.toUri().getPath().substring(getCluster().getConfig().getAccumuloDir().toString().length());
        log.info("File relative to accumulo dir: {}", filePath);
        Path fileArchiveDir = new Path(getCluster().getConfig().getAccumuloDir().toString(), ServerConstants.FILE_ARCHIVE_DIR);
        Assert.assertTrue("File archive directory didn't exist", fs.exists(fileArchiveDir));
        // Remove the leading '/' to make sure Path treats the 2nd arg as a child.
        Path archivedFile = new Path(fileArchiveDir, filePath.substring(1));
        Assert.assertTrue("File doesn't exists in archive directory: " + archivedFile, fs.exists(archivedFile));
    }
}
Also used : Path(org.apache.hadoop.fs.Path) Connector(org.apache.accumulo.core.client.Connector) Scanner(org.apache.accumulo.core.client.Scanner) Table(org.apache.accumulo.core.client.impl.Table) MetadataTable(org.apache.accumulo.core.metadata.MetadataTable) FileSystem(org.apache.hadoop.fs.FileSystem) Value(org.apache.accumulo.core.data.Value) BatchWriterConfig(org.apache.accumulo.core.client.BatchWriterConfig) BatchWriter(org.apache.accumulo.core.client.BatchWriter) Mutation(org.apache.accumulo.core.data.Mutation) Key(org.apache.accumulo.core.data.Key) Test(org.junit.Test)
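
The unbounded while (fs.exists(p)) poll above can hang the test forever if garbage collection never removes the file. A hedged variant with a deadline; the helper name and timeout parameter are assumptions, not part of the test:

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class WaitForRemoval {
    // Poll once per second until the file disappears or the deadline passes.
    static boolean waitForRemoval(FileSystem fs, Path p, long timeoutMillis) throws Exception {
        long deadline = System.currentTimeMillis() + timeoutMillis;
        while (fs.exists(p)) {
            if (System.currentTimeMillis() > deadline) {
                return false; // file still present after the timeout
            }
            Thread.sleep(1000);
        }
        return true;
    }
}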

Example 55 with BatchWriterConfig

use of org.apache.accumulo.core.client.BatchWriterConfig in project accumulo by apache.

the class FileArchiveIT method testUnusuedFilesAndDeletedTable.

@Test
public void testUnusuedFilesAndDeletedTable() throws Exception {
    final Connector conn = getConnector();
    final String tableName = getUniqueNames(1)[0];
    conn.tableOperations().create(tableName);
    final Table.ID tableId = Table.ID.of(conn.tableOperations().tableIdMap().get(tableName));
    Assert.assertNotNull("Could not get table ID", tableId);
    BatchWriter bw = conn.createBatchWriter(tableName, new BatchWriterConfig());
    Mutation m = new Mutation("row");
    m.put("", "", "value");
    bw.addMutation(m);
    bw.close();
    // Compact memory to disk
    conn.tableOperations().compact(tableName, null, null, true, true);
    Entry<Key, Value> entry;
    Path fileArchiveDir;
    FileSystem fs;
    int i = 0;
    try (Scanner s = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
        s.setRange(MetadataSchema.TabletsSection.getRange(tableId));
        s.fetchColumnFamily(MetadataSchema.TabletsSection.DataFileColumnFamily.NAME);
        entry = Iterables.getOnlyElement(s);
        final String file = entry.getKey().getColumnQualifier().toString();
        final Path p = new Path(file);
        // Then force another compaction to make the current file unreferenced
        conn.tableOperations().compact(tableName, null, null, true, true);
        log.info("File for table: {}", file);
        fs = getCluster().getFileSystem();
        while (fs.exists(p)) {
            i++;
            Thread.sleep(1000);
            if (0 == i % 10) {
                log.info("Waited {} iterations, file still exists", i);
            }
        }
        log.info("File was removed");
        String filePath = p.toUri().getPath().substring(getCluster().getConfig().getAccumuloDir().toString().length());
        log.info("File relative to accumulo dir: {}", filePath);
        fileArchiveDir = new Path(getCluster().getConfig().getAccumuloDir().toString(), ServerConstants.FILE_ARCHIVE_DIR);
        Assert.assertTrue("File archive directory didn't exist", fs.exists(fileArchiveDir));
        // Remove the leading '/' to make sure Path treats the 2nd arg as a child.
        Path archivedFile = new Path(fileArchiveDir, filePath.substring(1));
        Assert.assertTrue("File doesn't exists in archive directory: " + archivedFile, fs.exists(archivedFile));
        // Offline the table so we can be sure there is a single file
        conn.tableOperations().offline(tableName, true);
    }
    // Find the file currently referenced in metadata, then delete the table and verify the file is archived
    try (Scanner s = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
        s.setRange(MetadataSchema.TabletsSection.getRange(tableId));
        s.fetchColumnFamily(MetadataSchema.TabletsSection.DataFileColumnFamily.NAME);
        entry = Iterables.getOnlyElement(s);
        final String finalFile = entry.getKey().getColumnQualifier().toString();
        final Path finalPath = new Path(finalFile);
        conn.tableOperations().delete(tableName);
        log.info("File for table: {}", finalPath);
        i = 0;
        while (fs.exists(finalPath)) {
            i++;
            Thread.sleep(1000);
            if (0 == i % 10) {
                log.info("Waited {} iterations, file still exists", i);
            }
        }
        log.info("File was removed");
        String finalFilePath = finalPath.toUri().getPath().substring(getCluster().getConfig().getAccumuloDir().toString().length());
        log.info("File relative to accumulo dir: {}", finalFilePath);
        Assert.assertTrue("File archive directory didn't exist", fs.exists(fileArchiveDir));
        // Remove the leading '/' to make sure Path treats the 2nd arg as a child.
        Path finalArchivedFile = new Path(fileArchiveDir, finalFilePath.substring(1));
        Assert.assertTrue("File doesn't exists in archive directory: " + finalArchivedFile, fs.exists(finalArchivedFile));
    }
}
Also used : Path(org.apache.hadoop.fs.Path) Connector(org.apache.accumulo.core.client.Connector) Scanner(org.apache.accumulo.core.client.Scanner) Table(org.apache.accumulo.core.client.impl.Table) MetadataTable(org.apache.accumulo.core.metadata.MetadataTable) FileSystem(org.apache.hadoop.fs.FileSystem) Value(org.apache.accumulo.core.data.Value) BatchWriterConfig(org.apache.accumulo.core.client.BatchWriterConfig) BatchWriter(org.apache.accumulo.core.client.BatchWriter) Mutation(org.apache.accumulo.core.data.Mutation) Key(org.apache.accumulo.core.data.Key) Test(org.junit.Test)
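
Both FileArchiveIT tests repeat the same archive-path arithmetic: strip the accumulo directory prefix from the file's absolute path, then resolve the remainder (minus its leading '/') under the archive directory. A small sketch of that computation; the parameters stand in for getCluster().getConfig().getAccumuloDir() and ServerConstants.FILE_ARCHIVE_DIR:

import org.apache.hadoop.fs.Path;

public class ArchivePathSketch {
    static Path archivedLocation(String accumuloDir, String archiveDirName, Path file) {
        // path of the file relative to the accumulo dir
        String relative = file.toUri().getPath().substring(accumuloDir.length());
        Path archiveDir = new Path(accumuloDir, archiveDirName);
        // drop the leading '/' so Path treats the second argument as a child
        return new Path(archiveDir, relative.substring(1));
    }
}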

Aggregations

BatchWriterConfig (org.apache.accumulo.core.client.BatchWriterConfig): 182
BatchWriter (org.apache.accumulo.core.client.BatchWriter): 135
Mutation (org.apache.accumulo.core.data.Mutation): 131
Value (org.apache.accumulo.core.data.Value): 88
Text (org.apache.hadoop.io.Text): 60
Key (org.apache.accumulo.core.data.Key): 59
Test (org.junit.Test): 58
Scanner (org.apache.accumulo.core.client.Scanner): 57
Connector (org.apache.accumulo.core.client.Connector): 38
TableNotFoundException (org.apache.accumulo.core.client.TableNotFoundException): 33
MutationsRejectedException (org.apache.accumulo.core.client.MutationsRejectedException): 28
AccumuloException (org.apache.accumulo.core.client.AccumuloException): 26
IteratorSetting (org.apache.accumulo.core.client.IteratorSetting): 24
Authorizations (org.apache.accumulo.core.security.Authorizations): 22
Range (org.apache.accumulo.core.data.Range): 20
AccumuloSecurityException (org.apache.accumulo.core.client.AccumuloSecurityException): 19
PasswordToken (org.apache.accumulo.core.client.security.tokens.PasswordToken): 19
ColumnVisibility (org.apache.accumulo.core.security.ColumnVisibility): 19
Entry (java.util.Map.Entry): 18
IOException (java.io.IOException): 14