Example 46 with BatchWriterConfig

use of org.apache.accumulo.core.client.BatchWriterConfig in project vertexium by visallo.

the class AccumuloGraphConfiguration method createBatchWriterConfig.

public BatchWriterConfig createBatchWriterConfig() {
    long maxMemory = getConfigLong(BATCHWRITER_MAX_MEMORY, DEFAULT_BATCHWRITER_MAX_MEMORY);
    long maxLatency = getConfigLong(BATCHWRITER_MAX_LATENCY, DEFAULT_BATCHWRITER_MAX_LATENCY);
    int maxWriteThreads = getInt(BATCHWRITER_MAX_WRITE_THREADS, DEFAULT_BATCHWRITER_MAX_WRITE_THREADS);
    long timeout = getConfigLong(BATCHWRITER_TIMEOUT, DEFAULT_BATCHWRITER_TIMEOUT);
    BatchWriterConfig config = new BatchWriterConfig();
    config.setMaxMemory(maxMemory);
    config.setMaxLatency(maxLatency, TimeUnit.MILLISECONDS);
    config.setMaxWriteThreads(maxWriteThreads);
    config.setTimeout(timeout, TimeUnit.MILLISECONDS);
    return config;
}
Also used : BatchWriterConfig(org.apache.accumulo.core.client.BatchWriterConfig)
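
A config built this way is typically handed straight to Connector.createBatchWriter. Below is a minimal usage sketch, not taken from the project: the method, "conn", "graphConfig", and the table name are all assumptions for illustration.

// Hypothetical usage of the config built above; names are stand-ins.
void writeOneMutation(Connector conn, AccumuloGraphConfiguration graphConfig)
        throws TableNotFoundException, MutationsRejectedException {
    BatchWriterConfig config = graphConfig.createBatchWriterConfig();
    BatchWriter writer = conn.createBatchWriter("vertexium_data", config);
    try {
        Mutation m = new Mutation("row1");
        m.put("cf", "cq", new Value("v".getBytes(StandardCharsets.UTF_8)));
        writer.addMutation(m);
    } finally {
        // close() flushes any buffered mutations before releasing resources
        writer.close();
    }
}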

Example 47 with BatchWriterConfig

use of org.apache.accumulo.core.client.BatchWriterConfig in project accumulo by apache.

the class TabletGroupWatcher method deleteTablets.

private void deleteTablets(MergeInfo info) throws AccumuloException {
    KeyExtent extent = info.getExtent();
    String targetSystemTable = extent.isMeta() ? RootTable.NAME : MetadataTable.NAME;
    Master.log.debug("Deleting tablets for {}", extent);
    char timeType = '\0';
    KeyExtent followingTablet = null;
    // When the merge range has an end row, look up the tablet just past it; its prevEndRow is fixed up below
    if (extent.getEndRow() != null) {
        Key nextExtent = new Key(extent.getEndRow()).followingKey(PartialKey.ROW);
        followingTablet = getHighTablet(new KeyExtent(extent.getTableId(), nextExtent.getRow(), extent.getEndRow()));
        Master.log.debug("Found following tablet {}", followingTablet);
    }
    try {
        Connector conn = this.master.getConnector();
        Text start = extent.getPrevEndRow();
        if (start == null) {
            start = new Text();
        }
        Master.log.debug("Making file deletion entries for {}", extent);
        Range deleteRange = new Range(KeyExtent.getMetadataEntry(extent.getTableId(), start), false, KeyExtent.getMetadataEntry(extent.getTableId(), extent.getEndRow()), true);
        Scanner scanner = conn.createScanner(targetSystemTable, Authorizations.EMPTY);
        scanner.setRange(deleteRange);
        TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.fetch(scanner);
        TabletsSection.ServerColumnFamily.TIME_COLUMN.fetch(scanner);
        scanner.fetchColumnFamily(DataFileColumnFamily.NAME);
        scanner.fetchColumnFamily(TabletsSection.CurrentLocationColumnFamily.NAME);
        // Collect data file references; delete entries are flushed in bounded batches to limit memory use
        Set<FileRef> datafiles = new TreeSet<>();
        for (Entry<Key, Value> entry : scanner) {
            Key key = entry.getKey();
            if (key.compareColumnFamily(DataFileColumnFamily.NAME) == 0) {
                datafiles.add(new FileRef(this.master.fs, key));
                if (datafiles.size() > 1000) {
                    MetadataTableUtil.addDeleteEntries(extent, datafiles, master);
                    datafiles.clear();
                }
            } else if (TabletsSection.ServerColumnFamily.TIME_COLUMN.hasColumns(key)) {
                timeType = entry.getValue().toString().charAt(0);
            } else if (key.compareColumnFamily(TabletsSection.CurrentLocationColumnFamily.NAME) == 0) {
                throw new IllegalStateException("Tablet " + key.getRow() + " is assigned during a merge!");
            } else if (TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.hasColumns(key)) {
                // ACCUMULO-2974 The table ID must be included when converting a relative path to an absolute path.
                // The value already includes the leading path separator, so one does not need to be added.
                String path = entry.getValue().toString();
                if (path.contains(":")) {
                    datafiles.add(new FileRef(path));
                } else {
                    datafiles.add(new FileRef(path, this.master.fs.getFullPath(FileType.TABLE, Path.SEPARATOR + extent.getTableId() + path)));
                }
                if (datafiles.size() > 1000) {
                    MetadataTableUtil.addDeleteEntries(extent, datafiles, master);
                    datafiles.clear();
                }
            }
        }
        MetadataTableUtil.addDeleteEntries(extent, datafiles, master);
        BatchWriter bw = conn.createBatchWriter(targetSystemTable, new BatchWriterConfig());
        try {
            deleteTablets(info, deleteRange, bw, conn);
        } finally {
            bw.close();
        }
        if (followingTablet != null) {
            Master.log.debug("Updating prevRow of {} to {}", followingTablet, extent.getPrevEndRow());
            bw = conn.createBatchWriter(targetSystemTable, new BatchWriterConfig());
            try {
                Mutation m = new Mutation(followingTablet.getMetadataEntry());
                TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.put(m, KeyExtent.encodePrevEndRow(extent.getPrevEndRow()));
                ChoppedColumnFamily.CHOPPED_COLUMN.putDelete(m);
                bw.addMutation(m);
                bw.flush();
            } finally {
                bw.close();
            }
        } else {
            // Recreate the default tablet to hold the end of the table
            Master.log.debug("Recreating the last tablet to point to {}", extent.getPrevEndRow());
            VolumeChooserEnvironment chooserEnv = new VolumeChooserEnvironment(extent.getTableId());
            String tdir = master.getFileSystem().choose(chooserEnv, ServerConstants.getBaseUris()) + Constants.HDFS_TABLES_DIR + Path.SEPARATOR + extent.getTableId() + Constants.DEFAULT_TABLET_LOCATION;
            MetadataTableUtil.addTablet(new KeyExtent(extent.getTableId(), null, extent.getPrevEndRow()), tdir, master, timeType, this.master.masterLock);
        }
    } catch (RuntimeException | IOException | TableNotFoundException | AccumuloSecurityException ex) {
        throw new AccumuloException(ex);
    }
}
Also used :
Connector(org.apache.accumulo.core.client.Connector)
Scanner(org.apache.accumulo.core.client.Scanner)
AccumuloException(org.apache.accumulo.core.client.AccumuloException)
Text(org.apache.hadoop.io.Text)
IOException(java.io.IOException)
Range(org.apache.accumulo.core.data.Range)
KeyExtent(org.apache.accumulo.core.data.impl.KeyExtent)
TableNotFoundException(org.apache.accumulo.core.client.TableNotFoundException)
FileRef(org.apache.accumulo.server.fs.FileRef)
VolumeChooserEnvironment(org.apache.accumulo.server.fs.VolumeChooserEnvironment)
TreeSet(java.util.TreeSet)
Value(org.apache.accumulo.core.data.Value)
BatchWriterConfig(org.apache.accumulo.core.client.BatchWriterConfig)
AccumuloSecurityException(org.apache.accumulo.core.client.AccumuloSecurityException)
BatchWriter(org.apache.accumulo.core.client.BatchWriter)
Mutation(org.apache.accumulo.core.data.Mutation)
Key(org.apache.accumulo.core.data.Key)
PartialKey(org.apache.accumulo.core.data.PartialKey)
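
The scan loop above caps memory use by writing delete entries whenever more than 1000 file references have accumulated, then clearing the set. The same pattern in isolation, as a hedged sketch where "candidateFiles" and "flushDeletes" are hypothetical stand-ins for the scan results and MetadataTableUtil.addDeleteEntries:

// Standalone sketch of the bounded-batch pattern used in deleteTablets.
void recordDeletes(Iterable<String> candidateFiles) {
    Set<String> pending = new TreeSet<>();
    for (String file : candidateFiles) {
        pending.add(file);
        if (pending.size() > 1000) {
            flushDeletes(pending); // write delete entries for this batch
            pending.clear();
        }
    }
    flushDeletes(pending); // flush whatever remains in the final partial batch
}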

Example 48 with BatchWriterConfig

use of org.apache.accumulo.core.client.BatchWriterConfig in project accumulo by apache.

the class StatusMaker method deleteStatusRecord.

/**
 * Because there is only one active Master, and thus one active StatusMaker, the only safe time to issue the delete for a closed Status is immediately
 * after writing it to the replication table.
 * <p>
 * If we try to defer and delete these entries in another thread/process, we will have no assurance that the Status message was propagated to the replication
 * table. It is easiest, in terms of concurrency, to do this all in one step.
 *
 * @param k
 *          The Key to delete
 */
protected void deleteStatusRecord(Key k) {
    log.debug("Deleting {} from metadata table as it's no longer needed", k.toStringNoTruncate());
    // Lazily create the metadata-table writer on first use
    if (null == metadataWriter) {
        try {
            metadataWriter = conn.createBatchWriter(sourceTableName, new BatchWriterConfig());
        } catch (TableNotFoundException e) {
            throw new RuntimeException("Metadata table doesn't exist", e);
        }
    }
    try {
        Mutation m = new Mutation(k.getRow());
        m.putDelete(k.getColumnFamily(), k.getColumnQualifier());
        metadataWriter.addMutation(m);
        metadataWriter.flush();
    } catch (MutationsRejectedException e) {
        log.warn("Failed to delete status mutations for metadata table, will retry", e);
    }
}
Also used : TableNotFoundException(org.apache.accumulo.core.client.TableNotFoundException) BatchWriterConfig(org.apache.accumulo.core.client.BatchWriterConfig) Mutation(org.apache.accumulo.core.data.Mutation) MutationsRejectedException(org.apache.accumulo.core.client.MutationsRejectedException)
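
The ordering the javadoc describes, write the Status durably first and delete the metadata entry second, can be sketched as follows. This is not the project's code; the method and both writers are stand-ins for illustration.

// Hypothetical sketch of the write-then-delete ordering described above.
void replicateThenDelete(BatchWriter replicationWriter, BatchWriter metadataWriter, Key k, Value status)
        throws MutationsRejectedException {
    Mutation write = new Mutation(k.getRow());
    write.put(k.getColumnFamily(), k.getColumnQualifier(), status);
    replicationWriter.addMutation(write);
    replicationWriter.flush(); // the Status must be durable in the replication table first

    Mutation delete = new Mutation(k.getRow());
    delete.putDelete(k.getColumnFamily(), k.getColumnQualifier());
    metadataWriter.addMutation(delete);
    metadataWriter.flush(); // only now is the metadata record removed
}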

Example 49 with BatchWriterConfig

use of org.apache.accumulo.core.client.BatchWriterConfig in project accumulo by apache.

the class OutputConfigurator method setBatchWriterOptions.

/**
 * Sets the configuration for the job's {@link BatchWriter} instances. If not set, a new {@link BatchWriterConfig} with sensible built-in defaults is
 * used. Setting the configuration multiple times overwrites any previous configuration.
 *
 * @param implementingClass
 *          the class whose name will be used as a prefix for the property configuration key
 * @param conf
 *          the Hadoop configuration object to configure
 * @param bwConfig
 *          the configuration for the {@link BatchWriter}
 * @since 1.6.0
 */
public static void setBatchWriterOptions(Class<?> implementingClass, Configuration conf, BatchWriterConfig bwConfig) {
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    String serialized;
    try {
        bwConfig.write(new DataOutputStream(baos));
        serialized = new String(baos.toByteArray(), UTF_8);
        baos.close();
    } catch (IOException e) {
        throw new IllegalArgumentException("unable to serialize " + BatchWriterConfig.class.getName(), e);
    }
    conf.set(enumToConfKey(implementingClass, WriteOpts.BATCH_WRITER_CONFIG), serialized);
}
Also used : DataOutputStream(java.io.DataOutputStream) BatchWriterConfig(org.apache.accumulo.core.client.BatchWriterConfig) ByteArrayOutputStream(java.io.ByteArrayOutputStream) IOException(java.io.IOException)
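
The reverse path is symmetric: BatchWriterConfig implements Hadoop's Writable, so the serialized string can be read back with readFields. A sketch of the matching reader follows; the method name and error handling are assumptions mirroring the setter above.

// Sketch of deserializing the config stored by setBatchWriterOptions.
public static BatchWriterConfig getBatchWriterOptions(Class<?> implementingClass, Configuration conf) {
    String serialized = conf.get(enumToConfKey(implementingClass, WriteOpts.BATCH_WRITER_CONFIG));
    BatchWriterConfig bwConfig = new BatchWriterConfig();
    if (serialized == null || serialized.isEmpty()) {
        return bwConfig; // fall back to the built-in defaults
    }
    try {
        ByteArrayInputStream bais = new ByteArrayInputStream(serialized.getBytes(UTF_8));
        bwConfig.readFields(new DataInputStream(bais));
    } catch (IOException e) {
        throw new IllegalArgumentException("unable to deserialize " + BatchWriterConfig.class.getName(), e);
    }
    return bwConfig;
}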

Example 50 with BatchWriterConfig

use of org.apache.accumulo.core.client.BatchWriterConfig in project accumulo by apache.

the class ConnectionInfoFactory method getBatchWriterConfig.

public static BatchWriterConfig getBatchWriterConfig(ConnectionInfo info) {
    BatchWriterConfig batchWriterConfig = new BatchWriterConfig();
    Long maxMemory = getLong(info, ClientProperty.BATCH_WRITER_MAX_MEMORY_BYTES);
    if (maxMemory != null) {
        batchWriterConfig.setMaxMemory(maxMemory);
    }
    Long maxLatency = getLong(info, ClientProperty.BATCH_WRITER_MAX_LATENCY_SEC);
    if (maxLatency != null) {
        batchWriterConfig.setMaxLatency(maxLatency, TimeUnit.SECONDS);
    }
    Long timeout = getLong(info, ClientProperty.BATCH_WRITER_MAX_TIMEOUT_SEC);
    if (timeout != null) {
        batchWriterConfig.setTimeout(timeout, TimeUnit.SECONDS);
    }
    String durability = getString(info, ClientProperty.BATCH_WRITER_DURABILITY);
    if (!durability.isEmpty()) {
        batchWriterConfig.setDurability(Durability.valueOf(durability.toUpperCase()));
    }
    return batchWriterConfig;
}
Also used : BatchWriterConfig(org.apache.accumulo.core.client.BatchWriterConfig)
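
Note that latency and timeout are given in seconds here, unlike the millisecond units in Example 46, so the TimeUnit argument matters. Wiring the factory into a writer is then a one-liner; in this hedged sketch, "info", "conn", and the table name are stand-ins.

// Hypothetical wiring of the factory above into a BatchWriter.
BatchWriterConfig cfg = ConnectionInfoFactory.getBatchWriterConfig(info);
BatchWriter writer = conn.createBatchWriter("my_table", cfg); // throws TableNotFoundException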

Aggregations

BatchWriterConfig (org.apache.accumulo.core.client.BatchWriterConfig): 182
BatchWriter (org.apache.accumulo.core.client.BatchWriter): 135
Mutation (org.apache.accumulo.core.data.Mutation): 131
Value (org.apache.accumulo.core.data.Value): 88
Text (org.apache.hadoop.io.Text): 60
Key (org.apache.accumulo.core.data.Key): 59
Test (org.junit.Test): 58
Scanner (org.apache.accumulo.core.client.Scanner): 57
Connector (org.apache.accumulo.core.client.Connector): 38
TableNotFoundException (org.apache.accumulo.core.client.TableNotFoundException): 33
MutationsRejectedException (org.apache.accumulo.core.client.MutationsRejectedException): 28
AccumuloException (org.apache.accumulo.core.client.AccumuloException): 26
IteratorSetting (org.apache.accumulo.core.client.IteratorSetting): 24
Authorizations (org.apache.accumulo.core.security.Authorizations): 22
Range (org.apache.accumulo.core.data.Range): 20
AccumuloSecurityException (org.apache.accumulo.core.client.AccumuloSecurityException): 19
PasswordToken (org.apache.accumulo.core.client.security.tokens.PasswordToken): 19
ColumnVisibility (org.apache.accumulo.core.security.ColumnVisibility): 19
Entry (java.util.Map.Entry): 18
IOException (java.io.IOException): 14