
Example 16 with AcceptableThriftTableOperationException

Use of org.apache.accumulo.core.client.impl.AcceptableThriftTableOperationException in project accumulo by apache.

The class CompactionDriver, method isReady:
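This FATE step decides whether a user-initiated compaction can proceed. It first checks a ZooKeeper node to see whether the compaction was canceled, then scans the metadata table to count tablets whose compact id still trails the requested one, nudges the tablet servers hosting those tablets, and returns a back-off time (0 once every tablet has caught up).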

@Override
public long isReady(long tid, Master master) throws Exception {
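    // Path of the ZooKeeper node holding the most recently canceled compaction id for this table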
    String zCancelID = Constants.ZROOT + "/" + master.getInstance().getInstanceID() + Constants.ZTABLES + "/" + tableId + Constants.ZTABLE_COMPACT_CANCEL_ID;
    IZooReaderWriter zoo = ZooReaderWriter.getInstance();
    if (Long.parseLong(new String(zoo.getData(zCancelID, null))) >= compactId) {
        // compaction was canceled
        throw new AcceptableThriftTableOperationException(tableId.canonicalID(), null, TableOperation.COMPACT, TableOperationExceptionType.OTHER, "Compaction canceled");
    }
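    // Tally, per tablet server, the tablets whose compact id still trails the requested compactId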
    MapCounter<TServerInstance> serversToFlush = new MapCounter<>();
    Connector conn = master.getConnector();
    Scanner scanner;
    if (tableId.equals(MetadataTable.ID)) {
        scanner = new IsolatedScanner(conn.createScanner(RootTable.NAME, Authorizations.EMPTY));
        scanner.setRange(MetadataSchema.TabletsSection.getRange());
    } else {
        scanner = new IsolatedScanner(conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY));
        Range range = new KeyExtent(tableId, null, startRow == null ? null : new Text(startRow)).toMetadataRange();
        scanner.setRange(range);
    }
    TabletsSection.ServerColumnFamily.COMPACT_COLUMN.fetch(scanner);
    TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.fetch(scanner);
    scanner.fetchColumnFamily(TabletsSection.CurrentLocationColumnFamily.NAME);
    long t1 = System.currentTimeMillis();
    RowIterator ri = new RowIterator(scanner);
    int tabletsToWaitFor = 0;
    int tabletCount = 0;
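    // Walk the tablet metadata rows for this table, one row (tablet) at a time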
    while (ri.hasNext()) {
        Iterator<Entry<Key, Value>> row = ri.next();
        long tabletCompactID = -1;
        TServerInstance server = null;
        Entry<Key, Value> entry = null;
        while (row.hasNext()) {
            entry = row.next();
            Key key = entry.getKey();
            if (TabletsSection.ServerColumnFamily.COMPACT_COLUMN.equals(key.getColumnFamily(), key.getColumnQualifier()))
                tabletCompactID = Long.parseLong(entry.getValue().toString());
            if (TabletsSection.CurrentLocationColumnFamily.NAME.equals(key.getColumnFamily()))
                server = new TServerInstance(entry.getValue(), key.getColumnQualifier());
        }
        if (tabletCompactID < compactId) {
            tabletsToWaitFor++;
            if (server != null)
                serversToFlush.increment(server, 1);
        }
        tabletCount++;
        Text tabletEndRow = new KeyExtent(entry.getKey().getRow(), (Text) null).getEndRow();
        if (tabletEndRow == null || (endRow != null && tabletEndRow.compareTo(new Text(endRow)) >= 0))
            break;
    }
    long scanTime = System.currentTimeMillis() - t1;
    Instance instance = master.getInstance();
    Tables.clearCache(instance);
    if (tabletCount == 0 && !Tables.exists(instance, tableId))
        throw new AcceptableThriftTableOperationException(tableId.canonicalID(), null, TableOperation.COMPACT, TableOperationExceptionType.NOTFOUND, null);
    if (serversToFlush.size() == 0 && Tables.getTableState(instance, tableId) == TableState.OFFLINE)
        throw new AcceptableThriftTableOperationException(tableId.canonicalID(), null, TableOperation.COMPACT, TableOperationExceptionType.OFFLINE, null);
    if (tabletsToWaitFor == 0)
        return 0;
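    // Ask each server that still hosts lagging tablets to start compacting them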
    for (TServerInstance tsi : serversToFlush.keySet()) {
        try {
            final TServerConnection server = master.getConnection(tsi);
            if (server != null)
                server.compact(master.getMasterLock(), tableId.canonicalID(), startRow, endRow);
        } catch (TException ex) {
            LoggerFactory.getLogger(CompactionDriver.class).error(ex.toString());
        }
    }
    long sleepTime = 500;
    if (serversToFlush.size() > 0)
        // make wait time depend on the server with the most to compact
        sleepTime = Collections.max(serversToFlush.values()) * sleepTime;
    sleepTime = Math.max(2 * scanTime, sleepTime);
    sleepTime = Math.min(sleepTime, 30000);
    return sleepTime;
}
Also used: TException (org.apache.thrift.TException), Connector (org.apache.accumulo.core.client.Connector), Scanner (org.apache.accumulo.core.client.Scanner), IsolatedScanner (org.apache.accumulo.core.client.IsolatedScanner), Instance (org.apache.accumulo.core.client.Instance), TServerInstance (org.apache.accumulo.server.master.state.TServerInstance), TServerConnection (org.apache.accumulo.server.master.LiveTServerSet.TServerConnection), Text (org.apache.hadoop.io.Text), Range (org.apache.accumulo.core.data.Range), KeyExtent (org.apache.accumulo.core.data.impl.KeyExtent), Entry (java.util.Map.Entry), Key (org.apache.accumulo.core.data.Key), Value (org.apache.accumulo.core.data.Value), RowIterator (org.apache.accumulo.core.client.RowIterator), MapCounter (org.apache.accumulo.core.util.MapCounter), IZooReaderWriter (org.apache.accumulo.fate.zookeeper.IZooReaderWriter), AcceptableThriftTableOperationException (org.apache.accumulo.core.client.impl.AcceptableThriftTableOperationException)
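For orientation, here is a minimal sketch of the FATE contract that isReady participates in. The interface below is illustrative only; the real org.apache.accumulo.fate.Repo also declares undo, getDescription, and getReturn. The key convention, visible in the method above, is that returning 0 means the step may execute now, while a positive value asks the framework to retry after roughly that many milliseconds (CompactionDriver caps its answer at 30000).

// Illustrative sketch only, not the exact Accumulo API.
public interface SimpleRepo<T> {

    // 0 => ready to execute now; > 0 => re-check after ~that many milliseconds.
    long isReady(long tid, T environment) throws Exception;

    // Performs the step once isReady reports 0.
    SimpleRepo<T> call(long tid, T environment) throws Exception;
}

// Hypothetical driver loop showing how such a runner could consume isReady.
final class SimpleRunner {
    static <T> void runStep(SimpleRepo<T> repo, long tid, T env) throws Exception {
        long wait;
        while ((wait = repo.isReady(tid, env)) > 0) {
            Thread.sleep(wait); // CompactionDriver keeps this between 500ms and 30s
        }
        repo.call(tid, env);
    }
}

Throwing AcceptableThriftTableOperationException from isReady, as both examples on this page do, aborts the operation with the given message; the "Acceptable" marker tells the framework the failure is expected rather than an internal error.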

Example 17 with AcceptableThriftTableOperationException

Use of org.apache.accumulo.core.client.impl.AcceptableThriftTableOperationException in project accumulo by apache.

The class ImportTable, method checkVersions:
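checkVersions opens the Constants.EXPORT_FILE zip in the export directory, reads the export metadata entry line by line, and rejects the import if the export or data version is missing or newer than what this server supports.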

public void checkVersions(Master env) throws AcceptableThriftTableOperationException {
    Path path = new Path(tableInfo.exportDir, Constants.EXPORT_FILE);
    Integer exportVersion = null;
    Integer dataVersion = null;
    try (ZipInputStream zis = new ZipInputStream(env.getFileSystem().open(path))) {
        ZipEntry zipEntry;
        while ((zipEntry = zis.getNextEntry()) != null) {
            if (zipEntry.getName().equals(Constants.EXPORT_INFO_FILE)) {
                BufferedReader in = new BufferedReader(new InputStreamReader(zis, UTF_8));
                String line = null;
                while ((line = in.readLine()) != null) {
                    String[] sa = line.split(":", 2);
                    if (sa[0].equals(ExportTable.EXPORT_VERSION_PROP)) {
                        exportVersion = Integer.parseInt(sa[1]);
                    } else if (sa[0].equals(ExportTable.DATA_VERSION_PROP)) {
                        dataVersion = Integer.parseInt(sa[1]);
                    }
                }
                break;
            }
        }
    } catch (IOException ioe) {
        log.warn("{}", ioe.getMessage(), ioe);
        throw new AcceptableThriftTableOperationException(null, tableInfo.tableName, TableOperation.IMPORT, TableOperationExceptionType.OTHER, "Failed to read export metadata " + ioe.getMessage());
    }
    if (exportVersion == null || exportVersion > ExportTable.VERSION)
        throw new AcceptableThriftTableOperationException(null, tableInfo.tableName, TableOperation.IMPORT, TableOperationExceptionType.OTHER, "Incompatible export version " + exportVersion);
    if (dataVersion == null || dataVersion > ServerConstants.DATA_VERSION)
        throw new AcceptableThriftTableOperationException(null, tableInfo.tableName, TableOperation.IMPORT, TableOperationExceptionType.OTHER, "Incompatible data version " + dataVersion);
}
Also used: Path (org.apache.hadoop.fs.Path), ZipInputStream (java.util.zip.ZipInputStream), ZipEntry (java.util.zip.ZipEntry), InputStreamReader (java.io.InputStreamReader), BufferedReader (java.io.BufferedReader), IOException (java.io.IOException), AcceptableThriftTableOperationException (org.apache.accumulo.core.client.impl.AcceptableThriftTableOperationException)
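To make the parsing concrete, here is a hedged sketch of the "key:value" line format the loop above consumes. The key names and values are illustrative assumptions standing in for ExportTable.EXPORT_VERSION_PROP and ExportTable.DATA_VERSION_PROP, not values read from a real export.

// Hedged sketch: example "key:value" lines like those in the export metadata
// entry; the key names below are assumptions, not Accumulo constants.
import java.util.HashMap;
import java.util.Map;

public class ExportInfoParseDemo {
    public static void main(String[] args) {
        String[] lines = {"exportVersion:1", "srcDataVersion:7", "srcZookeepers:zk1:2181"};
        Map<String, String> props = new HashMap<>();
        for (String line : lines) {
            // The limit of 2 keeps any ':' inside the value intact.
            String[] sa = line.split(":", 2);
            props.put(sa[0], sa[1]);
        }
        System.out.println(props);
    }
}

The limit of 2 passed to split matters whenever the value itself contains a colon, for example a host:port pair: only the first colon separates key from value.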

Aggregations

AcceptableThriftTableOperationException (org.apache.accumulo.core.client.impl.AcceptableThriftTableOperationException): 17 usages
IZooReaderWriter (org.apache.accumulo.fate.zookeeper.IZooReaderWriter): 7 usages
Path (org.apache.hadoop.fs.Path): 7 usages
IOException (java.io.IOException): 6 usages
Instance (org.apache.accumulo.core.client.Instance): 6 usages
VolumeManager (org.apache.accumulo.server.fs.VolumeManager): 5 usages
KeyExtent (org.apache.accumulo.core.data.impl.KeyExtent): 4 usages
FileStatus (org.apache.hadoop.fs.FileStatus): 4 usages
Text (org.apache.hadoop.io.Text): 3 usages
BufferedWriter (java.io.BufferedWriter): 2 usages
OutputStreamWriter (java.io.OutputStreamWriter): 2 usages
ZipEntry (java.util.zip.ZipEntry): 2 usages
ZipInputStream (java.util.zip.ZipInputStream): 2 usages
Connector (org.apache.accumulo.core.client.Connector): 2 usages
Scanner (org.apache.accumulo.core.client.Scanner): 2 usages
Key (org.apache.accumulo.core.data.Key): 2 usages
Value (org.apache.accumulo.core.data.Value): 2 usages
Mutator (org.apache.accumulo.fate.zookeeper.IZooReaderWriter.Mutator): 2 usages
HdfsZooInstance (org.apache.accumulo.server.client.HdfsZooInstance): 2 usages
TServerInstance (org.apache.accumulo.server.master.state.TServerInstance): 2 usages