Use of org.apache.accumulo.core.client.impl.AcceptableThriftTableOperationException in project accumulo by apache.
The class CompactionDriver, method isReady.
@Override
public long isReady(long tid, Master master) throws Exception {
  // a user-requested cancel bumps the cancel id in ZooKeeper; if it has reached
  // this compaction's id, give up
  String zCancelID = Constants.ZROOT + "/" + master.getInstance().getInstanceID() + Constants.ZTABLES + "/" + tableId
      + Constants.ZTABLE_COMPACT_CANCEL_ID;
  IZooReaderWriter zoo = ZooReaderWriter.getInstance();
  if (Long.parseLong(new String(zoo.getData(zCancelID, null))) >= compactId) {
    // compaction was canceled
    throw new AcceptableThriftTableOperationException(tableId.canonicalID(), null, TableOperation.COMPACT,
        TableOperationExceptionType.OTHER, "Compaction canceled");
  }
  // scan the metadata for this table's tablets, fetching each tablet's compact id
  // and current location
  MapCounter<TServerInstance> serversToFlush = new MapCounter<>();
  Connector conn = master.getConnector();
  Scanner scanner;
  if (tableId.equals(MetadataTable.ID)) {
    scanner = new IsolatedScanner(conn.createScanner(RootTable.NAME, Authorizations.EMPTY));
    scanner.setRange(MetadataSchema.TabletsSection.getRange());
  } else {
    scanner = new IsolatedScanner(conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY));
    Range range = new KeyExtent(tableId, null, startRow == null ? null : new Text(startRow)).toMetadataRange();
    scanner.setRange(range);
  }
  TabletsSection.ServerColumnFamily.COMPACT_COLUMN.fetch(scanner);
  TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.fetch(scanner);
  scanner.fetchColumnFamily(TabletsSection.CurrentLocationColumnFamily.NAME);
  long t1 = System.currentTimeMillis();
  RowIterator ri = new RowIterator(scanner);
  int tabletsToWaitFor = 0;
  int tabletCount = 0;
  while (ri.hasNext()) {
    Iterator<Entry<Key,Value>> row = ri.next();
    long tabletCompactID = -1;
    TServerInstance server = null;
    Entry<Key,Value> entry = null;
    while (row.hasNext()) {
      entry = row.next();
      Key key = entry.getKey();
      if (TabletsSection.ServerColumnFamily.COMPACT_COLUMN.equals(key.getColumnFamily(), key.getColumnQualifier()))
        tabletCompactID = Long.parseLong(entry.getValue().toString());
      if (TabletsSection.CurrentLocationColumnFamily.NAME.equals(key.getColumnFamily()))
        server = new TServerInstance(entry.getValue(), key.getColumnQualifier());
    }
    // a tablet whose recorded compact id is older than the requested id still
    // needs to compact; remember which server hosts it
    if (tabletCompactID < compactId) {
      tabletsToWaitFor++;
      if (server != null)
        serversToFlush.increment(server, 1);
    }
    tabletCount++;
    Text tabletEndRow = new KeyExtent(entry.getKey().getRow(), (Text) null).getEndRow();
    if (tabletEndRow == null || (endRow != null && tabletEndRow.compareTo(new Text(endRow)) >= 0))
      break;
  }
  long scanTime = System.currentTimeMillis() - t1;
  Instance instance = master.getInstance();
  Tables.clearCache(instance);
  if (tabletCount == 0 && !Tables.exists(instance, tableId))
    throw new AcceptableThriftTableOperationException(tableId.canonicalID(), null, TableOperation.COMPACT,
        TableOperationExceptionType.NOTFOUND, null);
  if (serversToFlush.size() == 0 && Tables.getTableState(instance, tableId) == TableState.OFFLINE)
    throw new AcceptableThriftTableOperationException(tableId.canonicalID(), null, TableOperation.COMPACT,
        TableOperationExceptionType.OFFLINE, null);
  if (tabletsToWaitFor == 0)
    return 0;
  // nudge the servers that still have tablets left to compact
  for (TServerInstance tsi : serversToFlush.keySet()) {
    try {
      final TServerConnection server = master.getConnection(tsi);
      if (server != null)
        server.compact(master.getMasterLock(), tableId.canonicalID(), startRow, endRow);
    } catch (TException ex) {
      LoggerFactory.getLogger(CompactionDriver.class).error(ex.toString());
    }
  }
  long sleepTime = 500;
  if (serversToFlush.size() > 0)
    // make wait time depend on the server with the most to compact
    sleepTime = Collections.max(serversToFlush.values()) * sleepTime;
  sleepTime = Math.max(2 * scanTime, sleepTime);
  sleepTime = Math.min(sleepTime, 30000);
  return sleepTime;
}
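In Accumulo's FATE framework, the return value of isReady is a readiness hint: zero means the operation may proceed to its call() step, while a positive value is roughly how many milliseconds the scheduler should wait before polling again, which is why the method above bounds its answer between 500 ms and 30 s. A minimal sketch of a runner honoring that contract follows; FateLikeRunner, SimpleRepo, and waitUntilReady are hypothetical names, and the real scheduler in org.apache.accumulo.fate is considerably more involved (persistent transaction state, seeding, exception handling).

// A minimal sketch, not Accumulo's scheduler: poll isReady() until it reports 0.
public class FateLikeRunner {
  // hypothetical stand-in for the relevant part of org.apache.accumulo.fate.Repo
  interface SimpleRepo {
    long isReady(long tid, Master master) throws Exception;
  }

  static void waitUntilReady(SimpleRepo repo, long tid, Master master) throws Exception {
    long sleep;
    // 0 means ready; a positive value is a suggested back-off in milliseconds
    while ((sleep = repo.isReady(tid, master)) > 0) {
      Thread.sleep(sleep);
    }
    // at this point the scheduler would invoke the operation's call() step
  }
}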
Use of org.apache.accumulo.core.client.impl.AcceptableThriftTableOperationException in project accumulo by apache.
The class ImportTable, method checkVersions.
public void checkVersions(Master env) throws AcceptableThriftTableOperationException {
  Path path = new Path(tableInfo.exportDir, Constants.EXPORT_FILE);
  Integer exportVersion = null;
  Integer dataVersion = null;
  try (ZipInputStream zis = new ZipInputStream(env.getFileSystem().open(path))) {
    ZipEntry zipEntry;
    while ((zipEntry = zis.getNextEntry()) != null) {
      if (zipEntry.getName().equals(Constants.EXPORT_INFO_FILE)) {
        // the export info entry holds one colon-separated key:value property per line
        BufferedReader in = new BufferedReader(new InputStreamReader(zis, UTF_8));
        String line = null;
        while ((line = in.readLine()) != null) {
          String[] sa = line.split(":", 2);
          if (sa[0].equals(ExportTable.EXPORT_VERSION_PROP)) {
            exportVersion = Integer.parseInt(sa[1]);
          } else if (sa[0].equals(ExportTable.DATA_VERSION_PROP)) {
            dataVersion = Integer.parseInt(sa[1]);
          }
        }
        break;
      }
    }
  } catch (IOException ioe) {
    log.warn("{}", ioe.getMessage(), ioe);
    throw new AcceptableThriftTableOperationException(null, tableInfo.tableName, TableOperation.IMPORT,
        TableOperationExceptionType.OTHER, "Failed to read export metadata " + ioe.getMessage());
  }
  if (exportVersion == null || exportVersion > ExportTable.VERSION)
    throw new AcceptableThriftTableOperationException(null, tableInfo.tableName, TableOperation.IMPORT,
        TableOperationExceptionType.OTHER, "Incompatible export version " + exportVersion);
  if (dataVersion == null || dataVersion > ServerConstants.DATA_VERSION)
    throw new AcceptableThriftTableOperationException(null, tableInfo.tableName, TableOperation.IMPORT,
        TableOperationExceptionType.OTHER, "Incompatible data version " + dataVersion);
}
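For reference, the export info entry that checkVersions parses holds one key:value property per line, split on the first colon. Below is a hypothetical sketch of producing such an entry, assuming fs is an open Hadoop FileSystem and exportDir an existing export directory (imports omitted, as in the snippets above); Accumulo's actual export code records additional properties besides these two version lines.

// Hypothetical sketch: write the export-info zip entry that checkVersions() reads.
static void writeExportInfo(FileSystem fs, Path exportDir) throws IOException {
  Path exportFile = new Path(exportDir, Constants.EXPORT_FILE);
  try (ZipOutputStream zos = new ZipOutputStream(fs.create(exportFile))) {
    zos.putNextEntry(new ZipEntry(Constants.EXPORT_INFO_FILE));
    // one "key:value" property per line, matching the split(":", 2) above
    String info = ExportTable.EXPORT_VERSION_PROP + ":" + ExportTable.VERSION + "\n"
        + ExportTable.DATA_VERSION_PROP + ":" + ServerConstants.DATA_VERSION + "\n";
    zos.write(info.getBytes(UTF_8));
    zos.closeEntry();
  }
}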