Example 6 with IZooReaderWriter

use of org.apache.accumulo.fate.zookeeper.IZooReaderWriter in project accumulo by apache.

the class DatafileManager method bringMinorCompactionOnline.

void bringMinorCompactionOnline(FileRef tmpDatafile, FileRef newDatafile, FileRef absMergeFile, DataFileValue dfv, CommitSession commitSession, long flushId) throws IOException {
    IZooReaderWriter zoo = ZooReaderWriter.getInstance();
    if (tablet.getExtent().isRootTablet()) {
        try {
            if (!zoo.isLockHeld(tablet.getTabletServer().getLock().getLockID())) {
                throw new IllegalStateException();
            }
        } catch (Exception e) {
            throw new IllegalStateException("Can not bring major compaction online, lock not held", e);
        }
    }
    // rename the file before updating the metadata table, so files referenced
    // in the metadata table always exist
    do {
        try {
            if (dfv.getNumEntries() == 0) {
                tablet.getTabletServer().getFileSystem().deleteRecursively(tmpDatafile.path());
            } else {
                if (tablet.getTabletServer().getFileSystem().exists(newDatafile.path())) {
                    log.warn("Target map file already exist {}", newDatafile);
                    tablet.getTabletServer().getFileSystem().deleteRecursively(newDatafile.path());
                }
                rename(tablet.getTabletServer().getFileSystem(), tmpDatafile.path(), newDatafile.path());
            }
            break;
        } catch (IOException ioe) {
            log.warn("Tablet " + tablet.getExtent() + " failed to rename " + newDatafile + " after MinC, will retry in 60 secs...", ioe);
            sleepUninterruptibly(1, TimeUnit.MINUTES);
        }
    } while (true);
    long t1, t2;
    // the code below always assumes merged files are in use by scans... this must be done
    // because the in-memory list of files is not updated until after the metadata table is
    // updated... therefore the file is available to scans until memory is updated, but we want
    // to ensure the file is not available for garbage collection... if memory were updated
    // before this point (like major compactions do), then the following code could wait
    // for scans to finish like major compactions do... it used to wait for scans to finish
    // here, but that was incorrect because a scan could start after waiting but before
    // memory was updated... assuming the file is always in use by scans leads to
    // one unneeded metadata update when it was not actually in use
    Set<FileRef> filesInUseByScans = Collections.emptySet();
    if (absMergeFile != null)
        filesInUseByScans = Collections.singleton(absMergeFile);
    // this metadata write does not go up... it goes sideways or to itself
    if (absMergeFile != null)
        MetadataTableUtil.addDeleteEntries(tablet.getExtent(), Collections.singleton(absMergeFile), tablet.getTabletServer());
    Set<String> unusedWalLogs = tablet.beginClearingUnusedLogs();
    boolean replicate = ReplicationConfigurationUtil.isEnabled(tablet.getExtent(), tablet.getTableConfiguration());
    Set<String> logFileOnly = null;
    if (replicate) {
        // unusedWalLogs is of the form host/fileURI, need to strip off the host portion
        logFileOnly = new HashSet<>();
        for (String unusedWalLog : unusedWalLogs) {
            int index = unusedWalLog.indexOf('/');
            if (-1 == index) {
                log.warn("Could not find host component to strip from DFSLogger representation of WAL");
            } else {
                unusedWalLog = unusedWalLog.substring(index + 1);
            }
            logFileOnly.add(unusedWalLog);
        }
    }
    try {
        // the order of writing to the metadata table and the walog is important in the face of
        // machine/process failures: we need to write to metadata before writing to the walog,
        // because when things are done in the reverse order data could be lost... the minor
        // compaction start event should be written before the following metadata write is made
        tablet.updateTabletDataFile(commitSession.getMaxCommittedTime(), newDatafile, absMergeFile, dfv, unusedWalLogs, filesInUseByScans, flushId);
        // tablet is online and thus these WALs are referenced by that tablet. Therefore, the WAL replication status cannot be 'closed'.
        if (replicate) {
            if (log.isDebugEnabled()) {
                log.debug("Recording that data has been ingested into {} using {}", tablet.getExtent(), logFileOnly);
            }
            for (String logFile : logFileOnly) {
                ReplicationTableUtil.updateFiles(tablet.getTabletServer(), tablet.getExtent(), logFile, StatusUtil.openWithUnknownLength());
            }
        }
    } finally {
        tablet.finishClearingUnusedLogs();
    }
    do {
        try {
            // this update uses the new commit session, instead of the old one passed in,
            // because the new one will reference the logs used by current memory...
            tablet.getTabletServer().minorCompactionFinished(tablet.getTabletMemory().getCommitSession(), newDatafile.toString(), commitSession.getWALogSeq() + 2);
            break;
        } catch (IOException e) {
            log.error("Failed to write to write-ahead log " + e.getMessage() + " will retry", e);
            sleepUninterruptibly(1, TimeUnit.SECONDS);
        }
    } while (true);
    synchronized (tablet) {
        t1 = System.currentTimeMillis();
        if (datafileSizes.containsKey(newDatafile)) {
            log.error("Adding file that is already in set {}", newDatafile);
        }
        if (dfv.getNumEntries() > 0) {
            datafileSizes.put(newDatafile, dfv);
        }
        if (absMergeFile != null) {
            datafileSizes.remove(absMergeFile);
        }
        unreserveMergingMinorCompactionFile(absMergeFile);
        tablet.flushComplete(flushId);
        t2 = System.currentTimeMillis();
    }
    // must do this after list of files in memory is updated above
    removeFilesAfterScan(filesInUseByScans);
    if (absMergeFile != null)
        log.debug("TABLET_HIST {} MinC [{},memory] -> {}", tablet.getExtent(), absMergeFile, newDatafile);
    else
        log.debug("TABLET_HIST {} MinC [memory] -> {}", tablet.getExtent(), newDatafile);
    log.debug(String.format("MinC finish lock %.2f secs %s", (t2 - t1) / 1000.0, tablet.getExtent().toString()));
    long splitSize = tablet.getTableConfiguration().getAsBytes(Property.TABLE_SPLIT_THRESHOLD);
    if (dfv.getSize() > splitSize) {
        log.debug(String.format("Minor Compaction wrote out file larger than split threshold.  split threshold = %,d  file size = %,d", splitSize, dfv.getSize()));
    }
}
Also used : FileRef(org.apache.accumulo.server.fs.FileRef) IZooReaderWriter(org.apache.accumulo.fate.zookeeper.IZooReaderWriter) IOException(java.io.IOException)
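
A reusable pattern worth noting in this method is the root-tablet guard at the top: before a critical metadata update, verify that the tablet server's ZooKeeper lock is still held and fail fast otherwise. A minimal sketch of that guard, assuming only the IZooReaderWriter and ZooLock types used above (the helper name guardCriticalUpdate is hypothetical):

import org.apache.accumulo.fate.zookeeper.IZooReaderWriter;
import org.apache.accumulo.fate.zookeeper.ZooLock;

class LockGuardSketch {

    // Hypothetical helper: throw if the given ZooKeeper lock is no longer held,
    // mirroring the root-tablet check in bringMinorCompactionOnline above.
    static void guardCriticalUpdate(IZooReaderWriter zoo, ZooLock lock) {
        try {
            if (!zoo.isLockHeld(lock.getLockID())) {
                throw new IllegalStateException("lock not held, refusing to proceed");
            }
        } catch (Exception e) {
            throw new IllegalStateException("could not verify that the lock is held", e);
        }
    }
}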

Example 7 with IZooReaderWriter

use of org.apache.accumulo.fate.zookeeper.IZooReaderWriter in project accumulo by apache.

the class ProblemReports method iterator.

public Iterator<ProblemReport> iterator(final Table.ID table) {
    try {
        return new Iterator<ProblemReport>() {

            IZooReaderWriter zoo = ZooReaderWriter.getInstance();

            private int iter1Count = 0;

            private Iterator<String> iter1;

            private Iterator<String> getIter1() {
                if (iter1 == null) {
                    try {
                        List<String> children;
                        if (table == null || isMeta(table)) {
                            children = zoo.getChildren(ZooUtil.getRoot(context.getInstance()) + Constants.ZPROBLEMS);
                        } else {
                            children = Collections.emptyList();
                        }
                        iter1 = children.iterator();
                    } catch (Exception e) {
                        throw new RuntimeException(e);
                    }
                }
                return iter1;
            }

            private Iterator<Entry<Key, Value>> iter2;

            private Iterator<Entry<Key, Value>> getIter2() {
                if (iter2 == null) {
                    try {
                        if ((table == null || !isMeta(table)) && iter1Count == 0) {
                            Connector connector = context.getConnector();
                            Scanner scanner = connector.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
                            scanner.setTimeout(3, TimeUnit.SECONDS);
                            if (table == null) {
                                scanner.setRange(new Range(new Text("~err_"), false, new Text("~err`"), false));
                            } else {
                                scanner.setRange(new Range(new Text("~err_" + table)));
                            }
                            iter2 = scanner.iterator();
                        } else {
                            Map<Key, Value> m = Collections.emptyMap();
                            iter2 = m.entrySet().iterator();
                        }
                    } catch (Exception e) {
                        throw new RuntimeException(e);
                    }
                }
                return iter2;
            }

            @Override
            public boolean hasNext() {
                if (getIter1().hasNext()) {
                    return true;
                }
                if (getIter2().hasNext()) {
                    return true;
                }
                return false;
            }

            @Override
            public ProblemReport next() {
                try {
                    if (getIter1().hasNext()) {
                        iter1Count++;
                        return ProblemReport.decodeZooKeeperEntry(getIter1().next());
                    }
                    if (getIter2().hasNext()) {
                        return ProblemReport.decodeMetadataEntry(getIter2().next());
                    }
                } catch (Exception e) {
                    throw new RuntimeException(e);
                }
                throw new NoSuchElementException();
            }

            @Override
            public void remove() {
                throw new UnsupportedOperationException();
            }
        };
    } catch (Exception e) {
        throw new RuntimeException(e);
    }
}
Also used : Connector(org.apache.accumulo.core.client.Connector) Scanner(org.apache.accumulo.core.client.Scanner) Text(org.apache.hadoop.io.Text) Range(org.apache.accumulo.core.data.Range) RejectedExecutionException(java.util.concurrent.RejectedExecutionException) NoSuchElementException(java.util.NoSuchElementException) Entry(java.util.Map.Entry) IZooReaderWriter(org.apache.accumulo.fate.zookeeper.IZooReaderWriter) Iterator(java.util.Iterator) SortedKeyIterator(org.apache.accumulo.core.iterators.SortedKeyIterator) Value(org.apache.accumulo.core.data.Value) Key(org.apache.accumulo.core.data.Key)
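
The ZooKeeper half of this iterator is simply a listing of child nodes under the instance's ZPROBLEMS path, each of which is later decoded with ProblemReport.decodeZooKeeperEntry. A minimal sketch of just that listing step, assuming an already-built IZooReaderWriter and the instance root string (the helper name listProblemChildren is hypothetical):

import java.util.List;

import org.apache.accumulo.core.Constants;
import org.apache.accumulo.fate.zookeeper.IZooReaderWriter;

class ProblemListingSketch {

    // Hypothetical helper: return the raw child node names under the problems path;
    // a caller would decode each one, e.g. via ProblemReport.decodeZooKeeperEntry.
    static List<String> listProblemChildren(IZooReaderWriter zoo, String instanceRoot) throws Exception {
        return zoo.getChildren(instanceRoot + Constants.ZPROBLEMS);
    }
}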

Example 8 with IZooReaderWriter

use of org.apache.accumulo.fate.zookeeper.IZooReaderWriter in project accumulo by apache.

the class FateCommand method execute.

@Override
public int execute(final String fullCommand, final CommandLine cl, final Shell shellState) throws ParseException, KeeperException, InterruptedException, IOException {
    Instance instance = shellState.getInstance();
    String[] args = cl.getArgs();
    if (args.length <= 0) {
        throw new ParseException("Must provide a command to execute");
    }
    String cmd = args[0];
    boolean failedCommand = false;
    AdminUtil<FateCommand> admin = new AdminUtil<>(false);
    String path = ZooUtil.getRoot(instance) + Constants.ZFATE;
    String masterPath = ZooUtil.getRoot(instance) + Constants.ZMASTER_LOCK;
    IZooReaderWriter zk = getZooReaderWriter(shellState.getInstance(), cl.getOptionValue(secretOption.getOpt()));
    ZooStore<FateCommand> zs = new ZooStore<>(path, zk);
    if ("fail".equals(cmd)) {
        if (args.length <= 1) {
            throw new ParseException("Must provide transaction ID");
        }
        for (int i = 1; i < args.length; i++) {
            if (!admin.prepFail(zs, zk, masterPath, args[i])) {
                System.out.printf("Could not fail transaction: %s%n", args[i]);
                failedCommand = true;
            }
        }
    } else if ("delete".equals(cmd)) {
        if (args.length <= 1) {
            throw new ParseException("Must provide transaction ID");
        }
        for (int i = 1; i < args.length; i++) {
            if (admin.prepDelete(zs, zk, masterPath, args[i])) {
                admin.deleteLocks(zs, zk, ZooUtil.getRoot(instance) + Constants.ZTABLE_LOCKS, args[i]);
            } else {
                System.out.printf("Could not delete transaction: %s%n", args[i]);
                failedCommand = true;
            }
        }
    } else if ("list".equals(cmd) || "print".equals(cmd)) {
        // Parse transaction ID filters for print display
        Set<Long> filterTxid = null;
        if (args.length >= 2) {
            filterTxid = new HashSet<>(args.length);
            for (int i = 1; i < args.length; i++) {
                try {
                    Long val = Long.parseLong(args[i], 16);
                    filterTxid.add(val);
                } catch (NumberFormatException nfe) {
                    // Failed to parse, will exit instead of displaying everything since the intention was to potentially filter some data
                    System.out.printf("Invalid transaction ID format: %s%n", args[i]);
                    return 1;
                }
            }
        }
        // Parse TStatus filters for print display
        EnumSet<TStatus> filterStatus = null;
        if (cl.hasOption(statusOption.getOpt())) {
            filterStatus = EnumSet.noneOf(TStatus.class);
            String[] tstat = cl.getOptionValues(statusOption.getOpt());
            for (int i = 0; i < tstat.length; i++) {
                try {
                    filterStatus.add(TStatus.valueOf(tstat[i]));
                } catch (IllegalArgumentException iae) {
                    System.out.printf("Invalid transaction status name: %s%n", tstat[i]);
                    return 1;
                }
            }
        }
        StringBuilder buf = new StringBuilder(8096);
        Formatter fmt = new Formatter(buf);
        admin.print(zs, zk, ZooUtil.getRoot(instance) + Constants.ZTABLE_LOCKS, fmt, filterTxid, filterStatus);
        shellState.printLines(Collections.singletonList(buf.toString()).iterator(), !cl.hasOption(disablePaginationOpt.getOpt()));
    } else if ("dump".equals(cmd)) {
        List<Long> txids;
        if (args.length == 1) {
            txids = zs.list();
        } else {
            txids = new ArrayList<>();
            for (int i = 1; i < args.length; i++) {
                txids.add(Long.parseLong(args[i], 16));
            }
        }
        Gson gson = new GsonBuilder().registerTypeAdapter(ReadOnlyRepo.class, new InterfaceSerializer<>()).registerTypeAdapter(Repo.class, new InterfaceSerializer<>()).registerTypeAdapter(byte[].class, new ByteArraySerializer()).setPrettyPrinting().create();
        List<FateStack> txStacks = new ArrayList<>();
        for (Long txid : txids) {
            List<ReadOnlyRepo<FateCommand>> repoStack = zs.getStack(txid);
            txStacks.add(new FateStack(txid, repoStack));
        }
        System.out.println(gson.toJson(txStacks));
    } else {
        throw new ParseException("Invalid command option");
    }
    return failedCommand ? 1 : 0;
}
Also used : Instance(org.apache.accumulo.core.client.Instance) Formatter(java.util.Formatter) ArrayList(java.util.ArrayList) Gson(com.google.gson.Gson) AdminUtil(org.apache.accumulo.fate.AdminUtil) TStatus(org.apache.accumulo.fate.ReadOnlyTStore.TStatus) List(java.util.List) ReadOnlyRepo(org.apache.accumulo.fate.ReadOnlyRepo) GsonBuilder(com.google.gson.GsonBuilder) ZooStore(org.apache.accumulo.fate.ZooStore) IZooReaderWriter(org.apache.accumulo.fate.zookeeper.IZooReaderWriter) ParseException(org.apache.commons.cli.ParseException)
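
Transaction IDs on the command line are hexadecimal, which is why both the "list"/"print" filter and the "dump" branch parse them with radix 16. A minimal sketch of that parsing step in isolation, with the same bail-out behavior as execute() (the helper name parseTxidFilter is hypothetical):

import java.util.HashSet;
import java.util.Set;

class TxidParseSketch {

    // Hypothetical helper: parse hex transaction IDs (args[1..]) into a filter set,
    // returning null on the first malformed value so the caller can exit, as execute() does.
    static Set<Long> parseTxidFilter(String[] args) {
        Set<Long> filterTxid = new HashSet<>(args.length);
        for (int i = 1; i < args.length; i++) {
            try {
                filterTxid.add(Long.parseLong(args[i], 16));
            } catch (NumberFormatException nfe) {
                System.out.printf("Invalid transaction ID format: %s%n", args[i]);
                return null;
            }
        }
        return filterTxid;
    }
}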

Example 9 with IZooReaderWriter

use of org.apache.accumulo.fate.zookeeper.IZooReaderWriter in project accumulo by apache.

the class FateCommand method getZooReaderWriter.

protected synchronized IZooReaderWriter getZooReaderWriter(Instance instance, String secret) {
    if (secret == null) {
        AccumuloConfiguration conf = SiteConfiguration.getInstance();
        secret = conf.get(Property.INSTANCE_SECRET);
    }
    return new ZooReaderWriter(instance.getZooKeepers(), instance.getZooKeepersSessionTimeOut(), SCHEME, (USER + ":" + secret).getBytes());
}
Also used : ZooReaderWriter(org.apache.accumulo.fate.zookeeper.ZooReaderWriter) IZooReaderWriter(org.apache.accumulo.fate.zookeeper.IZooReaderWriter) AccumuloConfiguration(org.apache.accumulo.core.conf.AccumuloConfiguration)
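
The writer returned here is what backs the FATE ZooStore and AdminUtil calls in execute() above; passing a null secret falls back to the instance secret from the site configuration. A minimal wiring sketch under those assumptions (fatePath is assumed to be computed exactly as in execute(); this fragment is illustrative, not a complete command):

// Hypothetical wiring, mirroring execute(): the authenticated IZooReaderWriter
// backs both the FATE ZooStore and the AdminUtil operations (prepFail, prepDelete, print).
IZooReaderWriter zk = getZooReaderWriter(instance, null); // null -> use Property.INSTANCE_SECRET
ZooStore<FateCommand> zs = new ZooStore<>(fatePath, zk);
AdminUtil<FateCommand> admin = new AdminUtil<>(false);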

Example 10 with IZooReaderWriter

use of org.apache.accumulo.fate.zookeeper.IZooReaderWriter in project accumulo by apache.

the class RenameNamespace method call.

@Override
public Repo<Master> call(long id, Master master) throws Exception {
    Instance instance = master.getInstance();
    IZooReaderWriter zoo = ZooReaderWriter.getInstance();
    Utils.tableNameLock.lock();
    try {
        Utils.checkNamespaceDoesNotExist(instance, newName, namespaceId, TableOperation.RENAME);
        final String tap = ZooUtil.getRoot(instance) + Constants.ZNAMESPACES + "/" + namespaceId + Constants.ZNAMESPACE_NAME;
        zoo.mutate(tap, null, null, new Mutator() {

            @Override
            public byte[] mutate(byte[] current) throws Exception {
                final String currentName = new String(current);
                if (currentName.equals(newName))
                    // assume in this case the operation is running again, so we are done
                    return null;
                if (!currentName.equals(oldName)) {
                    throw new AcceptableThriftTableOperationException(null, oldName, TableOperation.RENAME, TableOperationExceptionType.NAMESPACE_NOTFOUND, "Name changed while processing");
                }
                return newName.getBytes();
            }
        });
        Tables.clearCache(instance);
    } finally {
        Utils.tableNameLock.unlock();
        Utils.unreserveNamespace(namespaceId, id, true);
    }
    LoggerFactory.getLogger(RenameNamespace.class).debug("Renamed namespace {} {} {}", namespaceId, oldName, newName);
    return null;
}
Also used : Instance(org.apache.accumulo.core.client.Instance) Mutator(org.apache.accumulo.fate.zookeeper.IZooReaderWriter.Mutator) IZooReaderWriter(org.apache.accumulo.fate.zookeeper.IZooReaderWriter) AcceptableThriftTableOperationException(org.apache.accumulo.core.client.impl.AcceptableThriftTableOperationException)
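
The mutate() call above is the general read-modify-write primitive on IZooReaderWriter: the Mutator is handed the node's current bytes and returns the replacement, or null to leave the node untouched (the "rename already happened" case above). A minimal sketch of the same compare-then-replace pattern on an arbitrary node, assuming only the types shown in this example (the helper name replaceIfExpected and its byte-array arguments are hypothetical):

import java.util.Arrays;

import org.apache.accumulo.fate.zookeeper.IZooReaderWriter;
import org.apache.accumulo.fate.zookeeper.IZooReaderWriter.Mutator;

class MutateSketch {

    // Hypothetical helper: atomically replace a node's value only if it still holds expectedOld;
    // returning null from the Mutator skips the write when the new value is already in place.
    static void replaceIfExpected(IZooReaderWriter zoo, String path, final byte[] expectedOld, final byte[] newValue) throws Exception {
        zoo.mutate(path, null, null, new Mutator() {

            @Override
            public byte[] mutate(byte[] current) throws Exception {
                if (Arrays.equals(current, newValue))
                    // assume the operation already ran, so there is nothing to do
                    return null;
                if (!Arrays.equals(current, expectedOld)) {
                    throw new IllegalStateException("value changed while processing");
                }
                return newValue;
            }
        });
    }
}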

Aggregations

IZooReaderWriter (org.apache.accumulo.fate.zookeeper.IZooReaderWriter) 57
KeeperException (org.apache.zookeeper.KeeperException) 25
IOException (java.io.IOException) 13
AccumuloSecurityException (org.apache.accumulo.core.client.AccumuloSecurityException) 11
Instance (org.apache.accumulo.core.client.Instance) 11
AcceptableThriftTableOperationException (org.apache.accumulo.core.client.impl.AcceptableThriftTableOperationException) 8
Mutator (org.apache.accumulo.fate.zookeeper.IZooReaderWriter.Mutator) 6
HdfsZooInstance (org.apache.accumulo.server.client.HdfsZooInstance) 6
AccumuloException (org.apache.accumulo.core.client.AccumuloException) 5
TException (org.apache.thrift.TException) 5
NoNodeException (org.apache.zookeeper.KeeperException.NoNodeException) 5
ArrayList (java.util.ArrayList) 4
TableNotFoundException (org.apache.accumulo.core.client.TableNotFoundException) 4
ZooReaderWriterFactory (org.apache.accumulo.server.zookeeper.ZooReaderWriterFactory) 4
File (java.io.File) 3
Entry (java.util.Map.Entry) 3
Connector (org.apache.accumulo.core.client.Connector) 3
Scanner (org.apache.accumulo.core.client.Scanner) 3
AccumuloConfiguration (org.apache.accumulo.core.conf.AccumuloConfiguration) 3
Key (org.apache.accumulo.core.data.Key) 3