
Example 61 with TreeMap

Use of java.util.TreeMap in project hadoop by apache.

From the class FlowRunCoprocessor, method prePut.

/*
   * (non-Javadoc)
   *
   * This method adds the tags onto the cells in the Put. It is presumed that
   * all the cells in one Put have the same set of Tags. The existing cell
   * timestamp is overwritten for non-metric cells and each such cell gets a new
   * unique timestamp generated by {@link TimestampGenerator}
   *
   * @see
   * org.apache.hadoop.hbase.coprocessor.BaseRegionObserver#prePut(org.apache
   * .hadoop.hbase.coprocessor.ObserverContext,
   * org.apache.hadoop.hbase.client.Put,
   * org.apache.hadoop.hbase.regionserver.wal.WALEdit,
   * org.apache.hadoop.hbase.client.Durability)
   */
@Override
public void prePut(ObserverContext<RegionCoprocessorEnvironment> e, Put put, WALEdit edit, Durability durability) throws IOException {
    Map<String, byte[]> attributes = put.getAttributesMap();
    if (!isFlowRunRegion) {
        return;
    }
    // Assumption is that all the cells in a put are the same operation.
    List<Tag> tags = new ArrayList<>();
    if ((attributes != null) && (attributes.size() > 0)) {
        for (Map.Entry<String, byte[]> attribute : attributes.entrySet()) {
            Tag t = HBaseTimelineStorageUtils.getTagFromAttribute(attribute);
            tags.add(t);
        }
        byte[] tagByteArray = Tag.fromList(tags);
        NavigableMap<byte[], List<Cell>> newFamilyMap = new TreeMap<>(Bytes.BYTES_COMPARATOR);
        for (Map.Entry<byte[], List<Cell>> entry : put.getFamilyCellMap().entrySet()) {
            List<Cell> newCells = new ArrayList<>(entry.getValue().size());
            for (Cell cell : entry.getValue()) {
                // for each cell in the put add the tags
                // Assumption is that all the cells in
                // one put are the same operation
                // also, get a unique cell timestamp for non-metric cells
                // this way we don't inadvertently overwrite cell versions
                long cellTimestamp = getCellTimestamp(cell.getTimestamp(), tags);
                newCells.add(CellUtil.createCell(CellUtil.cloneRow(cell), CellUtil.cloneFamily(cell), CellUtil.cloneQualifier(cell), cellTimestamp, KeyValue.Type.Put, CellUtil.cloneValue(cell), tagByteArray));
            }
            newFamilyMap.put(entry.getKey(), newCells);
        }
        // for each entry
        // Update the family map for the Put
        put.setFamilyCellMap(newFamilyMap);
    }
}
Also used: java.util.ArrayList, java.util.List, java.util.Map, java.util.NavigableMap, java.util.TreeMap, org.apache.hadoop.hbase.Cell, org.apache.hadoop.hbase.Tag
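What makes this example work is the comparator handed to the TreeMap: byte[] has no natural ordering and uses identity-based equals, so a tree map keyed by raw byte arrays is only usable when an explicit Comparator such as HBase's Bytes.BYTES_COMPARATOR is supplied. Below is a minimal standalone sketch of the same idea; the class and variable names are illustrative rather than taken from the Hadoop source, and Arrays::compareUnsigned (Java 9+) stands in for the HBase comparator, which likewise orders arrays lexicographically by unsigned byte value.

import java.nio.charset.StandardCharsets;
import java.util.Arrays;
import java.util.Comparator;
import java.util.NavigableMap;
import java.util.TreeMap;

public class ByteKeyedMapSketch {
    public static void main(String[] args) {
        // byte[] is not Comparable, so a TreeMap over byte[] keys needs an
        // explicit comparator supplied at construction time.
        Comparator<byte[]> byteOrder = Arrays::compareUnsigned;
        NavigableMap<byte[], String> byFamily = new TreeMap<>(byteOrder);

        byFamily.put("info".getBytes(StandardCharsets.UTF_8), "non-metric cells");
        byFamily.put("m".getBytes(StandardCharsets.UTF_8), "metric cells");

        // A lookup with a *different* byte[] instance still finds the entry,
        // because key equality is decided by the comparator, not by reference.
        System.out.println(byFamily.get("info".getBytes(StandardCharsets.UTF_8)));

        // Iteration is in comparator order: "info" before "m".
        byFamily.forEach((family, cells) ->
            System.out.println(new String(family, StandardCharsets.UTF_8) + " -> " + cells));
    }
}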

Example 62 with TreeMap

Use of java.util.TreeMap in project hadoop by apache.

From the class FileSystemTimelineReaderImpl, method getEntities.

private Set<TimelineEntity> getEntities(File dir, String entityType, TimelineEntityFilters filters, TimelineDataToRetrieve dataToRetrieve) throws IOException {
    // First sort the selected entities based on created/start time.
    Map<Long, Set<TimelineEntity>> sortedEntities = new TreeMap<>(new Comparator<Long>() {

        @Override
        public int compare(Long l1, Long l2) {
            return l2.compareTo(l1);
        }
    });
    for (File entityFile : dir.listFiles()) {
        if (!entityFile.getName().contains(TIMELINE_SERVICE_STORAGE_EXTENSION)) {
            continue;
        }
        try (BufferedReader reader = new BufferedReader(new InputStreamReader(new FileInputStream(entityFile), Charset.forName("UTF-8")))) {
            TimelineEntity entity = readEntityFromFile(reader);
            if (!entity.getType().equals(entityType)) {
                continue;
            }
            if (!isTimeInRange(entity.getCreatedTime(), filters.getCreatedTimeBegin(), filters.getCreatedTimeEnd())) {
                continue;
            }
            if (filters.getRelatesTo() != null && !filters.getRelatesTo().getFilterList().isEmpty() && !TimelineStorageUtils.matchRelatesTo(entity, filters.getRelatesTo())) {
                continue;
            }
            if (filters.getIsRelatedTo() != null && !filters.getIsRelatedTo().getFilterList().isEmpty() && !TimelineStorageUtils.matchIsRelatedTo(entity, filters.getIsRelatedTo())) {
                continue;
            }
            if (filters.getInfoFilters() != null && !filters.getInfoFilters().getFilterList().isEmpty() && !TimelineStorageUtils.matchInfoFilters(entity, filters.getInfoFilters())) {
                continue;
            }
            if (filters.getConfigFilters() != null && !filters.getConfigFilters().getFilterList().isEmpty() && !TimelineStorageUtils.matchConfigFilters(entity, filters.getConfigFilters())) {
                continue;
            }
            if (filters.getMetricFilters() != null && !filters.getMetricFilters().getFilterList().isEmpty() && !TimelineStorageUtils.matchMetricFilters(entity, filters.getMetricFilters())) {
                continue;
            }
            if (filters.getEventFilters() != null && !filters.getEventFilters().getFilterList().isEmpty() && !TimelineStorageUtils.matchEventFilters(entity, filters.getEventFilters())) {
                continue;
            }
            TimelineEntity entityToBeReturned = createEntityToBeReturned(entity, dataToRetrieve.getFieldsToRetrieve());
            Set<TimelineEntity> entitiesCreatedAtSameTime = sortedEntities.get(entityToBeReturned.getCreatedTime());
            if (entitiesCreatedAtSameTime == null) {
                entitiesCreatedAtSameTime = new HashSet<TimelineEntity>();
            }
            entitiesCreatedAtSameTime.add(entityToBeReturned);
            sortedEntities.put(entityToBeReturned.getCreatedTime(), entitiesCreatedAtSameTime);
        }
    }
    Set<TimelineEntity> entities = new HashSet<TimelineEntity>();
    long entitiesAdded = 0;
    for (Set<TimelineEntity> entitySet : sortedEntities.values()) {
        for (TimelineEntity entity : entitySet) {
            entities.add(entity);
            ++entitiesAdded;
            if (entitiesAdded >= filters.getLimit()) {
                return entities;
            }
        }
    }
    return entities;
}
Also used: java.io.BufferedReader, java.io.File, java.io.FileInputStream, java.io.InputStreamReader, java.util.EnumSet, java.util.HashSet, java.util.Set, java.util.TreeMap, org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity
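The TreeMap is what gives this reader its newest-first ordering: the comparator reverses the natural order of the Long created-time keys, so iterating values() walks the buckets from the most recent creation time downwards and the limit can be applied without a separate sort. A small sketch of the same pattern follows; the class name, helper method, and entity labels are illustrative assumptions, and Comparator.reverseOrder() is used in place of the anonymous comparator above.

import java.util.Comparator;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.TreeMap;

public class NewestFirstSketch {
    public static void main(String[] args) {
        // Keys ordered descending, so values() iterates the newest bucket first.
        Map<Long, Set<String>> byCreatedTime = new TreeMap<>(Comparator.<Long>reverseOrder());

        add(byCreatedTime, 100L, "entity-a");
        add(byCreatedTime, 300L, "entity-b");
        add(byCreatedTime, 200L, "entity-c");
        add(byCreatedTime, 300L, "entity-d");

        // Prints the 300 bucket, then 200, then 100.
        byCreatedTime.forEach((created, names) -> System.out.println(created + " -> " + names));
    }

    // computeIfAbsent is the idiomatic form of the get()/new HashSet()/put()
    // sequence used for entitiesCreatedAtSameTime in the example above.
    private static void add(Map<Long, Set<String>> map, long createdTime, String name) {
        map.computeIfAbsent(createdTime, k -> new HashSet<>()).add(name);
    }
}

An equivalent design would be a TreeMap in natural order iterated via descendingMap().values(); supplying the reversed comparator simply bakes the direction into the map itself.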

Example 63 with TreeMap

Use of java.util.TreeMap in project hbase by apache.

From the class GeneratedMessage, method getAllFieldsMutable.

/**
   * Internal helper to return a modifiable map containing all the fields.
   * The returned Map is modifiable so that the caller can add additional
   * extension fields to implement {@link #getAllFields()}.
   *
   * @param getBytesForString whether to generate ByteString for string fields
   */
private Map<FieldDescriptor, Object> getAllFieldsMutable(boolean getBytesForString) {
    final TreeMap<FieldDescriptor, Object> result = new TreeMap<FieldDescriptor, Object>();
    final Descriptor descriptor = internalGetFieldAccessorTable().descriptor;
    final List<FieldDescriptor> fields = descriptor.getFields();
    for (int i = 0; i < fields.size(); i++) {
        FieldDescriptor field = fields.get(i);
        final OneofDescriptor oneofDescriptor = field.getContainingOneof();
        /*
       * If the field is part of a Oneof, then at maximum one field in the Oneof is set
       * and it is not repeated. There is no need to iterate through the others.
       */
        if (oneofDescriptor != null) {
            // Skip other fields in the Oneof we know are not set
            i += oneofDescriptor.getFieldCount() - 1;
            if (!hasOneof(oneofDescriptor)) {
                // If no field is set in the Oneof, skip all the fields in the Oneof
                continue;
            }
            // Get the pointer to the only field which is set in the Oneof
            field = getOneofFieldDescriptor(oneofDescriptor);
        } else {
            // If we are not in a Oneof, we need to check if the field is set and if it is repeated
            if (field.isRepeated()) {
                final List<?> value = (List<?>) getField(field);
                if (!value.isEmpty()) {
                    result.put(field, value);
                }
                continue;
            }
            if (!hasField(field)) {
                continue;
            }
        }
        // Add the field to the map
        if (getBytesForString && field.getJavaType() == FieldDescriptor.JavaType.STRING) {
            result.put(field, getFieldRaw(field));
        } else {
            result.put(field, getField(field));
        }
    }
    return result;
}
Also used: java.util.ArrayList, java.util.List, java.util.TreeMap, and the org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors nested types Descriptor, EnumDescriptor, EnumValueDescriptor, FieldDescriptor, FileDescriptor and OneofDescriptor
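Here the TreeMap does the sorting implicitly: the protobuf FieldDescriptor is Comparable (within a single message type it orders by field number), so collecting the set fields into a plain TreeMap means getAllFields() hands them back in field-number order without an explicit sort step. A toy sketch of that property, using a made-up FieldKey record as a stand-in for the real descriptor (assumes Java 16+ for records):

import java.util.Map;
import java.util.TreeMap;

public class FieldOrderSketch {
    // Stand-in for a descriptor whose natural order is its field number.
    record FieldKey(int number, String name) implements Comparable<FieldKey> {
        @Override
        public int compareTo(FieldKey other) {
            return Integer.compare(number, other.number);
        }
    }

    public static void main(String[] args) {
        // Insertion order does not matter: a TreeMap keeps its keys sorted by
        // natural order, so iteration is always by ascending field number.
        Map<FieldKey, Object> fields = new TreeMap<>();
        fields.put(new FieldKey(3, "payload"), "...");
        fields.put(new FieldKey(1, "id"), 42L);
        fields.put(new FieldKey(2, "name"), "example");

        // Prints 1 id, 2 name, 3 payload.
        fields.forEach((k, v) -> System.out.println(k.number() + " " + k.name() + " = " + v));
    }
}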

Example 64 with TreeMap

Use of java.util.TreeMap in project hbase by apache.

From the class HRegion, method bulkLoadHFiles.

@Override
public Map<byte[], List<Path>> bulkLoadHFiles(Collection<Pair<byte[], String>> familyPaths, boolean assignSeqId, BulkLoadListener bulkLoadListener, boolean copyFile) throws IOException {
    long seqId = -1;
    Map<byte[], List<Path>> storeFiles = new TreeMap<>(Bytes.BYTES_COMPARATOR);
    Map<String, Long> storeFilesSizes = new HashMap<>();
    Preconditions.checkNotNull(familyPaths);
    // we need writeLock for multi-family bulk load
    startBulkRegionOperation(hasMultipleColumnFamilies(familyPaths));
    boolean isSuccessful = false;
    try {
        this.writeRequestsCount.increment();
        // There possibly was a split that happened between when the split keys
        // were gathered and before the HRegion's write lock was taken.  We need
        // to validate the HFile region before attempting to bulk load all of them
        List<IOException> ioes = new ArrayList<>();
        List<Pair<byte[], String>> failures = new ArrayList<>();
        for (Pair<byte[], String> p : familyPaths) {
            byte[] familyName = p.getFirst();
            String path = p.getSecond();
            HStore store = getHStore(familyName);
            if (store == null) {
                IOException ioe = new org.apache.hadoop.hbase.DoNotRetryIOException("No such column family " + Bytes.toStringBinary(familyName));
                ioes.add(ioe);
            } else {
                try {
                    store.assertBulkLoadHFileOk(new Path(path));
                } catch (WrongRegionException wre) {
                    // recoverable (file doesn't fit in region)
                    failures.add(p);
                } catch (IOException ioe) {
                    // unrecoverable (hdfs problem)
                    ioes.add(ioe);
                }
            }
        }
        // validation failed because of some sort of IO problem.
        if (ioes.size() != 0) {
            IOException e = MultipleIOException.createIOException(ioes);
            LOG.error("There were one or more IO errors when checking if the bulk load is ok.", e);
            throw e;
        }
        // validation failed, bail out before doing anything permanent.
        if (failures.size() != 0) {
            StringBuilder list = new StringBuilder();
            for (Pair<byte[], String> p : failures) {
                list.append("\n").append(Bytes.toString(p.getFirst())).append(" : ").append(p.getSecond());
            }
            // problem when validating
            LOG.warn("There was a recoverable bulk load failure likely due to a" + " split.  These (family, HFile) pairs were not loaded: " + list);
            return null;
        }
        // If requested, flush first so the bulk load gets a sequence id that we
        // can be sure is beyond the last hfile written.
        if (assignSeqId) {
            FlushResult fs = flushcache(true, false);
            if (fs.isFlushSucceeded()) {
                seqId = ((FlushResultImpl) fs).flushSequenceId;
            } else if (fs.getResult() == FlushResult.Result.CANNOT_FLUSH_MEMSTORE_EMPTY) {
                seqId = ((FlushResultImpl) fs).flushSequenceId;
            } else {
                throw new IOException("Could not bulk load with an assigned sequential ID because the " + "flush didn't run. Reason for not flushing: " + ((FlushResultImpl) fs).failureReason);
            }
        }
        Map<byte[], List<Pair<Path, Path>>> familyWithFinalPath = new TreeMap<>(Bytes.BYTES_COMPARATOR);
        for (Pair<byte[], String> p : familyPaths) {
            byte[] familyName = p.getFirst();
            String path = p.getSecond();
            HStore store = getHStore(familyName);
            if (!familyWithFinalPath.containsKey(familyName)) {
                familyWithFinalPath.put(familyName, new ArrayList<>());
            }
            List<Pair<Path, Path>> lst = familyWithFinalPath.get(familyName);
            try {
                String finalPath = path;
                if (bulkLoadListener != null) {
                    finalPath = bulkLoadListener.prepareBulkLoad(familyName, path, copyFile);
                }
                Pair<Path, Path> pair = store.preBulkLoadHFile(finalPath, seqId);
                lst.add(pair);
            } catch (IOException ioe) {
                // A failure here can cause an atomicity violation that we currently
                // cannot recover from since it is likely a failed HDFS operation.
                LOG.error("There was a partial failure due to IO when attempting to" + " load " + Bytes.toString(p.getFirst()) + " : " + p.getSecond(), ioe);
                if (bulkLoadListener != null) {
                    try {
                        bulkLoadListener.failedBulkLoad(familyName, path);
                    } catch (Exception ex) {
                        LOG.error("Error while calling failedBulkLoad for family " + Bytes.toString(familyName) + " with path " + path, ex);
                    }
                }
                throw ioe;
            }
        }
        if (this.getCoprocessorHost() != null) {
            for (Map.Entry<byte[], List<Pair<Path, Path>>> entry : familyWithFinalPath.entrySet()) {
                this.getCoprocessorHost().preCommitStoreFile(entry.getKey(), entry.getValue());
            }
        }
        for (Map.Entry<byte[], List<Pair<Path, Path>>> entry : familyWithFinalPath.entrySet()) {
            byte[] familyName = entry.getKey();
            for (Pair<Path, Path> p : entry.getValue()) {
                String path = p.getFirst().toString();
                Path commitedStoreFile = p.getSecond();
                HStore store = getHStore(familyName);
                try {
                    store.bulkLoadHFile(familyName, path, commitedStoreFile);
                    // Note the size of the store file
                    try {
                        FileSystem fs = commitedStoreFile.getFileSystem(baseConf);
                        storeFilesSizes.put(commitedStoreFile.getName(), fs.getFileStatus(commitedStoreFile).getLen());
                    } catch (IOException e) {
                        LOG.warn("Failed to find the size of hfile " + commitedStoreFile);
                        storeFilesSizes.put(commitedStoreFile.getName(), 0L);
                    }
                    if (storeFiles.containsKey(familyName)) {
                        storeFiles.get(familyName).add(commitedStoreFile);
                    } else {
                        List<Path> storeFileNames = new ArrayList<>();
                        storeFileNames.add(commitedStoreFile);
                        storeFiles.put(familyName, storeFileNames);
                    }
                    if (bulkLoadListener != null) {
                        bulkLoadListener.doneBulkLoad(familyName, path);
                    }
                } catch (IOException ioe) {
                    // A failure here can cause an atomicity violation that we currently
                    // cannot recover from since it is likely a failed HDFS operation.
                    // TODO Need a better story for reverting partial failures due to HDFS.
                    LOG.error("There was a partial failure due to IO when attempting to" + " load " + Bytes.toString(familyName) + " : " + p.getSecond(), ioe);
                    if (bulkLoadListener != null) {
                        try {
                            bulkLoadListener.failedBulkLoad(familyName, path);
                        } catch (Exception ex) {
                            LOG.error("Error while calling failedBulkLoad for family " + Bytes.toString(familyName) + " with path " + path, ex);
                        }
                    }
                    throw ioe;
                }
            }
        }
        isSuccessful = true;
    } finally {
        if (wal != null && !storeFiles.isEmpty()) {
            // Write a bulk load event for hfiles that are loaded
            try {
                WALProtos.BulkLoadDescriptor loadDescriptor = ProtobufUtil.toBulkLoadDescriptor(this.getRegionInfo().getTable(), UnsafeByteOperations.unsafeWrap(this.getRegionInfo().getEncodedNameAsBytes()), storeFiles, storeFilesSizes, seqId);
                WALUtil.writeBulkLoadMarkerAndSync(this.wal, this.getReplicationScope(), getRegionInfo(), loadDescriptor, mvcc);
            } catch (IOException ioe) {
                if (this.rsServices != null) {
                    // Have to abort region server because some hfiles has been loaded but we can't write
                    // the event into WAL
                    isSuccessful = false;
                    this.rsServices.abort("Failed to write bulk load event into WAL.", ioe);
                }
            }
        }
        closeBulkRegionOperation();
    }
    return isSuccessful ? storeFiles : null;
}
Also used:
java.io: EOFException, FileNotFoundException, InterruptedIOException, IOException
java.text: ParseException
java.util: AbstractList, ArrayList, HashMap, List, Map, NavigableMap, TreeMap
java.util.concurrent: ConcurrentHashMap, ConcurrentMap, ConcurrentSkipListMap, ExecutionException, TimeoutException
java.util.concurrent.atomic: AtomicLong
org.apache.hadoop.fs: FileSystem, Path
org.apache.hadoop.hbase: DoNotRetryIOException, DroppedSnapshotException, NotServingRegionException, RegionTooBusyException, UnknownScannerException
org.apache.hadoop.hbase.exceptions: FailedSanityCheckException, RegionInRecoveryException, TimeoutIOException, UnknownProtocolException
org.apache.hadoop.hbase.filter: IncompatibleFilterException
org.apache.hadoop.hbase.ipc: CallerDisconnectedException
org.apache.hadoop.hbase.shaded.protobuf.generated: WALProtos
org.apache.hadoop.hbase.util: Pair
org.apache.hadoop.io: MultipleIOException
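Both storeFiles and familyWithFinalPath above are TreeMaps over byte[] family names built with Bytes.BYTES_COMPARATOR, so the per-family grouping is deterministic and lookups work even though every byte[] instance is distinct. The sketch below shows that grouping pattern in isolation; the class name, the fake (family, path) pairs, and the use of Arrays::compareUnsigned in place of the HBase comparator are all illustrative assumptions.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Comparator;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;

public class GroupByFamilySketch {
    public static void main(String[] args) {
        // Hypothetical (family, hfile path) pairs standing in for familyPaths.
        String[][] familyPaths = {
            { "cf2", "/bulk/cf2/hfile-1" },
            { "cf1", "/bulk/cf1/hfile-1" },
            { "cf1", "/bulk/cf1/hfile-2" },
        };

        // byte[] family name -> list of paths, kept in a deterministic order.
        Comparator<byte[]> byteOrder = Arrays::compareUnsigned;
        Map<byte[], List<String>> byFamily = new TreeMap<>(byteOrder);

        for (String[] p : familyPaths) {
            byte[] family = p[0].getBytes();
            // computeIfAbsent is the compact form of the containsKey()/put(new
            // ArrayList<>()) pattern used for familyWithFinalPath above.
            byFamily.computeIfAbsent(family, k -> new ArrayList<>()).add(p[1]);
        }

        // cf1 (two files) prints before cf2 regardless of input order.
        byFamily.forEach((family, paths) ->
            System.out.println(new String(family) + " -> " + paths));
    }
}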

Example 65 with TreeMap

Use of java.util.TreeMap in project hbase by apache.

From the class HRegion, method internalPrepareFlushCache.

@edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "DLS_DEAD_LOCAL_STORE", justification = "FindBugs seems confused about trxId")
protected PrepareFlushResult internalPrepareFlushCache(final WAL wal, final long myseqid, final Collection<Store> storesToFlush, MonitoredTask status, boolean writeFlushWalMarker) throws IOException {
    if (this.rsServices != null && this.rsServices.isAborted()) {
        // Don't flush when server aborting, it's unsafe
        throw new IOException("Aborting flush because server is aborted...");
    }
    final long startTime = EnvironmentEdgeManager.currentTime();
    // If the memstore is empty there is nothing to flush, but we still hand back a
    // valid, unused sequence id rather than make the caller go get one.
    if (this.memstoreDataSize.get() <= 0) {
        // Take an update lock so no edits can come into memory just yet.
        this.updatesLock.writeLock().lock();
        WriteEntry writeEntry = null;
        try {
            if (this.memstoreDataSize.get() <= 0) {
                // Presume that if there are still no edits in the memstore, then there are no edits for
                // this region out in the WAL subsystem so no need to do any trickery clearing out
                // edits in the WAL sub-system. Up the sequence number so the resulting flush id is for
                // sure just beyond the last appended region edit and not associated with any edit
                // (useful as marker when bulk loading, etc.).
                FlushResult flushResult = null;
                if (wal != null) {
                    writeEntry = mvcc.begin();
                    long flushOpSeqId = writeEntry.getWriteNumber();
                    flushResult = new FlushResultImpl(FlushResult.Result.CANNOT_FLUSH_MEMSTORE_EMPTY, flushOpSeqId, "Nothing to flush", writeFlushRequestMarkerToWAL(wal, writeFlushWalMarker));
                    mvcc.completeAndWait(writeEntry);
                    // Set to null so we don't complete it again down in finally block.
                    writeEntry = null;
                    return new PrepareFlushResult(flushResult, myseqid);
                } else {
                    return new PrepareFlushResult(new FlushResultImpl(FlushResult.Result.CANNOT_FLUSH_MEMSTORE_EMPTY, "Nothing to flush", false), myseqid);
                }
            }
        } finally {
            if (writeEntry != null) {
                // If writeEntry is non-null, this operation failed; the mvcc transaction failed...
                // but complete it anyways so it doesn't block the mvcc queue.
                mvcc.complete(writeEntry);
            }
            this.updatesLock.writeLock().unlock();
        }
    }
    logFatLineOnFlush(storesToFlush, myseqid);
    // Stop updates while we snapshot the memstore of all of these regions' stores. We only have
    // to do this for a moment.  It is quick. We also set the memstore size to zero here before we
    // allow updates again so its value will represent the size of the updates received
    // during flush
    // We have to take an update lock during snapshot, or else a write could end up in both snapshot
    // and memstore (makes it difficult to do atomic rows then)
    status.setStatus("Obtaining lock to block concurrent updates");
    // block waiting for the lock for internal flush
    this.updatesLock.writeLock().lock();
    status.setStatus("Preparing flush snapshotting stores in " + getRegionInfo().getEncodedName());
    MemstoreSize totalSizeOfFlushableStores = new MemstoreSize();
    Map<byte[], Long> flushedFamilyNamesToSeq = new HashMap<>();
    for (Store store : storesToFlush) {
        flushedFamilyNamesToSeq.put(store.getFamily().getName(), ((HStore) store).preFlushSeqIDEstimation());
    }
    TreeMap<byte[], StoreFlushContext> storeFlushCtxs = new TreeMap<>(Bytes.BYTES_COMPARATOR);
    TreeMap<byte[], List<Path>> committedFiles = new TreeMap<>(Bytes.BYTES_COMPARATOR);
    TreeMap<byte[], MemstoreSize> storeFlushableSize = new TreeMap<>(Bytes.BYTES_COMPARATOR);
    // The sequence id of this flush operation which is used to log FlushMarker and pass to
    // createFlushContext to use as the store file's sequence id. It can be in advance of edits
    // still in the memstore, edits that are in other column families yet to be flushed.
    long flushOpSeqId = HConstants.NO_SEQNUM;
    // The max flushed sequence id after this flush operation completes. All edits in memstore
    // will be in advance of this sequence id.
    long flushedSeqId = HConstants.NO_SEQNUM;
    byte[] encodedRegionName = getRegionInfo().getEncodedNameAsBytes();
    try {
        if (wal != null) {
            Long earliestUnflushedSequenceIdForTheRegion = wal.startCacheFlush(encodedRegionName, flushedFamilyNamesToSeq);
            if (earliestUnflushedSequenceIdForTheRegion == null) {
                // This should never happen. This is how startCacheFlush signals flush cannot proceed.
                String msg = this.getRegionInfo().getEncodedName() + " flush aborted; WAL closing.";
                status.setStatus(msg);
                return new PrepareFlushResult(new FlushResultImpl(FlushResult.Result.CANNOT_FLUSH, msg, false), myseqid);
            }
            flushOpSeqId = getNextSequenceId(wal);
            // Back up 1, minus 1 from oldest sequence id in memstore to get last 'flushed' edit
            flushedSeqId = earliestUnflushedSequenceIdForTheRegion.longValue() == HConstants.NO_SEQNUM ? flushOpSeqId : earliestUnflushedSequenceIdForTheRegion.longValue() - 1;
        } else {
            // use the provided sequence Id as WAL is not being used for this flush.
            flushedSeqId = flushOpSeqId = myseqid;
        }
        for (Store s : storesToFlush) {
            MemstoreSize flushableSize = s.getSizeToFlush();
            totalSizeOfFlushableStores.incMemstoreSize(flushableSize);
            storeFlushCtxs.put(s.getFamily().getName(), s.createFlushContext(flushOpSeqId));
            // for writing stores to WAL
            committedFiles.put(s.getFamily().getName(), null);
            storeFlushableSize.put(s.getFamily().getName(), flushableSize);
        }
        // write the snapshot start to WAL
        if (wal != null && !writestate.readOnly) {
            FlushDescriptor desc = ProtobufUtil.toFlushDescriptor(FlushAction.START_FLUSH, getRegionInfo(), flushOpSeqId, committedFiles);
            // No sync. Sync is below where no updates lock and we do FlushAction.COMMIT_FLUSH
            WALUtil.writeFlushMarker(wal, this.getReplicationScope(), getRegionInfo(), desc, false, mvcc);
        }
        // Prepare flush (take a snapshot)
        for (StoreFlushContext flush : storeFlushCtxs.values()) {
            flush.prepare();
        }
    } catch (IOException ex) {
        doAbortFlushToWAL(wal, flushOpSeqId, committedFiles);
        throw ex;
    } finally {
        this.updatesLock.writeLock().unlock();
    }
    String s = "Finished memstore snapshotting " + this + ", syncing WAL and waiting on mvcc, " + "flushsize=" + totalSizeOfFlushableStores;
    status.setStatus(s);
    doSyncOfUnflushedWALChanges(wal, getRegionInfo());
    return new PrepareFlushResult(storeFlushCtxs, committedFiles, storeFlushableSize, startTime, flushOpSeqId, flushedSeqId, totalSizeOfFlushableStores);
}
Also used: java.io.IOException, java.io.InterruptedIOException, java.util.AbstractList, java.util.ArrayList, java.util.HashMap, java.util.List, java.util.TreeMap, java.util.concurrent.ConcurrentHashMap, java.util.concurrent.atomic.AtomicLong, org.apache.hadoop.hbase.DoNotRetryIOException, org.apache.hadoop.hbase.exceptions.TimeoutIOException, org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl.WriteEntry, org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor, org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor.StoreFlushDescriptor, org.apache.hadoop.io.MultipleIOException
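The three bookkeeping maps in this method (storeFlushCtxs, committedFiles, storeFlushableSize) are all TreeMaps over the same byte[] family keys with the same comparator, which guarantees that iterating any of them visits the stores in the same deterministic order, for example when the flush descriptor is written to the WAL. A compact sketch of that idea, with illustrative names and Arrays::compareUnsigned standing in for Bytes.BYTES_COMPARATOR:

import java.util.Arrays;
import java.util.Comparator;
import java.util.List;
import java.util.TreeMap;

public class ParallelFamilyMapsSketch {
    public static void main(String[] args) {
        Comparator<byte[]> byteOrder = Arrays::compareUnsigned;

        // Same comparator and same keys in every map, so all three iterate the
        // column families in an identical order.
        TreeMap<byte[], String> flushContexts = new TreeMap<>(byteOrder);
        TreeMap<byte[], List<String>> committedFiles = new TreeMap<>(byteOrder);
        TreeMap<byte[], Long> flushableSizes = new TreeMap<>(byteOrder);

        for (String family : new String[] { "meta", "info", "data" }) {
            byte[] name = family.getBytes();
            flushContexts.put(name, "flush-context-" + family);
            committedFiles.put(name, null); // filled in once the flush commits
            flushableSizes.put(name, 1024L);
        }

        // Prints data, info, meta: the insertion order is irrelevant.
        flushContexts.keySet().forEach(k -> System.out.println(new String(k)));
    }
}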

Aggregations

TreeMap (java.util.TreeMap): 4400
Map (java.util.Map): 1245
ArrayList (java.util.ArrayList): 930
HashMap (java.util.HashMap): 871
Test (org.junit.Test): 614
List (java.util.List): 542
Before (org.junit.Before): 505
IOException (java.io.IOException): 402
HashSet (java.util.HashSet): 301
Set (java.util.Set): 268
File (java.io.File): 267
SortedMap (java.util.SortedMap): 240
TreeSet (java.util.TreeSet): 213
LinkedHashMap (java.util.LinkedHashMap): 196
Key (org.apache.accumulo.core.data.Key): 156
Value (org.apache.accumulo.core.data.Value): 156
Iterator (java.util.Iterator): 149
NavigableMap (java.util.NavigableMap): 124
Collection (java.util.Collection): 115
ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap): 111