
Example 26 with GemFireIOException

use of org.apache.geode.GemFireIOException in project geode by apache.

the class AbstractRegionMap method clear.

/**
   * Clear the region and, if an RVV is given, return a collection of the version sources in all
   * remaining tags
   */
public Set<VersionSource> clear(RegionVersionVector rvv) {
    Set<VersionSource> result = new HashSet<VersionSource>();
    if (!_isOwnerALocalRegion()) {
        // Fix for #41333. Just clear the map
        // if we failed during initialization.
        _mapClear();
        return null;
    }
    if (logger.isDebugEnabled()) {
        logger.debug("Clearing entries for {} rvv={}", _getOwner(), " rvv=" + rvv);
    }
    LocalRegion lr = _getOwner();
    RegionVersionVector localRvv = lr.getVersionVector();
    incClearCount(lr);
    // lock for size calcs if the region might have tombstones
    Object lockObj = lr.getConcurrencyChecksEnabled() ? lr.getSizeGuard() : new Object();
    synchronized (lockObj) {
        if (rvv == null) {
            int delta = 0;
            try {
                // TODO soplog: need to determine if stats should reflect only
                // the size in memory or the complete thing
                delta = sizeInVM();
            } catch (GemFireIOException e) {
                // ignore rather than throwing an exception during cache close
            }
            int tombstones = lr.getTombstoneCount();
            _mapClear();
            _getOwner().updateSizeOnClearRegion(delta - tombstones);
            _getOwner().incTombstoneCount(-tombstones);
            if (delta != 0) {
                incEntryCount(-delta);
            }
        } else {
            int delta = 0;
            int tombstones = 0;
            VersionSource myId = _getOwner().getVersionMember();
            if (localRvv != rvv) {
                localRvv.recordGCVersions(rvv);
            }
            final boolean isTraceEnabled = logger.isTraceEnabled();
            for (RegionEntry re : regionEntries()) {
                synchronized (re) {
                    Token value = re.getValueAsToken();
                    // if it's already being removed or the entry is being created we leave it alone
                    if (value == Token.REMOVED_PHASE1 || value == Token.REMOVED_PHASE2) {
                        continue;
                    }
                    VersionSource id = re.getVersionStamp().getMemberID();
                    if (id == null) {
                        id = myId;
                    }
                    if (rvv.contains(id, re.getVersionStamp().getRegionVersion())) {
                        if (isTraceEnabled) {
                            logger.trace("region clear op is removing {} {}", re.getKey(), re.getVersionStamp());
                        }
                        boolean tombstone = re.isTombstone();
                        // note: it.remove() did not reliably remove the entry so we use remove(K,V) here
                        if (_getMap().remove(re.getKey(), re)) {
                            if (OffHeapRegionEntryHelper.doesClearNeedToCheckForOffHeap()) {
                                // OFFHEAP _getValue ok
                                GatewaySenderEventImpl.release(re._getValue());
                            }
                            // if this is an overflow-only region, we need to free
                            // the entry on disk at this point
                            try {
                                re.removePhase1(lr, true);
                            } catch (RegionClearedException e) {
                                // do nothing, it's already cleared.
                            }
                            re.removePhase2();
                            lruEntryDestroy(re);
                            if (tombstone) {
                                _getOwner().incTombstoneCount(-1);
                                tombstones += 1;
                            } else {
                                delta += 1;
                            }
                        }
                    } else {
                        // rvv does not contain this entry so it is retained
                        result.add(id);
                    }
                }
            }
            _getOwner().updateSizeOnClearRegion(delta);
            incEntryCount(-delta);
            incEntryCount(-tombstones);
            if (logger.isDebugEnabled()) {
                logger.debug("Size after clearing = {}", _getMap().size());
            }
            if (isTraceEnabled && _getMap().size() < 20) {
                _getOwner().dumpBackingMap();
            }
        }
    }
    return result;
}
Also used : VersionSource(org.apache.geode.internal.cache.versions.VersionSource) RegionVersionVector(org.apache.geode.internal.cache.versions.RegionVersionVector) StoredObject(org.apache.geode.internal.offheap.StoredObject) GemFireIOException(org.apache.geode.GemFireIOException) HashSet(java.util.HashSet)
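
A minimal caller-side sketch of how the return value of clear might be read. AbstractRegionMap is an internal Geode class created by an initialized region, so the helper class below, its name, and the way regionMap and rvv are obtained are assumptions for illustration only, not part of the example above.

import java.util.Set;

import org.apache.geode.internal.cache.AbstractRegionMap;
import org.apache.geode.internal.cache.versions.RegionVersionVector;
import org.apache.geode.internal.cache.versions.VersionSource;

// Sketch only: a real AbstractRegionMap belongs to an initialized region, so this
// method simply shows how the result of clear(rvv) is interpreted.
class ClearResultSketch {
    static void interpret(AbstractRegionMap regionMap, RegionVersionVector rvv) {
        Set<VersionSource> retained = regionMap.clear(rvv);
        if (retained == null) {
            // the owner was not a local region (initialization failed), so the
            // backing map was simply wiped and there is nothing to reconcile
            System.out.println("map cleared without version tracking");
        } else if (!retained.isEmpty()) {
            // version sources of tags whose entries survived because the
            // supplied rvv did not contain their region versions
            System.out.println("entries retained for version sources: " + retained);
        }
    }
}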

Example 27 with GemFireIOException

use of org.apache.geode.GemFireIOException in project geode by apache.

the class StatArchiveWriter method writeHeader.

private void writeHeader(long initialDate, StatArchiveDescriptor archiveDescriptor) {
    if (logger.isTraceEnabled(LogMarker.STATISTICS)) {
        logger.trace(LogMarker.STATISTICS, "StatArchiveWriter#writeHeader initialDate={} archiveDescriptor={}", initialDate, archiveDescriptor);
    }
    try {
        this.dataOut.writeByte(HEADER_TOKEN);
        this.dataOut.writeByte(ARCHIVE_VERSION);
        this.dataOut.writeLong(initialDate);
        this.dataOut.writeLong(archiveDescriptor.getSystemId());
        this.dataOut.writeLong(archiveDescriptor.getSystemStartTime());
        TimeZone timeZone = getTimeZone();
        this.dataOut.writeInt(timeZone.getRawOffset());
        this.dataOut.writeUTF(timeZone.getID());
        this.dataOut.writeUTF(archiveDescriptor.getSystemDirectoryPath());
        this.dataOut.writeUTF(archiveDescriptor.getProductDescription());
        this.dataOut.writeUTF(getOSInfo());
        this.dataOut.writeUTF(getMachineInfo());
        if (this.trace) {
            this.traceDataOut.println("writeHeader traceStatisticsName: " + traceStatisticsName);
            this.traceDataOut.println("writeHeader traceStatisticsTypeName: " + traceStatisticsTypeName);
            this.traceDataOut.println("writeHeader#writeByte HEADER_TOKEN: " + HEADER_TOKEN);
            this.traceDataOut.println("writeHeader#writeByte ARCHIVE_VERSION: " + ARCHIVE_VERSION);
            this.traceDataOut.println("writeHeader#writeLong initialDate: " + initialDate);
            this.traceDataOut.println("writeHeader#writeLong archiveDescriptor.getSystemId(): " + archiveDescriptor.getSystemId());
            this.traceDataOut.println("writeHeader#writeLong archiveDescriptor.getSystemStartTime(): " + archiveDescriptor.getSystemStartTime());
            this.traceDataOut.println("writeHeader#writeInt timeZone.getRawOffset(): " + timeZone.getRawOffset());
            this.traceDataOut.println("writeHeader#writeUTF timeZone.getID(): " + timeZone.getID());
            this.traceDataOut.println("writeHeader#writeUTF archiveDescriptor.getSystemDirectoryPath(): " + archiveDescriptor.getSystemDirectoryPath());
            this.traceDataOut.println("writeHeader#writeUTF archiveDescriptor.getProductDescription(): " + archiveDescriptor.getProductDescription());
            this.traceDataOut.println("writeHeader#writeUTF getOSInfo(): " + getOSInfo());
            this.traceDataOut.println("writeHeader#writeUTF getMachineInfo(): " + getMachineInfo());
        }
    } catch (IOException ex) {
        throw new GemFireIOException(LocalizedStrings.StatArchiveWriter_FAILED_WRITING_HEADER_TO_STATISTIC_ARCHIVE.toLocalizedString(), ex);
    }
}
Also used : TimeZone(java.util.TimeZone) GemFireIOException(org.apache.geode.GemFireIOException) IOException(java.io.IOException)
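
A self-contained sketch of the wrapping pattern writeHeader uses above: the checked IOException is rethrown as the unchecked GemFireIOException, and the original failure stays reachable through getCause(). The failing write below is simulated with a hard-coded IOException; it is not a real Geode API call.

import java.io.IOException;

import org.apache.geode.GemFireIOException;

public class WrapIOExceptionSketch {
    public static void main(String[] args) {
        try {
            write();
        } catch (GemFireIOException e) {
            // the checked cause is still available for diagnostics
            if (e.getCause() instanceof IOException) {
                System.err.println("underlying I/O failure: " + e.getCause().getMessage());
            }
        }
    }

    static void write() {
        try {
            throw new IOException("disk full"); // simulated failure
        } catch (IOException ex) {
            // rethrow as the unchecked GemFireIOException, as in the example above
            throw new GemFireIOException("Failed writing header to statistic archive", ex);
        }
    }
}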

Example 28 with GemFireIOException

use of org.apache.geode.GemFireIOException in project geode by apache.

the class StatArchiveWriter method allocatedResourceType.

public void allocatedResourceType(ResourceType resourceType) {
    if (logger.isTraceEnabled(LogMarker.STATISTICS)) {
        logger.trace(LogMarker.STATISTICS, "StatArchiveWriter#allocatedResourceType resourceType={}", resourceType);
    }
    if (resourceType.getStatisticDescriptors().length >= ILLEGAL_STAT_OFFSET) {
        throw new InternalGemFireException(LocalizedStrings.StatArchiveWriter_COULD_NOT_ARCHIVE_TYPE_0_BECAUSE_IT_HAD_MORE_THAN_1_STATISTICS.toLocalizedString(new Object[] { resourceType.getStatisticsType().getName(), Integer.valueOf(ILLEGAL_STAT_OFFSET - 1) }));
    }
    // write the type to the archive
    try {
        this.dataOut.writeByte(RESOURCE_TYPE_TOKEN);
        this.dataOut.writeInt(resourceType.getId());
        this.dataOut.writeUTF(resourceType.getStatisticsType().getName());
        this.dataOut.writeUTF(resourceType.getStatisticsType().getDescription());
        StatisticDescriptor[] stats = resourceType.getStatisticDescriptors();
        this.dataOut.writeShort(stats.length);
        if (this.trace && (traceStatisticsTypeName == null || traceStatisticsTypeName.equals(resourceType.getStatisticsType().getName()))) {
            this.traceDataOut.println("allocatedResourceType#writeByte RESOURCE_TYPE_TOKEN: " + RESOURCE_TYPE_TOKEN);
            this.traceDataOut.println("allocatedResourceType#writeInt resourceType.getId(): " + resourceType.getId());
            this.traceDataOut.println("allocatedResourceType#writeUTF resourceType.getStatisticsType().getName(): " + resourceType.getStatisticsType().getName());
            this.traceDataOut.println("allocatedResourceType#writeUTF resourceType.getStatisticsType().getDescription(): " + resourceType.getStatisticsType().getDescription());
            this.traceDataOut.println("allocatedResourceType#writeShort stats.length: " + stats.length);
        }
        for (int i = 0; i < stats.length; i++) {
            this.dataOut.writeUTF(stats[i].getName());
            this.dataOut.writeByte(((StatisticDescriptorImpl) stats[i]).getTypeCode());
            this.dataOut.writeBoolean(stats[i].isCounter());
            this.dataOut.writeBoolean(stats[i].isLargerBetter());
            this.dataOut.writeUTF(stats[i].getUnit());
            this.dataOut.writeUTF(stats[i].getDescription());
            if (this.trace && (traceStatisticsTypeName == null || traceStatisticsTypeName.equals(resourceType.getStatisticsType().getName()))) {
                this.traceDataOut.println("allocatedResourceType#writeUTF stats[i].getName(): " + stats[i].getName());
                this.traceDataOut.println("allocatedResourceType#writeByte ((StatisticDescriptorImpl)stats[i]).getTypeCode(): " + ((StatisticDescriptorImpl) stats[i]).getTypeCode());
                this.traceDataOut.println("allocatedResourceType#writeBoolean stats[i].isCounter(): " + stats[i].isCounter());
                this.traceDataOut.println("allocatedResourceType#writeBoolean stats[i].isLargerBetter(): " + stats[i].isLargerBetter());
                this.traceDataOut.println("allocatedResourceType#writeUTF stats[i].getUnit(): " + stats[i].getUnit());
                this.traceDataOut.println("allocatedResourceType#writeUTF stats[i].getDescription(): " + stats[i].getDescription());
            }
        }
    } catch (IOException ex) {
        throw new GemFireIOException(LocalizedStrings.StatArchiveWriter_FAILED_WRITING_NEW_RESOURCE_TYPE_TO_STATISTIC_ARCHIVE.toLocalizedString(), ex);
    }
}
Also used : InternalGemFireException(org.apache.geode.InternalGemFireException) GemFireIOException(org.apache.geode.GemFireIOException) IOException(java.io.IOException) StatisticDescriptor(org.apache.geode.StatisticDescriptor)
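
A small sketch of the per-descriptor fields that allocatedResourceType serializes, expressed against the public StatisticDescriptor API. The type code written to the archive comes from the internal StatisticDescriptorImpl and is left out here, and how the StatisticsType is obtained is up to the caller; the class name is illustrative.

import org.apache.geode.StatisticDescriptor;
import org.apache.geode.StatisticsType;

class DescriptorFieldsSketch {
    // print the same per-statistic fields the archive writer records
    static void dump(StatisticsType type) {
        for (StatisticDescriptor stat : type.getStatistics()) {
            System.out.printf("%s (%s): counter=%b largerBetter=%b unit=%s%n",
                    stat.getName(), stat.getDescription(),
                    stat.isCounter(), stat.isLargerBetter(), stat.getUnit());
        }
    }
}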

Example 29 with GemFireIOException

use of org.apache.geode.GemFireIOException in project geode by apache.

the class DiskStoreCommands method compactOfflineDiskStore.

@CliCommand(value = CliStrings.COMPACT_OFFLINE_DISK_STORE, help = CliStrings.COMPACT_OFFLINE_DISK_STORE__HELP)
@CliMetaData(shellOnly = true, relatedTopic = { CliStrings.TOPIC_GEODE_DISKSTORE })
public Result compactOfflineDiskStore(
        @CliOption(key = CliStrings.COMPACT_OFFLINE_DISK_STORE__NAME, mandatory = true,
                help = CliStrings.COMPACT_OFFLINE_DISK_STORE__NAME__HELP) String diskStoreName,
        @CliOption(key = CliStrings.COMPACT_OFFLINE_DISK_STORE__DISKDIRS, mandatory = true,
                unspecifiedDefaultValue = CliMetaData.ANNOTATION_NULL_VALUE,
                help = CliStrings.COMPACT_OFFLINE_DISK_STORE__DISKDIRS__HELP) String[] diskDirs,
        @CliOption(key = CliStrings.COMPACT_OFFLINE_DISK_STORE__MAXOPLOGSIZE,
                unspecifiedDefaultValue = "-1",
                help = CliStrings.COMPACT_OFFLINE_DISK_STORE__MAXOPLOGSIZE__HELP) long maxOplogSize,
        @CliOption(key = CliStrings.COMPACT_OFFLINE_DISK_STORE__J,
                unspecifiedDefaultValue = CliMetaData.ANNOTATION_NULL_VALUE,
                help = CliStrings.COMPACT_OFFLINE_DISK_STORE__J__HELP) String[] jvmProps) {
    Result result = null;
    LogWrapper logWrapper = LogWrapper.getInstance();
    StringBuilder output = new StringBuilder();
    StringBuilder error = new StringBuilder();
    String errorMessage = "";
    Process compacterProcess = null;
    try {
        String validatedDirectories = validatedDirectories(diskDirs);
        if (validatedDirectories != null) {
            throw new IllegalArgumentException("Could not find " + CliStrings.COMPACT_OFFLINE_DISK_STORE__DISKDIRS + ": \"" + validatedDirectories + "\"");
        }
        List<String> commandList = new ArrayList<String>();
        commandList.add(System.getProperty("java.home") + File.separatorChar + "bin" + File.separatorChar + "java");
        configureLogging(commandList);
        if (jvmProps != null && jvmProps.length != 0) {
            for (int i = 0; i < jvmProps.length; i++) {
                commandList.add(jvmProps[i]);
            }
        }
        commandList.add("-classpath");
        commandList.add(System.getProperty("java.class.path", "."));
        commandList.add(DiskStoreCompacter.class.getName());
        commandList.add(CliStrings.COMPACT_OFFLINE_DISK_STORE__NAME + "=" + diskStoreName);
        if (diskDirs != null && diskDirs.length != 0) {
            StringBuilder builder = new StringBuilder();
            int arrayLength = diskDirs.length;
            for (int i = 0; i < arrayLength; i++) {
                if (File.separatorChar == '\\') {
                    // see 46120
                    builder.append(diskDirs[i].replace("\\", "/"));
                } else {
                    builder.append(diskDirs[i]);
                }
                if (i + 1 != arrayLength) {
                    builder.append(',');
                }
            }
            commandList.add(CliStrings.COMPACT_OFFLINE_DISK_STORE__DISKDIRS + "=" + builder.toString());
        }
        // a maxOplogSize of -1 (the unspecified default) tells the compacter to ignore it
        commandList.add(CliStrings.COMPACT_OFFLINE_DISK_STORE__MAXOPLOGSIZE + "=" + maxOplogSize);
        ProcessBuilder procBuilder = new ProcessBuilder(commandList);
        compacterProcess = procBuilder.start();
        InputStream inputStream = compacterProcess.getInputStream();
        InputStream errorStream = compacterProcess.getErrorStream();
        BufferedReader inputReader = new BufferedReader(new InputStreamReader(inputStream));
        BufferedReader errorReader = new BufferedReader(new InputStreamReader(errorStream));
        String line = null;
        while ((line = inputReader.readLine()) != null) {
            output.append(line).append(GfshParser.LINE_SEPARATOR);
        }
        line = null;
        boolean switchToStackTrace = false;
        while ((line = errorReader.readLine()) != null) {
            if (!switchToStackTrace && DiskStoreCompacter.STACKTRACE_START.equals(line)) {
                switchToStackTrace = true;
            } else if (switchToStackTrace) {
                error.append(line).append(GfshParser.LINE_SEPARATOR);
            } else {
                errorMessage = errorMessage + line;
            }
        }
        if (!errorMessage.isEmpty()) {
            throw new GemFireIOException(errorMessage);
        }
        // do we have to waitFor??
        compacterProcess.destroy();
        result = ResultBuilder.createInfoResult(output.toString());
    } catch (IOException e) {
        if (output.length() != 0) {
            Gfsh.println(output.toString());
        }
        String fieldsMessage = (maxOplogSize != -1 ? CliStrings.COMPACT_OFFLINE_DISK_STORE__MAXOPLOGSIZE + "=" + maxOplogSize + "," : "");
        fieldsMessage += CliUtil.arrayToString(diskDirs);
        String errorString = CliStrings.format(CliStrings.COMPACT_OFFLINE_DISK_STORE__MSG__ERROR_WHILE_COMPACTING_DISKSTORE_0_WITH_1_REASON_2, new Object[] { diskStoreName, fieldsMessage });
        result = ResultBuilder.createUserErrorResult(errorString);
        if (logWrapper.fineEnabled()) {
            logWrapper.fine(e.getMessage(), e);
        }
    } catch (GemFireIOException e) {
        if (output.length() != 0) {
            Gfsh.println(output.toString());
        }
        result = ResultBuilder.createUserErrorResult(errorMessage);
        if (logWrapper.fineEnabled()) {
            logWrapper.fine(error.toString());
        }
    } catch (IllegalArgumentException e) {
        if (output.length() != 0) {
            Gfsh.println(output.toString());
        }
        result = ResultBuilder.createUserErrorResult(e.getMessage());
    } finally {
        if (compacterProcess != null) {
            try {
                // just to check whether the process has exited
                // Process.exitValue() throws IllegalThreadStateException if Process
                // is alive
                compacterProcess.exitValue();
            } catch (IllegalThreadStateException ise) {
                // not yet terminated, destroy the process
                compacterProcess.destroy();
            }
        }
    }
    return result;
}
Also used : InputStreamReader(java.io.InputStreamReader) InputStream(java.io.InputStream) ArrayList(java.util.ArrayList) LogWrapper(org.apache.geode.management.internal.cli.LogWrapper) GemFireIOException(org.apache.geode.GemFireIOException) IOException(java.io.IOException) ConverterHint(org.apache.geode.management.cli.ConverterHint) Result(org.apache.geode.management.cli.Result) CliFunctionResult(org.apache.geode.management.internal.cli.functions.CliFunctionResult) DiskStoreCompacter(org.apache.geode.management.internal.cli.util.DiskStoreCompacter) BufferedReader(java.io.BufferedReader) CliCommand(org.springframework.shell.core.annotation.CliCommand) CliMetaData(org.apache.geode.management.cli.CliMetaData)
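
A self-contained sketch of the pattern the command above relies on: run a helper JVM, collect its stderr, and surface any error text as the unchecked GemFireIOException so an outer catch block can report it. The class name is hypothetical, and the child command is deliberately invalid just to produce some stderr output; it is not the real DiskStoreCompacter invocation.

import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;

import org.apache.geode.GemFireIOException;

public class StderrToGemFireIOExceptionSketch {
    public static void main(String[] args) throws IOException, InterruptedException {
        // an invalid flag so the child JVM writes something to stderr
        Process child = new ProcessBuilder("java", "--no-such-flag").start();
        StringBuilder error = new StringBuilder();
        try (BufferedReader err =
                new BufferedReader(new InputStreamReader(child.getErrorStream()))) {
            String line;
            while ((line = err.readLine()) != null) {
                error.append(line).append(System.lineSeparator());
            }
        }
        child.waitFor();
        try {
            if (error.length() != 0) {
                // surface the collected text as an unchecked exception,
                // just as the command above does with errorMessage
                throw new GemFireIOException(error.toString());
            }
        } catch (GemFireIOException e) {
            // mirrors the command's catch block: report the collected message
            System.err.println("child process failed: " + e.getMessage());
        }
    }
}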

Example 30 with GemFireIOException

use of org.apache.geode.GemFireIOException in project geode by apache.

the class JGroupsMessengerJUnitTest method testSerializationError.

@Test
public void testSerializationError() throws Exception {
    for (int i = 0; i < 2; i++) {
        boolean enableMcast = (i == 1);
        initMocks(enableMcast);
        InternalDistributedMember mbr = createAddress(8888);
        DistributedCacheOperation.CacheOperationMessage msg = mock(DistributedCacheOperation.CacheOperationMessage.class);
        when(msg.getRecipients()).thenReturn(new InternalDistributedMember[] { mbr });
        when(msg.getMulticast()).thenReturn(enableMcast);
        if (!enableMcast) {
            // for non-mcast we send a message with a reply-processor
            when(msg.getProcessorId()).thenReturn(1234);
        } else {
            // for mcast we send a direct-ack message and expect the messenger
            // to register it
            stub(msg.isDirectAck()).toReturn(true);
        }
        when(msg.getDSFID()).thenReturn((int) DataSerializableFixedID.PUT_ALL_MESSAGE);
        // for code coverage we need to test with both a SerializationException and
        // an IOException. The former is wrapped in a GemFireIOException while the
        // latter is not
        doThrow(new SerializationException()).when(msg).toData(any(DataOutput.class));
        try {
            messenger.send(msg);
            fail("expected a failure");
        } catch (GemFireIOException e) {
            // success
        }
        if (enableMcast) {
            verify(msg, atLeastOnce()).registerProcessor();
        }
        doThrow(new IOException()).when(msg).toData(any(DataOutput.class));
        try {
            messenger.send(msg);
            fail("expected a failure");
        } catch (GemFireIOException e) {
            // success
        }
    }
}
Also used : DataOutput(java.io.DataOutput) DistributedCacheOperation(org.apache.geode.internal.cache.DistributedCacheOperation) SerializationException(org.apache.commons.lang.SerializationException) InternalDistributedMember(org.apache.geode.distributed.internal.membership.InternalDistributedMember) GemFireIOException(org.apache.geode.GemFireIOException) IOException(java.io.IOException) Test(org.junit.Test) MembershipTest(org.apache.geode.test.junit.categories.MembershipTest) IntegrationTest(org.apache.geode.test.junit.categories.IntegrationTest)
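
For reference, the deprecated stub(...).toReturn(...) call in the test above has a direct equivalent in the current Mockito API, assuming the usual static import of org.mockito.Mockito.when:

// equivalent stubbing without the deprecated stub()/toReturn() pair
when(msg.isDirectAck()).thenReturn(true);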

Aggregations

GemFireIOException (org.apache.geode.GemFireIOException): 31
IOException (java.io.IOException): 20
File (java.io.File): 5
Test (org.junit.Test): 4
InputStream (java.io.InputStream): 3
Properties (java.util.Properties): 3
InternalDistributedMember (org.apache.geode.distributed.internal.membership.InternalDistributedMember): 3
BufferedReader (java.io.BufferedReader): 2
DataOutput (java.io.DataOutput): 2
FileNotFoundException (java.io.FileNotFoundException): 2
FileOutputStream (java.io.FileOutputStream): 2
InputStreamReader (java.io.InputStreamReader): 2
PrintStream (java.io.PrintStream): 2
StringReader (java.io.StringReader): 2
ArrayList (java.util.ArrayList): 2
SerializationException (org.apache.commons.lang.SerializationException): 2
CancelException (org.apache.geode.CancelException): 2
ForcedDisconnectException (org.apache.geode.ForcedDisconnectException): 2
GemFireConfigException (org.apache.geode.GemFireConfigException): 2
InternalGemFireException (org.apache.geode.InternalGemFireException): 2