Example of java.util.EnumSet usage in the robovm project (robovm).
From class EnumSetTest, method test_containsAll_LCollection:
/**
* java.util.EnumSet#containsAll(Collection)
*/
@SuppressWarnings({ "unchecked", "boxing" })
/**
 * Tests {@link java.util.EnumSet#containsAll(Collection)} for both the
 * regular (&lt;= 64 constants) and jumbo (&gt; 64 constants) EnumSet
 * implementations: null argument, empty collections/sets, raw collections
 * holding incompatible element types, collections of a different enum type,
 * and genuine subset relationships.
 */
@SuppressWarnings({ "unchecked", "boxing" })
public void test_containsAll_LCollection() {
    // Fill a set with every EnumFoo constant so subset checks below are meaningful.
    EnumSet<EnumFoo> set = EnumSet.noneOf(EnumFoo.class);
    Enum[] elements = EnumFoo.class.getEnumConstants();
    for (int i = 0; i < elements.length; i++) {
        set.add((EnumFoo) elements[i]);
    }
    // containsAll(null) must throw NPE per the Collection contract.
    try {
        set.containsAll(null);
        fail("Should throw NullPointerException");
    } catch (NullPointerException e) {
        // expected
    }
    // EmptyEnum has no constants, so emptySet stays empty; any set containsAll an empty set.
    EnumSet<EmptyEnum> emptySet = EnumSet.noneOf(EmptyEnum.class);
    elements = EmptyEnum.class.getEnumConstants();
    for (int i = 0; i < elements.length; i++) {
        emptySet.add((EmptyEnum) elements[i]);
    }
    boolean result = set.containsAll(emptySet);
    assertTrue("Should return true", result);
    // Raw (untyped) collections: empty is contained; incompatible elements are not.
    Collection rawCollection = new ArrayList();
    result = set.containsAll(rawCollection);
    assertTrue("Should contain empty collection:", result);
    rawCollection.add(1);
    result = set.containsAll(rawCollection);
    assertFalse("Should return false", result);
    rawCollection.add(EnumWithInnerClass.a);
    result = set.containsAll(rawCollection);
    assertFalse("Should return false", result);
    // Empty EnumSets of any element type must be contained without class casts.
    EnumSet rawSet = EnumSet.noneOf(EnumFoo.class);
    result = set.containsAll(rawSet);
    assertTrue("Should contain empty set", result);
    emptySet = EnumSet.noneOf(EmptyEnum.class);
    result = set.containsAll(emptySet);
    assertTrue("No class cast should be performed on empty set", result);
    // Typed subset via a plain Collection and via another EnumSet.
    Collection<EnumFoo> collection = new ArrayList<EnumFoo>();
    collection.add(EnumFoo.a);
    result = set.containsAll(collection);
    assertTrue("Should contain all elements in collection", result);
    EnumSet<EnumFoo> fooSet = EnumSet.noneOf(EnumFoo.class);
    fooSet.add(EnumFoo.a);
    result = set.containsAll(fooSet);
    assertTrue("Should return true", result);
    // After clearing, the NPE contract still holds and foreign enum types
    // are still reported as not contained.
    set.clear();
    try {
        set.containsAll(null);
        fail("Should throw NullPointerException");
    } catch (NullPointerException e) {
        // expected
    }
    Collection<EnumWithInnerClass> collectionWithSubclass = new ArrayList<EnumWithInnerClass>();
    collectionWithSubclass.add(EnumWithInnerClass.a);
    result = set.containsAll(collectionWithSubclass);
    assertFalse("Should return false", result);
    EnumSet<EnumWithInnerClass> setWithSubclass = EnumSet.noneOf(EnumWithInnerClass.class);
    setWithSubclass.add(EnumWithInnerClass.a);
    result = set.containsAll(setWithSubclass);
    assertFalse("Should return false", result);
    // Test enum type with more than 64 elements (jumbo EnumSet implementation).
    Set<HugeEnum> hugeSet = EnumSet.noneOf(HugeEnum.class);
    hugeSet.add(HugeEnum.a);
    hugeSet.add(HugeEnum.b);
    hugeSet.add(HugeEnum.aa);
    hugeSet.add(HugeEnum.bb);
    hugeSet.add(HugeEnum.cc);
    hugeSet.add(HugeEnum.dd);
    Set<HugeEnum> anotherHugeSet = EnumSet.noneOf(HugeEnum.class);
    // FIX: the original added these to hugeSet, leaving anotherHugeSet empty
    // and making the subset assertion below vacuous. Populate the subset so
    // containsAll is exercised against a non-empty argument.
    anotherHugeSet.add(HugeEnum.b);
    anotherHugeSet.add(HugeEnum.cc);
    result = hugeSet.containsAll(anotherHugeSet);
    assertTrue(result);
    try {
        hugeSet.containsAll(null);
        fail("Should throw NullPointerException");
    } catch (NullPointerException e) {
        // expected
    }
    Set<HugeEnumWithInnerClass> hugeSetWithInnerClass = EnumSet.noneOf(HugeEnumWithInnerClass.class);
    hugeSetWithInnerClass.add(HugeEnumWithInnerClass.a);
    hugeSetWithInnerClass.add(HugeEnumWithInnerClass.b);
    result = hugeSetWithInnerClass.containsAll(hugeSetWithInnerClass);
    assertTrue(result);
    // A jumbo set must not contain elements of a different enum type.
    result = hugeSet.containsAll(hugeSetWithInnerClass);
    assertFalse(result);
    rawCollection = new ArrayList();
    result = hugeSet.containsAll(rawCollection);
    assertTrue("Should contain empty collection:", result);
    rawCollection.add(1);
    result = hugeSet.containsAll(rawCollection);
    assertFalse("Should return false", result);
    rawCollection.add(EnumWithInnerClass.a);
    // FIX: the original queried the small (and by now cleared) `set` here,
    // so the assertion passed vacuously; this section tests the jumbo set.
    result = hugeSet.containsAll(rawCollection);
    assertFalse("Should return false", result);
    rawSet = EnumSet.noneOf(HugeEnum.class);
    result = hugeSet.containsAll(rawSet);
    assertTrue("Should contain empty set", result);
    EnumSet<HugeEnumWithInnerClass> emptyHugeSet = EnumSet.noneOf(HugeEnumWithInnerClass.class);
    result = hugeSet.containsAll(emptyHugeSet);
    assertTrue("No class cast should be performed on empty set", result);
    Collection<HugeEnum> hugeCollection = new ArrayList<HugeEnum>();
    hugeCollection.add(HugeEnum.a);
    result = hugeSet.containsAll(hugeCollection);
    assertTrue("Should contain all elements in collection", result);
    // Same post-clear checks for the jumbo implementation.
    hugeSet.clear();
    try {
        hugeSet.containsAll(null);
        fail("Should throw NullPointerException");
    } catch (NullPointerException e) {
        // expected
    }
    Collection<HugeEnumWithInnerClass> hugeCollectionWithSubclass = new ArrayList<HugeEnumWithInnerClass>();
    hugeCollectionWithSubclass.add(HugeEnumWithInnerClass.a);
    result = hugeSet.containsAll(hugeCollectionWithSubclass);
    assertFalse("Should return false", result);
    EnumSet<HugeEnumWithInnerClass> hugeSetWithSubclass = EnumSet.noneOf(HugeEnumWithInnerClass.class);
    hugeSetWithSubclass.add(HugeEnumWithInnerClass.a);
    result = hugeSet.containsAll(hugeSetWithSubclass);
    assertFalse("Should return false", result);
}
Example of java.util.EnumSet usage in the geode project (apache).
From class DiskInitFileParser, method parse:
/**
 * Reads the disk init file record by record until EOF (or until the
 * interpreter reports it is closing), dispatching each record to
 * {@code interpreter}. Each record is a one-byte opcode followed by an
 * opcode-specific payload and an end-of-record marker.
 *
 * @return the {@link DiskStoreID} read from an IFREC_DISKSTORE_ID record,
 *         or {@code null} if no such record was encountered
 * @throws IOException if reading from the underlying stream fails
 * @throws ClassNotFoundException if a serialized class cannot be resolved
 * @throws DiskAccessException on an unknown opcode or an unsupported
 *         product version recorded in the file
 */
public DiskStoreID parse() throws IOException, ClassNotFoundException {
    // Records that precede an IFREC_GEMFIRE_VERSION record are interpreted
    // using the oldest supported on-disk format.
    Version gfversion = Version.GFE_662;
    DiskStoreID result = null;
    boolean endOfFile = false;
    while (!endOfFile) {
        if (dis.atEndOfFile()) {
            endOfFile = true;
            break;
        }
        byte opCode = dis.readByte();
        if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY)) {
            logger.trace(LogMarker.PERSIST_RECOVERY, "DiskInitFile opcode={}", opCode);
        }
        switch (opCode) {
            case DiskInitFile.IF_EOF_ID:
                // Explicit EOF marker: remember we saw it (distinguishes a clean
                // shutdown from a truncated file).
                endOfFile = true;
                gotEOF = true;
                break;
            case DiskInitFile.IFREC_INSTANTIATOR_ID:
                {
                    int id = dis.readInt();
                    String cn = readClassName(dis);
                    String icn = readClassName(dis);
                    readEndOfRecord(dis);
                    interpreter.cmnInstantiatorId(id, cn, icn);
                }
                break;
            case DiskInitFile.IFREC_DATA_SERIALIZER_ID:
                {
                    Class<?> dsc = readClass(dis);
                    readEndOfRecord(dis);
                    interpreter.cmnDataSerializerId(dsc);
                }
                break;
            case DiskInitFile.IFREC_ONLINE_MEMBER_ID:
                {
                    long drId = readDiskRegionID(dis);
                    PersistentMemberID pmid = readPMID(dis, gfversion);
                    readEndOfRecord(dis);
                    if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY)) {
                        logger.trace(LogMarker.PERSIST_RECOVERY, "IFREC_ONLINE_MEMBER_ID drId={} omid={}", drId, pmid);
                    }
                    interpreter.cmnOnlineMemberId(drId, pmid);
                }
                break;
            case DiskInitFile.IFREC_OFFLINE_MEMBER_ID:
                {
                    long drId = readDiskRegionID(dis);
                    PersistentMemberID pmid = readPMID(dis, gfversion);
                    readEndOfRecord(dis);
                    if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY)) {
                        logger.trace(LogMarker.PERSIST_RECOVERY, "IFREC_OFFLINE_MEMBER_ID drId={} pmid={}", drId, pmid);
                    }
                    interpreter.cmnOfflineMemberId(drId, pmid);
                }
                break;
            case DiskInitFile.IFREC_RM_MEMBER_ID:
                {
                    long drId = readDiskRegionID(dis);
                    PersistentMemberID pmid = readPMID(dis, gfversion);
                    readEndOfRecord(dis);
                    if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY)) {
                        logger.trace(LogMarker.PERSIST_RECOVERY, "IFREC_RM_MEMBER_ID drId={} pmid={}", drId, pmid);
                    }
                    interpreter.cmnRmMemberId(drId, pmid);
                }
                break;
            case DiskInitFile.IFREC_MY_MEMBER_INITIALIZING_ID:
                {
                    long drId = readDiskRegionID(dis);
                    PersistentMemberID pmid = readPMID(dis, gfversion);
                    readEndOfRecord(dis);
                    if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY)) {
                        logger.trace(LogMarker.PERSIST_RECOVERY, "IFREC_MY_MEMBER_INITIALIZING_ID drId={} pmid={}", drId, pmid);
                    }
                    interpreter.cmnAddMyInitializingPMID(drId, pmid);
                }
                break;
            case DiskInitFile.IFREC_MY_MEMBER_INITIALIZED_ID:
                {
                    long drId = readDiskRegionID(dis);
                    readEndOfRecord(dis);
                    if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY)) {
                        logger.trace(LogMarker.PERSIST_RECOVERY, "IFREC_MY_MEMBER_INITIALIZED_ID drId={}", drId);
                    }
                    interpreter.cmnMarkInitialized(drId);
                }
                break;
            case DiskInitFile.IFREC_CREATE_REGION_ID:
                {
                    long drId = readDiskRegionID(dis);
                    String regName = dis.readUTF();
                    readEndOfRecord(dis);
                    if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY)) {
                        // FIX: format string was missing the {} for drId, so the
                        // region name was logged in drId's place and drId dropped.
                        logger.trace(LogMarker.PERSIST_RECOVERY, "IFREC_CREATE_REGION_ID drId={} name={}", drId, regName);
                    }
                    interpreter.cmnCreateRegion(drId, regName);
                }
                break;
            case DiskInitFile.IFREC_BEGIN_DESTROY_REGION_ID:
                {
                    long drId = readDiskRegionID(dis);
                    readEndOfRecord(dis);
                    if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY)) {
                        logger.trace(LogMarker.PERSIST_RECOVERY, "IFREC_BEGIN_DESTROY_REGION_ID drId={}", drId);
                    }
                    interpreter.cmnBeginDestroyRegion(drId);
                }
                break;
            case DiskInitFile.IFREC_END_DESTROY_REGION_ID:
                {
                    long drId = readDiskRegionID(dis);
                    readEndOfRecord(dis);
                    if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY)) {
                        logger.trace(LogMarker.PERSIST_RECOVERY, "IFREC_END_DESTROY_REGION_ID drId={}", drId);
                    }
                    interpreter.cmnEndDestroyRegion(drId);
                }
                break;
            case DiskInitFile.IFREC_BEGIN_PARTIAL_DESTROY_REGION_ID:
                {
                    long drId = readDiskRegionID(dis);
                    readEndOfRecord(dis);
                    if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY)) {
                        logger.trace(LogMarker.PERSIST_RECOVERY, "IFREC_BEGIN_PARTIAL_DESTROY_REGION_ID drId={}", drId);
                    }
                    interpreter.cmnBeginPartialDestroyRegion(drId);
                }
                break;
            case DiskInitFile.IFREC_END_PARTIAL_DESTROY_REGION_ID:
                {
                    long drId = readDiskRegionID(dis);
                    readEndOfRecord(dis);
                    if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY)) {
                        logger.trace(LogMarker.PERSIST_RECOVERY, "IFREC_END_PARTIAL_DESTROY_REGION_ID drId={}", drId);
                    }
                    interpreter.cmnEndPartialDestroyRegion(drId);
                }
                break;
            case DiskInitFile.IFREC_CLEAR_REGION_ID:
                {
                    long drId = readDiskRegionID(dis);
                    long clearOplogEntryId = dis.readLong();
                    readEndOfRecord(dis);
                    if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY)) {
                        logger.trace(LogMarker.PERSIST_RECOVERY, "IFREC_CLEAR_REGION_ID drId={} oplogEntryId={}", drId, clearOplogEntryId);
                    }
                    interpreter.cmnClearRegion(drId, clearOplogEntryId);
                }
                break;
            case DiskInitFile.IFREC_CLEAR_REGION_WITH_RVV_ID:
                {
                    // Clear recorded with a region version vector: payload is a
                    // count followed by (DiskStoreID, RegionVersionHolder) pairs.
                    long drId = readDiskRegionID(dis);
                    int size = dis.readInt();
                    ConcurrentHashMap<DiskStoreID, RegionVersionHolder<DiskStoreID>> memberToVersion = new ConcurrentHashMap<DiskStoreID, RegionVersionHolder<DiskStoreID>>(size);
                    for (int i = 0; i < size; i++) {
                        DiskStoreID id = new DiskStoreID();
                        InternalDataSerializer.invokeFromData(id, dis);
                        RegionVersionHolder holder = new RegionVersionHolder(dis);
                        memberToVersion.put(id, holder);
                    }
                    readEndOfRecord(dis);
                    if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY)) {
                        logger.trace(LogMarker.PERSIST_RECOVERY, "IFREC_CLEAR_REGION_WITH_RVV_ID drId={} memberToVersion={}", drId, memberToVersion);
                    }
                    interpreter.cmnClearRegion(drId, memberToVersion);
                }
                break;
            case DiskInitFile.IFREC_CRF_CREATE:
                {
                    long oplogId = dis.readLong();
                    readEndOfRecord(dis);
                    if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY)) {
                        logger.trace(LogMarker.PERSIST_RECOVERY, "IFREC_CRF_CREATE oplogId={}", oplogId);
                    }
                    interpreter.cmnCrfCreate(oplogId);
                }
                break;
            case DiskInitFile.IFREC_DRF_CREATE:
                {
                    long oplogId = dis.readLong();
                    readEndOfRecord(dis);
                    if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY)) {
                        logger.trace(LogMarker.PERSIST_RECOVERY, "IFREC_DRF_CREATE oplogId={}", oplogId);
                    }
                    interpreter.cmnDrfCreate(oplogId);
                }
                break;
            case DiskInitFile.IFREC_KRF_CREATE:
                {
                    long oplogId = dis.readLong();
                    readEndOfRecord(dis);
                    if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY)) {
                        logger.trace(LogMarker.PERSIST_RECOVERY, "IFREC_KRF_CREATE oplogId={}", oplogId);
                    }
                    interpreter.cmnKrfCreate(oplogId);
                }
                break;
            case DiskInitFile.IFREC_CRF_DELETE:
                {
                    long oplogId = dis.readLong();
                    readEndOfRecord(dis);
                    if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY)) {
                        logger.trace(LogMarker.PERSIST_RECOVERY, "IFREC_CRF_DELETE oplogId={}", oplogId);
                    }
                    interpreter.cmnCrfDelete(oplogId);
                }
                break;
            case DiskInitFile.IFREC_DRF_DELETE:
                {
                    long oplogId = dis.readLong();
                    readEndOfRecord(dis);
                    if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY)) {
                        logger.trace(LogMarker.PERSIST_RECOVERY, "IFREC_DRF_DELETE oplogId={}", oplogId);
                    }
                    interpreter.cmnDrfDelete(oplogId);
                }
                break;
            // The four IFREC_REGION_CONFIG_ID* opcodes are successive versions of
            // the region-config record; each newer one appends fields to the payload.
            case DiskInitFile.IFREC_REGION_CONFIG_ID:
                {
                    long drId = readDiskRegionID(dis);
                    byte lruAlgorithm = dis.readByte();
                    byte lruAction = dis.readByte();
                    int lruLimit = dis.readInt();
                    int concurrencyLevel = dis.readInt();
                    int initialCapacity = dis.readInt();
                    float loadFactor = dis.readFloat();
                    boolean statisticsEnabled = dis.readBoolean();
                    boolean isBucket = dis.readBoolean();
                    EnumSet<DiskRegionFlag> flags = EnumSet.noneOf(DiskRegionFlag.class);
                    readEndOfRecord(dis);
                    if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY)) {
                        logger.trace(LogMarker.PERSIST_RECOVERY, "IFREC_REGION_CONFIG_ID drId={}", drId);
                    }
                    interpreter.cmnRegionConfig(drId, lruAlgorithm, lruAction, lruLimit, concurrencyLevel, initialCapacity, loadFactor, statisticsEnabled, isBucket, flags, // fixes bug 43910
                        ProxyBucketRegion.NO_FIXED_PARTITION_NAME, -1, null, false);
                }
                break;
            case DiskInitFile.IFREC_REGION_CONFIG_ID_66:
                {
                    // Adds partitionName and startingBucketId to the base record.
                    long drId = readDiskRegionID(dis);
                    byte lruAlgorithm = dis.readByte();
                    byte lruAction = dis.readByte();
                    int lruLimit = dis.readInt();
                    int concurrencyLevel = dis.readInt();
                    int initialCapacity = dis.readInt();
                    float loadFactor = dis.readFloat();
                    boolean statisticsEnabled = dis.readBoolean();
                    boolean isBucket = dis.readBoolean();
                    EnumSet<DiskRegionFlag> flags = EnumSet.noneOf(DiskRegionFlag.class);
                    String partitionName = dis.readUTF();
                    int startingBucketId = dis.readInt();
                    readEndOfRecord(dis);
                    if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY)) {
                        logger.trace(LogMarker.PERSIST_RECOVERY, "IFREC_REGION_CONFIG_ID drId={}", drId);
                    }
                    interpreter.cmnRegionConfig(drId, lruAlgorithm, lruAction, lruLimit, concurrencyLevel, initialCapacity, loadFactor, statisticsEnabled, isBucket, flags, partitionName, startingBucketId, null, false);
                }
                break;
            case DiskInitFile.IFREC_REGION_CONFIG_ID_80:
                {
                    // Adds compressorClassName and the versioning flag.
                    long drId = readDiskRegionID(dis);
                    byte lruAlgorithm = dis.readByte();
                    byte lruAction = dis.readByte();
                    int lruLimit = dis.readInt();
                    int concurrencyLevel = dis.readInt();
                    int initialCapacity = dis.readInt();
                    float loadFactor = dis.readFloat();
                    boolean statisticsEnabled = dis.readBoolean();
                    boolean isBucket = dis.readBoolean();
                    EnumSet<DiskRegionFlag> flags = EnumSet.noneOf(DiskRegionFlag.class);
                    String partitionName = dis.readUTF();
                    int startingBucketId = dis.readInt();
                    String compressorClassName = dis.readUTF();
                    // Empty string on disk means "no compressor configured".
                    if ("".equals(compressorClassName)) {
                        compressorClassName = null;
                    }
                    if (dis.readBoolean()) {
                        flags.add(DiskRegionFlag.IS_WITH_VERSIONING);
                    }
                    readEndOfRecord(dis);
                    if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY)) {
                        logger.trace(LogMarker.PERSIST_RECOVERY, "IFREC_REGION_CONFIG_ID drId={}", drId);
                    }
                    interpreter.cmnRegionConfig(drId, lruAlgorithm, lruAction, lruLimit, concurrencyLevel, initialCapacity, loadFactor, statisticsEnabled, isBucket, flags, partitionName, startingBucketId, compressorClassName, false);
                }
                break;
            case DiskInitFile.IFREC_REGION_CONFIG_ID_90:
                {
                    // Adds the offHeap flag.
                    long drId = readDiskRegionID(dis);
                    byte lruAlgorithm = dis.readByte();
                    byte lruAction = dis.readByte();
                    int lruLimit = dis.readInt();
                    int concurrencyLevel = dis.readInt();
                    int initialCapacity = dis.readInt();
                    float loadFactor = dis.readFloat();
                    boolean statisticsEnabled = dis.readBoolean();
                    boolean isBucket = dis.readBoolean();
                    EnumSet<DiskRegionFlag> flags = EnumSet.noneOf(DiskRegionFlag.class);
                    String partitionName = dis.readUTF();
                    int startingBucketId = dis.readInt();
                    String compressorClassName = dis.readUTF();
                    if ("".equals(compressorClassName)) {
                        compressorClassName = null;
                    }
                    if (dis.readBoolean()) {
                        flags.add(DiskRegionFlag.IS_WITH_VERSIONING);
                    }
                    boolean offHeap = dis.readBoolean();
                    readEndOfRecord(dis);
                    if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY)) {
                        logger.trace(LogMarker.PERSIST_RECOVERY, "IFREC_REGION_CONFIG_ID drId={}", drId);
                    }
                    interpreter.cmnRegionConfig(drId, lruAlgorithm, lruAction, lruLimit, concurrencyLevel, initialCapacity, loadFactor, statisticsEnabled, isBucket, flags, partitionName, startingBucketId, compressorClassName, offHeap);
                }
                break;
            case DiskInitFile.IFREC_OFFLINE_AND_EQUAL_MEMBER_ID:
                {
                    long drId = readDiskRegionID(dis);
                    PersistentMemberID pmid = readPMID(dis, gfversion);
                    readEndOfRecord(dis);
                    if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY)) {
                        logger.trace(LogMarker.PERSIST_RECOVERY, "IFREC_OFFLINE_AND_EQUAL_MEMBER_ID drId={} pmid={}", drId, pmid);
                    }
                    interpreter.cmdOfflineAndEqualMemberId(drId, pmid);
                }
                break;
            case DiskInitFile.IFREC_DISKSTORE_ID:
                {
                    // Note the on-disk order: least-significant bits first.
                    long leastSigBits = dis.readLong();
                    long mostSigBits = dis.readLong();
                    readEndOfRecord(dis);
                    result = new DiskStoreID(mostSigBits, leastSigBits);
                    interpreter.cmnDiskStoreID(result);
                }
                break;
            case DiskInitFile.OPLOG_MAGIC_SEQ_ID:
                readOplogMagicSeqRecord(dis, OPLOG_TYPE.IF);
                break;
            case DiskInitFile.IFREC_PR_CREATE:
                {
                    String name = dis.readUTF();
                    int numBuckets = dis.readInt();
                    String colocatedWith = dis.readUTF();
                    readEndOfRecord(dis);
                    PRPersistentConfig config = new PRPersistentConfig(numBuckets, colocatedWith);
                    if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY)) {
                        logger.trace(LogMarker.PERSIST_RECOVERY, "IFREC_PR_CREATE name={}, config={}", name, config);
                    }
                    interpreter.cmnPRCreate(name, config);
                }
                break;
            case DiskInitFile.IFREC_GEMFIRE_VERSION:
                {
                    // Upgrades gfversion so that later records (e.g. PMIDs) are
                    // read in the format of the recorded product version.
                    short ver = Version.readOrdinal(dis);
                    readEndOfRecord(dis);
                    if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY)) {
                        logger.trace(LogMarker.PERSIST_RECOVERY, "IFREC_GEMFIRE_VERSION version={}", ver);
                    }
                    try {
                        gfversion = Version.fromOrdinal(ver, false);
                    } catch (UnsupportedVersionException e) {
                        throw new DiskAccessException(LocalizedStrings.Oplog_UNEXPECTED_PRODUCT_VERSION_0.toLocalizedString(ver), e, this.interpreter.getNameForError());
                    }
                    interpreter.cmnGemfireVersion(gfversion);
                    break;
                }
            case DiskInitFile.IFREC_PR_DESTROY:
                {
                    String name = dis.readUTF();
                    readEndOfRecord(dis);
                    if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY)) {
                        logger.trace(LogMarker.PERSIST_RECOVERY, "IFREC_PR_DESTROY name={}", name);
                    }
                    interpreter.cmnPRDestroy(name);
                }
                break;
            case DiskInitFile.IFREC_ADD_CANONICAL_MEMBER_ID:
                {
                    int id = dis.readInt();
                    Object object = DataSerializer.readObject(dis);
                    readEndOfRecord(dis);
                    if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY)) {
                        logger.trace(LogMarker.PERSIST_RECOVERY, "IFREC_ADD_CANONICAL_MEMBER_ID id={} name={}", id, object);
                    }
                    interpreter.cmnAddCanonicalMemberId(id, object);
                    break;
                }
            case DiskInitFile.IFREC_REVOKE_DISK_STORE_ID:
                {
                    PersistentMemberPattern pattern = new PersistentMemberPattern();
                    InternalDataSerializer.invokeFromData(pattern, dis);
                    readEndOfRecord(dis);
                    if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY)) {
                        // FIX: pattern was concatenated into the format string, so the
                        // {} placeholder was never substituted (logged a literal "{}")
                        // and the string was built even with tracing disabled. Pass it
                        // as a log parameter instead.
                        logger.trace(LogMarker.PERSIST_RECOVERY, "IFREC_REVOKE_DISK_STORE_ID id={}", pattern);
                    }
                    interpreter.cmnRevokeDiskStoreId(pattern);
                }
                break;
            default:
                throw new DiskAccessException(LocalizedStrings.DiskInitFile_UNKNOWN_OPCODE_0_FOUND.toLocalizedString(opCode), this.interpreter.getNameForError());
        }
        // Stop early if the interpreter is shutting down.
        if (interpreter.isClosing()) {
            break;
        }
    }
    return result;
}
Example of java.util.EnumSet usage in the Railcraft project (Railcraft).
From class ChargeManager, method forConnections:
/**
 * Invokes {@code action} for every charge block adjacent to {@code pos} that
 * is mutually connectable with the block at {@code pos}: each candidate must
 * appear in this block's possible-connection map, and this block must appear
 * (with a matching connect type) in the candidate's reverse map.
 *
 * @param world  the world to inspect
 * @param pos    position of the charge block whose connections are walked
 * @param action callback receiving each connected position and its charge def
 */
public static void forConnections(World world, BlockPos pos, BiConsumer<BlockPos, IChargeBlock.ChargeDef> action) {
    IBlockState state = WorldPlugin.getBlockState(world, pos);
    if (state.getBlock() instanceof IChargeBlock) {
        IChargeBlock block = (IChargeBlock) state.getBlock();
        IChargeBlock.ChargeDef chargeDef = block.getChargeDef(state, world, pos);
        if (chargeDef != null) {
            Map<BlockPos, EnumSet<IChargeBlock.ConnectType>> possibleConnections = chargeDef.getConnectType().getPossibleConnectionLocations(pos);
            for (Map.Entry<BlockPos, EnumSet<IChargeBlock.ConnectType>> connection : possibleConnections.entrySet()) {
                BlockPos otherPos = connection.getKey();
                IBlockState otherState = WorldPlugin.getBlockState(world, otherPos);
                if (otherState.getBlock() instanceof IChargeBlock) {
                    // Reuse otherState instead of fetching the block state a second
                    // time for the same position (the original did two lookups).
                    IChargeBlock.ChargeDef other = ((IChargeBlock) otherState.getBlock()).getChargeDef(otherState, world, otherPos);
                    if (other != null) {
                        // Reverse lookup: does the neighbor consider our position
                        // connectable? Guard against a missing entry — the original
                        // would NPE on .contains() if get(pos) returned null.
                        // NOTE(review): presumably the maps are symmetric so get(pos)
                        // is never null in practice — confirm against ConnectType.
                        EnumSet<IChargeBlock.ConnectType> reverse = other.getConnectType().getPossibleConnectionLocations(otherPos).get(pos);
                        if (reverse != null && reverse.contains(chargeDef.getConnectType())) {
                            action.accept(otherPos, other);
                        }
                    }
                }
            }
        }
    }
}
Example of java.util.EnumSet usage in the dubbo project (alibaba).
From class Hessian2EnumSetTest, method set:
/**
 * Round-trips a two-element EnumSet through Hessian2 serialization and
 * verifies that membership survives: present members stay present and
 * absent members stay absent.
 */
@Test
public void set() throws Exception {
    EnumSet<Type> original = EnumSet.of(Type.High, Type.Lower);
    EnumSet deserialized = baseHession2Serialize(original);
    // A constant written before serialization must still be a member.
    assertTrue(deserialized.contains(Type.High));
    // A constant never added must not appear after deserialization.
    assertFalse(deserialized.contains(Type.Normal));
}
Example of java.util.EnumSet usage in the dubbo project (alibaba).
From class Hessian2EnumSetTest, method singleton:
/**
 * Round-trips a single-element EnumSet through Hessian2 serialization and
 * checks membership via the set's array view rather than contains().
 */
@Test
public void singleton() throws Exception {
    EnumSet source = EnumSet.of(Type.High);
    EnumSet roundTripped = baseHession2Serialize(source);
    // Snapshot the deserialized contents once instead of calling toArray() twice.
    Object[] elements = roundTripped.toArray();
    assertTrue(Arrays.asList(elements).contains(Type.High));
    assertFalse(Arrays.asList(elements).contains(Type.Lower));
}
Aggregations