Use of java.util.EnumMap in project cassandra by apache.
From the class CloudstackSnitchTest, method testRacks.
@Test
public void testRacks() throws IOException, ConfigurationException {
    az = "ch-gva-1";
    CloudstackSnitch snitch = new TestCloudstackSnitch();
    InetAddress local = InetAddress.getByName("127.0.0.1");
    InetAddress nonlocal = InetAddress.getByName("127.0.0.7");
    Gossiper.instance.addSavedEndpoint(nonlocal);
    Map<ApplicationState, VersionedValue> stateMap = new EnumMap<>(ApplicationState.class);
    stateMap.put(ApplicationState.DC, StorageService.instance.valueFactory.datacenter("ch-zrh"));
    stateMap.put(ApplicationState.RACK, StorageService.instance.valueFactory.rack("2"));
    Gossiper.instance.getEndpointStateForEndpoint(nonlocal).addApplicationStates(stateMap);
    assertEquals("ch-zrh", snitch.getDatacenter(nonlocal));
    assertEquals("2", snitch.getRack(nonlocal));
    assertEquals("ch-gva", snitch.getDatacenter(local));
    assertEquals("1", snitch.getRack(local));
}
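This test and the two cloud snitch tests below share one idiom: an EnumMap keyed by ApplicationState carries the gossiped placement of a remote node. As a quick aside, a minimal, self-contained sketch of the EnumMap idiom itself (a hypothetical State enum, nothing Cassandra-specific):

import java.util.EnumMap;
import java.util.Map;

// Hypothetical enum standing in for Cassandra's ApplicationState.
enum State { DC, RACK }

public class EnumMapSketch {
    public static void main(String[] args) {
        // EnumMap takes the key type's Class token because it is backed by a
        // plain array indexed by enum ordinal, not by hash buckets, which
        // makes it both compact and fast for enum keys.
        Map<State, String> stateMap = new EnumMap<>(State.class);
        stateMap.put(State.DC, "ch-zrh");
        stateMap.put(State.RACK, "2");
        System.out.println(stateMap); // prints {DC=ch-zrh, RACK=2}, in ordinal order
    }
}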
Use of java.util.EnumMap in project cassandra by apache.
From the class EC2SnitchTest, method testRac.
@Test
public void testRac() throws IOException, ConfigurationException {
    az = "us-east-1d";
    Ec2Snitch snitch = new TestEC2Snitch();
    InetAddress local = InetAddress.getByName("127.0.0.1");
    InetAddress nonlocal = InetAddress.getByName("127.0.0.7");
    Gossiper.instance.addSavedEndpoint(nonlocal);
    Map<ApplicationState, VersionedValue> stateMap = new EnumMap<>(ApplicationState.class);
    stateMap.put(ApplicationState.DC, StorageService.instance.valueFactory.datacenter("us-west"));
    // The rack value comes from the rack factory, not the datacenter factory.
    stateMap.put(ApplicationState.RACK, StorageService.instance.valueFactory.rack("1a"));
    Gossiper.instance.getEndpointStateForEndpoint(nonlocal).addApplicationStates(stateMap);
    assertEquals("us-west", snitch.getDatacenter(nonlocal));
    assertEquals("1a", snitch.getRack(nonlocal));
    assertEquals("us-east", snitch.getDatacenter(local));
    assertEquals("1d", snitch.getRack(local));
}
Use of java.util.EnumMap in project cassandra by apache.
From the class GoogleCloudSnitchTest, method testRac.
@Test
public void testRac() throws IOException, ConfigurationException {
    az = "us-central1-a";
    GoogleCloudSnitch snitch = new TestGoogleCloudSnitch();
    InetAddress local = InetAddress.getByName("127.0.0.1");
    InetAddress nonlocal = InetAddress.getByName("127.0.0.7");
    Gossiper.instance.addSavedEndpoint(nonlocal);
    Map<ApplicationState, VersionedValue> stateMap = new EnumMap<>(ApplicationState.class);
    stateMap.put(ApplicationState.DC, StorageService.instance.valueFactory.datacenter("europe-west1"));
    // The rack value comes from the rack factory, not the datacenter factory.
    stateMap.put(ApplicationState.RACK, StorageService.instance.valueFactory.rack("a"));
    Gossiper.instance.getEndpointStateForEndpoint(nonlocal).addApplicationStates(stateMap);
    assertEquals("europe-west1", snitch.getDatacenter(nonlocal));
    assertEquals("a", snitch.getRack(nonlocal));
    assertEquals("us-central1", snitch.getDatacenter(local));
    assertEquals("a", snitch.getRack(local));
}
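In all three tests the local node's datacenter and rack are derived by splitting the availability-zone string at its last '-': "ch-gva-1" yields DC "ch-gva" and rack "1", "us-east-1d" yields "us-east" and "1d", and "us-central1-a" yields "us-central1" and "a". A hedged sketch of that split; this is an assumption inferred from the assertions above, not the snitches' actual implementation:

// Hypothetical helper mirroring how the cloud snitches appear to derive
// datacenter and rack from an availability zone; the real code may differ.
static String[] splitAvailabilityZone(String az) {
    int cut = az.lastIndexOf('-');
    String dc = az.substring(0, cut);     // "us-east-1d" -> "us-east"
    String rack = az.substring(cut + 1);  // "us-east-1d" -> "1d"
    return new String[] { dc, rack };
}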
Use of java.util.EnumMap in project hadoop by apache.
From the class TestDFSNetworkTopology, method testAddAndRemoveTopology.
/**
* Test the correctness of storage type info when nodes are added and removed.
* @throws Exception
*/
@Test
public void testAddAndRemoveTopology() throws Exception {
    String[] newRack = { "/l1/d1/r1", "/l1/d1/r3", "/l1/d3/r3", "/l1/d3/r3" };
    String[] newHost = { "nhost1", "nhost2", "nhost3", "nhost4" };
    String[] newips = { "30.30.30.30", "31.31.31.31", "32.32.32.32", "33.33.33.33" };
    StorageType[] newTypes = { StorageType.DISK, StorageType.SSD, StorageType.SSD, StorageType.SSD };
    DatanodeDescriptor[] newDD = new DatanodeDescriptor[4];
    for (int i = 0; i < 4; i++) {
        DatanodeStorageInfo dsi = DFSTestUtil.createDatanodeStorageInfo("s" + newHost[i], newips[i], newRack[i], newHost[i], newTypes[i], null);
        newDD[i] = dsi.getDatanodeDescriptor();
        CLUSTER.add(newDD[i]);
    }
    DFSTopologyNodeImpl d1 = (DFSTopologyNodeImpl) CLUSTER.getNode("/l1/d1");
    HashMap<String, EnumMap<StorageType, Integer>> d1info = d1.getChildrenStorageInfo();
    assertEquals(3, d1info.keySet().size());
    assertTrue(d1info.get("r1").size() == 2 && d1info.get("r2").size() == 2 && d1info.get("r3").size() == 1);
    assertEquals(2, (int) d1info.get("r1").get(StorageType.DISK));
    assertEquals(1, (int) d1info.get("r1").get(StorageType.ARCHIVE));
    assertEquals(2, (int) d1info.get("r2").get(StorageType.DISK));
    assertEquals(1, (int) d1info.get("r2").get(StorageType.ARCHIVE));
    assertEquals(1, (int) d1info.get("r3").get(StorageType.SSD));
    DFSTopologyNodeImpl d3 = (DFSTopologyNodeImpl) CLUSTER.getNode("/l1/d3");
    HashMap<String, EnumMap<StorageType, Integer>> d3info = d3.getChildrenStorageInfo();
    assertEquals(1, d3info.keySet().size());
    assertTrue(d3info.get("r3").size() == 1);
    assertEquals(2, (int) d3info.get("r3").get(StorageType.SSD));
    DFSTopologyNodeImpl l1 = (DFSTopologyNodeImpl) CLUSTER.getNode("/l1");
    HashMap<String, EnumMap<StorageType, Integer>> l1info = l1.getChildrenStorageInfo();
    assertEquals(3, l1info.keySet().size());
    assertTrue(l1info.get("d1").size() == 3 && l1info.get("d2").size() == 3 && l1info.get("d3").size() == 1);
    assertEquals(4, (int) l1info.get("d1").get(StorageType.DISK));
    assertEquals(2, (int) l1info.get("d1").get(StorageType.ARCHIVE));
    assertEquals(1, (int) l1info.get("d1").get(StorageType.SSD));
    assertEquals(1, (int) l1info.get("d2").get(StorageType.SSD));
    assertEquals(1, (int) l1info.get("d2").get(StorageType.RAM_DISK));
    assertEquals(1, (int) l1info.get("d2").get(StorageType.DISK));
    assertEquals(2, (int) l1info.get("d3").get(StorageType.SSD));
    for (int i = 0; i < 4; i++) {
        CLUSTER.remove(newDD[i]);
    }
    // /l1/d1/r3 should now be gone, and /l1/d1/r1 restored to its original counts.
    DFSTopologyNodeImpl nd1 = (DFSTopologyNodeImpl) CLUSTER.getNode("/l1/d1");
    HashMap<String, EnumMap<StorageType, Integer>> nd1info = nd1.getChildrenStorageInfo();
    assertEquals(2, nd1info.keySet().size());
    assertTrue(nd1info.get("r1").size() == 2 && nd1info.get("r2").size() == 2);
    assertEquals(1, (int) nd1info.get("r1").get(StorageType.DISK));
    assertEquals(1, (int) nd1info.get("r1").get(StorageType.ARCHIVE));
    assertEquals(2, (int) nd1info.get("r2").get(StorageType.DISK));
    assertEquals(1, (int) nd1info.get("r2").get(StorageType.ARCHIVE));
    // /l1/d3 should now be gone, and /l1/d1 restored to its original counts.
    // Query the refreshed map (nl1info), not the pre-removal snapshot (l1info).
    DFSTopologyNodeImpl nl1 = (DFSTopologyNodeImpl) CLUSTER.getNode("/l1");
    HashMap<String, EnumMap<StorageType, Integer>> nl1info = nl1.getChildrenStorageInfo();
    assertEquals(2, nl1info.keySet().size());
    assertTrue(nl1info.get("d1").size() == 2 && nl1info.get("d2").size() == 3);
    assertEquals(2, (int) nl1info.get("d1").get(StorageType.ARCHIVE));
    assertEquals(3, (int) nl1info.get("d1").get(StorageType.DISK));
    assertEquals(1, (int) nl1info.get("d2").get(StorageType.DISK));
    assertEquals(1, (int) nl1info.get("d2").get(StorageType.RAM_DISK));
    assertEquals(1, (int) nl1info.get("d2").get(StorageType.SSD));
    assertNull(CLUSTER.getNode("/l1/d3"));
}
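The assertions above read nested HashMap<String, EnumMap<StorageType, Integer>> structures mapping each child node (rack or datacenter) to per-storage-type counts. A minimal sketch of the counting idiom such a structure implies (hypothetical names, not the actual DFSTopologyNodeImpl code):

import java.util.EnumMap;
import java.util.HashMap;
import java.util.Map;

// Hypothetical stand-in for org.apache.hadoop.fs.StorageType.
enum StorageType { DISK, SSD, ARCHIVE, RAM_DISK }

public class StorageCountSketch {
    // child name (e.g. rack "r1") -> storage type -> number of storages
    private final Map<String, EnumMap<StorageType, Integer>> childrenStorageInfo = new HashMap<>();

    // Called when a datanode with the given storage type joins under `child`.
    void increment(String child, StorageType type) {
        childrenStorageInfo
            .computeIfAbsent(child, c -> new EnumMap<>(StorageType.class))
            .merge(type, 1, Integer::sum);
    }

    // Called when such a datanode leaves; drops zeroed entries and empty
    // children so the maps shrink back, matching the post-removal size
    // assertions in the test above.
    void decrement(String child, StorageType type) {
        EnumMap<StorageType, Integer> counts = childrenStorageInfo.get(child);
        if (counts == null) {
            return;
        }
        counts.computeIfPresent(type, (t, n) -> n > 1 ? n - 1 : null);
        if (counts.isEmpty()) {
            childrenStorageInfo.remove(child);
        }
    }
}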
Use of java.util.EnumMap in project collect by opendatakit.
From the class QRCodeUtils, method decodeFromBitmap.
public static String decodeFromBitmap(Bitmap bitmap) throws DataFormatException, IOException, FormatException, ChecksumException, NotFoundException {
    Map<DecodeHintType, Object> tmpHintsMap = new EnumMap<>(DecodeHintType.class);
    tmpHintsMap.put(DecodeHintType.TRY_HARDER, Boolean.TRUE);
    // POSSIBLE_FORMATS maps to a collection of BarcodeFormat values, not a single format.
    tmpHintsMap.put(DecodeHintType.POSSIBLE_FORMATS, EnumSet.of(BarcodeFormat.QR_CODE));
    tmpHintsMap.put(DecodeHintType.PURE_BARCODE, Boolean.FALSE);
    Reader reader = new QRCodeMultiReader();
    Result result = reader.decode(getBinaryBitmap(bitmap), tmpHintsMap);
    return CompressionUtils.decompress(result.getText());
}
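A hypothetical caller, to show the method in context; the file path, tag strings, and error handling are illustrative assumptions, while decodeFromBitmap and CompressionUtils are the project helpers shown above:

// Hypothetical usage from Android code; the path is illustrative only.
Bitmap bitmap = BitmapFactory.decodeFile("/sdcard/Download/collect-settings.png");
try {
    String settings = QRCodeUtils.decodeFromBitmap(bitmap);
    Log.i("QRCodeUtils", "Decoded settings payload: " + settings);
} catch (NotFoundException | ChecksumException | FormatException
        | DataFormatException | IOException e) {
    Log.w("QRCodeUtils", "Image did not contain a readable QR code", e);
}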