Use of java.util.LinkedHashMap in project groovy by apache.
The class InvokerHelper, method createMap.
public static Map createMap(Object[] values) {
    Map answer = new LinkedHashMap(values.length / 2);
    int i = 0;
    while (i < values.length - 1) {
        if ((values[i] instanceof SpreadMap) && (values[i + 1] instanceof Map)) {
            // A SpreadMap entry is flattened into the result, entry by entry.
            Map smap = (Map) values[i + 1];
            for (Object key : smap.keySet()) {
                answer.put(key, smap.get(key));
            }
            i += 2;
        } else {
            // Plain entries arrive as alternating key/value pairs.
            answer.put(values[i++], values[i++]);
        }
    }
    return answer;
}
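createMap builds the backing map for a Groovy map literal from alternating key/value pairs, and the LinkedHashMap keeps the entries in literal order. A minimal sketch of calling it directly, assuming the class above is org.codehaus.groovy.runtime.InvokerHelper (the literal keys and values are only illustrative):

import java.util.Map;
import org.codehaus.groovy.runtime.InvokerHelper;

public class CreateMapDemo {
    public static void main(String[] args) {
        // Alternating key/value pairs, as produced for a Groovy map literal
        // such as [a: 1, b: 2, c: 3].
        Map answer = InvokerHelper.createMap(new Object[] {"a", 1, "b", 2, "c", 3});

        // Because createMap fills a LinkedHashMap, iteration follows the order
        // in which the pairs appeared: {a=1, b=2, c=3}.
        System.out.println(answer);
    }
}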
Use of java.util.LinkedHashMap in project hadoop by apache.
The class StorageLocationChecker, method check.
/**
 * Initiate a check of the supplied storage volumes and return
 * the list of healthy volumes.
 *
 * StorageLocations are returned in the same order as the input
 * for compatibility with existing unit tests.
 *
 * @param conf HDFS configuration.
 * @param dataDirs list of volumes to check.
 * @return the list of healthy volumes, in the same order as the
 *         corresponding entries in dataDirs.
 *
 * @throws InterruptedException if the check was interrupted.
 * @throws IOException if the number of failed volumes exceeds the
 *                     maximum allowed or if there are no good
 *                     volumes.
 */
public List<StorageLocation> check(final Configuration conf, final Collection<StorageLocation> dataDirs) throws InterruptedException, IOException {
    final HashMap<StorageLocation, Boolean> goodLocations = new LinkedHashMap<>();
    final Set<StorageLocation> failedLocations = new HashSet<>();
    final Map<StorageLocation, ListenableFuture<VolumeCheckResult>> futures = Maps.newHashMap();
    final LocalFileSystem localFS = FileSystem.getLocal(conf);
    final CheckContext context = new CheckContext(localFS, expectedPermission);
    // Start parallel disk check operations on all StorageLocations.
    for (StorageLocation location : dataDirs) {
        goodLocations.put(location, true);
        Optional<ListenableFuture<VolumeCheckResult>> olf = delegateChecker.schedule(location, context);
        if (olf.isPresent()) {
            futures.put(location, olf.get());
        }
    }
    if (maxVolumeFailuresTolerated >= dataDirs.size()) {
        throw new DiskErrorException("Invalid value configured for "
            + DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY + " - " + maxVolumeFailuresTolerated
            + ". Value configured is >= " + "to the number of configured volumes ("
            + dataDirs.size() + ").");
    }
    final long checkStartTimeMs = timer.monotonicNow();
    // Retrieve the results of the disk checks.
    for (Map.Entry<StorageLocation, ListenableFuture<VolumeCheckResult>> entry : futures.entrySet()) {
        // Determine how much time we can allow for this check to complete.
        // The cumulative wait time cannot exceed maxAllowedTimeForCheck.
        final long waitSoFarMs = (timer.monotonicNow() - checkStartTimeMs);
        final long timeLeftMs = Math.max(0, maxAllowedTimeForCheckMs - waitSoFarMs);
        final StorageLocation location = entry.getKey();
        try {
            final VolumeCheckResult result = entry.getValue().get(timeLeftMs, TimeUnit.MILLISECONDS);
            switch (result) {
                case HEALTHY:
                    break;
                case DEGRADED:
                    LOG.warn("StorageLocation {} appears to be degraded.", location);
                    break;
                case FAILED:
                    LOG.warn("StorageLocation {} detected as failed.", location);
                    failedLocations.add(location);
                    goodLocations.remove(location);
                    break;
                default:
                    LOG.error("Unexpected health check result {} for StorageLocation {}", result, location);
            }
        } catch (ExecutionException | TimeoutException e) {
            LOG.warn("Exception checking StorageLocation " + location, e.getCause());
            failedLocations.add(location);
            goodLocations.remove(location);
        }
    }
    if (failedLocations.size() > maxVolumeFailuresTolerated) {
        throw new DiskErrorException("Too many failed volumes - "
            + "current valid volumes: " + goodLocations.size()
            + ", volumes configured: " + dataDirs.size()
            + ", volumes failed: " + failedLocations.size()
            + ", volume failures tolerated: " + maxVolumeFailuresTolerated);
    }
    if (goodLocations.size() == 0) {
        throw new DiskErrorException("All directories in " + DFS_DATANODE_DATA_DIR_KEY
            + " are invalid: " + failedLocations);
    }
    return new ArrayList<>(goodLocations.keySet());
}
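goodLocations is a LinkedHashMap precisely so that the healthy volumes come back in the same order as the configured dataDirs, even after failed entries are removed. A standalone sketch of that property, with plain strings standing in for StorageLocation (the directory names are made up):

import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

public class OrderPreservationDemo {
    public static void main(String[] args) {
        // Stand-ins for configured storage directories, in configuration order.
        Map<String, Boolean> goodLocations = new LinkedHashMap<>();
        goodLocations.put("/data/disk1", true);
        goodLocations.put("/data/disk2", true);
        goodLocations.put("/data/disk3", true);

        // Simulate one volume failing its check.
        goodLocations.remove("/data/disk2");

        // Remaining keys keep their original relative order: [/data/disk1, /data/disk3].
        List<String> healthy = new ArrayList<>(goodLocations.keySet());
        System.out.println(healthy);
    }
}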
Use of java.util.LinkedHashMap in project hadoop by apache.
The class KMSServerJSONUtils, method toJSON.
@SuppressWarnings("unchecked")
public static Map toJSON(KeyProvider.KeyVersion keyVersion) {
    Map json = new LinkedHashMap();
    if (keyVersion != null) {
        json.put(KMSRESTConstants.NAME_FIELD, keyVersion.getName());
        json.put(KMSRESTConstants.VERSION_NAME_FIELD, keyVersion.getVersionName());
        json.put(KMSRESTConstants.MATERIAL_FIELD,
            Base64.encodeBase64URLSafeString(keyVersion.getMaterial()));
    }
    return json;
}
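The LinkedHashMap fixes the order of the fields in the response, so the serialized key version always lists the name, then the version name, then the material. A minimal sketch of the idea, with literal strings standing in for the KMSRESTConstants field names (the actual constant values may differ):

import java.util.LinkedHashMap;
import java.util.Map;

public class KeyVersionJsonOrderDemo {
    public static void main(String[] args) {
        // Literal keys stand in for KMSRESTConstants.NAME_FIELD, VERSION_NAME_FIELD
        // and MATERIAL_FIELD; the real constant values may differ.
        Map<String, Object> json = new LinkedHashMap<>();
        json.put("name", "key1");
        json.put("versionName", "key1@0");
        json.put("material", "base64-material");

        // Iteration order matches insertion order, so any JSON writer that walks
        // the map emits the fields in a stable, predictable order.
        json.forEach((k, v) -> System.out.println(k + " = " + v));
    }
}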
Use of java.util.LinkedHashMap in project hadoop by apache.
The class KMSServerJSONUtils, method toJSON.
@SuppressWarnings("unchecked")
public static Map toJSON(String keyName, KeyProvider.Metadata meta) {
    Map json = new LinkedHashMap();
    if (meta != null) {
        json.put(KMSRESTConstants.NAME_FIELD, keyName);
        json.put(KMSRESTConstants.CIPHER_FIELD, meta.getCipher());
        json.put(KMSRESTConstants.LENGTH_FIELD, meta.getBitLength());
        json.put(KMSRESTConstants.DESCRIPTION_FIELD, meta.getDescription());
        json.put(KMSRESTConstants.ATTRIBUTES_FIELD, meta.getAttributes());
        json.put(KMSRESTConstants.CREATED_FIELD, meta.getCreated().getTime());
        json.put(KMSRESTConstants.VERSIONS_FIELD, (long) meta.getVersions());
    }
    return json;
}
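The same pattern gives the key-metadata response a stable field order. A sketch that serializes such a map with Jackson purely for illustration (the KMS endpoint itself may use a different JSON writer, and the field names here are placeholders):

import java.util.LinkedHashMap;
import java.util.Map;
import com.fasterxml.jackson.databind.ObjectMapper;

public class MetadataJsonDemo {
    public static void main(String[] args) throws Exception {
        // Literal keys stand in for the KMSRESTConstants metadata fields.
        Map<String, Object> json = new LinkedHashMap<>();
        json.put("name", "key1");
        json.put("cipher", "AES/CTR/NoPadding");
        json.put("length", 128);
        json.put("description", "demo key");
        json.put("versions", 1L);

        // Jackson serializes a Map in its iteration order, so the LinkedHashMap
        // yields the fields in insertion order in the resulting JSON string.
        System.out.println(new ObjectMapper().writeValueAsString(json));
    }
}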
Use of java.util.LinkedHashMap in project hadoop by apache.
The class InstrumentationService, method init.
@Override
@SuppressWarnings("unchecked")
public void init() throws ServiceException {
    timersSize = getServiceConfig().getInt(CONF_TIMERS_SIZE, 10);
    counterLock = new ReentrantLock();
    timerLock = new ReentrantLock();
    variableLock = new ReentrantLock();
    samplerLock = new ReentrantLock();
    Map<String, VariableHolder> jvmVariables = new ConcurrentHashMap<String, VariableHolder>();
    counters = new ConcurrentHashMap<String, Map<String, AtomicLong>>();
    timers = new ConcurrentHashMap<String, Map<String, Timer>>();
    variables = new ConcurrentHashMap<String, Map<String, VariableHolder>>();
    samplers = new ConcurrentHashMap<String, Map<String, Sampler>>();
    samplersList = new ArrayList<Sampler>();
    all = new LinkedHashMap<String, Map<String, ?>>();
    all.put("os-env", System.getenv());
    all.put("sys-props", (Map<String, ?>) (Map) System.getProperties());
    all.put("jvm", jvmVariables);
    all.put("counters", (Map) counters);
    all.put("timers", (Map) timers);
    all.put("variables", (Map) variables);
    all.put("samplers", (Map) samplers);
    jvmVariables.put("free.memory", new VariableHolder<Long>(new Instrumentation.Variable<Long>() {
        @Override
        public Long getValue() {
            return Runtime.getRuntime().freeMemory();
        }
    }));
    jvmVariables.put("max.memory", new VariableHolder<Long>(new Instrumentation.Variable<Long>() {
        @Override
        public Long getValue() {
            return Runtime.getRuntime().maxMemory();
        }
    }));
    jvmVariables.put("total.memory", new VariableHolder<Long>(new Instrumentation.Variable<Long>() {
        @Override
        public Long getValue() {
            return Runtime.getRuntime().totalMemory();
        }
    }));
}
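Here the top-level all map is a LinkedHashMap so that any snapshot or report built from it lists the categories (os-env, sys-props, jvm, counters, and so on) in the order they were registered, while the per-category maps stay concurrent. A standalone sketch of the pattern (class and variable names are illustrative):

import java.util.LinkedHashMap;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicLong;

public class InstrumentationSnapshotDemo {
    public static void main(String[] args) {
        // A mutable per-category map, as in the service.
        Map<String, AtomicLong> counters = new ConcurrentHashMap<>();
        counters.put("requests", new AtomicLong(42));

        // The top-level LinkedHashMap fixes the order of the categories,
        // so a snapshot or JSON dump always lists them the same way.
        Map<String, Map<String, ?>> all = new LinkedHashMap<>();
        all.put("os-env", System.getenv());
        all.put("counters", counters);

        for (Map.Entry<String, Map<String, ?>> e : all.entrySet()) {
            System.out.println(e.getKey() + " -> " + e.getValue().size() + " entries");
        }
    }
}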