Use of org.apache.flink.util.AbstractID in project flink by apache:
the class SharedSlotsTest, method allocateAndReleaseEmptySlot.
@Test
public void allocateAndReleaseEmptySlot() throws Exception {
    // Declaring 'throws Exception' lets JUnit report any unexpected exception with its
    // full stack trace, instead of the old catch { printStackTrace(); fail(getMessage()); }
    // pattern, which swallowed the trace and failed with a null message for message-less
    // exceptions.
    JobID jobId = new JobID();
    JobVertexID vertexId = new JobVertexID();
    SlotSharingGroup sharingGroup = new SlotSharingGroup(vertexId);
    SlotSharingGroupAssignment assignment = sharingGroup.getTaskAssignment();

    // the fresh assignment holds no slots yet
    assertEquals(0, assignment.getNumberOfSlots());
    assertEquals(0, assignment.getNumberOfAvailableSlotsForGroup(vertexId));

    Instance instance = SchedulerTestUtils.getRandomInstance(2);
    assertEquals(2, instance.getTotalNumberOfSlots());
    assertEquals(0, instance.getNumberOfAllocatedSlots());
    assertEquals(2, instance.getNumberOfAvailableSlots());

    // allocate a shared slot
    SharedSlot slot = instance.allocateSharedSlot(jobId, assignment);
    assertEquals(2, instance.getTotalNumberOfSlots());
    assertEquals(1, instance.getNumberOfAllocatedSlots());
    assertEquals(1, instance.getNumberOfAvailableSlots());

    // check that the new slot is fresh
    assertTrue(slot.isAlive());
    assertFalse(slot.isCanceled());
    assertFalse(slot.isReleased());
    assertEquals(0, slot.getNumberLeaves());
    assertFalse(slot.hasChildren());
    assertTrue(slot.isRootAndEmpty());
    assertNotNull(slot.toString());
    assertTrue(slot.getSubSlots().isEmpty());
    assertEquals(0, slot.getSlotNumber());
    assertEquals(0, slot.getRootSlotNumber());

    // release the slot immediately.
    slot.releaseSlot();
    assertTrue(slot.isCanceled());
    assertTrue(slot.isReleased());

    // the slot sharing group and instance should no longer count the slot as allocated
    assertEquals(2, instance.getTotalNumberOfSlots());
    assertEquals(0, instance.getNumberOfAllocatedSlots());
    assertEquals(2, instance.getNumberOfAvailableSlots());
    assertEquals(0, assignment.getNumberOfSlots());
    assertEquals(0, assignment.getNumberOfAvailableSlotsForGroup(vertexId));

    // we should not be able to allocate any children from this released slot
    assertNull(slot.allocateSharedSlot(new AbstractID()));
    assertNull(slot.allocateSubSlot(new AbstractID()));

    // we cannot add this slot to the assignment group
    assertNull(assignment.addSharedSlotAndAllocateSubSlot(slot, Locality.NON_LOCAL, vertexId));
    assertEquals(0, assignment.getNumberOfSlots());
}
Use of org.apache.flink.util.AbstractID in project flink by apache:
the class RocksDBStateBackend, method ensureRocksDBIsLoaded.
// ------------------------------------------------------------------------
// static library loading utilities
// ------------------------------------------------------------------------
/**
 * Loads the RocksDB JNI library exactly once per class (guarded by the class-level
 * lock and the {@code rocksDbInitialized} flag), extracting it into a uniquely named
 * folder under {@code tempDirectory} so that multiple class loaders do not collide on
 * the same native file path. Retries up to {@code ROCKSDB_LIB_LOADING_ATTEMPTS} times,
 * resetting RocksDB's internal "loaded" flag between attempts.
 *
 * @param tempDirectory base directory under which the per-attempt library folder is created
 * @throws IOException if the native library could not be loaded after all attempts;
 *         the last failure is attached as the cause
 */
private void ensureRocksDBIsLoaded(String tempDirectory) throws IOException {
// class-level lock: all instances (sharing this class) serialize on the one-time load
synchronized (RocksDBStateBackend.class) {
if (!rocksDbInitialized) {
final File tempDirParent = new File(tempDirectory).getAbsoluteFile();
LOG.info("Attempting to load RocksDB native library and store it under '{}'", tempDirParent);
Throwable lastException = null;
for (int attempt = 1; attempt <= ROCKSDB_LIB_LOADING_ATTEMPTS; attempt++) {
try {
// when multiple instances of this class and RocksDB exist in different
// class loaders, then we can see the following exception:
// "java.lang.UnsatisfiedLinkError: Native Library /path/to/temp/dir/librocksdbjni-linux64.so
// already loaded in another class loader"
// to avoid that, we need to add a random element to the library file path
// (I know, seems like an unnecessary hack, since the JVM obviously can handle multiple
// instances of the same JNI library being loaded in different class loaders, but
// apparently not when coming from the same file path, so there we go)
// AbstractID supplies the random, collision-avoiding path component
final File rocksLibFolder = new File(tempDirParent, "rocksdb-lib-" + new AbstractID());
// make sure the temp path exists
LOG.debug("Attempting to create RocksDB native library folder {}", rocksLibFolder);
// noinspection ResultOfMethodCallIgnored
rocksLibFolder.mkdirs();
// explicitly load the JNI dependency if it has not been loaded before
NativeLibraryLoader.getInstance().loadLibrary(rocksLibFolder.getAbsolutePath());
// this initialization here should validate that the loading succeeded
RocksDB.loadLibrary();
// seems to have worked
LOG.info("Successfully loaded RocksDB native library");
rocksDbInitialized = true;
return;
} catch (Throwable t) {
// Throwable (not Exception) on purpose: native loading failures surface as Errors
// such as UnsatisfiedLinkError
lastException = t;
LOG.debug("RocksDB JNI library loading attempt {} failed", attempt, t);
// try to force RocksDB to attempt reloading the library
try {
resetRocksDBLoadedFlag();
} catch (Throwable tt) {
// best-effort reset; failure here only means the next attempt is less likely to succeed
LOG.debug("Failed to reset 'initialized' flag in RocksDB native code loader", tt);
}
}
}
// all attempts exhausted; preserve the last failure as the cause
throw new IOException("Could not load the native RocksDB library", lastException);
}
}
}
Use of org.apache.flink.util.AbstractID in project flink by apache:
the class DataSet, method collect.
/**
 * Convenience method that materializes this DataSet as a {@link List} on the client.
 * Since a DataSet may hold a large amount of data, use this method with caution.
 *
 * @return A List containing the elements of the DataSet
 */
public List<T> collect() throws Exception {
    // unique accumulator name so concurrent collect() calls do not clash
    final String accumulatorName = new AbstractID().toString();
    final TypeSerializer<T> elementSerializer =
            getType().createSerializer(getExecutionEnvironment().getConfig());

    // attach a sink that gathers the serialized elements into an accumulator, then run the job
    this.output(new Utils.CollectHelper<>(accumulatorName, elementSerializer)).name("collect()");
    final JobExecutionResult executionResult = getExecutionEnvironment().execute();

    final ArrayList<byte[]> serializedElements =
            executionResult.getAccumulatorResult(accumulatorName);

    // guard clause: the accumulator must have been populated by the job
    if (serializedElements == null) {
        throw new RuntimeException("The call to collect() could not retrieve the DataSet.");
    }

    try {
        return SerializedListAccumulator.deserializeList(serializedElements, elementSerializer);
    } catch (ClassNotFoundException e) {
        throw new RuntimeException("Cannot find type class of collected data type.", e);
    } catch (IOException e) {
        throw new RuntimeException("Serialization error while deserializing collected data", e);
    }
}
Use of org.apache.flink.util.AbstractID in project flink by apache:
the class DataSetUtils, method checksumHashCode.
// --------------------------------------------------------------------------------------------
// Checksum
// --------------------------------------------------------------------------------------------
/**
 * Convenience method computing both the count (number of elements) of a DataSet
 * and its checksum (sum over the element hashes).
 *
 * @return A ChecksumHashCode that represents the count and checksum of elements in the data set.
 * @deprecated replaced with {@code org.apache.flink.graph.asm.dataset.ChecksumHashCode} in Gelly
 */
@Deprecated
public static <T> Utils.ChecksumHashCode checksumHashCode(DataSet<T> input) throws Exception {
    // unique accumulator name so concurrent invocations do not clash
    final String accumulatorName = new AbstractID().toString();

    // attach the checksum-computing sink and execute the program
    input.output(new Utils.ChecksumHashCodeHelper<T>(accumulatorName)).name("ChecksumHashCode");
    final JobExecutionResult executionResult = input.getExecutionEnvironment().execute();

    return executionResult.<Utils.ChecksumHashCode>getAccumulatorResult(accumulatorName);
}
Use of org.apache.flink.util.AbstractID in project flink by apache:
the class DataSet, method count.
/**
 * Convenience method to get the count (number of elements) of a DataSet.
 *
 * @return A long integer that represents the number of elements in the data set.
 * @throws RuntimeException if the executed job did not produce the count accumulator
 */
public long count() throws Exception {
    // unique accumulator name so concurrent count() calls do not clash
    final String id = new AbstractID().toString();
    output(new Utils.CountHelper<T>(id)).name("count()");
    JobExecutionResult res = getExecutionEnvironment().execute();
    Long count = res.<Long>getAccumulatorResult(id);
    // Previously the result was unboxed directly, so a missing accumulator surfaced as a
    // bare NullPointerException. Fail with a descriptive message instead, consistent with
    // how collect() reports a missing accumulator result.
    if (count == null) {
        throw new RuntimeException("The call to count() could not retrieve the count of the DataSet.");
    }
    return count;
}
Aggregations