Usage example of org.apache.flink.api.common.io.LocatableInputSplitAssigner from the Apache Flink project.
From class LocatableSplitAssignerTest, method testSerialSplitAssignmentSomeForRemoteHost:
@Test
public void testSerialSplitAssignmentSomeForRemoteHost() {
    // Scenario under test:
    //   host1 reads all of its splits locally,
    //   host2 reads 10 splits locally and 10 remotely,
    //   host3 reads only remote splits.
    final String[] hosts = { "host1", "host2", "host3" };
    final int NUM_LOCAL_HOST1_SPLITS = 20;
    final int NUM_LOCAL_HOST2_SPLITS = 10;
    final int NUM_REMOTE_SPLITS = 30;
    final int NUM_LOCAL_SPLITS = NUM_LOCAL_HOST1_SPLITS + NUM_LOCAL_HOST2_SPLITS;

    // Load the splits; split numbers are globally unique via splitCnt.
    int splitCnt = 0;
    Set<LocatableInputSplit> splits = new HashSet<LocatableInputSplit>();
    // splits local to host1
    for (int i = 0; i < NUM_LOCAL_HOST1_SPLITS; i++) {
        splits.add(new LocatableInputSplit(splitCnt++, "host1"));
    }
    // splits local to host2
    for (int i = 0; i < NUM_LOCAL_HOST2_SPLITS; i++) {
        splits.add(new LocatableInputSplit(splitCnt++, "host2"));
    }
    // splits on a host no reader is located on (always remote)
    for (int i = 0; i < NUM_REMOTE_SPLITS; i++) {
        splits.add(new LocatableInputSplit(splitCnt++, "remoteHost"));
    }

    // Drain the assigner, cycling through the three request hosts;
    // every returned split must be one we put in, exactly once.
    LocatableInputSplitAssigner ia = new LocatableInputSplitAssigner(splits);
    InputSplit is = null;
    int i = 0;
    while ((is = ia.getNextInputSplit(hosts[i++ % hosts.length], 0)) != null) {
        assertTrue(splits.remove(is));
    }

    // All splits were handed out, and the assigner is exhausted.
    assertTrue(splits.isEmpty());
    assertNull(ia.getNextInputSplit("anotherHost", 0));

    // The "remoteHost" splits are the only remote assignments; everything
    // placed on host1/host2 was served locally.
    assertEquals(NUM_REMOTE_SPLITS, ia.getNumberOfRemoteAssignments());
    assertEquals(NUM_LOCAL_SPLITS, ia.getNumberOfLocalAssignments());
}
Usage example of org.apache.flink.api.common.io.LocatableInputSplitAssigner from the Apache Flink project.
From class LocatableSplitAssignerTest, method testSerialSplitAssignmentWithNullHost:
@Test
public void testSerialSplitAssignmentWithNullHost() {
    final int NUM_SPLITS = 50;
    // Splits alternate between a real locality list, an empty list, and
    // null — the assigner must tolerate all three.
    final String[][] hosts = new String[][] { new String[] { "localhost" }, new String[0], null };

    // load some splits
    Set<LocatableInputSplit> splits = new HashSet<LocatableInputSplit>();
    for (int i = 0; i < NUM_SPLITS; i++) {
        splits.add(new LocatableInputSplit(i, hosts[i % 3]));
    }

    // Drain the assigner while requesting with a null host; every
    // returned split must be one we put in, exactly once.
    LocatableInputSplitAssigner ia = new LocatableInputSplitAssigner(splits);
    InputSplit is = null;
    while ((is = ia.getNextInputSplit(null, 0)) != null) {
        assertTrue(splits.remove(is));
    }

    // All splits were handed out, and the assigner is exhausted.
    assertTrue(splits.isEmpty());
    assertNull(ia.getNextInputSplit("", 0));

    // A null requesting host can never match a locality, so every
    // assignment counts as remote.
    assertEquals(NUM_SPLITS, ia.getNumberOfRemoteAssignments());
    assertEquals(0, ia.getNumberOfLocalAssignments());
}
Usage example of org.apache.flink.api.common.io.LocatableInputSplitAssigner from the Apache Flink project.
From class LocatableSplitAssignerTest, method testConcurrentSplitAssignmentForMultipleHosts:
@Test
public void testConcurrentSplitAssignmentForMultipleHosts() throws InterruptedException {
    final int NUM_THREADS = 10;
    final int NUM_SPLITS = 500;
    // Gauss sum of split ids 0..NUM_SPLITS-1; checks each split is
    // retrieved exactly once across all threads.
    final int SUM_OF_IDS = (NUM_SPLITS - 1) * (NUM_SPLITS) / 2;
    final String[] hosts = { "host1", "host1", "host1", "host2", "host2", "host3" };

    // load some splits, spread round-robin over the hosts
    Set<LocatableInputSplit> splits = new HashSet<LocatableInputSplit>();
    for (int i = 0; i < NUM_SPLITS; i++) {
        splits.add(new LocatableInputSplit(i, hosts[i % hosts.length]));
    }

    final LocatableInputSplitAssigner ia = new LocatableInputSplitAssigner(splits);
    final AtomicInteger splitsRetrieved = new AtomicInteger(0);
    final AtomicInteger sumOfIds = new AtomicInteger(0);

    // Each worker pretends to run on a randomly chosen host and pulls
    // splits until the assigner is drained.
    Runnable retriever = new Runnable() {

        @Override
        public void run() {
            final String threadHost = hosts[(int) (Math.random() * hosts.length)];
            LocatableInputSplit split;
            while ((split = ia.getNextInputSplit(threadHost, 0)) != null) {
                splitsRetrieved.incrementAndGet();
                sumOfIds.addAndGet(split.getSplitNumber());
            }
        }
    };

    // create the threads
    Thread[] threads = new Thread[NUM_THREADS];
    for (int i = 0; i < NUM_THREADS; i++) {
        threads[i] = new Thread(retriever);
        threads[i].setDaemon(true);
    }
    // launch concurrently
    for (int i = 0; i < NUM_THREADS; i++) {
        threads[i].start();
    }
    // sync with a timeout so a hung assigner cannot block the build
    for (int i = 0; i < NUM_THREADS; i++) {
        threads[i].join(5000);
    }
    // verify that no thread timed out (join returns silently on timeout)
    for (int i = 0; i < NUM_THREADS; i++) {
        if (threads[i].isAlive()) {
            fail("The concurrency test case is erroneous, the thread did not respond in time.");
        }
    }

    // Every split was handed out exactly once.
    assertEquals(NUM_SPLITS, splitsRetrieved.get());
    assertEquals(SUM_OF_IDS, sumOfIds.get());
    // nothing left
    assertNull(ia.getNextInputSplit("testhost", 0));
    // at least one fraction of hosts needs be local, no matter how bad the thread races
    assertTrue(ia.getNumberOfLocalAssignments() >= NUM_SPLITS / hosts.length);
}
Usage example of org.apache.flink.api.common.io.LocatableInputSplitAssigner from the Apache Flink project.
From class LocatableSplitAssignerTest, method testSerialSplitAssignmentMixedLocalHost:
@Test
public void testSerialSplitAssignmentMixedLocalHost() {
    // host1 appears three times and host2 twice, so the request pattern
    // below always matches a split's locality.
    final String[] hosts = { "host1", "host1", "host1", "host2", "host2", "host3" };
    final int NUM_SPLITS = 10 * hosts.length;

    // load some splits, spread round-robin over the hosts
    Set<LocatableInputSplit> splits = new HashSet<LocatableInputSplit>();
    for (int i = 0; i < NUM_SPLITS; i++) {
        splits.add(new LocatableInputSplit(i, hosts[i % hosts.length]));
    }

    // Drain the assigner, cycling through the request hosts in the same
    // order the splits were distributed.
    LocatableInputSplitAssigner ia = new LocatableInputSplitAssigner(splits);
    InputSplit is = null;
    int i = 0;
    while ((is = ia.getNextInputSplit(hosts[i++ % hosts.length], 0)) != null) {
        assertTrue(splits.remove(is));
    }

    // All splits were handed out, and the assigner is exhausted.
    assertTrue(splits.isEmpty());
    assertNull(ia.getNextInputSplit("anotherHost", 0));

    // Requests line up with split locality, so every assignment is local.
    assertEquals(0, ia.getNumberOfRemoteAssignments());
    assertEquals(NUM_SPLITS, ia.getNumberOfLocalAssignments());
}
Usage example of org.apache.flink.api.common.io.LocatableInputSplitAssigner from the Apache Flink project.
From class LocatableSplitAssignerTest, method testConcurrentSplitAssignmentNullHost:
@Test
public void testConcurrentSplitAssignmentNullHost() throws InterruptedException {
    final int NUM_THREADS = 10;
    final int NUM_SPLITS = 500;
    // Gauss sum of split ids 0..NUM_SPLITS-1; checks each split is
    // retrieved exactly once across all threads.
    final int SUM_OF_IDS = (NUM_SPLITS - 1) * (NUM_SPLITS) / 2;
    // Splits alternate between a real locality list, an empty list, and
    // null — the assigner must tolerate all three.
    final String[][] hosts = new String[][] { new String[] { "localhost" }, new String[0], null };

    // load some splits
    Set<LocatableInputSplit> splits = new HashSet<LocatableInputSplit>();
    for (int i = 0; i < NUM_SPLITS; i++) {
        splits.add(new LocatableInputSplit(i, hosts[i % 3]));
    }

    final LocatableInputSplitAssigner ia = new LocatableInputSplitAssigner(splits);
    final AtomicInteger splitsRetrieved = new AtomicInteger(0);
    final AtomicInteger sumOfIds = new AtomicInteger(0);

    // Each worker requests with a null host until the assigner is drained.
    Runnable retriever = new Runnable() {

        @Override
        public void run() {
            LocatableInputSplit split;
            while ((split = ia.getNextInputSplit(null, 0)) != null) {
                splitsRetrieved.incrementAndGet();
                sumOfIds.addAndGet(split.getSplitNumber());
            }
        }
    };

    // create the threads
    Thread[] threads = new Thread[NUM_THREADS];
    for (int i = 0; i < NUM_THREADS; i++) {
        threads[i] = new Thread(retriever);
        threads[i].setDaemon(true);
    }
    // launch concurrently
    for (int i = 0; i < NUM_THREADS; i++) {
        threads[i].start();
    }
    // sync with a timeout so a hung assigner cannot block the build
    for (int i = 0; i < NUM_THREADS; i++) {
        threads[i].join(5000);
    }
    // verify that no thread timed out (join returns silently on timeout)
    for (int i = 0; i < NUM_THREADS; i++) {
        if (threads[i].isAlive()) {
            fail("The concurrency test case is erroneous, the thread did not respond in time.");
        }
    }

    // Every split was handed out exactly once.
    assertEquals(NUM_SPLITS, splitsRetrieved.get());
    assertEquals(SUM_OF_IDS, sumOfIds.get());
    // nothing left
    assertNull(ia.getNextInputSplit("", 0));

    // A null requesting host can never match a locality, so every
    // assignment counts as remote.
    assertEquals(NUM_SPLITS, ia.getNumberOfRemoteAssignments());
    assertEquals(0, ia.getNumberOfLocalAssignments());
}
Aggregations