Use of org.apache.gora.query.impl.PartitionQueryImpl in project gora by apache.
From the class TestPartitionQueryImpl, method testReadWrite:
@Test
public void testReadWrite() throws Exception {
  MockQuery baseQuery = dataStore.newQuery();
  baseQuery.setStartKey("start");
  baseQuery.setLimit(42);
  PartitionQueryImpl<String, MockPersistent> query = new PartitionQueryImpl<>(baseQuery);
  // Round-trips the partition query through Hadoop's Writable serialization.
  TestWritable.testWritable(query);
}
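The test delegates to Gora's TestWritable helper, which round-trips a Writable through Hadoop serialization and checks that the deserialized copy matches the original (PartitionQueryImpl inherits Writable from QueryBase). A minimal sketch of that kind of round-trip, assuming only the standard Writable contract; the class and method names below are illustrative, not Gora's actual helper:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;

import org.apache.hadoop.io.Writable;

public final class WritableRoundTrip {

  // Writes the instance to a byte buffer, reads it back into a fresh
  // instance of the same class, and returns the copy for assertions.
  @SuppressWarnings("unchecked")
  public static <W extends Writable> W roundTrip(W original) throws Exception {
    ByteArrayOutputStream buffer = new ByteArrayOutputStream();
    original.write(new DataOutputStream(buffer));

    W copy = (W) original.getClass().getDeclaredConstructor().newInstance();
    copy.readFields(new DataInputStream(new ByteArrayInputStream(buffer.toByteArray())));
    return copy;
  }
}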
Use of org.apache.gora.query.impl.PartitionQueryImpl in project gora by apache.
From the class JCacheStore, method getPartitions:
@Override
public List<PartitionQuery<K, T>> getPartitions(Query<K, T> query) throws IOException {
  List<PartitionQuery<K, T>> partitions = new ArrayList<>();
  try {
    Member[] clusterMembers = new Member[hazelcastInstance.getCluster().getMembers().size()];
    this.hazelcastInstance.getCluster().getMembers().toArray(clusterMembers);
    for (Member member : clusterMembers) {
      JCacheResult<K, T> result = ((JCacheResult<K, T>) query.execute());
      ConcurrentSkipListSet<K> memberOwnedCacheEntries = new ConcurrentSkipListSet<>();
      while (result.next()) {
        K key = result.getKey();
        Partition partition = hazelcastInstance.getPartitionService().getPartition(key);
        // Keep only the keys whose Hazelcast partition is owned by this member.
        if (partition.getOwner().getUuid().equals(member.getUuid())) {
          memberOwnedCacheEntries.add(key);
        }
      }
      // The member's keys are sorted, so its partition spans [first, last].
      // Note that first() throws on an empty set; the broad catch below
      // then turns that into a null return.
      PartitionQueryImpl<K, T> partition = new PartitionQueryImpl<>(query,
          memberOwnedCacheEntries.first(), memberOwnedCacheEntries.last(),
          member.getSocketAddress().getHostString());
      partition.setConf(this.getConf());
      partitions.add(partition);
    }
  } catch (java.lang.Exception ex) {
    LOG.error("Exception occurred while partitioning the query based on Hazelcast partitions.", ex);
    return null;
  }
  LOG.info("Query is partitioned into {} partitions.", partitions.size());
  return partitions;
}
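Note the cost model: the full query is executed once per cluster member, and each member's result is filtered down to the keys that member owns, so every PartitionQueryImpl covers the contiguous key range [first, last] of one member. A sketch of how a caller might consume the result; dataStore, the K/T type parameters, and process are placeholders, not part of the snippet above:

List<PartitionQuery<K, T>> partitions = dataStore.getPartitions(query);
if (partitions == null) {
  // getPartitions returns null when partitioning fails; see the error log.
  throw new IOException("Could not partition the query.");
}
for (PartitionQuery<K, T> partition : partitions) {
  // Each partition is itself a Query, restricted to one member's key range.
  Result<K, T> result = partition.execute();
  while (result.next()) {
    process(result.getKey(), result.get());
  }
  result.close();
}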
Use of org.apache.gora.query.impl.PartitionQueryImpl in project gora by apache.
From the class AccumuloStore, method getPartitions:
@Override
public List<PartitionQuery<K, T>> getPartitions(Query<K, T> query) throws IOException {
  try {
    TabletLocator tl;
    if (conn instanceof MockConnector) {
      tl = new MockTabletLocator();
    } else {
      tl = TabletLocator.getLocator(
          new ClientContext(conn.getInstance(), credentials,
              AccumuloConfiguration.getTableConfiguration(conn,
                  Tables.getTableId(conn.getInstance(), mapping.tableName))),
          new Text(Tables.getTableId(conn.getInstance(), mapping.tableName)));
    }
    Map<String, Map<KeyExtent, List<Range>>> binnedRanges = new HashMap<>();
    tl.invalidateCache();
    // Retry until the locator can bin the query's range into tablets.
    while (tl.binRanges(
        new ClientContext(conn.getInstance(), credentials,
            AccumuloConfiguration.getTableConfiguration(conn,
                Tables.getTableId(conn.getInstance(), mapping.tableName))),
        Collections.singletonList(createRange(query)), binnedRanges).size() > 0) {
      // TODO log?
      if (!Tables.exists(conn.getInstance(),
          Tables.getTableId(conn.getInstance(), mapping.tableName))) {
        throw new TableDeletedException(Tables.getTableId(conn.getInstance(), mapping.tableName));
      } else if (Tables.getTableState(conn.getInstance(),
          Tables.getTableId(conn.getInstance(), mapping.tableName)) == TableState.OFFLINE) {
        throw new TableOfflineException(conn.getInstance(),
            Tables.getTableId(conn.getInstance(), mapping.tableName));
      }
      UtilWaitThread.sleep(100);
      tl.invalidateCache();
    }
    List<PartitionQuery<K, T>> ret = new ArrayList<>();
    Text startRow = null;
    Text endRow = null;
    if (query.getStartKey() != null) {
      startRow = new Text(toBytes(query.getStartKey()));
    }
    if (query.getEndKey() != null) {
      endRow = new Text(toBytes(query.getEndKey()));
    }
    // Hadoop expects hostnames, but Accumulo keeps track of IPs, so convert.
    HashMap<String, String> hostNameCache = new HashMap<>();
    for (Entry<String, Map<KeyExtent, List<Range>>> entry : binnedRanges.entrySet()) {
      String ip = entry.getKey().split(":", 2)[0];
      String location = hostNameCache.get(ip);
      if (location == null) {
        InetAddress inetAddress = InetAddress.getByName(ip);
        location = inetAddress.getHostName();
        hostNameCache.put(ip, location);
      }
      Map<KeyExtent, List<Range>> tablets = entry.getValue();
      for (KeyExtent ke : tablets.keySet()) {
        K startKey = null;
        if (startRow == null || !ke.contains(startRow)) {
          // No query start bound inside this tablet: begin just after the
          // previous tablet's end row.
          if (ke.getPrevEndRow() != null) {
            startKey = followingKey(encoder, getKeyClass(), getBytes(ke.getPrevEndRow()));
          }
        } else {
          startKey = fromBytes(getKeyClass(), getBytes(startRow));
        }
        K endKey = null;
        if (endRow == null || !ke.contains(endRow)) {
          // No query end bound inside this tablet: end at the last key that
          // can still fall under the tablet's end row.
          if (ke.getEndRow() != null) {
            endKey = lastPossibleKey(encoder, getKeyClass(), getBytes(ke.getEndRow()));
          }
        } else {
          endKey = fromBytes(getKeyClass(), getBytes(endRow));
        }
        PartitionQueryImpl<K, T> pqi = new PartitionQueryImpl<>(query, startKey, endKey, location);
        pqi.setConf(getConf());
        ret.add(pqi);
      }
    }
    return ret;
  } catch (TableNotFoundException | AccumuloException | AccumuloSecurityException e) {
    throw new IOException(e);
  }
}
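Each tablet thus becomes one partition: its start key is derived from the previous tablet's end row via followingKey, its end key from its own end row via lastPossibleKey, and its location is the owning tablet server's hostname. That location is what makes these partitions useful for locality-aware scheduling; Gora's GoraInputFormat turns each PartitionQuery into one input split roughly along these lines (a simplified sketch; the surrounding createSplits method and conf field are illustrative, not the actual GoraInputFormat code):

// Simplified, GoraInputFormat-style conversion of partitions into splits.
public List<InputSplit> createSplits(DataStore<K, T> store, Query<K, T> query)
    throws IOException {
  List<InputSplit> splits = new ArrayList<>();
  for (PartitionQuery<K, T> partition : store.getPartitions(query)) {
    // GoraInputSplit reports the partition's location to the scheduler, so
    // map tasks can be placed next to the tablet server that owns the range.
    splits.add(new GoraInputSplit(conf, partition));
  }
  return splits;
}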