/**
 * Distribute triggers on nodes using a consistent hashing strategy.
 * This strategy allows to scale and minimize changes and re-distribution when cluster changes.
 *
 * @param entries a list of entries to distribute
 * @param buckets a table of nodes; must be non-null and non-empty
 * @return a map of entries distributed across nodes
 * @throws IllegalArgumentException if {@code entries} is null, or {@code buckets} is null or empty
 */
public Map<PartitionEntry, Integer> calculatePartition(List<PartitionEntry> entries,
                                                       Map<Integer, Integer> buckets) {
    if (entries == null) {
        throw new IllegalArgumentException("entries must be not null");
    }
    if (buckets == null || buckets.isEmpty()) {
        // Fixed copy-paste bug: message previously said "entries" for the buckets check.
        throw new IllegalArgumentException("buckets must be not null");
    }
    HashFunction md5 = Hashing.md5();
    int numBuckets = buckets.size();
    Map<PartitionEntry, Integer> newPartition = new HashMap<>();
    for (PartitionEntry entry : entries) {
        // consistentHash maps the entry's hash onto [0, numBuckets) such that growing/shrinking
        // the bucket count only remaps ~1/numBuckets of the entries (minimal re-distribution).
        newPartition.put(entry, buckets.get(Hashing.consistentHash(md5.hashInt(entry.hashCode()), numBuckets)));
    }
    return newPartition;
}
[source,java]