done chan *Done
}
+// So, the current communication scheme is following
+//
+// ........... Done .............
+// v |
+// Generate --> Balance --> [WorkRunner .. x .. x]
+// ^ |
+// ............ (result) ...............
+//
+// which means that if Generate() ever blocks in the request-sending
+// select case branch and is unable to obtain a result from a WorkRunner,
+// a deadlock will happen. Likewise, if Balance() ever blocks sending a
+// request to a worker and is unable to receive a 'Done' message from any
+// other worker, a deadlock will happen.
+//
func (b *Balancer) Balance(work <-chan Requester) {
fmt.Println("balance(): Starting workers..")
for _, w := range b.pool {
func (b *Balancer) dispatch(req Requester) {
w := heap.Pop(&b.pool).(*worker)
w.pending++
+ // Place the worker back on the heap as soon as possible (or even
+ // use a mutex for this), so that if Send() below blocks, completed()
+ // can still run for this worker (otherwise completed() won't find the
+ // worker in the heap and will panic).
heap.Push(&b.pool, w)
fmt.Printf("dispatch() '%v': sending to '%v'\n", req, w)
w.runner.Send(req)