From dc668992c0aa8227712e5e557fb466aeec4f527d Mon Sep 17 00:00:00 2001
From: sgf
Date: Fri, 25 Nov 2022 16:12:10 +0300
Subject: [PATCH] chg(balancer): Comments.

---
 .../balancer_v2/balancer/balancer.go | 18 ++++++++++++++++++
 1 file changed, 18 insertions(+)

diff --git a/concurrency-is-not-parallelism/balancer_v2/balancer/balancer.go b/concurrency-is-not-parallelism/balancer_v2/balancer/balancer.go
index 7e70326..86ff8f9 100644
--- a/concurrency-is-not-parallelism/balancer_v2/balancer/balancer.go
+++ b/concurrency-is-not-parallelism/balancer_v2/balancer/balancer.go
@@ -93,6 +93,20 @@ type Balancer struct {
 	done chan *Done
 }
 
+// The current communication scheme is the following:
+//
+//                 ..... Done .....
+//                 v              |
+// Generate --> Balance --> [WorkRunner .. x .. x]
+// ^                              |
+// ........... (result) ..........
+//
+// This means that if Generate() ever blocks in its request-sending select
+// case and cannot receive a result from a WorkRunner, a deadlock occurs.
+// Likewise, if Balance() ever blocks while sending a request to a worker
+// and cannot receive a 'Done' message from any other worker, a deadlock
+// occurs.
+//
 func (b *Balancer) Balance(work <-chan Requester) {
 	fmt.Println("balance(): Starting workers..")
 	for _, w := range b.pool {
@@ -126,6 +140,10 @@ out:
 func (b *Balancer) dispatch(req Requester) {
 	w := heap.Pop(&b.pool).(*worker)
 	w.pending++
+	// Put the worker back on the heap as soon as possible (or protect it
+	// with a mutex), so that if Send() below blocks, completed() can still
+	// run for this worker; otherwise completed() would not find the worker
+	// in the heap and would panic.
 	heap.Push(&b.pool, w)
 	fmt.Printf("dispatch() '%v': sending to '%v'\n", req, w)
 	w.runner.Send(req)
-- 
2.20.1
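
For context, below is a minimal, self-contained sketch of the dispatch()/completed() interplay that the patch comments describe. Only dispatch() mirrors the code visible in the diff; the Requester, WorkRunner, worker, Pool and completed() definitions are assumptions based on the identifiers in the diff, not the repository's actual code.

package balancer

import "container/heap"

// Requester is assumed to be the unit of work handed to a worker.
type Requester interface{}

// WorkRunner is assumed to expose a blocking Send; results and Done messages
// travel on separate channels that are not shown here.
type WorkRunner interface {
	Send(req Requester)
}

type worker struct {
	runner  WorkRunner
	pending int // requests in flight; used as the heap priority
	index   int // position in the heap, maintained by Pool
}

// Pool is a min-heap of workers ordered by pending load (assumed layout).
type Pool []*worker

func (p Pool) Len() int           { return len(p) }
func (p Pool) Less(i, j int) bool { return p[i].pending < p[j].pending }
func (p Pool) Swap(i, j int) {
	p[i], p[j] = p[j], p[i]
	p[i].index, p[j].index = i, j
}
func (p *Pool) Push(x interface{}) {
	w := x.(*worker)
	w.index = len(*p)
	*p = append(*p, w)
}
func (p *Pool) Pop() interface{} {
	old := *p
	n := len(old)
	w := old[n-1]
	*p = old[:n-1]
	return w
}

type Balancer struct {
	pool Pool
}

// dispatch shows the ordering the patch comment argues for: the worker is
// pushed back onto the heap *before* the potentially blocking Send, so that
// completed() can still locate it while dispatch is blocked.
func (b *Balancer) dispatch(req Requester) {
	w := heap.Pop(&b.pool).(*worker)
	w.pending++
	heap.Push(&b.pool, w) // re-insert first; Send below may block
	w.runner.Send(req)
}

// completed is an assumed counterpart that relies on the worker being present
// in the heap; heap.Fix restores the ordering after pending changes.
func (b *Balancer) completed(w *worker) {
	w.pending--
	heap.Fix(&b.pool, w.index)
}

If dispatch() instead pushed the worker back only after Send() returned, a blocked Send would leave the worker outside the heap, which is exactly the failure mode the added comment warns about.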