@@ -129,19 +129,8 @@ func newLBBuilderWithFallbackTimeout(fallbackTimeout time.Duration) balancer.Bui
 	}
 }
 
-// newLBBuilderWithPickFirst creates a grpclb builder with pick-first.
-func newLBBuilderWithPickFirst() balancer.Builder {
-	return &lbBuilder{
-		usePickFirst: true,
-	}
-}
-
 type lbBuilder struct {
 	fallbackTimeout time.Duration
-
-	// TODO: delete this when balancer can handle service config. This should be
-	// updated by service config.
-	usePickFirst bool // Use roundrobin or pickfirst for backends.
 }
 
 func (b *lbBuilder) Name() string {
@@ -167,7 +156,6 @@ func (b *lbBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) bal
 		cc:              newLBCacheClientConn(cc),
 		target:          target,
 		opt:             opt,
-		usePickFirst:    b.usePickFirst,
 		fallbackTimeout: b.fallbackTimeout,
 		doneCh:          make(chan struct{}),
 
@@ -231,11 +219,14 @@ type lbBalancer struct {
 	// serverList contains anything new. Each generated picker will also have
 	// reference to this list to do the first layer pick.
 	fullServerList []*lbpb.Server
+	// Backend addresses. It's kept so the addresses are available when
+	// switching between round_robin and pickfirst.
+	backendAddrs []resolver.Address
 	// All backend addresses, with metadata set to nil. This list contains all
 	// backend addresses in the same order and with the same duplicates as in
 	// serverlist. When generating picker, a SubConn slice with the same order
 	// but with only READY SCs will be generated.
-	backendAddrs []resolver.Address
+	backendAddrsWithoutMetadata []resolver.Address
 	// Roundrobin functionalities.
 	state    connectivity.State
 	subConns map[resolver.Address]balancer.SubConn // Used to new/remove SubConn.
@@ -275,7 +266,7 @@ func (lb *lbBalancer) regeneratePicker(resetDrop bool) {
 			break
 		}
 	} else {
-		for _, a := range lb.backendAddrs {
+		for _, a := range lb.backendAddrsWithoutMetadata {
 			if sc, ok := lb.subConns[a]; ok {
 				if st, ok := lb.scStates[sc]; ok && st == connectivity.Ready {
 					readySCs = append(readySCs, sc)
@@ -339,6 +330,11 @@ func (lb *lbBalancer) aggregateSubConnStates() connectivity.State {
 }
 
 func (lb *lbBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) {
+	panic("not used")
+}
+
+func (lb *lbBalancer) UpdateSubConnState(sc balancer.SubConn, scs balancer.SubConnState) {
+	s := scs.ConnectivityState
 	if grpclog.V(2) {
 		grpclog.Infof("lbBalancer: handle SubConn state change: %p, %v", sc, s)
 	}
@@ -371,7 +367,7 @@ func (lb *lbBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivi
 	if lb.state != connectivity.Ready {
 		if !lb.inFallback && !lb.remoteBalancerConnected {
 			// Enter fallback.
-			lb.refreshSubConns(lb.resolvedBackendAddrs, false)
+			lb.refreshSubConns(lb.resolvedBackendAddrs, false, lb.usePickFirst)
 		}
 	}
 }
@@ -410,17 +406,39 @@ func (lb *lbBalancer) fallbackToBackendsAfter(fallbackTimeout time.Duration) {
 		return
 	}
 	// Enter fallback.
-	lb.refreshSubConns(lb.resolvedBackendAddrs, false)
+	lb.refreshSubConns(lb.resolvedBackendAddrs, true, lb.usePickFirst)
 	lb.mu.Unlock()
 }
 
 // HandleResolvedAddrs sends the updated remoteLB addresses to remoteLB
 // clientConn. The remoteLB clientConn will handle creating/removing remoteLB
 // connections.
 func (lb *lbBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) {
+	panic("not used")
+}
+
+func (lb *lbBalancer) handleServiceConfig(sc string) {
+	lb.mu.Lock()
+	defer lb.mu.Unlock()
+
+	newUsePickFirst := childIsPickFirst(sc)
+	if lb.usePickFirst == newUsePickFirst {
+		return
+	}
 	if grpclog.V(2) {
-		grpclog.Infof("lbBalancer: handleResolvedResult: %+v", addrs)
+		grpclog.Infof("lbBalancer: switching mode, new usePickFirst: %+v", newUsePickFirst)
 	}
+	lb.refreshSubConns(lb.backendAddrs, lb.inFallback, newUsePickFirst)
+	lb.regeneratePicker(true)
+}
+
+func (lb *lbBalancer) UpdateResolverState(rs resolver.State) {
+	if grpclog.V(2) {
+		grpclog.Infof("lbBalancer: UpdateResolverState: %+v", rs)
+	}
+	lb.handleServiceConfig(rs.ServiceConfig)
+
+	addrs := rs.Addresses
 	if len(addrs) <= 0 {
 		return
 	}
@@ -457,7 +475,7 @@ func (lb *lbBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) {
 		// This means we received a new list of resolved backends, and we are
 		// still in fallback mode. Need to update the list of backends we are
 		// using to the new list of backends.
-		lb.refreshSubConns(lb.resolvedBackendAddrs, false)
+		lb.refreshSubConns(lb.resolvedBackendAddrs, true, lb.usePickFirst)
 	}
 	lb.mu.Unlock()
 }