Use goreleaser to build and release (#244)

Instead of using gox to build on one side and an action to release on the
other, we can merge the two steps with goreleaser, which can build for
extra targets (arm, and mips if needed in the future) and also takes care
of creating checksums, a source archive, a changelog, and a release with
all the artifacts.

All binaries should respect the old naming convention, so any scripts
out there should still work.
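
As a rough illustration, a goreleaser config covering the points above could
look like the sketch below. This is a minimal example, not the config from
this commit; the project name, target list, and name template are assumptions.

# Minimal .goreleaser.yaml sketch; values are illustrative assumptions.
project_name: myproject   # hypothetical name, not this repo's
builds:
  - env:
      - CGO_ENABLED=0     # static binaries, no per-target C toolchain
    goos:
      - linux
      - darwin
    goarch:
      - amd64
      - arm64             # extra targets (arm, mips, ...) are one line each
archives:
  # "binary" uploads bare executables, so old gox-style names
  # (myproject-linux-amd64 and friends) can be preserved.
  - format: binary
    name_template: "{{ .ProjectName }}-{{ .Os }}-{{ .Arch }}"
checksum:
  name_template: checksums.txt
source:
  enabled: true           # also attach a source archive to the release
changelog:
  sort: asc

With format: binary, goreleaser uploads the executables directly under
name_template instead of wrapping them in tarballs, which is what lets the
old naming convention keep working for existing scripts.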

Signed-off-by: Itxaka <igarcia@suse.com>
Author:    Itxaka
Date:      2021-08-11 08:30:55 +02:00
Committed: GitHub
Parent:    0a4fe57f33
Commit:    4adc0dc9b9
1133 changed files with 81678 additions and 85598 deletions

vendor/google.golang.org/grpc/balancer_conn_wrappers.go

@@ -43,7 +43,8 @@ type ccBalancerWrapper struct {
     cc         *ClientConn
     balancerMu sync.Mutex // synchronizes calls to the balancer
     balancer   balancer.Balancer
-    scBuffer   *buffer.Unbounded
+    updateCh   *buffer.Unbounded
+    closed     *grpcsync.Event
     done       *grpcsync.Event

     mu         sync.Mutex
@@ -53,7 +54,8 @@ type ccBalancerWrapper struct {
 func newCCBalancerWrapper(cc *ClientConn, b balancer.Builder, bopts balancer.BuildOptions) *ccBalancerWrapper {
     ccb := &ccBalancerWrapper{
         cc:       cc,
-        scBuffer: buffer.NewUnbounded(),
+        updateCh: buffer.NewUnbounded(),
+        closed:   grpcsync.NewEvent(),
         done:     grpcsync.NewEvent(),
         subConns: make(map[*acBalancerWrapper]struct{}),
     }
@@ -67,39 +69,53 @@ func newCCBalancerWrapper(cc *ClientConn, b balancer.Builder, bopts balancer.Bui
 func (ccb *ccBalancerWrapper) watcher() {
     for {
         select {
-        case t := <-ccb.scBuffer.Get():
-            ccb.scBuffer.Load()
-            if ccb.done.HasFired() {
+        case t := <-ccb.updateCh.Get():
+            ccb.updateCh.Load()
+            if ccb.closed.HasFired() {
                 break
             }
-            ccb.balancerMu.Lock()
-            su := t.(*scStateUpdate)
-            if ub, ok := ccb.balancer.(balancer.V2Balancer); ok {
-                ub.UpdateSubConnState(su.sc, balancer.SubConnState{ConnectivityState: su.state, ConnectionError: su.err})
-            } else {
-                ccb.balancer.HandleSubConnStateChange(su.sc, su.state)
+            switch u := t.(type) {
+            case *scStateUpdate:
+                ccb.balancerMu.Lock()
+                ccb.balancer.UpdateSubConnState(u.sc, balancer.SubConnState{ConnectivityState: u.state, ConnectionError: u.err})
+                ccb.balancerMu.Unlock()
+            case *acBalancerWrapper:
+                ccb.mu.Lock()
+                if ccb.subConns != nil {
+                    delete(ccb.subConns, u)
+                    ccb.cc.removeAddrConn(u.getAddrConn(), errConnDrain)
+                }
+                ccb.mu.Unlock()
+            default:
+                logger.Errorf("ccBalancerWrapper.watcher: unknown update %+v, type %T", t, t)
             }
-            ccb.balancerMu.Unlock()
-        case <-ccb.done.Done():
+        case <-ccb.closed.Done():
         }

-        if ccb.done.HasFired() {
+        if ccb.closed.HasFired() {
+            ccb.balancerMu.Lock()
             ccb.balancer.Close()
+            ccb.balancerMu.Unlock()
             ccb.mu.Lock()
             scs := ccb.subConns
             ccb.subConns = nil
             ccb.mu.Unlock()
+            ccb.UpdateState(balancer.State{ConnectivityState: connectivity.Connecting, Picker: nil})
+            ccb.done.Fire()
+            // Fire done before removing the addr conns. We can safely unblock
+            // ccb.close and allow the removeAddrConns to happen
+            // asynchronously.
             for acbw := range scs {
                 ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain)
             }
-            ccb.UpdateState(balancer.State{ConnectivityState: connectivity.Connecting, Picker: nil})
             return
         }
     }
 }

 func (ccb *ccBalancerWrapper) close() {
-    ccb.done.Fire()
+    ccb.closed.Fire()
+    <-ccb.done.Done()
 }

 func (ccb *ccBalancerWrapper) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State, err error) {
@@ -113,7 +129,7 @@ func (ccb *ccBalancerWrapper) handleSubConnStateChange(sc balancer.SubConn, s co
     if sc == nil {
         return
     }
-    ccb.scBuffer.Put(&scStateUpdate{
+    ccb.updateCh.Put(&scStateUpdate{
         sc:    sc,
         state: s,
         err:   err,
@@ -123,19 +139,13 @@ func (ccb *ccBalancerWrapper) handleSubConnStateChange(sc balancer.SubConn, s co
 func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error {
     ccb.balancerMu.Lock()
     defer ccb.balancerMu.Unlock()
-    if ub, ok := ccb.balancer.(balancer.V2Balancer); ok {
-        return ub.UpdateClientConnState(*ccs)
-    }
-    ccb.balancer.HandleResolvedAddrs(ccs.ResolverState.Addresses, nil)
-    return nil
+    return ccb.balancer.UpdateClientConnState(*ccs)
 }

 func (ccb *ccBalancerWrapper) resolverError(err error) {
-    if ub, ok := ccb.balancer.(balancer.V2Balancer); ok {
-        ccb.balancerMu.Lock()
-        ub.ResolverError(err)
-        ccb.balancerMu.Unlock()
-    }
+    ccb.balancerMu.Lock()
+    ccb.balancer.ResolverError(err)
+    ccb.balancerMu.Unlock()
 }

 func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) {
@@ -160,32 +170,18 @@ func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer
 }

 func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) {
+    // The RemoveSubConn() is handled in the run() goroutine, to avoid deadlock
+    // during switchBalancer() if the old balancer calls RemoveSubConn() in its
+    // Close().
+    ccb.updateCh.Put(sc)
+}
+
+func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) {
     acbw, ok := sc.(*acBalancerWrapper)
     if !ok {
         return
     }
-    ccb.mu.Lock()
-    defer ccb.mu.Unlock()
-    if ccb.subConns == nil {
-        return
-    }
-    delete(ccb.subConns, acbw)
-    ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain)
-}
-
-func (ccb *ccBalancerWrapper) UpdateBalancerState(s connectivity.State, p balancer.Picker) {
-    ccb.mu.Lock()
-    defer ccb.mu.Unlock()
-    if ccb.subConns == nil {
-        return
-    }
-    // Update picker before updating state. Even though the ordering here does
-    // not matter, it can lead to multiple calls of Pick in the common start-up
-    // case where we wait for ready and then perform an RPC. If the picker is
-    // updated later, we could call the "connecting" picker when the state is
-    // updated, and then call the "ready" picker after the picker gets updated.
-    ccb.cc.blockingpicker.updatePicker(p)
-    ccb.cc.csMgr.updateState(s)
+    acbw.UpdateAddresses(addrs)
 }

 func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) {
@@ -199,7 +195,7 @@ func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) {
     // case where we wait for ready and then perform an RPC. If the picker is
     // updated later, we could call the "connecting" picker when the state is
     // updated, and then call the "ready" picker after the picker gets updated.
-    ccb.cc.blockingpicker.updatePickerV2(s.Picker)
+    ccb.cc.blockingpicker.updatePicker(s.Picker)
     ccb.cc.csMgr.updateState(s.ConnectivityState)
 }
@@ -222,7 +218,7 @@ func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) {
     acbw.mu.Lock()
     defer acbw.mu.Unlock()
     if len(addrs) <= 0 {
-        acbw.ac.tearDown(errConnDrain)
+        acbw.ac.cc.removeAddrConn(acbw.ac, errConnDrain)
         return
     }
     if !acbw.ac.tryUpdateAddrs(addrs) {
@@ -237,7 +233,7 @@ func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) {
         acbw.ac.acbw = nil
         acbw.ac.mu.Unlock()
         acState := acbw.ac.getState()
-        acbw.ac.tearDown(errConnDrain)
+        acbw.ac.cc.removeAddrConn(acbw.ac, errConnDrain)
         if acState == connectivity.Shutdown {
             return
         }
@@ -245,7 +241,7 @@ func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) {
     ac, err := cc.newAddrConn(addrs, opts)
     if err != nil {
-        channelz.Warningf(acbw.ac.channelzID, "acBalancerWrapper: UpdateAddresses: failed to newAddrConn: %v", err)
+        channelz.Warningf(logger, acbw.ac.channelzID, "acBalancerWrapper: UpdateAddresses: failed to newAddrConn: %v", err)
         return
     }
     acbw.ac = ac
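
Beyond the build changes, most of this commit is a vendored grpc-go bump, and
the watcher hunks above show the pattern gRPC moved to: every kind of update is
funneled through one unbounded channel into a single watcher goroutine, with a
closed/done event pair for shutdown, so a balancer calling back into the wrapper
(for example RemoveSubConn from inside Close) can never deadlock. Below is a
self-contained sketch of that pattern with simplified stand-in types; it uses a
buffered channel instead of gRPC's buffer.Unbounded and plain structs instead of
the real update types.

// Sketch of the update-channel pattern from the vendored gRPC change above,
// with simplified stand-in types.
package main

import "fmt"

type stateUpdate struct{ state string } // stands in for *scStateUpdate
type removeSubConn struct{ id int }     // stands in for *acBalancerWrapper

type wrapper struct {
    updateCh chan interface{} // gRPC uses buffer.Unbounded here
    closed   chan struct{}    // fired by close(); tells the watcher to stop
    done     chan struct{}    // fired by the watcher once shutdown completes
}

func newWrapper() *wrapper {
    w := &wrapper{
        updateCh: make(chan interface{}, 16),
        closed:   make(chan struct{}),
        done:     make(chan struct{}),
    }
    go w.watcher()
    return w
}

func (w *wrapper) watcher() {
    for {
        select {
        case t := <-w.updateCh:
            // Dispatch on the concrete update type, like the type switch above.
            switch u := t.(type) {
            case *stateUpdate:
                fmt.Println("subconn state:", u.state)
            case *removeSubConn:
                fmt.Println("removing subconn", u.id)
            default:
                fmt.Printf("unknown update %+v, type %T\n", t, t)
            }
        case <-w.closed:
            // Signal done before returning so close() unblocks and any
            // remaining teardown can happen asynchronously (mirrors done.Fire()).
            close(w.done)
            return
        }
    }
}

// close mirrors ccBalancerWrapper.close: signal shutdown, then wait until the
// watcher acknowledges it. Updates still queued may be dropped at shutdown,
// exactly as in the gRPC code above.
func (w *wrapper) close() {
    close(w.closed)
    <-w.done
}

func main() {
    w := newWrapper()
    w.updateCh <- &stateUpdate{state: "READY"}
    w.updateCh <- &removeSubConn{id: 1}
    w.close()
}

Because only the watcher goroutine touches shared state in response to updates,
producers never need to take the wrapper's locks, which is what removes the
deadlock the RemoveSubConn comment in the diff describes.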