[chore]: Bump google.golang.org/grpc from 1.58.2 to 1.58.3 (#2301)
Bumps [google.golang.org/grpc](https://github.com/grpc/grpc-go) from 1.58.2 to 1.58.3.

- [Release notes](https://github.com/grpc/grpc-go/releases)
- [Commits](https://github.com/grpc/grpc-go/compare/v1.58.2...v1.58.3)

---
updated-dependencies:
- dependency-name: google.golang.org/grpc
  dependency-type: indirect
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
parent ece2e795e0
commit 03732aca3b

6 changed files with 58 additions and 34 deletions
go.mod (2 changes)

@@ -168,7 +168,7 @@ require (
 	google.golang.org/appengine v1.6.8 // indirect
 	google.golang.org/genproto/googleapis/api v0.0.0-20230711160842-782d3b101e98 // indirect
 	google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98 // indirect
-	google.golang.org/grpc v1.58.2 // indirect
+	google.golang.org/grpc v1.58.3 // indirect
 	google.golang.org/protobuf v1.31.0 // indirect
 	gopkg.in/ini.v1 v1.67.0 // indirect
 	gopkg.in/yaml.v2 v2.4.0 // indirect
go.sum (4 changes)

@@ -941,8 +941,8 @@ google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM
 google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
 google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8=
 google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
-google.golang.org/grpc v1.58.2 h1:SXUpjxeVF3FKrTYQI4f4KvbGD5u2xccdYdurwowix5I=
-google.golang.org/grpc v1.58.2/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0=
+google.golang.org/grpc v1.58.3 h1:BjnpXut1btbtgN/6sp+brB2Kbm2LjNXnidYujAVbSoQ=
+google.golang.org/grpc v1.58.3/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0=
 google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
 google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
 google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
vendor/google.golang.org/grpc/internal/transport/http2_server.go (11 changes; generated, vendored)

@@ -171,15 +171,10 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport,
 		ID:  http2.SettingMaxFrameSize,
 		Val: http2MaxFrameLen,
 	}}
-	// TODO(zhaoq): Have a better way to signal "no limit" because 0 is
-	// permitted in the HTTP2 spec.
-	maxStreams := config.MaxStreams
-	if maxStreams == 0 {
-		maxStreams = math.MaxUint32
-	} else {
+	if config.MaxStreams != math.MaxUint32 {
 		isettings = append(isettings, http2.Setting{
 			ID:  http2.SettingMaxConcurrentStreams,
-			Val: maxStreams,
+			Val: config.MaxStreams,
 		})
 	}
 	dynamicWindow := true
@@ -258,7 +253,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport,
 		framer:            framer,
 		readerDone:        make(chan struct{}),
 		writerDone:        make(chan struct{}),
-		maxStreams:        maxStreams,
+		maxStreams:        config.MaxStreams,
 		inTapHandle:       config.InTapHandle,
 		fc:                &trInFlow{limit: uint32(icwz)},
 		state:             reachable,
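Reviewer note: the transport hunk above relies on config.MaxStreams now always carrying a concrete value (the MaxConcurrentStreams server option, changed below, normalizes 0 to math.MaxUint32 before it reaches the transport), so SETTINGS_MAX_CONCURRENT_STREAMS is advertised only when a real cap was configured. A minimal, self-contained sketch of that decision using golang.org/x/net/http2; buildSettings and the hard-coded frame size are illustrative, not grpc-go API:

```go
package main

import (
	"fmt"
	"math"

	"golang.org/x/net/http2"
)

// buildSettings is a hypothetical helper mirroring the v1.58.3 logic:
// math.MaxUint32 now means "no limit", so the setting is simply omitted.
func buildSettings(maxStreams uint32) []http2.Setting {
	isettings := []http2.Setting{{
		ID:  http2.SettingMaxFrameSize,
		Val: 16384, // value of http2MaxFrameLen in grpc-go
	}}
	if maxStreams != math.MaxUint32 {
		isettings = append(isettings, http2.Setting{
			ID:  http2.SettingMaxConcurrentStreams,
			Val: maxStreams,
		})
	}
	return isettings
}

func main() {
	fmt.Println(buildSettings(100))            // advertises both settings
	fmt.Println(buildSettings(math.MaxUint32)) // advertises only the frame size
}
```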
vendor/google.golang.org/grpc/server.go (71 changes; generated, vendored)
@@ -115,12 +115,6 @@ type serviceInfo struct {
 	mdata    any
 }
 
-type serverWorkerData struct {
-	st     transport.ServerTransport
-	wg     *sync.WaitGroup
-	stream *transport.Stream
-}
-
 // Server is a gRPC server to serve RPC requests.
 type Server struct {
 	opts serverOptions
@@ -145,7 +139,7 @@ type Server struct {
 	channelzID *channelz.Identifier
 	czData     *channelzData
 
-	serverWorkerChannel chan *serverWorkerData
+	serverWorkerChannel chan func()
 }
 
 type serverOptions struct {
@@ -179,6 +173,7 @@ type serverOptions struct {
 }
 
 var defaultServerOptions = serverOptions{
+	maxConcurrentStreams:  math.MaxUint32,
 	maxReceiveMessageSize: defaultServerMaxReceiveMessageSize,
 	maxSendMessageSize:    defaultServerMaxSendMessageSize,
 	connectionTimeout:     120 * time.Second,
@@ -404,6 +399,9 @@ func MaxSendMsgSize(m int) ServerOption {
 // MaxConcurrentStreams returns a ServerOption that will apply a limit on the number
 // of concurrent streams to each ServerTransport.
 func MaxConcurrentStreams(n uint32) ServerOption {
+	if n == 0 {
+		n = math.MaxUint32
+	}
 	return newFuncServerOption(func(o *serverOptions) {
 		o.maxConcurrentStreams = n
 	})
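For callers the option keeps its public signature; only the zero-value handling moved here, so 0 is resolved to "no limit" at option time instead of inside the transport. A minimal usage sketch (the port and the limit of 100 are arbitrary; service registration is elided):

```go
package main

import (
	"log"
	"net"

	"google.golang.org/grpc"
)

func main() {
	lis, err := net.Listen("tcp", ":50051") // arbitrary port for illustration
	if err != nil {
		log.Fatalf("listen: %v", err)
	}
	// Cap each connection (transport) at 100 concurrently running handlers;
	// passing 0 is now normalized to math.MaxUint32 ("no limit").
	s := grpc.NewServer(grpc.MaxConcurrentStreams(100))
	// Register services here before serving.
	if err := s.Serve(lis); err != nil {
		log.Fatalf("serve: %v", err)
	}
}
```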
@@ -605,24 +603,19 @@ const serverWorkerResetThreshold = 1 << 16
 // [1] https://github.com/golang/go/issues/18138
 func (s *Server) serverWorker() {
 	for completed := 0; completed < serverWorkerResetThreshold; completed++ {
-		data, ok := <-s.serverWorkerChannel
+		f, ok := <-s.serverWorkerChannel
 		if !ok {
 			return
 		}
-		s.handleSingleStream(data)
+		f()
 	}
 	go s.serverWorker()
 }
 
-func (s *Server) handleSingleStream(data *serverWorkerData) {
-	defer data.wg.Done()
-	s.handleStream(data.st, data.stream, s.traceInfo(data.st, data.stream))
-}
-
 // initServerWorkers creates worker goroutines and a channel to process incoming
 // connections to reduce the time spent overall on runtime.morestack.
 func (s *Server) initServerWorkers() {
-	s.serverWorkerChannel = make(chan *serverWorkerData)
+	s.serverWorkerChannel = make(chan func())
 	for i := uint32(0); i < s.opts.numServerWorkers; i++ {
 		go s.serverWorker()
 	}
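Reviewer note: the worker loop keeps its shape and only the element type changes, from a dedicated serverWorkerData struct to a bare func(). A toy sketch of the same self-resetting worker pattern, with a deliberately tiny threshold so the respawn is visible (the real serverWorkerResetThreshold is 1 << 16; respawning discards a goroutine whose stack grew on a large request, per golang/go#18138):

```go
package main

import (
	"fmt"
	"sync"
)

const resetThreshold = 4 // tiny on purpose; grpc-go uses 1 << 16

// worker drains tasks from ch and, after resetThreshold tasks, replaces
// itself with a fresh goroutine so an oversized stack is eventually dropped.
func worker(ch <-chan func()) {
	for completed := 0; completed < resetThreshold; completed++ {
		f, ok := <-ch
		if !ok {
			return
		}
		f()
	}
	go worker(ch) // reset: fresh goroutine, fresh stack
}

func main() {
	ch := make(chan func())
	go worker(ch)

	var wg sync.WaitGroup
	for i := 0; i < 10; i++ {
		wg.Add(1)
		i := i
		ch <- func() {
			defer wg.Done()
			fmt.Println("task", i)
		}
	}
	wg.Wait()
	close(ch)
}
```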
@@ -982,21 +975,26 @@ func (s *Server) serveStreams(st transport.ServerTransport) {
 	defer st.Close(errors.New("finished serving streams for the server transport"))
 	var wg sync.WaitGroup
 
+	streamQuota := newHandlerQuota(s.opts.maxConcurrentStreams)
 	st.HandleStreams(func(stream *transport.Stream) {
 		wg.Add(1)
+
+		streamQuota.acquire()
+		f := func() {
+			defer streamQuota.release()
+			defer wg.Done()
+			s.handleStream(st, stream, s.traceInfo(st, stream))
+		}
+
 		if s.opts.numServerWorkers > 0 {
-			data := &serverWorkerData{st: st, wg: &wg, stream: stream}
 			select {
-			case s.serverWorkerChannel <- data:
+			case s.serverWorkerChannel <- f:
 				return
 			default:
 				// If all stream workers are busy, fallback to the default code path.
 			}
 		}
-		go func() {
-			defer wg.Done()
-			s.handleStream(st, stream, s.traceInfo(st, stream))
-		}()
+		go f()
 	}, func(ctx context.Context, method string) context.Context {
 		if !EnableTracing {
 			return ctx
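The serveStreams change funnels every stream through one closure f, so the worker handoff and the fallback goroutine run identical code, and the quota is acquired before either path starts. A stripped-down sketch of that handoff-or-spawn shape; dispatch is a hypothetical stand-in, not a grpc-go function:

```go
package main

import (
	"fmt"
	"sync"
)

// dispatch tries to hand f to an idle worker; if none is ready to receive
// immediately, it falls back to a dedicated goroutine, as serveStreams does.
func dispatch(workerCh chan func(), f func()) {
	select {
	case workerCh <- f:
		// An idle worker picked the task up.
	default:
		// All workers busy: fall back to spawning a goroutine.
		go f()
	}
}

func main() {
	workerCh := make(chan func())
	// One worker for illustration; grpc-go starts numServerWorkers of these.
	go func() {
		for f := range workerCh {
			f()
		}
	}()

	var wg sync.WaitGroup
	for i := 0; i < 5; i++ {
		wg.Add(1)
		i := i
		dispatch(workerCh, func() {
			defer wg.Done()
			fmt.Println("handled stream", i)
		})
	}
	wg.Wait()
}
```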
@@ -2091,3 +2089,34 @@ func validateSendCompressor(name, clientCompressors string) error {
 	}
 	return fmt.Errorf("client does not support compressor %q", name)
 }
+
+// atomicSemaphore implements a blocking, counting semaphore. acquire should be
+// called synchronously; release may be called asynchronously.
+type atomicSemaphore struct {
+	n    atomic.Int64
+	wait chan struct{}
+}
+
+func (q *atomicSemaphore) acquire() {
+	if q.n.Add(-1) < 0 {
+		// We ran out of quota.  Block until a release happens.
+		<-q.wait
+	}
+}
+
+func (q *atomicSemaphore) release() {
+	// N.B. the "<= 0" check below should allow for this to work with multiple
+	// concurrent calls to acquire, but also note that with synchronous calls to
+	// acquire, as our system does, n will never be less than -1.  There are
+	// fairness issues (queuing) to consider if this was to be generalized.
+	if q.n.Add(1) <= 0 {
+		// An acquire was waiting on us.  Unblock it.
+		q.wait <- struct{}{}
+	}
+}
+
+func newHandlerQuota(n uint32) *atomicSemaphore {
+	a := &atomicSemaphore{wait: make(chan struct{}, 1)}
+	a.n.Store(int64(n))
+	return a
+}
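The new atomicSemaphore is what actually enforces MaxConcurrentStreams per transport: serveStreams acquires before dispatching a handler and the handler releases when it finishes. A self-contained demo of the same counter-plus-wakeup-channel pattern, assuming (as grpc-go does) that acquire is only called from a single goroutine; names here are local to the demo:

```go
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
	"time"
)

// semaphore mirrors the vendored atomicSemaphore: a counter plus a
// 1-buffered wake-up channel. acquire must be called from one goroutine
// at a time; release may happen from anywhere.
type semaphore struct {
	n    atomic.Int64
	wait chan struct{}
}

func newSemaphore(n int64) *semaphore {
	s := &semaphore{wait: make(chan struct{}, 1)}
	s.n.Store(n)
	return s
}

func (s *semaphore) acquire() {
	if s.n.Add(-1) < 0 {
		<-s.wait // out of quota: block until a release
	}
}

func (s *semaphore) release() {
	if s.n.Add(1) <= 0 {
		s.wait <- struct{}{} // an acquire is parked: wake it
	}
}

func main() {
	sem := newSemaphore(2) // at most 2 handlers in flight
	var wg sync.WaitGroup
	for i := 0; i < 5; i++ {
		sem.acquire() // synchronous, like serveStreams
		wg.Add(1)
		i := i
		go func() {
			defer wg.Done()
			defer sem.release()
			fmt.Println("start", i)
			time.Sleep(50 * time.Millisecond)
		}()
	}
	wg.Wait()
}
```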
vendor/google.golang.org/grpc/version.go (2 changes; generated, vendored)

@@ -19,4 +19,4 @@
 package grpc
 
 // Version is the current grpc version.
-const Version = "1.58.2"
+const Version = "1.58.3"
vendor/modules.txt (2 changes; vendored)

@@ -917,7 +917,7 @@ google.golang.org/genproto/googleapis/api/httpbody
 ## explicit; go 1.19
 google.golang.org/genproto/googleapis/rpc/errdetails
 google.golang.org/genproto/googleapis/rpc/status
-# google.golang.org/grpc v1.58.2
+# google.golang.org/grpc v1.58.3
 ## explicit; go 1.19
 google.golang.org/grpc
 google.golang.org/grpc/attributes