[chore]: Bump github.com/jackc/pgx/v5 from 5.4.3 to 5.5.0 (#2336)

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Authored by dependabot[bot] on 2023-11-06 14:44:53 +00:00, committed by GitHub
parent 74b600655d
commit 9b76afc851
GPG key ID: 4AEE18F83AFDEB23 (no known key found for this signature in database)
47 changed files with 2905 additions and 162 deletions

go.mod (4 lines changed)

@ -34,7 +34,7 @@ require (
github.com/gorilla/feeds v1.1.1
github.com/gorilla/websocket v1.5.0
github.com/h2non/filetype v1.1.3
github.com/jackc/pgx/v5 v5.4.3
github.com/jackc/pgx/v5 v5.5.0
github.com/microcosm-cc/bluemonday v1.0.26
github.com/miekg/dns v1.1.56
github.com/minio/minio-go/v7 v7.0.63
@ -122,6 +122,7 @@ require (
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/jackc/pgpassfile v1.0.0 // indirect
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect
github.com/jackc/puddle/v2 v2.2.1 // indirect
github.com/jinzhu/inflection v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect
@ -160,6 +161,7 @@ require (
go.opentelemetry.io/proto/otlp v1.0.0 // indirect
golang.org/x/arch v0.3.0 // indirect
golang.org/x/mod v0.12.0 // indirect
golang.org/x/sync v0.3.0 // indirect
golang.org/x/sys v0.13.0 // indirect
golang.org/x/tools v0.13.0 // indirect
google.golang.org/appengine v1.6.8 // indirect

go.sum (6 lines changed)

@ -340,8 +340,10 @@ github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsI
github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a h1:bbPeKD0xmW/Y25WS6cokEszi5g+S0QxI/d45PkRi7Nk=
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
github.com/jackc/pgx/v5 v5.4.3 h1:cxFyXhxlvAifxnkKKdlxv8XqUf59tDlYjnV5YYfsJJY=
github.com/jackc/pgx/v5 v5.4.3/go.mod h1:Ig06C2Vu0t5qXC60W8sqIthScaEnFvojjj9dSljmHRA=
github.com/jackc/pgx/v5 v5.5.0 h1:NxstgwndsTRy7eq9/kqYc/BZh5w2hHJV86wjvO+1xPw=
github.com/jackc/pgx/v5 v5.5.0/go.mod h1:Ig06C2Vu0t5qXC60W8sqIthScaEnFvojjj9dSljmHRA=
github.com/jackc/puddle/v2 v2.2.1 h1:RhxXJtFG022u4ibrCSMSiu5aOq1i77R3OHKNJj77OAk=
github.com/jackc/puddle/v2 v2.2.1/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E=
github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc=


@ -1,3 +1,17 @@
# 5.5.0 (November 4, 2023)
* Add CollectExactlyOneRow. (Julien GOTTELAND)
* Add OpenDBFromPool to create *database/sql.DB from *pgxpool.Pool. (Lev Zakharov)
* Prepare can automatically choose statement name based on sql. This makes it easier to explicitly manage prepared statements.
* Statement cache now uses deterministic, stable statement names.
* database/sql prepared statement names are deterministically generated.
* Fix: SendBatch wasn't respecting context cancellation.
* Fix: Timeout error from pipeline is now normalized.
* Fix: database/sql encoding json.RawMessage to []byte.
* CancelRequest: Wait for the cancel request to be acknowledged by the server. This should improve PgBouncer compatibility. (Anton Levakin)
* stdlib: Use Ping instead of CheckConn in ResetSession
* Add json.Marshaler and json.Unmarshaler for Float4, Float8 (Kirill Mironov)
# 5.4.3 (August 5, 2023)
* Fix: QCharArrayOID was defined with the wrong OID (Christoph Engelbert)
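
The two additions called out at the top of this changelog can be exercised with very little code. A minimal sketch, not part of this diff, assuming an already constructed *pgxpool.Pool and a hypothetical widgets table:

package pgxexamples

import (
	"context"

	"github.com/jackc/pgx/v5"
	"github.com/jackc/pgx/v5/pgxpool"
	"github.com/jackc/pgx/v5/stdlib"
)

func exactlyOneAndStdlib(ctx context.Context, pool *pgxpool.Pool) (string, error) {
	rows, err := pool.Query(ctx, "select name from widgets where id = $1", 1)
	if err != nil {
		return "", err
	}
	// CollectExactlyOneRow returns pgx.ErrNoRows for zero rows and the new
	// pgx.ErrTooManyRows for more than one row.
	name, err := pgx.CollectExactlyOneRow(rows, pgx.RowTo[string])
	if err != nil {
		return "", err
	}

	// OpenDBFromPool exposes the same pool through database/sql.
	db := stdlib.OpenDBFromPool(pool)
	return name, db.PingContext(ctx)
}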


@ -86,9 +86,13 @@ It is also possible to use the `database/sql` interface and convert a connection
See CONTRIBUTING.md for setup instructions.
## Architecture
See the presentation at Golang Estonia, [PGX Top to Bottom](https://www.youtube.com/watch?v=sXMSWhcHCf8) for a description of pgx architecture.
## Supported Go and PostgreSQL Versions
pgx supports the same versions of Go and PostgreSQL that are supported by their respective teams. For [Go](https://golang.org/doc/devel/release.html#policy) that is the two most recent major releases and for [PostgreSQL](https://www.postgresql.org/support/versioning/) the major releases in the last 5 years. This means pgx supports Go 1.19 and higher and PostgreSQL 11 and higher. pgx also is tested against the latest version of [CockroachDB](https://www.cockroachlabs.com/product/).
pgx supports the same versions of Go and PostgreSQL that are supported by their respective teams. For [Go](https://golang.org/doc/devel/release.html#policy) that is the two most recent major releases and for [PostgreSQL](https://www.postgresql.org/support/versioning/) the major releases in the last 5 years. This means pgx supports Go 1.20 and higher and PostgreSQL 11 and higher. pgx also is tested against the latest version of [CockroachDB](https://www.cockroachlabs.com/product/).
## Version Policy


@ -2,6 +2,8 @@ package pgx
import (
"context"
"crypto/sha256"
"encoding/hex"
"errors"
"fmt"
"strconv"
@ -35,7 +37,7 @@ type ConnConfig struct {
// DefaultQueryExecMode controls the default mode for executing queries. By default pgx uses the extended protocol
// and automatically prepares and caches prepared statements. However, this may be incompatible with proxies such as
// PGBouncer. In this case it may be preferrable to use QueryExecModeExec or QueryExecModeSimpleProtocol. The same
// PGBouncer. In this case it may be preferable to use QueryExecModeExec or QueryExecModeSimpleProtocol. The same
// functionality can be controlled on a per query basis by passing a QueryExecMode as the first query argument.
DefaultQueryExecMode QueryExecMode
@ -99,8 +101,12 @@ func (ident Identifier) Sanitize() string {
return strings.Join(parts, ".")
}
// ErrNoRows occurs when rows are expected but none are returned.
var ErrNoRows = errors.New("no rows in result set")
var (
// ErrNoRows occurs when rows are expected but none are returned.
ErrNoRows = errors.New("no rows in result set")
// ErrTooManyRows occurs when more rows than expected are returned.
ErrTooManyRows = errors.New("too many rows in result set")
)
var errDisabledStatementCache = fmt.Errorf("cannot use QueryExecModeCacheStatement with disabled statement cache")
var errDisabledDescriptionCache = fmt.Errorf("cannot use QueryExecModeCacheDescribe with disabled description cache")
@ -269,7 +275,7 @@ func connect(ctx context.Context, config *ConnConfig) (c *Conn, err error) {
return c, nil
}
// Close closes a connection. It is safe to call Close on a already closed
// Close closes a connection. It is safe to call Close on an already closed
// connection.
func (c *Conn) Close(ctx context.Context) error {
if c.IsClosed() {
@ -280,12 +286,15 @@ func (c *Conn) Close(ctx context.Context) error {
return err
}
// Prepare creates a prepared statement with name and sql. sql can contain placeholders
// for bound parameters. These placeholders are referenced positional as $1, $2, etc.
// Prepare creates a prepared statement with name and sql. sql can contain placeholders for bound parameters. These
// placeholders are referenced positionally as $1, $2, etc. name can be used instead of sql with Query, QueryRow, and
// Exec to execute the statement. It can also be used with Batch.Queue.
//
// Prepare is idempotent; i.e. it is safe to call Prepare multiple times with the same
// name and sql arguments. This allows a code path to Prepare and Query/Exec without
// concern for if the statement has already been prepared.
// The underlying PostgreSQL identifier for the prepared statement will be name if name != sql or a digest of sql if
// name == sql.
//
// Prepare is idempotent; i.e. it is safe to call Prepare multiple times with the same name and sql arguments. This
// allows a code path to Prepare and Query/Exec without concern for if the statement has already been prepared.
func (c *Conn) Prepare(ctx context.Context, name, sql string) (sd *pgconn.StatementDescription, err error) {
if c.prepareTracer != nil {
ctx = c.prepareTracer.TracePrepareStart(ctx, c, TracePrepareStartData{Name: name, SQL: sql})
@ -307,22 +316,38 @@ func (c *Conn) Prepare(ctx context.Context, name, sql string) (sd *pgconn.Statem
}()
}
sd, err = c.pgConn.Prepare(ctx, name, sql, nil)
var psName, psKey string
if name == sql {
digest := sha256.Sum256([]byte(sql))
psName = "stmt_" + hex.EncodeToString(digest[0:24])
psKey = sql
} else {
psName = name
psKey = name
}
sd, err = c.pgConn.Prepare(ctx, psName, sql, nil)
if err != nil {
return nil, err
}
if name != "" {
c.preparedStatements[name] = sd
if psKey != "" {
c.preparedStatements[psKey] = sd
}
return sd, nil
}
// Deallocate released a prepared statement
// Deallocate releases a prepared statement.
func (c *Conn) Deallocate(ctx context.Context, name string) error {
delete(c.preparedStatements, name)
_, err := c.pgConn.Exec(ctx, "deallocate "+quoteIdentifier(name)).ReadAll()
var psName string
if sd, ok := c.preparedStatements[name]; ok {
delete(c.preparedStatements, name)
psName = sd.Name
} else {
psName = name
}
_, err := c.pgConn.Exec(ctx, "deallocate "+quoteIdentifier(psName)).ReadAll()
return err
}
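
The Prepare and Deallocate changes above mean a statement can be prepared under its own SQL text, with pgx deriving a stable server-side name from a SHA-256 digest. A minimal sketch, not from this commit, assuming an open *pgx.Conn:

package pgxexamples

import (
	"context"

	"github.com/jackc/pgx/v5"
)

func prepareBySQL(ctx context.Context, conn *pgx.Conn) (int, error) {
	const q = "select $1::int + $2::int"

	// name == sql, so the underlying statement name becomes "stmt_" plus a digest of q.
	// Prepare remains idempotent, so repeated calls are harmless.
	if _, err := conn.Prepare(ctx, q, q); err != nil {
		return 0, err
	}

	// Subsequent queries with the same SQL text reuse the prepared statement.
	var sum int
	if err := conn.QueryRow(ctx, q, 1, 2).Scan(&sum); err != nil {
		return 0, err
	}

	// Deallocate resolves q to the underlying statement name before issuing DEALLOCATE.
	return sum, conn.Deallocate(ctx, q)
}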
@ -461,7 +486,7 @@ optionLoop:
}
sd := c.statementCache.Get(sql)
if sd == nil {
sd, err = c.Prepare(ctx, stmtcache.NextStatementName(), sql)
sd, err = c.Prepare(ctx, stmtcache.StatementName(sql), sql)
if err != nil {
return pgconn.CommandTag{}, err
}
@ -573,13 +598,16 @@ type QueryExecMode int32
const (
_ QueryExecMode = iota
// Automatically prepare and cache statements. This uses the extended protocol. Queries are executed in a single
// round trip after the statement is cached. This is the default.
// Automatically prepare and cache statements. This uses the extended protocol. Queries are executed in a single round
// trip after the statement is cached. This is the default. If the database schema is modified or the search_path is
// changed after a statement is cached then the first execution of a previously cached query may fail. e.g. If the
// number of columns returned by a "SELECT *" changes or the type of a column is changed.
QueryExecModeCacheStatement
// Cache statement descriptions (i.e. argument and result types) and assume they do not change. This uses the
// extended protocol. Queries are executed in a single round trip after the description is cached. If the database
// schema is modified or the search_path is changed this may result in undetected result decoding errors.
// Cache statement descriptions (i.e. argument and result types) and assume they do not change. This uses the extended
// protocol. Queries are executed in a single round trip after the description is cached. If the database schema is
// modified or the search_path is changed after a statement is cached then the first execution of a previously cached
// query may fail. e.g. If the number of columns returned by a "SELECT *" changes or the type of a column is changed.
QueryExecModeCacheDescribe
// Get the statement description on every execution. This uses the extended protocol. Queries require two round trips
@ -592,13 +620,13 @@ const (
// Assume the PostgreSQL query parameter types based on the Go type of the arguments. This uses the extended protocol
// with text formatted parameters and results. Queries are executed in a single round trip. Type mappings can be
// registered with pgtype.Map.RegisterDefaultPgType. Queries will be rejected that have arguments that are
// unregistered or ambigious. e.g. A map[string]string may have the PostgreSQL type json or hstore. Modes that know
// unregistered or ambiguous. e.g. A map[string]string may have the PostgreSQL type json or hstore. Modes that know
// the PostgreSQL type can use a map[string]string directly as an argument. This mode cannot.
QueryExecModeExec
// Use the simple protocol. Assume the PostgreSQL query parameter types based on the Go type of the arguments.
// Queries are executed in a single round trip. Type mappings can be registered with
// pgtype.Map.RegisterDefaultPgType. Queries will be rejected that have arguments that are unregistered or ambigious.
// pgtype.Map.RegisterDefaultPgType. Queries will be rejected that have arguments that are unregistered or ambiguous.
// e.g. A map[string]string may have the PostgreSQL type json or hstore. Modes that know the PostgreSQL type can use
// a map[string]string directly as an argument. This mode cannot.
//
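
The mode descriptions above translate into two knobs in practice: a connection-wide default on ConnConfig, and a per-query override passed as the first query argument. A small sketch, not part of this diff, with an illustrative DSN:

package pgxexamples

import (
	"context"

	"github.com/jackc/pgx/v5"
)

func connectForPgBouncer(ctx context.Context, dsn string) (*pgx.Conn, error) {
	cfg, err := pgx.ParseConfig(dsn)
	if err != nil {
		return nil, err
	}
	// Avoid server-side prepared statements, e.g. behind PgBouncer in
	// transaction-pooling mode.
	cfg.DefaultQueryExecMode = pgx.QueryExecModeSimpleProtocol
	return pgx.ConnectConfig(ctx, cfg)
}

// Per query, the mode can instead be passed as the first argument:
//
//	rows, err := conn.Query(ctx, "select 1", pgx.QueryExecModeExec)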
@ -815,7 +843,7 @@ func (c *Conn) getStatementDescription(
}
sd = c.statementCache.Get(sql)
if sd == nil {
sd, err = c.Prepare(ctx, stmtcache.NextStatementName(), sql)
sd, err = c.Prepare(ctx, stmtcache.StatementName(sql), sql)
if err != nil {
return nil, err
}
@ -994,7 +1022,7 @@ func (c *Conn) sendBatchQueryExecModeCacheStatement(ctx context.Context, b *Batc
bi.sd = distinctNewQueries[idx]
} else {
sd = &pgconn.StatementDescription{
Name: stmtcache.NextStatementName(),
Name: stmtcache.StatementName(bi.query),
SQL: bi.query,
}
distinctNewQueriesIdxMap[sd.SQL] = len(distinctNewQueries)
@ -1062,7 +1090,7 @@ func (c *Conn) sendBatchQueryExecModeDescribeExec(ctx context.Context, b *Batch)
}
func (c *Conn) sendBatchExtendedWithDescription(ctx context.Context, b *Batch, distinctNewQueries []*pgconn.StatementDescription, sdCache stmtcache.Cache) (pbr *pipelineBatchResults) {
pipeline := c.pgConn.StartPipeline(context.Background())
pipeline := c.pgConn.StartPipeline(ctx)
defer func() {
if pbr != nil && pbr.err != nil {
pipeline.Close()


@ -34,7 +34,8 @@ func (c *LRUCache) Get(key string) *pgconn.StatementDescription {
}
// Put stores sd in the cache. Put panics if sd.SQL is "". Put does nothing if sd.SQL already exists in the cache.
// Put stores sd in the cache. Put panics if sd.SQL is "". Put does nothing if sd.SQL already exists in the cache or
// sd.SQL has been invalidated and HandleInvalidated has not been called yet.
func (c *LRUCache) Put(sd *pgconn.StatementDescription) {
if sd.SQL == "" {
panic("cannot store statement description with empty SQL")
@ -44,6 +45,13 @@ func (c *LRUCache) Put(sd *pgconn.StatementDescription) {
return
}
// The statement may have been invalidated but not yet handled. Do not readd it to the cache.
for _, invalidSD := range c.invalidStmts {
if invalidSD.SQL == sd.SQL {
return
}
}
if c.l.Len() == c.cap {
c.invalidateOldest()
}
@ -73,6 +81,8 @@ func (c *LRUCache) InvalidateAll() {
c.l = list.New()
}
// HandleInvalidated returns a slice of all statement descriptions invalidated since the last call to HandleInvalidated.
// Typically, the caller will then deallocate them.
func (c *LRUCache) HandleInvalidated() []*pgconn.StatementDescription {
invalidStmts := c.invalidStmts
c.invalidStmts = nil


@ -2,18 +2,17 @@
package stmtcache
import (
"strconv"
"sync/atomic"
"crypto/sha256"
"encoding/hex"
"github.com/jackc/pgx/v5/pgconn"
)
var stmtCounter int64
// NextStatementName returns a statement name that will be unique for the lifetime of the program.
func NextStatementName() string {
n := atomic.AddInt64(&stmtCounter, 1)
return "stmtcache_" + strconv.FormatInt(n, 10)
// StatementName returns a statement name that will be stable for sql across multiple connections and program
// executions.
func StatementName(sql string) string {
digest := sha256.Sum256([]byte(sql))
return "stmtcache_" + hex.EncodeToString(digest[0:24])
}
// Cache caches statement descriptions.
@ -39,19 +38,3 @@ type Cache interface {
// Cap returns the maximum number of cached prepared statement descriptions.
Cap() int
}
func IsStatementInvalid(err error) bool {
pgErr, ok := err.(*pgconn.PgError)
if !ok {
return false
}
// https://github.com/jackc/pgx/issues/1162
//
// We used to look for the message "cached plan must not change result type". However, that message can be localized.
// Unfortunately, error code "0A000" - "FEATURE NOT SUPPORTED" is used for many different errors and the only way to
// tell the difference is by the message. But all that happens is we clear a statement that we otherwise wouldn't
// have so it should be safe.
possibleInvalidCachedPlanError := pgErr.Code == "0A000"
return possibleInvalidCachedPlanError
}
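
The switch above from a process-local counter to a digest is what makes statement names deterministic across connections and restarts. A standalone sketch of the same scheme (the helper name is illustrative):

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

// statementName mirrors the naming scheme above: the result depends only on the
// SQL text, so every connection and every program run produces the same name.
func statementName(sql string) string {
	digest := sha256.Sum256([]byte(sql))
	return "stmtcache_" + hex.EncodeToString(digest[0:24])
}

func main() {
	fmt.Println(statementName("select 1"))
	fmt.Println(statementName("select 1")) // identical output
}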


@ -47,7 +47,7 @@ func (c *PgConn) scramAuth(serverAuthMechanisms []string) error {
return err
}
// Receive server-first-message payload in a AuthenticationSASLContinue.
// Receive server-first-message payload in an AuthenticationSASLContinue.
saslContinue, err := c.rxSASLContinue()
if err != nil {
return err
@ -67,7 +67,7 @@ func (c *PgConn) scramAuth(serverAuthMechanisms []string) error {
return err
}
// Receive server-final-message payload in a AuthenticationSASLFinal.
// Receive server-final-message payload in an AuthenticationSASLFinal.
saslFinal, err := c.rxSASLFinal()
if err != nil {
return err


@ -809,7 +809,7 @@ func makeConnectTimeoutDialFunc(timeout time.Duration) DialFunc {
return d.DialContext
}
// ValidateConnectTargetSessionAttrsReadWrite is an ValidateConnectFunc that implements libpq compatible
// ValidateConnectTargetSessionAttrsReadWrite is a ValidateConnectFunc that implements libpq compatible
// target_session_attrs=read-write.
func ValidateConnectTargetSessionAttrsReadWrite(ctx context.Context, pgConn *PgConn) error {
result := pgConn.ExecParams(ctx, "show transaction_read_only", nil, nil, nil, nil).Read()
@ -824,7 +824,7 @@ func ValidateConnectTargetSessionAttrsReadWrite(ctx context.Context, pgConn *PgC
return nil
}
// ValidateConnectTargetSessionAttrsReadOnly is an ValidateConnectFunc that implements libpq compatible
// ValidateConnectTargetSessionAttrsReadOnly is a ValidateConnectFunc that implements libpq compatible
// target_session_attrs=read-only.
func ValidateConnectTargetSessionAttrsReadOnly(ctx context.Context, pgConn *PgConn) error {
result := pgConn.ExecParams(ctx, "show transaction_read_only", nil, nil, nil, nil).Read()
@ -839,7 +839,7 @@ func ValidateConnectTargetSessionAttrsReadOnly(ctx context.Context, pgConn *PgCo
return nil
}
// ValidateConnectTargetSessionAttrsStandby is an ValidateConnectFunc that implements libpq compatible
// ValidateConnectTargetSessionAttrsStandby is a ValidateConnectFunc that implements libpq compatible
// target_session_attrs=standby.
func ValidateConnectTargetSessionAttrsStandby(ctx context.Context, pgConn *PgConn) error {
result := pgConn.ExecParams(ctx, "select pg_is_in_recovery()", nil, nil, nil, nil).Read()
@ -854,7 +854,7 @@ func ValidateConnectTargetSessionAttrsStandby(ctx context.Context, pgConn *PgCon
return nil
}
// ValidateConnectTargetSessionAttrsPrimary is an ValidateConnectFunc that implements libpq compatible
// ValidateConnectTargetSessionAttrsPrimary is a ValidateConnectFunc that implements libpq compatible
// target_session_attrs=primary.
func ValidateConnectTargetSessionAttrsPrimary(ctx context.Context, pgConn *PgConn) error {
result := pgConn.ExecParams(ctx, "select pg_is_in_recovery()", nil, nil, nil, nil).Read()
@ -869,7 +869,7 @@ func ValidateConnectTargetSessionAttrsPrimary(ctx context.Context, pgConn *PgCon
return nil
}
// ValidateConnectTargetSessionAttrsPreferStandby is an ValidateConnectFunc that implements libpq compatible
// ValidateConnectTargetSessionAttrsPreferStandby is a ValidateConnectFunc that implements libpq compatible
// target_session_attrs=prefer-standby.
func ValidateConnectTargetSessionAttrsPreferStandby(ctx context.Context, pgConn *PgConn) error {
result := pgConn.ExecParams(ctx, "select pg_is_in_recovery()", nil, nil, nil, nil).Read()


@ -74,6 +74,7 @@ type PgConn struct {
frontend *pgproto3.Frontend
bgReader *bgreader.BGReader
slowWriteTimer *time.Timer
bgReaderStarted chan struct{}
config *Config
@ -301,8 +302,14 @@ func connect(ctx context.Context, config *Config, fallbackConfig *FallbackConfig
pgConn.parameterStatuses = make(map[string]string)
pgConn.status = connStatusConnecting
pgConn.bgReader = bgreader.New(pgConn.conn)
pgConn.slowWriteTimer = time.AfterFunc(time.Duration(math.MaxInt64), pgConn.bgReader.Start)
pgConn.slowWriteTimer = time.AfterFunc(time.Duration(math.MaxInt64),
func() {
pgConn.bgReader.Start()
pgConn.bgReaderStarted <- struct{}{}
},
)
pgConn.slowWriteTimer.Stop()
pgConn.bgReaderStarted = make(chan struct{})
pgConn.frontend = config.BuildFrontend(pgConn.bgReader, pgConn.conn)
startupMsg := pgproto3.StartupMessage{
@ -593,7 +600,7 @@ func (pgConn *PgConn) Frontend() *pgproto3.Frontend {
return pgConn.frontend
}
// Close closes a connection. It is safe to call Close on a already closed connection. Close attempts a clean close by
// Close closes a connection. It is safe to call Close on an already closed connection. Close attempts a clean close by
// sending the exit message to PostgreSQL. However, this could block so ctx is available to limit the time to wait. The
// underlying net.Conn.Close() will always be called regardless of any other errors.
func (pgConn *PgConn) Close(ctx context.Context) error {
@ -935,16 +942,21 @@ func (pgConn *PgConn) CancelRequest(ctx context.Context) error {
buf := make([]byte, 16)
binary.BigEndian.PutUint32(buf[0:4], 16)
binary.BigEndian.PutUint32(buf[4:8], 80877102)
binary.BigEndian.PutUint32(buf[8:12], uint32(pgConn.pid))
binary.BigEndian.PutUint32(buf[12:16], uint32(pgConn.secretKey))
// Postgres will process the request and close the connection
// so when don't need to read the reply
// https://www.postgresql.org/docs/current/protocol-flow.html#id-1.10.6.7.10
_, err = cancelConn.Write(buf)
return err
binary.BigEndian.PutUint32(buf[8:12], pgConn.pid)
binary.BigEndian.PutUint32(buf[12:16], pgConn.secretKey)
if _, err := cancelConn.Write(buf); err != nil {
return fmt.Errorf("write to connection for cancellation: %w", err)
}
// Wait for the cancel request to be acknowledged by the server.
// It copies the behavior of the libpq: https://github.com/postgres/postgres/blob/REL_16_0/src/interfaces/libpq/fe-connect.c#L4946-L4960
_, _ = cancelConn.Read(buf)
return nil
}
// WaitForNotification waits for a LISTON/NOTIFY message to be received. It returns an error if a notification was not
// WaitForNotification waits for a LISTEN/NOTIFY message to be received. It returns an error if a notification was not
// received.
func (pgConn *PgConn) WaitForNotification(ctx context.Context) error {
if err := pgConn.lock(); err != nil {
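
For reference, the cancel request written above is a fixed 16-byte frame: its own length, the cancel request code 80877102, then the backend PID and secret key. A minimal sketch of just the framing, with placeholder values:

package pgxexamples

import "encoding/binary"

func cancelRequestFrame(pid, secretKey uint32) []byte {
	buf := make([]byte, 16)
	binary.BigEndian.PutUint32(buf[0:4], 16)       // total message length
	binary.BigEndian.PutUint32(buf[4:8], 80877102) // cancel request code
	binary.BigEndian.PutUint32(buf[8:12], pid)
	binary.BigEndian.PutUint32(buf[12:16], secretKey)
	return buf
}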
@ -1732,10 +1744,16 @@ func (pgConn *PgConn) enterPotentialWriteReadDeadlock() {
// exitPotentialWriteReadDeadlock must be called after a call to enterPotentialWriteReadDeadlock.
func (pgConn *PgConn) exitPotentialWriteReadDeadlock() {
// The state of the timer is not relevant upon exiting the potential slow write. It may both
// fire (due to a slow write), or not fire (due to a fast write).
_ = pgConn.slowWriteTimer.Stop()
pgConn.bgReader.Stop()
if !pgConn.slowWriteTimer.Stop() {
// The timer starts its function in a separate goroutine. It is necessary to ensure the background reader has
// started before calling Stop. Otherwise, the background reader may not be stopped. That on its own is not a
// serious problem. But what is a serious problem is that the background reader may start at an inopportune time in
// a subsequent query. For example, if a subsequent query was canceled then a deadline may be set on the net.Conn to
// interrupt an in-progress read. After the read is interrupted, but before the deadline is cleared, the background
// reader could start and read a deadline error. Then the next query would receive the an unexpected deadline error.
<-pgConn.bgReaderStarted
pgConn.bgReader.Stop()
}
}
func (pgConn *PgConn) flushWithPotentialWriteReadDeadlock() error {
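
The bgReaderStarted channel introduced above is a general pattern: a time.AfterFunc callback signals once the worker has actually started, so a failed Stop tells the caller to wait for that signal before stopping the worker. A simplified sketch of the pattern outside pgconn, with illustrative names:

package pgxexamples

import (
	"math"
	"time"
)

type slowWriteGuard struct {
	timer   *time.Timer
	started chan struct{}
}

func newSlowWriteGuard(start func()) *slowWriteGuard {
	g := &slowWriteGuard{started: make(chan struct{})}
	// Created with an effectively infinite delay and stopped immediately; callers
	// Reset it to a real deadline around a potentially slow write.
	g.timer = time.AfterFunc(time.Duration(math.MaxInt64), func() {
		start()
		g.started <- struct{}{}
	})
	g.timer.Stop()
	return g
}

func (g *slowWriteGuard) exit(stop func()) {
	if !g.timer.Stop() {
		// The timer fired: wait until start() has really run, then stop the worker,
		// so it cannot start later at an inopportune moment.
		<-g.started
		stop()
	}
}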
@ -1764,7 +1782,7 @@ func (pgConn *PgConn) SyncConn(ctx context.Context) error {
}
}
// This should never happen. Only way I can imagine this occuring is if the server is constantly sending data such as
// This should never happen. Only way I can imagine this occurring is if the server is constantly sending data such as
// LISTEN/NOTIFY or log notifications such that we never can get an empty buffer.
return errors.New("SyncConn: conn never synchronized")
}
@ -1830,8 +1848,14 @@ func Construct(hc *HijackedConn) (*PgConn, error) {
pgConn.contextWatcher = newContextWatcher(pgConn.conn)
pgConn.bgReader = bgreader.New(pgConn.conn)
pgConn.slowWriteTimer = time.AfterFunc(time.Duration(math.MaxInt64), pgConn.bgReader.Start)
pgConn.slowWriteTimer = time.AfterFunc(time.Duration(math.MaxInt64),
func() {
pgConn.bgReader.Start()
pgConn.bgReaderStarted <- struct{}{}
},
)
pgConn.slowWriteTimer.Stop()
pgConn.bgReaderStarted = make(chan struct{})
pgConn.frontend = hc.Config.BuildFrontend(pgConn.bgReader, pgConn.conn)
return pgConn, nil
@ -1996,7 +2020,8 @@ func (p *Pipeline) GetResults() (results any, err error) {
for {
msg, err := p.conn.receiveMessage()
if err != nil {
return nil, err
p.conn.asyncClose()
return nil, normalizeTimeoutError(p.ctx, err)
}
switch msg := msg.(type) {


@ -1,6 +1,6 @@
# pgproto3
Package pgproto3 is a encoder and decoder of the PostgreSQL wire protocol version 3.
Package pgproto3 is an encoder and decoder of the PostgreSQL wire protocol version 3.
pgproto3 can be used as a foundation for PostgreSQL drivers, proxies, mock servers, load balancers and more.


@ -1,7 +1,7 @@
// Package pgproto3 is a encoder and decoder of the PostgreSQL wire protocol version 3.
// Package pgproto3 is an encoder and decoder of the PostgreSQL wire protocol version 3.
//
// The primary interfaces are Frontend and Backend. They correspond to a client and server respectively. Messages are
// sent with Send (or a specialized Send variant). Messages are automatically bufferred to minimize small writes. Call
// sent with Send (or a specialized Send variant). Messages are automatically buffered to minimize small writes. Call
// Flush to ensure a message has actually been sent.
//
// The Trace method of Frontend and Backend can be used to examine the wire-level message traffic. It outputs in a


@ -156,7 +156,7 @@ func (f *Frontend) SendDescribe(msg *Describe) {
}
}
// SendExecute sends a Execute message to the backend (i.e. the server). The message is not guaranteed to be written until
// SendExecute sends an Execute message to the backend (i.e. the server). The message is not guaranteed to be written until
// Flush is called.
func (f *Frontend) SendExecute(msg *Execute) {
prevLen := len(f.wbuf)


@ -38,14 +38,14 @@ func (dst *StartupMessage) Decode(src []byte) error {
for {
idx := bytes.IndexByte(src[rp:], 0)
if idx < 0 {
return &invalidMessageFormatErr{messageType: "StartupMesage"}
return &invalidMessageFormatErr{messageType: "StartupMessage"}
}
key := string(src[rp : rp+idx])
rp += idx + 1
idx = bytes.IndexByte(src[rp:], 0)
if idx < 0 {
return &invalidMessageFormatErr{messageType: "StartupMesage"}
return &invalidMessageFormatErr{messageType: "StartupMessage"}
}
value := string(src[rp : rp+idx])
rp += idx + 1


@ -67,7 +67,7 @@ See example_custom_type_test.go for an example of a custom type for the PostgreS
Sometimes pgx supports a PostgreSQL type such as numeric but the Go type is in an external package that does not have
pgx support such as github.com/shopspring/decimal. These types can be registered with pgtype with custom conversion
logic. See https://github.com/jackc/pgx-shopspring-decimal and https://github.com/jackc/pgx-gofrs-uuid for a example
logic. See https://github.com/jackc/pgx-shopspring-decimal and https://github.com/jackc/pgx-gofrs-uuid for example
integrations.
New PostgreSQL Type Support
@ -149,7 +149,7 @@ Overview of Scanning Implementation
The first step is to use the OID to lookup the correct Codec. If the OID is unavailable, Map will try to find the OID
from previous calls of Map.RegisterDefaultPgType. The Map will call the Codec's PlanScan method to get a plan for
scanning into the Go value. A Codec will support scanning into one or more Go types. Oftentime these Go types are
interfaces rather than explicit types. For example, PointCodec can use any Go type that implments the PointScanner and
interfaces rather than explicit types. For example, PointCodec can use any Go type that implements the PointScanner and
PointValuer interfaces.
If a Go value is not supported directly by a Codec then Map will try wrapping it with additional logic and try again.


@ -3,6 +3,7 @@ package pgtype
import (
"database/sql/driver"
"encoding/binary"
"encoding/json"
"fmt"
"math"
"strconv"
@ -65,6 +66,29 @@ func (f Float4) Value() (driver.Value, error) {
return float64(f.Float32), nil
}
func (f Float4) MarshalJSON() ([]byte, error) {
if !f.Valid {
return []byte("null"), nil
}
return json.Marshal(f.Float32)
}
func (f *Float4) UnmarshalJSON(b []byte) error {
var n *float32
err := json.Unmarshal(b, &n)
if err != nil {
return err
}
if n == nil {
*f = Float4{}
} else {
*f = Float4{Float32: *n, Valid: true}
}
return nil
}
type Float4Codec struct{}
func (Float4Codec) FormatSupported(format int16) bool {


@ -74,6 +74,29 @@ func (f Float8) Value() (driver.Value, error) {
return f.Float64, nil
}
func (f Float8) MarshalJSON() ([]byte, error) {
if !f.Valid {
return []byte("null"), nil
}
return json.Marshal(f.Float64)
}
func (f *Float8) UnmarshalJSON(b []byte) error {
var n *float64
err := json.Unmarshal(b, &n)
if err != nil {
return err
}
if n == nil {
*f = Float8{}
} else {
*f = Float8{Float64: *n, Valid: true}
}
return nil
}
type Float8Codec struct{}
func (Float8Codec) FormatSupported(format int16) bool {
@ -109,13 +132,6 @@ func (Float8Codec) PlanEncode(m *Map, oid uint32, format int16, value any) Encod
return nil
}
func (f *Float8) MarshalJSON() ([]byte, error) {
if !f.Valid {
return []byte("null"), nil
}
return json.Marshal(f.Float64)
}
type encodePlanFloat8CodecBinaryFloat64 struct{}
func (encodePlanFloat8CodecBinaryFloat64) Encode(value any, buf []byte) (newBuf []byte, err error) {
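
The new MarshalJSON and UnmarshalJSON methods shown above make NULL round-trip as JSON null and valid values as plain numbers. A small sketch, not part of this diff:

package pgxexamples

import (
	"encoding/json"
	"fmt"

	"github.com/jackc/pgx/v5/pgtype"
)

func floatJSONRoundTrip() error {
	b, err := json.Marshal(pgtype.Float8{Float64: 1.25, Valid: true})
	if err != nil {
		return err
	}
	fmt.Println(string(b)) // 1.25

	var f pgtype.Float8
	if err := json.Unmarshal([]byte("null"), &f); err != nil {
		return err
	}
	fmt.Println(f.Valid) // false
	return nil
}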


@ -156,7 +156,7 @@ func (scanPlanBinaryInetToNetipPrefixScanner) Scan(src []byte, dst any) error {
}
if len(src) != 8 && len(src) != 20 {
return fmt.Errorf("Received an invalid size for a inet: %d", len(src))
return fmt.Errorf("Received an invalid size for an inet: %d", len(src))
}
// ignore family


@ -179,7 +179,7 @@ func (scanPlanBinaryIntervalToIntervalScanner) Scan(src []byte, dst any) error {
}
if len(src) != 16 {
return fmt.Errorf("Received an invalid size for a interval: %d", len(src))
return fmt.Errorf("Received an invalid size for an interval: %d", len(src))
}
microseconds := int64(binary.BigEndian.Uint64(src))
@ -242,21 +242,21 @@ func (scanPlanTextAnyToIntervalScanner) Scan(src []byte, dst any) error {
return fmt.Errorf("bad interval minute format: %s", timeParts[1])
}
secondParts := strings.SplitN(timeParts[2], ".", 2)
sec, secFrac, secFracFound := strings.Cut(timeParts[2], ".")
seconds, err := strconv.ParseInt(secondParts[0], 10, 64)
seconds, err := strconv.ParseInt(sec, 10, 64)
if err != nil {
return fmt.Errorf("bad interval second format: %s", secondParts[0])
return fmt.Errorf("bad interval second format: %s", sec)
}
var uSeconds int64
if len(secondParts) == 2 {
uSeconds, err = strconv.ParseInt(secondParts[1], 10, 64)
if secFracFound {
uSeconds, err = strconv.ParseInt(secFrac, 10, 64)
if err != nil {
return fmt.Errorf("bad interval decimal format: %s", secondParts[1])
return fmt.Errorf("bad interval decimal format: %s", secFrac)
}
for i := 0; i < 6-len(secondParts[1]); i++ {
for i := 0; i < 6-len(secFrac); i++ {
uSeconds *= 10
}
}


@ -1358,6 +1358,8 @@ var kindToTypes map[reflect.Kind]reflect.Type = map[reflect.Kind]reflect.Type{
reflect.Bool: reflect.TypeOf(false),
}
var byteSliceType = reflect.TypeOf([]byte{})
type underlyingTypeEncodePlan struct {
nextValueType reflect.Type
next EncodePlan
@ -1372,6 +1374,10 @@ func (plan *underlyingTypeEncodePlan) Encode(value any, buf []byte) (newBuf []by
// TryWrapFindUnderlyingTypeEncodePlan tries to convert to a Go builtin type. e.g. If value was of type MyString and
// MyString was defined as a string then a wrapper plan would be returned that converts MyString to string.
func TryWrapFindUnderlyingTypeEncodePlan(value any) (plan WrappedEncodePlanNextSetter, nextValue any, ok bool) {
if value == nil {
return nil, nil, false
}
if _, ok := value.(driver.Valuer); ok {
return nil, nil, false
}
@ -1387,6 +1393,15 @@ func TryWrapFindUnderlyingTypeEncodePlan(value any) (plan WrappedEncodePlanNextS
return &underlyingTypeEncodePlan{nextValueType: nextValueType}, refValue.Convert(nextValueType).Interface(), true
}
// []byte is a special case. It is a slice but we treat it as a scalar type. In the case of a named type like
// json.RawMessage which is defined as []byte the underlying type should be considered as []byte. But any other slice
// does not have a special underlying type.
//
// https://github.com/jackc/pgx/issues/1763
if refValue.Type() != byteSliceType && refValue.Type().AssignableTo(byteSliceType) {
return &underlyingTypeEncodePlan{nextValueType: byteSliceType}, refValue.Convert(byteSliceType).Interface(), true
}
return nil, nil, false
}
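
The extra check above (issue 1763) hinges on a named type such as json.RawMessage having []byte as its underlying type: it is assignable to []byte even though it is not the same type. A short sketch of the reflection involved:

package pgxexamples

import (
	"encoding/json"
	"fmt"
	"reflect"
)

func rawMessageIsByteSlice() {
	byteSliceType := reflect.TypeOf([]byte{})
	t := reflect.TypeOf(json.RawMessage(`{"a": 1}`))

	// Different type, but assignable to []byte, so it is encoded as []byte.
	fmt.Println(t != byteSliceType)            // true
	fmt.Println(t.AssignableTo(byteSliceType)) // true
}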


@ -50,17 +50,17 @@ func parsePoint(src []byte) (*Point, error) {
if src[0] == '"' && src[len(src)-1] == '"' {
src = src[1 : len(src)-1]
}
parts := strings.SplitN(string(src[1:len(src)-1]), ",", 2)
if len(parts) < 2 {
sx, sy, found := strings.Cut(string(src[1:len(src)-1]), ",")
if !found {
return nil, fmt.Errorf("invalid format for point")
}
x, err := strconv.ParseFloat(parts[0], 64)
x, err := strconv.ParseFloat(sx, 64)
if err != nil {
return nil, err
}
y, err := strconv.ParseFloat(parts[1], 64)
y, err := strconv.ParseFloat(sy, 64)
if err != nil {
return nil, err
}
@ -247,17 +247,17 @@ func (scanPlanTextAnyToPointScanner) Scan(src []byte, dst any) error {
return fmt.Errorf("invalid length for point: %v", len(src))
}
parts := strings.SplitN(string(src[1:len(src)-1]), ",", 2)
if len(parts) < 2 {
sx, sy, found := strings.Cut(string(src[1:len(src)-1]), ",")
if !found {
return fmt.Errorf("invalid format for point")
}
x, err := strconv.ParseFloat(parts[0], 64)
x, err := strconv.ParseFloat(sx, 64)
if err != nil {
return err
}
y, err := strconv.ParseFloat(parts[1], 64)
y, err := strconv.ParseFloat(sy, 64)
if err != nil {
return err
}


@ -205,17 +205,17 @@ func (scanPlanTextAnyToTIDScanner) Scan(src []byte, dst any) error {
return fmt.Errorf("invalid length for tid: %v", len(src))
}
parts := strings.SplitN(string(src[1:len(src)-1]), ",", 2)
if len(parts) < 2 {
block, offset, found := strings.Cut(string(src[1:len(src)-1]), ",")
if !found {
return fmt.Errorf("invalid format for tid")
}
blockNumber, err := strconv.ParseUint(parts[0], 10, 32)
blockNumber, err := strconv.ParseUint(block, 10, 32)
if err != nil {
return err
}
offsetNumber, err := strconv.ParseUint(parts[1], 10, 16)
offsetNumber, err := strconv.ParseUint(offset, 10, 16)
if err != nil {
return err
}


@ -0,0 +1,52 @@
package pgxpool
import (
"github.com/jackc/pgx/v5"
"github.com/jackc/pgx/v5/pgconn"
)
type errBatchResults struct {
err error
}
func (br errBatchResults) Exec() (pgconn.CommandTag, error) {
return pgconn.CommandTag{}, br.err
}
func (br errBatchResults) Query() (pgx.Rows, error) {
return errRows{err: br.err}, br.err
}
func (br errBatchResults) QueryRow() pgx.Row {
return errRow{err: br.err}
}
func (br errBatchResults) Close() error {
return br.err
}
type poolBatchResults struct {
br pgx.BatchResults
c *Conn
}
func (br *poolBatchResults) Exec() (pgconn.CommandTag, error) {
return br.br.Exec()
}
func (br *poolBatchResults) Query() (pgx.Rows, error) {
return br.br.Query()
}
func (br *poolBatchResults) QueryRow() pgx.Row {
return br.br.QueryRow()
}
func (br *poolBatchResults) Close() error {
err := br.br.Close()
if br.c != nil {
br.c.Release()
br.c = nil
}
return err
}

vendor/github.com/jackc/pgx/v5/pgxpool/conn.go (generated vendored normal file, 130 lines)

@ -0,0 +1,130 @@
package pgxpool
import (
"context"
"sync/atomic"
"github.com/jackc/pgx/v5"
"github.com/jackc/pgx/v5/pgconn"
"github.com/jackc/puddle/v2"
)
// Conn is an acquired *pgx.Conn from a Pool.
type Conn struct {
res *puddle.Resource[*connResource]
p *Pool
}
// Release returns c to the pool it was acquired from. Once Release has been called, other methods must not be called.
// However, it is safe to call Release multiple times. Subsequent calls after the first will be ignored.
func (c *Conn) Release() {
if c.res == nil {
return
}
conn := c.Conn()
res := c.res
c.res = nil
if conn.IsClosed() || conn.PgConn().IsBusy() || conn.PgConn().TxStatus() != 'I' {
res.Destroy()
// Signal to the health check to run since we just destroyed a connections
// and we might be below minConns now
c.p.triggerHealthCheck()
return
}
// If the pool is consistently being used, we might never get to check the
// lifetime of a connection since we only check idle connections in checkConnsHealth
// so we also check the lifetime here and force a health check
if c.p.isExpired(res) {
atomic.AddInt64(&c.p.lifetimeDestroyCount, 1)
res.Destroy()
// Signal to the health check to run since we just destroyed a connections
// and we might be below minConns now
c.p.triggerHealthCheck()
return
}
if c.p.afterRelease == nil {
res.Release()
return
}
go func() {
if c.p.afterRelease(conn) {
res.Release()
} else {
res.Destroy()
// Signal to the health check to run since we just destroyed a connections
// and we might be below minConns now
c.p.triggerHealthCheck()
}
}()
}
// Hijack assumes ownership of the connection from the pool. Caller is responsible for closing the connection. Hijack
// will panic if called on an already released or hijacked connection.
func (c *Conn) Hijack() *pgx.Conn {
if c.res == nil {
panic("cannot hijack already released or hijacked connection")
}
conn := c.Conn()
res := c.res
c.res = nil
res.Hijack()
return conn
}
func (c *Conn) Exec(ctx context.Context, sql string, arguments ...any) (pgconn.CommandTag, error) {
return c.Conn().Exec(ctx, sql, arguments...)
}
func (c *Conn) Query(ctx context.Context, sql string, args ...any) (pgx.Rows, error) {
return c.Conn().Query(ctx, sql, args...)
}
func (c *Conn) QueryRow(ctx context.Context, sql string, args ...any) pgx.Row {
return c.Conn().QueryRow(ctx, sql, args...)
}
func (c *Conn) SendBatch(ctx context.Context, b *pgx.Batch) pgx.BatchResults {
return c.Conn().SendBatch(ctx, b)
}
func (c *Conn) CopyFrom(ctx context.Context, tableName pgx.Identifier, columnNames []string, rowSrc pgx.CopyFromSource) (int64, error) {
return c.Conn().CopyFrom(ctx, tableName, columnNames, rowSrc)
}
// Begin starts a transaction block from the *Conn without explicitly setting a transaction mode (see BeginTx with TxOptions if transaction mode is required).
func (c *Conn) Begin(ctx context.Context) (pgx.Tx, error) {
return c.Conn().Begin(ctx)
}
// BeginTx starts a transaction block from the *Conn with txOptions determining the transaction mode.
func (c *Conn) BeginTx(ctx context.Context, txOptions pgx.TxOptions) (pgx.Tx, error) {
return c.Conn().BeginTx(ctx, txOptions)
}
func (c *Conn) Ping(ctx context.Context) error {
return c.Conn().Ping(ctx)
}
func (c *Conn) Conn() *pgx.Conn {
return c.connResource().conn
}
func (c *Conn) connResource() *connResource {
return c.res.Value()
}
func (c *Conn) getPoolRow(r pgx.Row) *poolRow {
return c.connResource().getPoolRow(c, r)
}
func (c *Conn) getPoolRows(r pgx.Rows) *poolRows {
return c.connResource().getPoolRows(c, r)
}

vendor/github.com/jackc/pgx/v5/pgxpool/doc.go (generated vendored normal file, 27 lines)

@ -0,0 +1,27 @@
// Package pgxpool is a concurrency-safe connection pool for pgx.
/*
pgxpool implements a nearly identical interface to pgx connections.
Creating a Pool
The primary way of creating a pool is with [pgxpool.New]:
pool, err := pgxpool.New(context.Background(), os.Getenv("DATABASE_URL"))
The database connection string can be in URL or DSN format. PostgreSQL settings, pgx settings, and pool settings can be
specified here. In addition, a config struct can be created by [ParseConfig].
config, err := pgxpool.ParseConfig(os.Getenv("DATABASE_URL"))
if err != nil {
// ...
}
config.AfterConnect = func(ctx context.Context, conn *pgx.Conn) error {
// do something with every new connection
}
pool, err := pgxpool.NewWithConfig(context.Background(), config)
A pool returns without waiting for any connections to be established. Acquire a connection immediately after creating
the pool to check if a connection can successfully be established.
*/
package pgxpool
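
Following the doc comment above, typical use of the pool either queries through it directly or acquires a connection for a sequence of operations. A minimal sketch, with DATABASE_URL and the queries purely illustrative:

package pgxexamples

import (
	"context"
	"os"

	"github.com/jackc/pgx/v5/pgxpool"
)

func poolUsage(ctx context.Context) error {
	pool, err := pgxpool.New(ctx, os.Getenv("DATABASE_URL"))
	if err != nil {
		return err
	}
	defer pool.Close()

	// QueryRow acquires a connection and releases it when Scan is called.
	var one int
	if err := pool.QueryRow(ctx, "select 1").Scan(&one); err != nil {
		return err
	}

	// Or hold a connection explicitly across several operations.
	conn, err := pool.Acquire(ctx)
	if err != nil {
		return err
	}
	defer conn.Release()
	_, err = conn.Exec(ctx, "select 1")
	return err
}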

vendor/github.com/jackc/pgx/v5/pgxpool/pool.go (generated vendored normal file, 695 lines)

@ -0,0 +1,695 @@
package pgxpool
import (
"context"
"fmt"
"math/rand"
"runtime"
"strconv"
"sync"
"sync/atomic"
"time"
"github.com/jackc/pgx/v5"
"github.com/jackc/pgx/v5/pgconn"
"github.com/jackc/puddle/v2"
)
var defaultMaxConns = int32(4)
var defaultMinConns = int32(0)
var defaultMaxConnLifetime = time.Hour
var defaultMaxConnIdleTime = time.Minute * 30
var defaultHealthCheckPeriod = time.Minute
type connResource struct {
conn *pgx.Conn
conns []Conn
poolRows []poolRow
poolRowss []poolRows
maxAgeTime time.Time
}
func (cr *connResource) getConn(p *Pool, res *puddle.Resource[*connResource]) *Conn {
if len(cr.conns) == 0 {
cr.conns = make([]Conn, 128)
}
c := &cr.conns[len(cr.conns)-1]
cr.conns = cr.conns[0 : len(cr.conns)-1]
c.res = res
c.p = p
return c
}
func (cr *connResource) getPoolRow(c *Conn, r pgx.Row) *poolRow {
if len(cr.poolRows) == 0 {
cr.poolRows = make([]poolRow, 128)
}
pr := &cr.poolRows[len(cr.poolRows)-1]
cr.poolRows = cr.poolRows[0 : len(cr.poolRows)-1]
pr.c = c
pr.r = r
return pr
}
func (cr *connResource) getPoolRows(c *Conn, r pgx.Rows) *poolRows {
if len(cr.poolRowss) == 0 {
cr.poolRowss = make([]poolRows, 128)
}
pr := &cr.poolRowss[len(cr.poolRowss)-1]
cr.poolRowss = cr.poolRowss[0 : len(cr.poolRowss)-1]
pr.c = c
pr.r = r
return pr
}
// Pool allows for connection reuse.
type Pool struct {
// 64 bit fields accessed with atomics must be at beginning of struct to guarantee alignment for certain 32-bit
// architectures. See BUGS section of https://pkg.go.dev/sync/atomic and https://github.com/jackc/pgx/issues/1288.
newConnsCount int64
lifetimeDestroyCount int64
idleDestroyCount int64
p *puddle.Pool[*connResource]
config *Config
beforeConnect func(context.Context, *pgx.ConnConfig) error
afterConnect func(context.Context, *pgx.Conn) error
beforeAcquire func(context.Context, *pgx.Conn) bool
afterRelease func(*pgx.Conn) bool
beforeClose func(*pgx.Conn)
minConns int32
maxConns int32
maxConnLifetime time.Duration
maxConnLifetimeJitter time.Duration
maxConnIdleTime time.Duration
healthCheckPeriod time.Duration
healthCheckChan chan struct{}
closeOnce sync.Once
closeChan chan struct{}
}
// Config is the configuration struct for creating a pool. It must be created by [ParseConfig] and then it can be
// modified.
type Config struct {
ConnConfig *pgx.ConnConfig
// BeforeConnect is called before a new connection is made. It is passed a copy of the underlying pgx.ConnConfig and
// will not impact any existing open connections.
BeforeConnect func(context.Context, *pgx.ConnConfig) error
// AfterConnect is called after a connection is established, but before it is added to the pool.
AfterConnect func(context.Context, *pgx.Conn) error
// BeforeAcquire is called before a connection is acquired from the pool. It must return true to allow the
// acquisition or false to indicate that the connection should be destroyed and a different connection should be
// acquired.
BeforeAcquire func(context.Context, *pgx.Conn) bool
// AfterRelease is called after a connection is released, but before it is returned to the pool. It must return true to
// return the connection to the pool or false to destroy the connection.
AfterRelease func(*pgx.Conn) bool
// BeforeClose is called right before a connection is closed and removed from the pool.
BeforeClose func(*pgx.Conn)
// MaxConnLifetime is the duration since creation after which a connection will be automatically closed.
MaxConnLifetime time.Duration
// MaxConnLifetimeJitter is the duration after MaxConnLifetime to randomly decide to close a connection.
// This helps prevent all connections from being closed at the exact same time, starving the pool.
MaxConnLifetimeJitter time.Duration
// MaxConnIdleTime is the duration after which an idle connection will be automatically closed by the health check.
MaxConnIdleTime time.Duration
// MaxConns is the maximum size of the pool. The default is the greater of 4 or runtime.NumCPU().
MaxConns int32
// MinConns is the minimum size of the pool. After connection closes, the pool might dip below MinConns. A low
// number of MinConns might mean the pool is empty after MaxConnLifetime until the health check has a chance
// to create new connections.
MinConns int32
// HealthCheckPeriod is the duration between checks of the health of idle connections.
HealthCheckPeriod time.Duration
createdByParseConfig bool // Used to enforce created by ParseConfig rule.
}
// Copy returns a deep copy of the config that is safe to use and modify.
// The only exception is the tls.Config:
// according to the tls.Config docs it must not be modified after creation.
func (c *Config) Copy() *Config {
newConfig := new(Config)
*newConfig = *c
newConfig.ConnConfig = c.ConnConfig.Copy()
return newConfig
}
// ConnString returns the connection string as parsed by pgxpool.ParseConfig into pgxpool.Config.
func (c *Config) ConnString() string { return c.ConnConfig.ConnString() }
// New creates a new Pool. See [ParseConfig] for information on connString format.
func New(ctx context.Context, connString string) (*Pool, error) {
config, err := ParseConfig(connString)
if err != nil {
return nil, err
}
return NewWithConfig(ctx, config)
}
// NewWithConfig creates a new Pool. config must have been created by [ParseConfig].
func NewWithConfig(ctx context.Context, config *Config) (*Pool, error) {
// Default values are set in ParseConfig. Enforce initial creation by ParseConfig rather than setting defaults from
// zero values.
if !config.createdByParseConfig {
panic("config must be created by ParseConfig")
}
p := &Pool{
config: config,
beforeConnect: config.BeforeConnect,
afterConnect: config.AfterConnect,
beforeAcquire: config.BeforeAcquire,
afterRelease: config.AfterRelease,
beforeClose: config.BeforeClose,
minConns: config.MinConns,
maxConns: config.MaxConns,
maxConnLifetime: config.MaxConnLifetime,
maxConnLifetimeJitter: config.MaxConnLifetimeJitter,
maxConnIdleTime: config.MaxConnIdleTime,
healthCheckPeriod: config.HealthCheckPeriod,
healthCheckChan: make(chan struct{}, 1),
closeChan: make(chan struct{}),
}
var err error
p.p, err = puddle.NewPool(
&puddle.Config[*connResource]{
Constructor: func(ctx context.Context) (*connResource, error) {
atomic.AddInt64(&p.newConnsCount, 1)
connConfig := p.config.ConnConfig.Copy()
// Connection will continue in background even if Acquire is canceled. Ensure that a connect won't hang forever.
if connConfig.ConnectTimeout <= 0 {
connConfig.ConnectTimeout = 2 * time.Minute
}
if p.beforeConnect != nil {
if err := p.beforeConnect(ctx, connConfig); err != nil {
return nil, err
}
}
conn, err := pgx.ConnectConfig(ctx, connConfig)
if err != nil {
return nil, err
}
if p.afterConnect != nil {
err = p.afterConnect(ctx, conn)
if err != nil {
conn.Close(ctx)
return nil, err
}
}
jitterSecs := rand.Float64() * config.MaxConnLifetimeJitter.Seconds()
maxAgeTime := time.Now().Add(config.MaxConnLifetime).Add(time.Duration(jitterSecs) * time.Second)
cr := &connResource{
conn: conn,
conns: make([]Conn, 64),
poolRows: make([]poolRow, 64),
poolRowss: make([]poolRows, 64),
maxAgeTime: maxAgeTime,
}
return cr, nil
},
Destructor: func(value *connResource) {
ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
conn := value.conn
if p.beforeClose != nil {
p.beforeClose(conn)
}
conn.Close(ctx)
select {
case <-conn.PgConn().CleanupDone():
case <-ctx.Done():
}
cancel()
},
MaxSize: config.MaxConns,
},
)
if err != nil {
return nil, err
}
go func() {
p.createIdleResources(ctx, int(p.minConns))
p.backgroundHealthCheck()
}()
return p, nil
}
// ParseConfig builds a Config from connString. It parses connString with the same behavior as [pgx.ParseConfig] with the
// addition of the following variables:
//
// - pool_max_conns: integer greater than 0
// - pool_min_conns: integer 0 or greater
// - pool_max_conn_lifetime: duration string
// - pool_max_conn_idle_time: duration string
// - pool_health_check_period: duration string
// - pool_max_conn_lifetime_jitter: duration string
//
// See Config for definitions of these arguments.
//
// # Example DSN
// user=jack password=secret host=pg.example.com port=5432 dbname=mydb sslmode=verify-ca pool_max_conns=10
//
// # Example URL
// postgres://jack:secret@pg.example.com:5432/mydb?sslmode=verify-ca&pool_max_conns=10
func ParseConfig(connString string) (*Config, error) {
connConfig, err := pgx.ParseConfig(connString)
if err != nil {
return nil, err
}
config := &Config{
ConnConfig: connConfig,
createdByParseConfig: true,
}
if s, ok := config.ConnConfig.Config.RuntimeParams["pool_max_conns"]; ok {
delete(connConfig.Config.RuntimeParams, "pool_max_conns")
n, err := strconv.ParseInt(s, 10, 32)
if err != nil {
return nil, fmt.Errorf("cannot parse pool_max_conns: %w", err)
}
if n < 1 {
return nil, fmt.Errorf("pool_max_conns too small: %d", n)
}
config.MaxConns = int32(n)
} else {
config.MaxConns = defaultMaxConns
if numCPU := int32(runtime.NumCPU()); numCPU > config.MaxConns {
config.MaxConns = numCPU
}
}
if s, ok := config.ConnConfig.Config.RuntimeParams["pool_min_conns"]; ok {
delete(connConfig.Config.RuntimeParams, "pool_min_conns")
n, err := strconv.ParseInt(s, 10, 32)
if err != nil {
return nil, fmt.Errorf("cannot parse pool_min_conns: %w", err)
}
config.MinConns = int32(n)
} else {
config.MinConns = defaultMinConns
}
if s, ok := config.ConnConfig.Config.RuntimeParams["pool_max_conn_lifetime"]; ok {
delete(connConfig.Config.RuntimeParams, "pool_max_conn_lifetime")
d, err := time.ParseDuration(s)
if err != nil {
return nil, fmt.Errorf("invalid pool_max_conn_lifetime: %w", err)
}
config.MaxConnLifetime = d
} else {
config.MaxConnLifetime = defaultMaxConnLifetime
}
if s, ok := config.ConnConfig.Config.RuntimeParams["pool_max_conn_idle_time"]; ok {
delete(connConfig.Config.RuntimeParams, "pool_max_conn_idle_time")
d, err := time.ParseDuration(s)
if err != nil {
return nil, fmt.Errorf("invalid pool_max_conn_idle_time: %w", err)
}
config.MaxConnIdleTime = d
} else {
config.MaxConnIdleTime = defaultMaxConnIdleTime
}
if s, ok := config.ConnConfig.Config.RuntimeParams["pool_health_check_period"]; ok {
delete(connConfig.Config.RuntimeParams, "pool_health_check_period")
d, err := time.ParseDuration(s)
if err != nil {
return nil, fmt.Errorf("invalid pool_health_check_period: %w", err)
}
config.HealthCheckPeriod = d
} else {
config.HealthCheckPeriod = defaultHealthCheckPeriod
}
if s, ok := config.ConnConfig.Config.RuntimeParams["pool_max_conn_lifetime_jitter"]; ok {
delete(connConfig.Config.RuntimeParams, "pool_max_conn_lifetime_jitter")
d, err := time.ParseDuration(s)
if err != nil {
return nil, fmt.Errorf("invalid pool_max_conn_lifetime_jitter: %w", err)
}
config.MaxConnLifetimeJitter = d
}
return config, nil
}
// Close closes all connections in the pool and rejects future Acquire calls. Blocks until all connections are returned
// to pool and closed.
func (p *Pool) Close() {
p.closeOnce.Do(func() {
close(p.closeChan)
p.p.Close()
})
}
func (p *Pool) isExpired(res *puddle.Resource[*connResource]) bool {
return time.Now().After(res.Value().maxAgeTime)
}
func (p *Pool) triggerHealthCheck() {
go func() {
// Destroy is asynchronous so we give it time to actually remove itself from
// the pool otherwise we might try to check the pool size too soon
time.Sleep(500 * time.Millisecond)
select {
case p.healthCheckChan <- struct{}{}:
default:
}
}()
}
func (p *Pool) backgroundHealthCheck() {
ticker := time.NewTicker(p.healthCheckPeriod)
defer ticker.Stop()
for {
select {
case <-p.closeChan:
return
case <-p.healthCheckChan:
p.checkHealth()
case <-ticker.C:
p.checkHealth()
}
}
}
func (p *Pool) checkHealth() {
for {
// If checkMinConns failed we don't destroy any connections since we couldn't
// even get to minConns
if err := p.checkMinConns(); err != nil {
// Should we log this error somewhere?
break
}
if !p.checkConnsHealth() {
// Since we didn't destroy any connections we can stop looping
break
}
// Technically Destroy is asynchronous but 500ms should be enough for it to
// remove it from the underlying pool
select {
case <-p.closeChan:
return
case <-time.After(500 * time.Millisecond):
}
}
}
// checkConnsHealth will check all idle connections, destroy a connection if
// it's idle or too old, and returns true if any were destroyed
func (p *Pool) checkConnsHealth() bool {
var destroyed bool
totalConns := p.Stat().TotalConns()
resources := p.p.AcquireAllIdle()
for _, res := range resources {
// We're okay going under minConns if the lifetime is up
if p.isExpired(res) && totalConns >= p.minConns {
atomic.AddInt64(&p.lifetimeDestroyCount, 1)
res.Destroy()
destroyed = true
// Since Destroy is async we manually decrement totalConns.
totalConns--
} else if res.IdleDuration() > p.maxConnIdleTime && totalConns > p.minConns {
atomic.AddInt64(&p.idleDestroyCount, 1)
res.Destroy()
destroyed = true
// Since Destroy is async we manually decrement totalConns.
totalConns--
} else {
res.ReleaseUnused()
}
}
return destroyed
}
func (p *Pool) checkMinConns() error {
// TotalConns can include ones that are being destroyed but we should have
// sleep(500ms) around all of the destroys to help prevent that from throwing
// off this check
toCreate := p.minConns - p.Stat().TotalConns()
if toCreate > 0 {
return p.createIdleResources(context.Background(), int(toCreate))
}
return nil
}
func (p *Pool) createIdleResources(parentCtx context.Context, targetResources int) error {
ctx, cancel := context.WithCancel(parentCtx)
defer cancel()
errs := make(chan error, targetResources)
for i := 0; i < targetResources; i++ {
go func() {
err := p.p.CreateResource(ctx)
// Ignore ErrNotAvailable since it means that the pool has become full since we started creating resource.
if err == puddle.ErrNotAvailable {
err = nil
}
errs <- err
}()
}
var firstError error
for i := 0; i < targetResources; i++ {
err := <-errs
if err != nil && firstError == nil {
cancel()
firstError = err
}
}
return firstError
}
// Acquire returns a connection (*Conn) from the Pool
func (p *Pool) Acquire(ctx context.Context) (*Conn, error) {
for {
res, err := p.p.Acquire(ctx)
if err != nil {
return nil, err
}
cr := res.Value()
if res.IdleDuration() > time.Second {
err := cr.conn.Ping(ctx)
if err != nil {
res.Destroy()
continue
}
}
if p.beforeAcquire == nil || p.beforeAcquire(ctx, cr.conn) {
return cr.getConn(p, res), nil
}
res.Destroy()
}
}
// AcquireFunc acquires a *Conn and calls f with that *Conn. ctx will only affect the Acquire. It has no effect on the
// call of f. The return value is either an error acquiring the *Conn or the return value of f. The *Conn is
// automatically released after the call of f.
func (p *Pool) AcquireFunc(ctx context.Context, f func(*Conn) error) error {
conn, err := p.Acquire(ctx)
if err != nil {
return err
}
defer conn.Release()
return f(conn)
}
// AcquireAllIdle atomically acquires all currently idle connections. Its intended use is for health check and
// keep-alive functionality. It does not update pool statistics.
func (p *Pool) AcquireAllIdle(ctx context.Context) []*Conn {
resources := p.p.AcquireAllIdle()
conns := make([]*Conn, 0, len(resources))
for _, res := range resources {
cr := res.Value()
if p.beforeAcquire == nil || p.beforeAcquire(ctx, cr.conn) {
conns = append(conns, cr.getConn(p, res))
} else {
res.Destroy()
}
}
return conns
}
// Reset closes all connections, but leaves the pool open. It is intended for use when an error is detected that would
// disrupt all connections (such as a network interruption or a server state change).
//
// It is safe to reset a pool while connections are checked out. Those connections will be closed when they are returned
// to the pool.
func (p *Pool) Reset() {
p.p.Reset()
}
// Config returns a copy of config that was used to initialize this pool.
func (p *Pool) Config() *Config { return p.config.Copy() }
// Stat returns a pgxpool.Stat struct with a snapshot of Pool statistics.
func (p *Pool) Stat() *Stat {
return &Stat{
s: p.p.Stat(),
newConnsCount: atomic.LoadInt64(&p.newConnsCount),
lifetimeDestroyCount: atomic.LoadInt64(&p.lifetimeDestroyCount),
idleDestroyCount: atomic.LoadInt64(&p.idleDestroyCount),
}
}
// Exec acquires a connection from the Pool and executes the given SQL.
// SQL can be either a prepared statement name or an SQL string.
// Arguments should be referenced positionally from the SQL string as $1, $2, etc.
// The acquired connection is returned to the pool when the Exec function returns.
func (p *Pool) Exec(ctx context.Context, sql string, arguments ...any) (pgconn.CommandTag, error) {
c, err := p.Acquire(ctx)
if err != nil {
return pgconn.CommandTag{}, err
}
defer c.Release()
return c.Exec(ctx, sql, arguments...)
}
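
A short sketch of Exec with positional $1/$2 arguments, assuming a hypothetical tasks table and a DATABASE_URL environment variable; the acquired connection goes back to the pool as soon as Exec returns.

```go
package main

import (
	"context"
	"fmt"
	"log"
	"os"

	"github.com/jackc/pgx/v5/pgxpool"
)

func main() {
	ctx := context.Background()

	pool, err := pgxpool.New(ctx, os.Getenv("DATABASE_URL"))
	if err != nil {
		log.Fatal(err)
	}
	defer pool.Close()

	// Arguments are referenced positionally as $1, $2, ...
	tag, err := pool.Exec(ctx,
		"update tasks set done = $1 where id = $2", true, 42)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("rows affected:", tag.RowsAffected())
}
```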
// Query acquires a connection and executes a query that returns pgx.Rows.
// Arguments should be referenced positionally from the SQL string as $1, $2, etc.
// See pgx.Rows documentation to close the returned Rows and return the acquired connection to the Pool.
//
// If there is an error, the returned pgx.Rows will be returned in an error state.
// If preferred, ignore the error returned from Query and handle errors using the returned pgx.Rows.
//
// For extra control over how the query is executed, the types QueryExecMode, QueryResultFormats, and
// QueryResultFormatsByOID may be used as the first args to control exactly how the query is executed. This is rarely
// needed. See the documentation for those types for details.
func (p *Pool) Query(ctx context.Context, sql string, args ...any) (pgx.Rows, error) {
c, err := p.Acquire(ctx)
if err != nil {
return errRows{err: err}, err
}
rows, err := c.Query(ctx, sql, args...)
if err != nil {
c.Release()
return errRows{err: err}, err
}
return c.getPoolRows(rows), nil
}
// QueryRow acquires a connection and executes a query that is expected
// to return at most one row (pgx.Row). Errors are deferred until pgx.Row's
// Scan method is called. If the query selects no rows, pgx.Row's Scan will
// return ErrNoRows. Otherwise, pgx.Row's Scan scans the first selected row
// and discards the rest. The acquired connection is returned to the Pool when
// pgx.Row's Scan method is called.
//
// Arguments should be referenced positionally from the SQL string as $1, $2, etc.
//
// For extra control over how the query is executed, the types QueryExecMode, QueryResultFormats, and
// QueryResultFormatsByOID may be used as the first args to control exactly how the query is executed. This is rarely
// needed. See the documentation for those types for details.
func (p *Pool) QueryRow(ctx context.Context, sql string, args ...any) pgx.Row {
c, err := p.Acquire(ctx)
if err != nil {
return errRow{err: err}
}
row := c.QueryRow(ctx, sql, args...)
return c.getPoolRow(row)
}
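
A sketch covering both Query and QueryRow, assuming a hypothetical widgets table: iteration is followed by a rows.Err() check, and a missing row surfaces from Scan as pgx.ErrNoRows.

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"log"
	"os"

	"github.com/jackc/pgx/v5"
	"github.com/jackc/pgx/v5/pgxpool"
)

func main() {
	ctx := context.Background()

	pool, err := pgxpool.New(ctx, os.Getenv("DATABASE_URL"))
	if err != nil {
		log.Fatal(err)
	}
	defer pool.Close()

	// Query: iterate the rows, then check rows.Err() after Next returns false.
	rows, err := pool.Query(ctx, "select id, name from widgets where price > $1", 10)
	if err != nil {
		log.Fatal(err)
	}
	for rows.Next() {
		var id int64
		var name string
		if err := rows.Scan(&id, &name); err != nil {
			log.Fatal(err)
		}
		fmt.Println(id, name)
	}
	if err := rows.Err(); err != nil {
		log.Fatal(err)
	}

	// QueryRow: errors, including pgx.ErrNoRows, surface from Scan.
	var name string
	err = pool.QueryRow(ctx, "select name from widgets where id = $1", 1).Scan(&name)
	switch {
	case errors.Is(err, pgx.ErrNoRows):
		fmt.Println("no such widget")
	case err != nil:
		log.Fatal(err)
	default:
		fmt.Println("widget:", name)
	}
}
```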
func (p *Pool) SendBatch(ctx context.Context, b *pgx.Batch) pgx.BatchResults {
c, err := p.Acquire(ctx)
if err != nil {
return errBatchResults{err: err}
}
br := c.SendBatch(ctx, b)
return &poolBatchResults{br: br, c: c}
}
// Begin acquires a connection from the Pool and starts a transaction. Unlike database/sql, the context only affects the begin command; i.e., there is no
// auto-rollback on context cancellation. Begin initiates a transaction block without explicitly setting a transaction mode for the block (see BeginTx with TxOptions if a transaction mode is required).
// *pgxpool.Tx is returned, which implements the pgx.Tx interface.
// Commit or Rollback must be called on the returned transaction to finalize the transaction block.
func (p *Pool) Begin(ctx context.Context) (pgx.Tx, error) {
return p.BeginTx(ctx, pgx.TxOptions{})
}
// BeginTx acquires a connection from the Pool and starts a transaction with pgx.TxOptions determining the transaction mode.
// Unlike database/sql, the context only affects the begin command; i.e., there is no auto-rollback on context cancellation.
// *pgxpool.Tx is returned, which implements the pgx.Tx interface.
// Commit or Rollback must be called on the returned transaction to finalize the transaction block.
func (p *Pool) BeginTx(ctx context.Context, txOptions pgx.TxOptions) (pgx.Tx, error) {
c, err := p.Acquire(ctx)
if err != nil {
return nil, err
}
t, err := c.BeginTx(ctx, txOptions)
if err != nil {
c.Release()
return nil, err
}
return &Tx{t: t, c: c}, nil
}
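
A sketch of the usual transaction pattern on top of BeginTx, assuming a hypothetical accounts table; deferring Rollback is safe because calling it after a successful Commit only returns ErrTxClosed.

```go
package main

import (
	"context"
	"log"
	"os"

	"github.com/jackc/pgx/v5"
	"github.com/jackc/pgx/v5/pgxpool"
)

func transfer(ctx context.Context, pool *pgxpool.Pool, from, to, amount int64) error {
	tx, err := pool.BeginTx(ctx, pgx.TxOptions{IsoLevel: pgx.Serializable})
	if err != nil {
		return err
	}
	// Rollback after a successful Commit returns ErrTxClosed but is otherwise
	// harmless, so deferring it covers every early-return error path.
	defer tx.Rollback(ctx)

	if _, err := tx.Exec(ctx, "update accounts set balance = balance - $1 where id = $2", amount, from); err != nil {
		return err
	}
	if _, err := tx.Exec(ctx, "update accounts set balance = balance + $1 where id = $2", amount, to); err != nil {
		return err
	}
	return tx.Commit(ctx)
}

func main() {
	ctx := context.Background()

	pool, err := pgxpool.New(ctx, os.Getenv("DATABASE_URL"))
	if err != nil {
		log.Fatal(err)
	}
	defer pool.Close()

	if err := transfer(ctx, pool, 1, 2, 100); err != nil {
		log.Fatal(err)
	}
}
```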
func (p *Pool) CopyFrom(ctx context.Context, tableName pgx.Identifier, columnNames []string, rowSrc pgx.CopyFromSource) (int64, error) {
c, err := p.Acquire(ctx)
if err != nil {
return 0, err
}
defer c.Release()
return c.Conn().CopyFrom(ctx, tableName, columnNames, rowSrc)
}
// Ping acquires a connection from the Pool and executes an empty sql statement against it.
// If the sql returns without error, the database Ping is considered successful; otherwise the error is returned.
func (p *Pool) Ping(ctx context.Context) error {
c, err := p.Acquire(ctx)
if err != nil {
return err
}
defer c.Release()
return c.Ping(ctx)
}

116
vendor/github.com/jackc/pgx/v5/pgxpool/rows.go generated vendored Normal file
View file

@ -0,0 +1,116 @@
package pgxpool
import (
"github.com/jackc/pgx/v5"
"github.com/jackc/pgx/v5/pgconn"
)
type errRows struct {
err error
}
func (errRows) Close() {}
func (e errRows) Err() error { return e.err }
func (errRows) CommandTag() pgconn.CommandTag { return pgconn.CommandTag{} }
func (errRows) FieldDescriptions() []pgconn.FieldDescription { return nil }
func (errRows) Next() bool { return false }
func (e errRows) Scan(dest ...any) error { return e.err }
func (e errRows) Values() ([]any, error) { return nil, e.err }
func (e errRows) RawValues() [][]byte { return nil }
func (e errRows) Conn() *pgx.Conn { return nil }
type errRow struct {
err error
}
func (e errRow) Scan(dest ...any) error { return e.err }
type poolRows struct {
r pgx.Rows
c *Conn
err error
}
func (rows *poolRows) Close() {
rows.r.Close()
if rows.c != nil {
rows.c.Release()
rows.c = nil
}
}
func (rows *poolRows) Err() error {
if rows.err != nil {
return rows.err
}
return rows.r.Err()
}
func (rows *poolRows) CommandTag() pgconn.CommandTag {
return rows.r.CommandTag()
}
func (rows *poolRows) FieldDescriptions() []pgconn.FieldDescription {
return rows.r.FieldDescriptions()
}
func (rows *poolRows) Next() bool {
if rows.err != nil {
return false
}
n := rows.r.Next()
if !n {
rows.Close()
}
return n
}
func (rows *poolRows) Scan(dest ...any) error {
err := rows.r.Scan(dest...)
if err != nil {
rows.Close()
}
return err
}
func (rows *poolRows) Values() ([]any, error) {
values, err := rows.r.Values()
if err != nil {
rows.Close()
}
return values, err
}
func (rows *poolRows) RawValues() [][]byte {
return rows.r.RawValues()
}
func (rows *poolRows) Conn() *pgx.Conn {
return rows.r.Conn()
}
type poolRow struct {
r pgx.Row
c *Conn
err error
}
func (row *poolRow) Scan(dest ...any) error {
if row.err != nil {
return row.err
}
panicked := true
defer func() {
if panicked && row.c != nil {
row.c.Release()
}
}()
err := row.r.Scan(dest...)
panicked = false
if row.c != nil {
row.c.Release()
}
return err
}

84
vendor/github.com/jackc/pgx/v5/pgxpool/stat.go generated vendored Normal file
View file

@ -0,0 +1,84 @@
package pgxpool
import (
"time"
"github.com/jackc/puddle/v2"
)
// Stat is a snapshot of Pool statistics.
type Stat struct {
s *puddle.Stat
newConnsCount int64
lifetimeDestroyCount int64
idleDestroyCount int64
}
// AcquireCount returns the cumulative count of successful acquires from the pool.
func (s *Stat) AcquireCount() int64 {
return s.s.AcquireCount()
}
// AcquireDuration returns the total duration of all successful acquires from
// the pool.
func (s *Stat) AcquireDuration() time.Duration {
return s.s.AcquireDuration()
}
// AcquiredConns returns the number of currently acquired connections in the pool.
func (s *Stat) AcquiredConns() int32 {
return s.s.AcquiredResources()
}
// CanceledAcquireCount returns the cumulative count of acquires from the pool
// that were canceled by a context.
func (s *Stat) CanceledAcquireCount() int64 {
return s.s.CanceledAcquireCount()
}
// ConstructingConns returns the number of conns with construction in progress in
// the pool.
func (s *Stat) ConstructingConns() int32 {
return s.s.ConstructingResources()
}
// EmptyAcquireCount returns the cumulative count of successful acquires from the pool
// that waited for a resource to be released or constructed because the pool was
// empty.
func (s *Stat) EmptyAcquireCount() int64 {
return s.s.EmptyAcquireCount()
}
// IdleConns returns the number of currently idle conns in the pool.
func (s *Stat) IdleConns() int32 {
return s.s.IdleResources()
}
// MaxConns returns the maximum size of the pool.
func (s *Stat) MaxConns() int32 {
return s.s.MaxResources()
}
// TotalConns returns the total number of resources currently in the pool.
// The value is the sum of ConstructingConns, AcquiredConns, and
// IdleConns.
func (s *Stat) TotalConns() int32 {
return s.s.TotalResources()
}
// NewConnsCount returns the cumulative count of new connections opened.
func (s *Stat) NewConnsCount() int64 {
return s.newConnsCount
}
// MaxLifetimeDestroyCount returns the cumulative count of connections destroyed
// because they exceeded MaxConnLifetime.
func (s *Stat) MaxLifetimeDestroyCount() int64 {
return s.lifetimeDestroyCount
}
// MaxIdleDestroyCount returns the cumulative count of connections destroyed because
// they exceeded MaxConnIdleTime.
func (s *Stat) MaxIdleDestroyCount() int64 {
return s.idleDestroyCount
}
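
A sketch of reading a few of these counters on a timer; the package name and log format are assumptions, and in a real service the values would typically feed a metrics library instead.

```go
package poolmetrics

import (
	"context"
	"log"
	"time"

	"github.com/jackc/pgx/v5/pgxpool"
)

// logPoolStats logs a snapshot of pool statistics on a fixed interval.
func logPoolStats(ctx context.Context, pool *pgxpool.Pool, every time.Duration) {
	ticker := time.NewTicker(every)
	defer ticker.Stop()

	for {
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
			s := pool.Stat()
			log.Printf("pool: total=%d idle=%d acquired=%d constructing=%d empty_acquires=%d",
				s.TotalConns(), s.IdleConns(), s.AcquiredConns(),
				s.ConstructingConns(), s.EmptyAcquireCount())
		}
	}
}
```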

82
vendor/github.com/jackc/pgx/v5/pgxpool/tx.go generated vendored Normal file
View file

@ -0,0 +1,82 @@
package pgxpool
import (
"context"
"github.com/jackc/pgx/v5"
"github.com/jackc/pgx/v5/pgconn"
)
// Tx represents a database transaction acquired from a Pool.
type Tx struct {
t pgx.Tx
c *Conn
}
// Begin starts a pseudo nested transaction implemented with a savepoint.
func (tx *Tx) Begin(ctx context.Context) (pgx.Tx, error) {
return tx.t.Begin(ctx)
}
// Commit commits the transaction and returns the associated connection back to the Pool. Commit will return ErrTxClosed
// if the Tx is already closed, but is otherwise safe to call multiple times. If the commit fails with a rollback status
// (e.g. the transaction was already in a broken state) then ErrTxCommitRollback will be returned.
func (tx *Tx) Commit(ctx context.Context) error {
err := tx.t.Commit(ctx)
if tx.c != nil {
tx.c.Release()
tx.c = nil
}
return err
}
// Rollback rolls back the transaction and returns the associated connection back to the Pool. Rollback will return ErrTxClosed
// if the Tx is already closed, but is otherwise safe to call multiple times. Hence, defer tx.Rollback() is safe even if
// tx.Commit() will be called first in a non-error condition.
func (tx *Tx) Rollback(ctx context.Context) error {
err := tx.t.Rollback(ctx)
if tx.c != nil {
tx.c.Release()
tx.c = nil
}
return err
}
func (tx *Tx) CopyFrom(ctx context.Context, tableName pgx.Identifier, columnNames []string, rowSrc pgx.CopyFromSource) (int64, error) {
return tx.t.CopyFrom(ctx, tableName, columnNames, rowSrc)
}
func (tx *Tx) SendBatch(ctx context.Context, b *pgx.Batch) pgx.BatchResults {
return tx.t.SendBatch(ctx, b)
}
func (tx *Tx) LargeObjects() pgx.LargeObjects {
return tx.t.LargeObjects()
}
// Prepare creates a prepared statement with name and sql. If the name is empty,
// an anonymous prepared statement will be used. sql can contain placeholders
// for bound parameters. These placeholders are referenced positionally as $1, $2, etc.
//
// Prepare is idempotent; i.e. it is safe to call Prepare multiple times with the same
// name and sql arguments. This allows a code path to Prepare and Query/Exec without
// needing to first check whether the statement has already been prepared.
func (tx *Tx) Prepare(ctx context.Context, name, sql string) (*pgconn.StatementDescription, error) {
return tx.t.Prepare(ctx, name, sql)
}
func (tx *Tx) Exec(ctx context.Context, sql string, arguments ...any) (pgconn.CommandTag, error) {
return tx.t.Exec(ctx, sql, arguments...)
}
func (tx *Tx) Query(ctx context.Context, sql string, args ...any) (pgx.Rows, error) {
return tx.t.Query(ctx, sql, args...)
}
func (tx *Tx) QueryRow(ctx context.Context, sql string, args ...any) pgx.Row {
return tx.t.QueryRow(ctx, sql, args...)
}
func (tx *Tx) Conn() *pgx.Conn {
return tx.t.Conn()
}

View file

@ -8,7 +8,6 @@ import (
"strings"
"time"
"github.com/jackc/pgx/v5/internal/stmtcache"
"github.com/jackc/pgx/v5/pgconn"
"github.com/jackc/pgx/v5/pgtype"
)
@ -17,7 +16,8 @@ import (
// the *Conn can be used again. Rows are closed by explicitly calling Close(),
// calling Next() until it returns false, or when a fatal error occurs.
//
// Once a Rows is closed the only methods that may be called are Close(), Err(), and CommandTag().
// Once a Rows is closed the only methods that may be called are Close(), Err(),
// and CommandTag().
//
// Rows is an interface instead of a struct to allow tests to mock Query. However,
// adding a method to an interface is technically a breaking change. Because of this
@ -41,8 +41,15 @@ type Rows interface {
FieldDescriptions() []pgconn.FieldDescription
// Next prepares the next row for reading. It returns true if there is another
// row and false if no more rows are available. It automatically closes rows
// when all rows are read.
// row and false if no more rows are available or a fatal error has occurred.
// It automatically closes rows when all rows are read.
//
// Callers should check rows.Err() after rows.Next() returns false to detect
// whether result-set reading ended prematurely due to an error. See
// Conn.Query for details.
//
// For simpler error handling, consider using the higher-level pgx v5
// CollectRows() and ForEachRow() helpers instead.
Next() bool
// Scan reads the values from the current row into dest values positionally.
@ -166,14 +173,12 @@ func (rows *baseRows) Close() {
}
if rows.err != nil && rows.conn != nil && rows.sql != "" {
if stmtcache.IsStatementInvalid(rows.err) {
if sc := rows.conn.statementCache; sc != nil {
sc.Invalidate(rows.sql)
}
if sc := rows.conn.statementCache; sc != nil {
sc.Invalidate(rows.sql)
}
if sc := rows.conn.descriptionCache; sc != nil {
sc.Invalidate(rows.sql)
}
if sc := rows.conn.descriptionCache; sc != nil {
sc.Invalidate(rows.sql)
}
}
@ -457,6 +462,39 @@ func CollectOneRow[T any](rows Rows, fn RowToFunc[T]) (T, error) {
return value, rows.Err()
}
// CollectExactlyOneRow calls fn for the first row in rows and returns the result.
// - If no rows are found returns an error where errors.Is(ErrNoRows) is true.
// - If more than 1 row is found returns an error where errors.Is(ErrTooManyRows) is true.
func CollectExactlyOneRow[T any](rows Rows, fn RowToFunc[T]) (T, error) {
defer rows.Close()
var (
err error
value T
)
if !rows.Next() {
if err = rows.Err(); err != nil {
return value, err
}
return value, ErrNoRows
}
value, err = fn(rows)
if err != nil {
return value, err
}
if rows.Next() {
var zero T
return zero, ErrTooManyRows
}
return value, rows.Err()
}
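
A usage sketch for CollectExactlyOneRow combined with RowToStructByName, assuming a hypothetical users table; the two sentinel errors distinguish a missing row from a non-unique match.

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"log"
	"os"

	"github.com/jackc/pgx/v5"
	"github.com/jackc/pgx/v5/pgxpool"
)

type user struct {
	ID   int64  `db:"id"`
	Name string `db:"name"`
}

func main() {
	ctx := context.Background()

	pool, err := pgxpool.New(ctx, os.Getenv("DATABASE_URL"))
	if err != nil {
		log.Fatal(err)
	}
	defer pool.Close()

	rows, err := pool.Query(ctx, "select id, name from users where email = $1", "a@example.com")
	if err != nil {
		log.Fatal(err)
	}
	// CollectExactlyOneRow closes rows itself.
	u, err := pgx.CollectExactlyOneRow(rows, pgx.RowToStructByName[user])
	switch {
	case errors.Is(err, pgx.ErrNoRows):
		fmt.Println("no matching user")
	case errors.Is(err, pgx.ErrTooManyRows):
		fmt.Println("email is not unique")
	case err != nil:
		log.Fatal(err)
	default:
		fmt.Printf("found %+v\n", u)
	}
}
```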
// RowTo returns a T scanned from row.
func RowTo[T any](row CollectableRow) (T, error) {
var value T
@ -496,7 +534,7 @@ func (rs *mapRowScanner) ScanRow(rows Rows) error {
}
// RowToStructByPos returns a T scanned from row. T must be a struct. T must have the same number a public fields as row
// has fields. The row and T fields will by matched by position. If the "db" struct tag is "-" then the field will be
// has fields. The row and T fields will be matched by position. If the "db" struct tag is "-" then the field will be
// ignored.
func RowToStructByPos[T any](row CollectableRow) (T, error) {
var value T
@ -505,7 +543,7 @@ func RowToStructByPos[T any](row CollectableRow) (T, error) {
}
// RowToAddrOfStructByPos returns the address of a T scanned from row. T must be a struct. T must have the same number a
// public fields as row has fields. The row and T fields will by matched by position. If the "db" struct tag is "-" then
// public fields as row has fields. The row and T fields will be matched by position. If the "db" struct tag is "-" then
// the field will be ignored.
func RowToAddrOfStructByPos[T any](row CollectableRow) (*T, error) {
var value T
@ -560,7 +598,7 @@ func (rs *positionalStructRowScanner) appendScanTargets(dstElemValue reflect.Val
}
// RowToStructByName returns a T scanned from row. T must be a struct. T must have the same number of named public
// fields as row has fields. The row and T fields will by matched by name. The match is case-insensitive. The database
// fields as row has fields. The row and T fields will be matched by name. The match is case-insensitive. The database
// column name can be overridden with a "db" struct tag. If the "db" struct tag is "-" then the field will be ignored.
func RowToStructByName[T any](row CollectableRow) (T, error) {
var value T
@ -569,7 +607,7 @@ func RowToStructByName[T any](row CollectableRow) (T, error) {
}
// RowToAddrOfStructByName returns the address of a T scanned from row. T must be a struct. T must have the same number
// of named public fields as row has fields. The row and T fields will by matched by name. The match is
// of named public fields as row has fields. The row and T fields will be matched by name. The match is
// case-insensitive. The database column name can be overridden with a "db" struct tag. If the "db" struct tag is "-"
// then the field will be ignored.
func RowToAddrOfStructByName[T any](row CollectableRow) (*T, error) {
@ -579,7 +617,7 @@ func RowToAddrOfStructByName[T any](row CollectableRow) (*T, error) {
}
// RowToStructByNameLax returns a T scanned from row. T must be a struct. T must have greater than or equal number of named public
// fields as row has fields. The row and T fields will by matched by name. The match is case-insensitive. The database
// fields as row has fields. The row and T fields will be matched by name. The match is case-insensitive. The database
// column name can be overridden with a "db" struct tag. If the "db" struct tag is "-" then the field will be ignored.
func RowToStructByNameLax[T any](row CollectableRow) (T, error) {
var value T
@ -588,7 +626,7 @@ func RowToStructByNameLax[T any](row CollectableRow) (T, error) {
}
// RowToAddrOfStructByNameLax returns the address of a T scanned from row. T must be a struct. T must have greater than or
// equal number of named public fields as row has fields. The row and T fields will by matched by name. The match is
// equal number of named public fields as row has fields. The row and T fields will be matched by name. The match is
// case-insensitive. The database column name can be overridden with a "db" struct tag. If the "db" struct tag is "-"
// then the field will be ignored.
func RowToAddrOfStructByNameLax[T any](row CollectableRow) (*T, error) {
@ -650,7 +688,7 @@ func (rs *namedStructRowScanner) appendScanTargets(dstElemValue reflect.Value, s
// Field is unexported, skip it.
continue
}
// Handle anoymous struct embedding, but do not try to handle embedded pointers.
// Handle anonymous struct embedding, but do not try to handle embedded pointers.
if sf.Anonymous && sf.Type.Kind() == reflect.Struct {
scanTargets, err = rs.appendScanTargets(dstElemValue.Field(i), scanTargets, fldDescs)
if err != nil {
@ -659,7 +697,7 @@ func (rs *namedStructRowScanner) appendScanTargets(dstElemValue reflect.Value, s
} else {
dbTag, dbTagPresent := sf.Tag.Lookup(structTagKey)
if dbTagPresent {
dbTag = strings.Split(dbTag, ",")[0]
dbTag, _, _ = strings.Cut(dbTag, ",")
}
if dbTag == "-" {
// Field is ignored, skip it.

View file

@ -14,6 +14,18 @@
// return err
// }
//
// Or from a *pgxpool.Pool.
//
// pool, err := pgxpool.New(context.Background(), os.Getenv("DATABASE_URL"))
// if err != nil {
// return err
// }
//
// db := stdlib.OpenDBFromPool(pool)
//
// Or a pgx.ConnConfig can be used to set configuration not accessible via connection string. In this case the
// pgx.ConnConfig must first be registered with the driver. This registration returns a connection string which is used
// with sql.Open.
@ -74,6 +86,7 @@ import (
"github.com/jackc/pgx/v5"
"github.com/jackc/pgx/v5/pgconn"
"github.com/jackc/pgx/v5/pgtype"
"github.com/jackc/pgx/v5/pgxpool"
)
// Only intrinsic types should be binary format with database/sql.
@ -125,14 +138,14 @@ func contains(list []string, y string) bool {
type OptionOpenDB func(*connector)
// OptionBeforeConnect provides a callback for before connect. It is passed a shallow copy of the ConnConfig that will
// be used to connect, so only its immediate members should be modified.
// be used to connect, so only its immediate members should be modified. Used only if db is opened with *pgx.ConnConfig.
func OptionBeforeConnect(bc func(context.Context, *pgx.ConnConfig) error) OptionOpenDB {
return func(dc *connector) {
dc.BeforeConnect = bc
}
}
// OptionAfterConnect provides a callback for after connect.
// OptionAfterConnect provides a callback for after connect. Used only if db is opened with *pgx.ConnConfig.
func OptionAfterConnect(ac func(context.Context, *pgx.Conn) error) OptionOpenDB {
return func(dc *connector) {
dc.AfterConnect = ac
@ -191,13 +204,42 @@ func GetConnector(config pgx.ConnConfig, opts ...OptionOpenDB) driver.Connector
return c
}
// GetPoolConnector creates a new driver.Connector from the given *pgxpool.Pool. When using this, be sure to set the
// maximum idle connections of the *sql.DB created with this connector to zero, since they must be managed from the
// *pgxpool.Pool. This is required to avoid acquiring all the connections from the pgxpool and starving any direct
// users of the pgxpool.
func GetPoolConnector(pool *pgxpool.Pool, opts ...OptionOpenDB) driver.Connector {
c := connector{
pool: pool,
ResetSession: func(context.Context, *pgx.Conn) error { return nil }, // noop reset session by default
driver: pgxDriver,
}
for _, opt := range opts {
opt(&c)
}
return c
}
func OpenDB(config pgx.ConnConfig, opts ...OptionOpenDB) *sql.DB {
c := GetConnector(config, opts...)
return sql.OpenDB(c)
}
// OpenDBFromPool creates a new *sql.DB from the given *pgxpool.Pool. Note that this method automatically sets the
// maximum number of idle connections in *sql.DB to zero, since they must be managed from the *pgxpool.Pool. This is
// required to avoid acquiring all the connections from the pgxpool and starving any direct users of the pgxpool.
func OpenDBFromPool(pool *pgxpool.Pool, opts ...OptionOpenDB) *sql.DB {
c := GetPoolConnector(pool, opts...)
db := sql.OpenDB(c)
db.SetMaxIdleConns(0)
return db
}
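
A usage sketch for OpenDBFromPool, assuming a DATABASE_URL environment variable: a single *pgxpool.Pool backs both the database/sql handle and direct pgx calls.

```go
package main

import (
	"context"
	"fmt"
	"log"
	"os"

	"github.com/jackc/pgx/v5/pgxpool"
	"github.com/jackc/pgx/v5/stdlib"
)

func main() {
	ctx := context.Background()

	pool, err := pgxpool.New(ctx, os.Getenv("DATABASE_URL"))
	if err != nil {
		log.Fatal(err)
	}
	defer pool.Close()

	// db borrows its connections from pool; OpenDBFromPool already sets
	// MaxIdleConns to 0 so idle connections stay under pgxpool's control.
	db := stdlib.OpenDBFromPool(pool)
	defer db.Close()

	var one int
	if err := db.QueryRowContext(ctx, "select 1").Scan(&one); err != nil {
		log.Fatal(err)
	}
	fmt.Println("database/sql over pgxpool:", one)

	// The same pool remains usable through the native pgx interface.
	var version string
	if err := pool.QueryRow(ctx, "select version()").Scan(&version); err != nil {
		log.Fatal(err)
	}
	fmt.Println("pgx over pgxpool:", version)
}
```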
type connector struct {
pgx.ConnConfig
pool *pgxpool.Pool
BeforeConnect func(context.Context, *pgx.ConnConfig) error // function to call before creation of every new connection
AfterConnect func(context.Context, *pgx.Conn) error // function to call after creation of every new connection
ResetSession func(context.Context, *pgx.Conn) error // function is called before a connection is reused
@ -207,25 +249,53 @@ type connector struct {
// Connect implement driver.Connector interface
func (c connector) Connect(ctx context.Context) (driver.Conn, error) {
var (
err error
conn *pgx.Conn
connConfig pgx.ConnConfig
conn *pgx.Conn
close func(context.Context) error
err error
)
// Create a shallow copy of the config, so that BeforeConnect can safely modify it
connConfig := c.ConnConfig
if err = c.BeforeConnect(ctx, &connConfig); err != nil {
return nil, err
if c.pool == nil {
// Create a shallow copy of the config, so that BeforeConnect can safely modify it
connConfig = c.ConnConfig
if err = c.BeforeConnect(ctx, &connConfig); err != nil {
return nil, err
}
if conn, err = pgx.ConnectConfig(ctx, &connConfig); err != nil {
return nil, err
}
if err = c.AfterConnect(ctx, conn); err != nil {
return nil, err
}
close = conn.Close
} else {
var pconn *pgxpool.Conn
pconn, err = c.pool.Acquire(ctx)
if err != nil {
return nil, err
}
conn = pconn.Conn()
close = func(_ context.Context) error {
pconn.Release()
return nil
}
}
if conn, err = pgx.ConnectConfig(ctx, &connConfig); err != nil {
return nil, err
}
if err = c.AfterConnect(ctx, conn); err != nil {
return nil, err
}
return &Conn{conn: conn, driver: c.driver, connConfig: connConfig, resetSessionFunc: c.ResetSession}, nil
return &Conn{
conn: conn,
close: close,
driver: c.driver,
connConfig: connConfig,
resetSessionFunc: c.ResetSession,
psRefCounts: make(map[*pgconn.StatementDescription]int),
}, nil
}
// Driver implement driver.Connector interface
@ -302,9 +372,11 @@ func (dc *driverConnector) Connect(ctx context.Context) (driver.Conn, error) {
c := &Conn{
conn: conn,
close: conn.Close,
driver: dc.driver,
connConfig: *connConfig,
resetSessionFunc: func(context.Context, *pgx.Conn) error { return nil },
psRefCounts: make(map[*pgconn.StatementDescription]int),
}
return c, nil
@ -326,11 +398,19 @@ func UnregisterConnConfig(connStr string) {
type Conn struct {
conn *pgx.Conn
psCount int64 // Counter used for creating unique prepared statement names
close func(context.Context) error
driver *Driver
connConfig pgx.ConnConfig
resetSessionFunc func(context.Context, *pgx.Conn) error // Function is called before a connection is reused
lastResetSessionTime time.Time
// psRefCounts contains reference counts for prepared statements. Prepare uses the underlying pgx logic to generate
// deterministic statement names from the statement text. If this query has already been prepared then the existing
// *pgconn.StatementDescription will be returned. However, this means that if Close is called on the returned Stmt
// then the underlying prepared statement will be closed even when the underlying prepared statement is still in use
// by another database/sql Stmt. To prevent this psRefCounts keeps track of how many database/sql statements are using
// the same underlying statement and only closes the underlying statement when the reference count reaches 0.
psRefCounts map[*pgconn.StatementDescription]int
}
// Conn returns the underlying *pgx.Conn
@ -347,13 +427,11 @@ func (c *Conn) PrepareContext(ctx context.Context, query string) (driver.Stmt, e
return nil, driver.ErrBadConn
}
name := fmt.Sprintf("pgx_%d", c.psCount)
c.psCount++
sd, err := c.conn.Prepare(ctx, name, query)
sd, err := c.conn.Prepare(ctx, query, query)
if err != nil {
return nil, err
}
c.psRefCounts[sd]++
return &Stmt{sd: sd, conn: c}, nil
}
@ -361,7 +439,7 @@ func (c *Conn) PrepareContext(ctx context.Context, query string) (driver.Stmt, e
func (c *Conn) Close() error {
ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
defer cancel()
return c.conn.Close(ctx)
return c.close(ctx)
}
func (c *Conn) Begin() (driver.Tx, error) {
@ -470,7 +548,7 @@ func (c *Conn) ResetSession(ctx context.Context) error {
now := time.Now()
if now.Sub(c.lastResetSessionTime) > time.Second {
if err := c.conn.PgConn().CheckConn(); err != nil {
if err := c.conn.PgConn().Ping(ctx); err != nil {
return driver.ErrBadConn
}
}
@ -487,7 +565,16 @@ type Stmt struct {
func (s *Stmt) Close() error {
ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
defer cancel()
return s.conn.conn.Deallocate(ctx, s.sd.Name)
refCount := s.conn.psRefCounts[s.sd]
if refCount == 1 {
delete(s.conn.psRefCounts, s.sd)
} else {
s.conn.psRefCounts[s.sd]--
return nil
}
return s.conn.conn.Deallocate(ctx, s.sd.SQL)
}
func (s *Stmt) NumInput() int {
@ -499,7 +586,7 @@ func (s *Stmt) Exec(argsV []driver.Value) (driver.Result, error) {
}
func (s *Stmt) ExecContext(ctx context.Context, argsV []driver.NamedValue) (driver.Result, error) {
return s.conn.ExecContext(ctx, s.sd.Name, argsV)
return s.conn.ExecContext(ctx, s.sd.SQL, argsV)
}
func (s *Stmt) Query(argsV []driver.Value) (driver.Rows, error) {
@ -507,7 +594,7 @@ func (s *Stmt) Query(argsV []driver.Value) (driver.Rows, error) {
}
func (s *Stmt) QueryContext(ctx context.Context, argsV []driver.NamedValue) (driver.Rows, error) {
return s.conn.QueryContext(ctx, s.sd.Name, argsV)
return s.conn.QueryContext(ctx, s.sd.SQL, argsV)
}
type rowValueFunc func(src []byte) (driver.Value, error)

74
vendor/github.com/jackc/puddle/v2/CHANGELOG.md generated vendored Normal file
View file

@ -0,0 +1,74 @@
# 2.2.1 (July 15, 2023)
* Fix: CreateResource cannot overflow pool. This changes documented behavior of CreateResource. Previously,
CreateResource could create a resource even if the pool was full. This could cause the pool to overflow. While this
was documented, it was documenting incorrect behavior. CreateResource now returns an error if the pool is full.
# 2.2.0 (February 11, 2023)
* Use Go 1.19 atomics and drop go.uber.org/atomic dependency
# 2.1.2 (November 12, 2022)
* Restore support to Go 1.18 via go.uber.org/atomic
# 2.1.1 (November 11, 2022)
* Fix create resource concurrently with Stat call race
# 2.1.0 (October 28, 2022)
* Concurrency control is now implemented with a semaphore. This simplifies some internal logic, resolves a few error conditions (including a deadlock), and improves performance. (Jan Dubsky)
* Go 1.19 is now required for the improved atomic support.
# 2.0.1 (October 28, 2022)
* Fix race condition when Close is called concurrently with multiple constructors
# 2.0.0 (September 17, 2022)
* Use generics instead of interface{} (Столяров Владимир Алексеевич)
* Add Reset
* Do not cancel resource construction when Acquire is canceled
* NewPool takes Config
# 1.3.0 (August 27, 2022)
* Acquire creates resources in background to allow creation to continue after Acquire is canceled (James Hartig)
# 1.2.1 (December 2, 2021)
* TryAcquire now does not block when background constructing resource
# 1.2.0 (November 20, 2021)
* Add TryAcquire (A. Jensen)
* Fix: remove memory leak / unintentionally pinned memory when shrinking slices (Alexander Staubo)
* Fix: Do not leave pool locked after panic from nil context
# 1.1.4 (September 11, 2021)
* Fix: Deadlock in CreateResource if pool was closed during resource acquisition (Dmitriy Matrenichev)
# 1.1.3 (December 3, 2020)
* Fix: Failed resource creation could cause concurrent Acquire to hang. (Evgeny Vanslov)
# 1.1.2 (September 26, 2020)
* Fix: Resource.Destroy no longer removes itself from the pool before its destructor has completed.
* Fix: Prevent crash when pool is closed while resource is being created.
# 1.1.1 (April 2, 2020)
* Pool.Close can be safely called multiple times
* AcquireAllIdle immediately returns nil if pool is closed
* CreateResource checks if pool is closed before taking any action
* Fix potential race condition when CreateResource and Close are called concurrently. CreateResource now checks if pool is closed before adding newly created resource to pool.
# 1.1.0 (February 5, 2020)
* Use runtime.nanotime for faster tracking of acquire time and last usage time.
* Track resource idle time to enable client health check logic. (Patrick Ellul)
* Add CreateResource to construct a new resource without acquiring it. (Patrick Ellul)
* Fix deadlock race when acquire is cancelled. (Michael Tharp)

22
vendor/github.com/jackc/puddle/v2/LICENSE generated vendored Normal file
View file

@ -0,0 +1,22 @@
Copyright (c) 2018 Jack Christensen
MIT License
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

80
vendor/github.com/jackc/puddle/v2/README.md generated vendored Normal file
View file

@ -0,0 +1,80 @@
[![](https://godoc.org/github.com/jackc/puddle?status.svg)](https://godoc.org/github.com/jackc/puddle)
![Build Status](https://github.com/jackc/puddle/actions/workflows/ci.yml/badge.svg)
# Puddle
Puddle is a tiny generic resource pool library for Go that uses the standard
context library to signal cancellation of acquires. It is designed to contain
the minimum functionality required for a resource pool. It can be used directly
or it can be used as the base for a domain specific resource pool. For example,
a database connection pool may use puddle internally and implement health checks
and keep-alive behavior without needing to implement any concurrent code of its
own.
## Features
* Acquire cancellation via context standard library
* Statistics API for monitoring pool pressure
* No dependencies outside of standard library and golang.org/x/sync
* High performance
* 100% test coverage of reachable code
## Example Usage
```go
package main
import (
"context"
"log"
"net"
"github.com/jackc/puddle/v2"
)
func main() {
constructor := func(context.Context) (net.Conn, error) {
return net.Dial("tcp", "127.0.0.1:8080")
}
destructor := func(value net.Conn) {
value.Close()
}
maxPoolSize := int32(10)
pool, err := puddle.NewPool(&puddle.Config[net.Conn]{Constructor: constructor, Destructor: destructor, MaxSize: maxPoolSize})
if err != nil {
log.Fatal(err)
}
// Acquire resource from the pool.
res, err := pool.Acquire(context.Background())
if err != nil {
log.Fatal(err)
}
// Use resource.
_, err = res.Value().Write([]byte{1})
if err != nil {
log.Fatal(err)
}
// Release when done.
res.Release()
}
```
## Status
Puddle is stable and feature complete.
* Bug reports and fixes are welcome.
* New features will usually not be accepted if they can be feasibly implemented in a wrapper.
* Performance optimizations will usually not be accepted unless the performance issue rises to the level of a bug.
## Supported Go Versions
puddle supports the same versions of Go that are supported by the Go project. For [Go](https://golang.org/doc/devel/release.html#policy) that is the two most recent major releases. This means puddle supports Go 1.19 and higher.
## License
MIT

24
vendor/github.com/jackc/puddle/v2/context.go generated vendored Normal file
View file

@ -0,0 +1,24 @@
package puddle
import (
"context"
"time"
)
// valueCancelCtx combines two contexts into one. One context is used for values and the other is used for cancellation.
type valueCancelCtx struct {
valueCtx context.Context
cancelCtx context.Context
}
func (ctx *valueCancelCtx) Deadline() (time.Time, bool) { return ctx.cancelCtx.Deadline() }
func (ctx *valueCancelCtx) Done() <-chan struct{} { return ctx.cancelCtx.Done() }
func (ctx *valueCancelCtx) Err() error { return ctx.cancelCtx.Err() }
func (ctx *valueCancelCtx) Value(key any) any { return ctx.valueCtx.Value(key) }
func newValueCancelCtx(valueCtx, cancelContext context.Context) context.Context {
return &valueCancelCtx{
valueCtx: valueCtx,
cancelCtx: cancelContext,
}
}

11
vendor/github.com/jackc/puddle/v2/doc.go generated vendored Normal file
View file

@ -0,0 +1,11 @@
// Package puddle is a generic resource pool with a type-parametrized API.
/*
Puddle is a tiny generic resource pool library for Go that uses the standard
context library to signal cancellation of acquires. It is designed to contain
the minimum functionality a resource pool needs that cannot be implemented
without concurrency concerns. For example, a database connection pool may use
puddle internally and implement health checks and keep-alive behavior without
needing to implement any concurrent code of its own.
*/
package puddle

View file

@ -0,0 +1,85 @@
package genstack
// GenStack implements a generational stack.
//
// GenStack works as common stack except for the fact that all elements in the
// older generation are guaranteed to be popped before any element in the newer
// generation. New elements are always pushed to the current (newest)
// generation.
//
// We could also say that GenStack behaves as a stack in case of a single
// generation, but it behaves as a queue of individual generation stacks.
type GenStack[T any] struct {
// We can represent arbitrary number of generations using 2 stacks. The
// new stack stores all new pushes and the old stack serves all reads.
// Old stack can represent multiple generations. If old == new, then all
// elements pushed in previous (not current) generations have already
// been popped.
old *stack[T]
new *stack[T]
}
// NewGenStack creates a new empty GenStack.
func NewGenStack[T any]() *GenStack[T] {
s := &stack[T]{}
return &GenStack[T]{
old: s,
new: s,
}
}
func (s *GenStack[T]) Pop() (T, bool) {
// Pushes always append to the new stack, so if the old one becomes
// empty, it will remain empty forever.
if s.old.len() == 0 && s.old != s.new {
s.old = s.new
}
if s.old.len() == 0 {
var zero T
return zero, false
}
return s.old.pop(), true
}
// Push pushes a new element at the top of the stack.
func (s *GenStack[T]) Push(v T) { s.new.push(v) }
// NextGen starts a new stack generation.
func (s *GenStack[T]) NextGen() {
if s.old == s.new {
s.new = &stack[T]{}
return
}
// We need to pop from the old stack to the top of the new stack. Let's
// have an example:
//
// Old: <bottom> 4 3 2 1
// New: <bottom> 8 7 6 5
// PopOrder: 1 2 3 4 5 6 7 8
//
//
// To preserve pop order, we have to take all elements from the old
// stack and push them to the top of new stack:
//
// New: 8 7 6 5 4 3 2 1
//
s.new.push(s.old.takeAll()...)
// We have the old stack allocated and empty, so why not reuse it as the
// new new stack.
s.old, s.new = s.new, s.old
}
// Len returns number of elements in the stack.
func (s *GenStack[T]) Len() int {
l := s.old.len()
if s.old != s.new {
l += s.new.len()
}
return l
}
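
A test sketch, written as if it lived inside this internal genstack package, pinning down the pop order described above: the older generation drains completely before the newer one, and each generation itself pops LIFO.

```go
package genstack

import (
	"reflect"
	"testing"
)

// TestPopOrder checks that both elements pushed before NextGen come out before
// anything pushed after it, with LIFO order within each generation.
func TestPopOrder(t *testing.T) {
	s := NewGenStack[int]()
	s.Push(1)
	s.Push(2)
	s.NextGen()
	s.Push(3)
	s.Push(4)

	var got []int
	for {
		v, ok := s.Pop()
		if !ok {
			break
		}
		got = append(got, v)
	}

	want := []int{2, 1, 4, 3}
	if !reflect.DeepEqual(got, want) {
		t.Fatalf("pop order = %v, want %v", got, want)
	}
}
```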

View file

@ -0,0 +1,39 @@
package genstack
// stack is a wrapper around an array implementing a stack.
//
// We cannot use a slice to represent the stack because append might change the
// pointer value of the slice. That would be an issue in the GenStack
// implementation.
type stack[T any] struct {
arr []T
}
// push pushes a new element at the top of a stack.
func (s *stack[T]) push(vs ...T) { s.arr = append(s.arr, vs...) }
// pop pops the stack top-most element.
//
// If stack length is zero, this method panics.
func (s *stack[T]) pop() T {
idx := s.len() - 1
val := s.arr[idx]
// Avoid memory leak
var zero T
s.arr[idx] = zero
s.arr = s.arr[:idx]
return val
}
// takeAll returns all elements in the stack in order as they are stored - i.e.
// the top-most stack element is the last one.
func (s *stack[T]) takeAll() []T {
arr := s.arr
s.arr = nil
return arr
}
// len returns number of elements in the stack.
func (s *stack[T]) len() int { return len(s.arr) }

32
vendor/github.com/jackc/puddle/v2/log.go generated vendored Normal file
View file

@ -0,0 +1,32 @@
package puddle
import "unsafe"
type ints interface {
int | int8 | int16 | int32 | int64 | uint | uint8 | uint16 | uint32 | uint64
}
// log2Int returns the floor of log2 of an integer. This function panics if
// val <= 0.
func log2Int[T ints](val T) uint8 {
if val <= 0 {
panic("log2 of non-positive number does not exist")
}
return log2IntRange(val, 0, uint8(8*unsafe.Sizeof(val)))
}
func log2IntRange[T ints](val T, begin, end uint8) uint8 {
length := end - begin
if length == 1 {
return begin
}
delim := begin + length/2
mask := T(1) << delim
if mask > val {
return log2IntRange(val, begin, delim)
} else {
return log2IntRange(val, delim, end)
}
}
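
A test sketch, written as if it lived in the puddle package, pinning down a few values: the result is the floor of log2, so 15 maps to 3 while 16 through 31 map to 4.

```go
package puddle

import "testing"

// TestLog2Int checks a handful of inputs against the expected floor(log2(val)).
func TestLog2Int(t *testing.T) {
	cases := map[int]uint8{1: 0, 2: 1, 3: 1, 15: 3, 16: 4, 19: 4, 31: 4, 32: 5}
	for in, want := range cases {
		if got := log2Int(in); got != want {
			t.Errorf("log2Int(%d) = %d, want %d", in, got, want)
		}
	}
}
```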

13
vendor/github.com/jackc/puddle/v2/nanotime_time.go generated vendored Normal file
View file

@ -0,0 +1,13 @@
//go:build purego || appengine || js
// This file contains the safe implementation of nanotime using time.Now().
package puddle
import (
"time"
)
func nanotime() int64 {
return time.Now().UnixNano()
}

12
vendor/github.com/jackc/puddle/v2/nanotime_unsafe.go generated vendored Normal file
View file

@ -0,0 +1,12 @@
//go:build !purego && !appengine && !js
// This file contains the implementation of nanotime using runtime.nanotime.
package puddle
import "unsafe"
var _ = unsafe.Sizeof(0)
//go:linkname nanotime runtime.nanotime
func nanotime() int64

696
vendor/github.com/jackc/puddle/v2/pool.go generated vendored Normal file
View file

@ -0,0 +1,696 @@
package puddle
import (
"context"
"errors"
"sync"
"sync/atomic"
"time"
"github.com/jackc/puddle/v2/internal/genstack"
"golang.org/x/sync/semaphore"
)
const (
resourceStatusConstructing = 0
resourceStatusIdle = iota
resourceStatusAcquired = iota
resourceStatusHijacked = iota
)
// ErrClosedPool occurs on an attempt to acquire a connection from a closed pool
// or a pool that is closed while the acquire is waiting.
var ErrClosedPool = errors.New("closed pool")
// ErrNotAvailable occurs on an attempt to acquire a resource from a pool
// that is at maximum capacity and has no available resources.
var ErrNotAvailable = errors.New("resource not available")
// Constructor is a function called by the pool to construct a resource.
type Constructor[T any] func(ctx context.Context) (res T, err error)
// Destructor is a function called by the pool to destroy a resource.
type Destructor[T any] func(res T)
// Resource is the resource handle returned by acquiring from the pool.
type Resource[T any] struct {
value T
pool *Pool[T]
creationTime time.Time
lastUsedNano int64
poolResetCount int
status byte
}
// Value returns the resource value.
func (res *Resource[T]) Value() T {
if !(res.status == resourceStatusAcquired || res.status == resourceStatusHijacked) {
panic("tried to access resource that is not acquired or hijacked")
}
return res.value
}
// Release returns the resource to the pool. res must not be subsequently used.
func (res *Resource[T]) Release() {
if res.status != resourceStatusAcquired {
panic("tried to release resource that is not acquired")
}
res.pool.releaseAcquiredResource(res, nanotime())
}
// ReleaseUnused returns the resource to the pool without updating when it was last used, i.e. LastUsedNanotime
// will not change. res must not be subsequently used.
func (res *Resource[T]) ReleaseUnused() {
if res.status != resourceStatusAcquired {
panic("tried to release resource that is not acquired")
}
res.pool.releaseAcquiredResource(res, res.lastUsedNano)
}
// Destroy returns the resource to the pool for destruction. res must not be
// subsequently used.
func (res *Resource[T]) Destroy() {
if res.status != resourceStatusAcquired {
panic("tried to destroy resource that is not acquired")
}
go res.pool.destroyAcquiredResource(res)
}
// Hijack assumes ownership of the resource from the pool. Caller is responsible
// for cleanup of resource value.
func (res *Resource[T]) Hijack() {
if res.status != resourceStatusAcquired {
panic("tried to hijack resource that is not acquired")
}
res.pool.hijackAcquiredResource(res)
}
// CreationTime returns when the resource was created by the pool.
func (res *Resource[T]) CreationTime() time.Time {
if !(res.status == resourceStatusAcquired || res.status == resourceStatusHijacked) {
panic("tried to access resource that is not acquired or hijacked")
}
return res.creationTime
}
// LastUsedNanotime returns when Release was last called on the resource measured in nanoseconds from an arbitrary time
// (a monotonic time). Returns creation time if Release has never been called. This is only useful to compare with
// other calls to LastUsedNanotime. In almost all cases, IdleDuration should be used instead.
func (res *Resource[T]) LastUsedNanotime() int64 {
if !(res.status == resourceStatusAcquired || res.status == resourceStatusHijacked) {
panic("tried to access resource that is not acquired or hijacked")
}
return res.lastUsedNano
}
// IdleDuration returns the duration since Release was last called on the resource. This is equivalent to subtracting
// LastUsedNanotime from the current nanotime.
func (res *Resource[T]) IdleDuration() time.Duration {
if !(res.status == resourceStatusAcquired || res.status == resourceStatusHijacked) {
panic("tried to access resource that is not acquired or hijacked")
}
return time.Duration(nanotime() - res.lastUsedNano)
}
// Pool is a concurrency-safe resource pool.
type Pool[T any] struct {
// mux is the pool internal lock. Any modification of shared state of
// the pool (except Acquires of acquireSem) must be performed only by
// the holder of the lock. Long running operations are not allowed when mux
// is held.
mux sync.Mutex
// acquireSem provides an allowance to acquire a resource.
//
// Releases are allowed only when caller holds mux. Acquires have to
// happen before mux is locked (doesn't apply to semaphore.TryAcquire in
// AcquireAllIdle).
acquireSem *semaphore.Weighted
destructWG sync.WaitGroup
allResources resList[T]
idleResources *genstack.GenStack[*Resource[T]]
constructor Constructor[T]
destructor Destructor[T]
maxSize int32
acquireCount int64
acquireDuration time.Duration
emptyAcquireCount int64
canceledAcquireCount atomic.Int64
resetCount int
baseAcquireCtx context.Context
cancelBaseAcquireCtx context.CancelFunc
closed bool
}
type Config[T any] struct {
Constructor Constructor[T]
Destructor Destructor[T]
MaxSize int32
}
// NewPool creates a new pool. Returns an error if config.MaxSize is less than 1.
func NewPool[T any](config *Config[T]) (*Pool[T], error) {
if config.MaxSize < 1 {
return nil, errors.New("MaxSize must be >= 1")
}
baseAcquireCtx, cancelBaseAcquireCtx := context.WithCancel(context.Background())
return &Pool[T]{
acquireSem: semaphore.NewWeighted(int64(config.MaxSize)),
idleResources: genstack.NewGenStack[*Resource[T]](),
maxSize: config.MaxSize,
constructor: config.Constructor,
destructor: config.Destructor,
baseAcquireCtx: baseAcquireCtx,
cancelBaseAcquireCtx: cancelBaseAcquireCtx,
}, nil
}
// Close destroys all resources in the pool and rejects future Acquire calls.
// Blocks until all resources are returned to the pool and destroyed.
func (p *Pool[T]) Close() {
defer p.destructWG.Wait()
p.mux.Lock()
defer p.mux.Unlock()
if p.closed {
return
}
p.closed = true
p.cancelBaseAcquireCtx()
for res, ok := p.idleResources.Pop(); ok; res, ok = p.idleResources.Pop() {
p.allResources.remove(res)
go p.destructResourceValue(res.value)
}
}
// Stat is a snapshot of Pool statistics.
type Stat struct {
constructingResources int32
acquiredResources int32
idleResources int32
maxResources int32
acquireCount int64
acquireDuration time.Duration
emptyAcquireCount int64
canceledAcquireCount int64
}
// TotalResources returns the total number of resources currently in the pool.
// The value is the sum of ConstructingResources, AcquiredResources, and
// IdleResources.
func (s *Stat) TotalResources() int32 {
return s.constructingResources + s.acquiredResources + s.idleResources
}
// ConstructingResources returns the number of resources with construction in progress in
// the pool.
func (s *Stat) ConstructingResources() int32 {
return s.constructingResources
}
// AcquiredResources returns the number of currently acquired resources in the pool.
func (s *Stat) AcquiredResources() int32 {
return s.acquiredResources
}
// IdleResources returns the number of currently idle resources in the pool.
func (s *Stat) IdleResources() int32 {
return s.idleResources
}
// MaxResources returns the maximum size of the pool.
func (s *Stat) MaxResources() int32 {
return s.maxResources
}
// AcquireCount returns the cumulative count of successful acquires from the pool.
func (s *Stat) AcquireCount() int64 {
return s.acquireCount
}
// AcquireDuration returns the total duration of all successful acquires from
// the pool.
func (s *Stat) AcquireDuration() time.Duration {
return s.acquireDuration
}
// EmptyAcquireCount returns the cumulative count of successful acquires from the pool
// that waited for a resource to be released or constructed because the pool was
// empty.
func (s *Stat) EmptyAcquireCount() int64 {
return s.emptyAcquireCount
}
// CanceledAcquireCount returns the cumulative count of acquires from the pool
// that were canceled by a context.
func (s *Stat) CanceledAcquireCount() int64 {
return s.canceledAcquireCount
}
// Stat returns the current pool statistics.
func (p *Pool[T]) Stat() *Stat {
p.mux.Lock()
defer p.mux.Unlock()
s := &Stat{
maxResources: p.maxSize,
acquireCount: p.acquireCount,
emptyAcquireCount: p.emptyAcquireCount,
canceledAcquireCount: p.canceledAcquireCount.Load(),
acquireDuration: p.acquireDuration,
}
for _, res := range p.allResources {
switch res.status {
case resourceStatusConstructing:
s.constructingResources += 1
case resourceStatusIdle:
s.idleResources += 1
case resourceStatusAcquired:
s.acquiredResources += 1
}
}
return s
}
// tryAcquireIdleResource checks if there is any idle resource. If there is
// one, this method removes it from the idle list and returns it. If the idle pool
// is empty, this method returns nil and doesn't modify the idleResources slice.
//
// WARNING: Caller of this method must hold the pool mutex!
func (p *Pool[T]) tryAcquireIdleResource() *Resource[T] {
res, ok := p.idleResources.Pop()
if !ok {
return nil
}
res.status = resourceStatusAcquired
return res
}
// createNewResource creates a new resource and inserts it into list of pool
// resources.
//
// WARNING: Caller of this method must hold the pool mutex!
func (p *Pool[T]) createNewResource() *Resource[T] {
res := &Resource[T]{
pool: p,
creationTime: time.Now(),
lastUsedNano: nanotime(),
poolResetCount: p.resetCount,
status: resourceStatusConstructing,
}
p.allResources.append(res)
p.destructWG.Add(1)
return res
}
// Acquire gets a resource from the pool. If no resources are available and the pool is not at maximum capacity it will
// create a new resource. If the pool is at maximum capacity it will block until a resource is available. ctx can be
// used to cancel the Acquire.
//
// If Acquire creates a new resource the resource constructor function will receive a context that delegates Value() to
// ctx. Canceling ctx will cause Acquire to return immediately but it will not cancel the resource creation. This avoids
// the problem of it being impossible to create resources when the time to create a resource is greater than any one
// caller of Acquire is willing to wait.
func (p *Pool[T]) Acquire(ctx context.Context) (_ *Resource[T], err error) {
select {
case <-ctx.Done():
p.canceledAcquireCount.Add(1)
return nil, ctx.Err()
default:
}
return p.acquire(ctx)
}
// acquire is a continuation of the Acquire function that doesn't check context
// validity.
//
// This function exists solely for benchmarking purposes.
func (p *Pool[T]) acquire(ctx context.Context) (*Resource[T], error) {
startNano := nanotime()
var waitedForLock bool
if !p.acquireSem.TryAcquire(1) {
waitedForLock = true
err := p.acquireSem.Acquire(ctx, 1)
if err != nil {
p.canceledAcquireCount.Add(1)
return nil, err
}
}
p.mux.Lock()
if p.closed {
p.acquireSem.Release(1)
p.mux.Unlock()
return nil, ErrClosedPool
}
// If a resource is available in the pool.
if res := p.tryAcquireIdleResource(); res != nil {
if waitedForLock {
p.emptyAcquireCount += 1
}
p.acquireCount += 1
p.acquireDuration += time.Duration(nanotime() - startNano)
p.mux.Unlock()
return res, nil
}
if len(p.allResources) >= int(p.maxSize) {
// Unreachable code.
panic("bug: semaphore allowed more acquires than pool allows")
}
// No idle resource is available, but there is enough space to create one.
res := p.createNewResource()
p.mux.Unlock()
res, err := p.initResourceValue(ctx, res)
if err != nil {
return nil, err
}
p.mux.Lock()
defer p.mux.Unlock()
p.emptyAcquireCount += 1
p.acquireCount += 1
p.acquireDuration += time.Duration(nanotime() - startNano)
return res, nil
}
func (p *Pool[T]) initResourceValue(ctx context.Context, res *Resource[T]) (*Resource[T], error) {
// Create the resource in a goroutine to immediately return from Acquire
// if ctx is canceled without also canceling the constructor.
//
// See:
// - https://github.com/jackc/pgx/issues/1287
// - https://github.com/jackc/pgx/issues/1259
constructErrChan := make(chan error)
go func() {
constructorCtx := newValueCancelCtx(ctx, p.baseAcquireCtx)
value, err := p.constructor(constructorCtx)
if err != nil {
p.mux.Lock()
p.allResources.remove(res)
p.destructWG.Done()
// The resource won't be acquired because its
// construction failed. We have to allow someone else to
// take that resource.
p.acquireSem.Release(1)
p.mux.Unlock()
select {
case constructErrChan <- err:
case <-ctx.Done():
// The caller is cancelled, so no-one awaits the
// error. This branch avoids a goroutine leak.
}
return
}
// The resource is already in p.allResources where it might be read. So we need to acquire the lock to update its
// status.
p.mux.Lock()
res.value = value
res.status = resourceStatusAcquired
p.mux.Unlock()
// This select works because the channel is unbuffered.
select {
case constructErrChan <- nil:
case <-ctx.Done():
p.releaseAcquiredResource(res, res.lastUsedNano)
}
}()
select {
case <-ctx.Done():
p.canceledAcquireCount.Add(1)
return nil, ctx.Err()
case err := <-constructErrChan:
if err != nil {
return nil, err
}
return res, nil
}
}
// TryAcquire gets a resource from the pool if one is immediately available. If not, it returns ErrNotAvailable. If no
// resources are available but the pool has room to grow, a resource will be created in the background. ctx is only
// used to cancel the background creation.
func (p *Pool[T]) TryAcquire(ctx context.Context) (*Resource[T], error) {
if !p.acquireSem.TryAcquire(1) {
return nil, ErrNotAvailable
}
p.mux.Lock()
defer p.mux.Unlock()
if p.closed {
p.acquireSem.Release(1)
return nil, ErrClosedPool
}
// If a resource is available now
if res := p.tryAcquireIdleResource(); res != nil {
p.acquireCount += 1
return res, nil
}
if len(p.allResources) >= int(p.maxSize) {
// Unreachable code.
panic("bug: semaphore allowed more acquires than pool allows")
}
res := p.createNewResource()
go func() {
value, err := p.constructor(ctx)
p.mux.Lock()
defer p.mux.Unlock()
// We have to create the resource and only then release the
// semaphore - for the time being there is no resource that
// someone could acquire.
defer p.acquireSem.Release(1)
if err != nil {
p.allResources.remove(res)
p.destructWG.Done()
return
}
res.value = value
res.status = resourceStatusIdle
p.idleResources.Push(res)
}()
return nil, ErrNotAvailable
}
// acquireSemAll tries to acquire num free tokens from sem. This function is
// guaranteed to acquire at least the lowest number of tokens that has been
// available in the semaphore during runtime of this function.
//
// For the time being, the semaphore doesn't allow acquiring all tokens atomically
// (see https://github.com/golang/sync/pull/19). We simulate this by trying all
// powers of 2 that are less than or equal to num.
//
// For example, let's imagine we have 19 free tokens in the semaphore which in
// total has 24 tokens (i.e. the maxSize of the pool is 24 resources). Then if
// num is 24, log2Int(24) is 4 and we try to acquire 16, 8, 4, 2 and 1
// tokens. Out of those, the acquire of 16, 2 and 1 tokens will succeed.
//
// Naturally, Acquires and Releases of the semaphore might take place
// concurrently. For this reason, it's not guaranteed that absolutely all free
// tokens in the semaphore will be acquired. But it's guaranteed that at least
// the minimal number of tokens that has been present over the whole process
// will be acquired. This is sufficient for the use-case we have in this
// package.
//
// TODO: Replace this with acquireSem.TryAcquireAll() if it gets to
// upstream. https://github.com/golang/sync/pull/19
func acquireSemAll(sem *semaphore.Weighted, num int) int {
if sem.TryAcquire(int64(num)) {
return num
}
var acquired int
for i := int(log2Int(num)); i >= 0; i-- {
val := 1 << i
if sem.TryAcquire(int64(val)) {
acquired += val
}
}
return acquired
}
// AcquireAllIdle acquires all currently idle resources. Its intended use is for
// health check and keep-alive functionality. It does not update pool
// statistics.
func (p *Pool[T]) AcquireAllIdle() []*Resource[T] {
p.mux.Lock()
defer p.mux.Unlock()
if p.closed {
return nil
}
numIdle := p.idleResources.Len()
if numIdle == 0 {
return nil
}
// In acquireSemAll we use only TryAcquire and not Acquire. Because
// TryAcquire cannot block, the fact that we hold the mutex locked and try
// to acquire the semaphore cannot result in a deadlock.
//
// Because the mutex is locked, no parallel Release can run. This
// implies that the number of tokens can only decrease because some
// Acquire/TryAcquire call can consume the semaphore token. Consequently
// acquired is always less than or equal to numIdle. Moreover, if acquired <
// numIdle, then there are some parallel Acquire/TryAcquire calls that
// will take the remaining idle connections.
acquired := acquireSemAll(p.acquireSem, numIdle)
idle := make([]*Resource[T], acquired)
for i := range idle {
res, _ := p.idleResources.Pop()
res.status = resourceStatusAcquired
idle[i] = res
}
// We have to bump the generation to ensure that Acquire/TryAcquire
// calls running in parallel (those which caused acquired < numIdle)
// will consume old connections and not freshly released connections
// instead.
p.idleResources.NextGen()
return idle
}
// CreateResource constructs a new resource without acquiring it. The new resource goes straight into the idle pool. If the pool is full
// it returns an error. It can be useful to maintain warm resources under little load.
func (p *Pool[T]) CreateResource(ctx context.Context) error {
if !p.acquireSem.TryAcquire(1) {
return ErrNotAvailable
}
p.mux.Lock()
if p.closed {
p.acquireSem.Release(1)
p.mux.Unlock()
return ErrClosedPool
}
if len(p.allResources) >= int(p.maxSize) {
p.acquireSem.Release(1)
p.mux.Unlock()
return ErrNotAvailable
}
res := p.createNewResource()
p.mux.Unlock()
value, err := p.constructor(ctx)
p.mux.Lock()
defer p.mux.Unlock()
defer p.acquireSem.Release(1)
if err != nil {
p.allResources.remove(res)
p.destructWG.Done()
return err
}
res.value = value
res.status = resourceStatusIdle
// If closed while constructing resource then destroy it and return an error
if p.closed {
go p.destructResourceValue(res.value)
return ErrClosedPool
}
p.idleResources.Push(res)
return nil
}
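// exampleWarmPool is an editor-added sketch, not part of the upstream puddle
// file: it uses CreateResource to pre-create up to n idle resources so that
// later Acquire calls do not pay construction latency, stopping early once the
// pool is full.
func exampleWarmPool[T any](ctx context.Context, p *Pool[T], n int) error {
	for i := 0; i < n; i++ {
		err := p.CreateResource(ctx)
		if err == ErrNotAvailable {
			return nil // pool already holds maxSize resources
		}
		if err != nil {
			return err
		}
	}
	return nil
}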
// Reset destroys all resources, but leaves the pool open. It is intended for use when an error is detected that would
// disrupt all resources (such as a network interruption or a server state change).
//
// It is safe to reset a pool while resources are checked out. Those resources will be destroyed when they are returned
// to the pool.
func (p *Pool[T]) Reset() {
p.mux.Lock()
defer p.mux.Unlock()
p.resetCount++
for res, ok := p.idleResources.Pop(); ok; res, ok = p.idleResources.Pop() {
p.allResources.remove(res)
go p.destructResourceValue(res.value)
}
}
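// exampleResetOnFailure is an editor-added sketch, not part of the upstream
// puddle file: assuming the caller has already classified err as a failure
// that invalidates every resource (for example, the backing server restarted),
// Reset discards the idle resources while the pool stays open for new work.
func exampleResetOnFailure[T any](p *Pool[T], err error) {
	if err != nil {
		p.Reset() // checked-out resources are destroyed when they are returned
	}
}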
// releaseAcquiredResource returns res to the pool.
func (p *Pool[T]) releaseAcquiredResource(res *Resource[T], lastUsedNano int64) {
p.mux.Lock()
defer p.mux.Unlock()
defer p.acquireSem.Release(1)
if p.closed || res.poolResetCount != p.resetCount {
p.allResources.remove(res)
go p.destructResourceValue(res.value)
} else {
res.lastUsedNano = lastUsedNano
res.status = resourceStatusIdle
p.idleResources.Push(res)
}
}
// destroyAcquiredResource removes res from the pool and destroys it. If res is
// not part of the pool it will panic.
func (p *Pool[T]) destroyAcquiredResource(res *Resource[T]) {
p.destructResourceValue(res.value)
p.mux.Lock()
defer p.mux.Unlock()
defer p.acquireSem.Release(1)
p.allResources.remove(res)
}
func (p *Pool[T]) hijackAcquiredResource(res *Resource[T]) {
p.mux.Lock()
defer p.mux.Unlock()
defer p.acquireSem.Release(1)
p.allResources.remove(res)
res.status = resourceStatusHijacked
p.destructWG.Done() // not responsible for destructing hijacked resources
}
func (p *Pool[T]) destructResourceValue(value T) {
p.destructor(value)
p.destructWG.Done()
}

28
vendor/github.com/jackc/puddle/v2/resource_list.go generated vendored Normal file
View file

@@ -0,0 +1,28 @@
package puddle
type resList[T any] []*Resource[T]
func (l *resList[T]) append(val *Resource[T]) { *l = append(*l, val) }
func (l *resList[T]) popBack() *Resource[T] {
idx := len(*l) - 1
val := (*l)[idx]
(*l)[idx] = nil // Avoid memory leak
*l = (*l)[:idx]
return val
}
func (l *resList[T]) remove(val *Resource[T]) {
for i, elem := range *l {
if elem == val {
lastIdx := len(*l) - 1
(*l)[i] = (*l)[lastIdx]
(*l)[lastIdx] = nil // Avoid memory leak
(*l) = (*l)[:lastIdx]
return
}
}
panic("BUG: removeResource could not find res in slice")
}
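// exampleRemoveIsUnordered is an editor-added sketch, not part of the upstream
// puddle file: remove swaps the matched element with the last one and truncates
// the slice, so removal costs O(1) after the linear search, at the price of not
// preserving order. Given [r0, r1, r2, r3], removing r1 leaves [r0, r3, r2].
func exampleRemoveIsUnordered[T any](l *resList[T], victim *Resource[T]) int {
	l.remove(victim) // panics if victim is not in the list
	return len(*l)   // one element shorter; remaining order may have changed
}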

27
vendor/golang.org/x/sync/LICENSE generated vendored Normal file
View file

@@ -0,0 +1,27 @@
Copyright (c) 2009 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

22
vendor/golang.org/x/sync/PATENTS generated vendored Normal file
View file

@@ -0,0 +1,22 @@
Additional IP Rights Grant (Patents)
"This implementation" means the copyrightable works distributed by
Google as part of the Go project.
Google hereby grants to You a perpetual, worldwide, non-exclusive,
no-charge, royalty-free, irrevocable (except as stated in this section)
patent license to make, have made, use, offer to sell, sell, import,
transfer and otherwise run, modify and propagate the contents of this
implementation of Go, where such license applies only to those patent
claims, both currently owned or controlled by Google and acquired in
the future, licensable by Google that are necessarily infringed by this
implementation of Go. This grant does not include claims that would be
infringed only as a consequence of further modification of this
implementation. If you or your agent or exclusive licensee institute or
order or agree to the institution of patent litigation against any
entity (including a cross-claim or counterclaim in a lawsuit) alleging
that this implementation of Go or any code incorporated within this
implementation of Go constitutes direct or contributory patent
infringement, or inducement of patent infringement, then any patent
rights granted to you under this License for this implementation of Go
shall terminate as of the date such litigation is filed.

136
vendor/golang.org/x/sync/semaphore/semaphore.go generated vendored Normal file
View file

@@ -0,0 +1,136 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package semaphore provides a weighted semaphore implementation.
package semaphore // import "golang.org/x/sync/semaphore"
import (
"container/list"
"context"
"sync"
)
type waiter struct {
n int64
ready chan<- struct{} // Closed when semaphore acquired.
}
// NewWeighted creates a new weighted semaphore with the given
// maximum combined weight for concurrent access.
func NewWeighted(n int64) *Weighted {
w := &Weighted{size: n}
return w
}
// Weighted provides a way to bound concurrent access to a resource.
// The callers can request access with a given weight.
type Weighted struct {
size int64
cur int64
mu sync.Mutex
waiters list.List
}
// Acquire acquires the semaphore with a weight of n, blocking until resources
// are available or ctx is done. On success, returns nil. On failure, returns
// ctx.Err() and leaves the semaphore unchanged.
//
// If ctx is already done, Acquire may still succeed without blocking.
func (s *Weighted) Acquire(ctx context.Context, n int64) error {
s.mu.Lock()
if s.size-s.cur >= n && s.waiters.Len() == 0 {
s.cur += n
s.mu.Unlock()
return nil
}
if n > s.size {
// Don't make other Acquire calls block on one that's doomed to fail.
s.mu.Unlock()
<-ctx.Done()
return ctx.Err()
}
ready := make(chan struct{})
w := waiter{n: n, ready: ready}
elem := s.waiters.PushBack(w)
s.mu.Unlock()
select {
case <-ctx.Done():
err := ctx.Err()
s.mu.Lock()
select {
case <-ready:
// Acquired the semaphore after we were canceled. Rather than trying to
// fix up the queue, just pretend we didn't notice the cancelation.
err = nil
default:
isFront := s.waiters.Front() == elem
s.waiters.Remove(elem)
// If we're at the front and there're extra tokens left, notify other waiters.
if isFront && s.size > s.cur {
s.notifyWaiters()
}
}
s.mu.Unlock()
return err
case <-ready:
return nil
}
}
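// exampleBoundedWork is an editor-added sketch, not part of the upstream
// x/sync file: it shows the common Acquire/Release pattern of bounding the
// number of concurrently running tasks to limit, then waiting for all workers
// by acquiring every token at the end.
func exampleBoundedWork(ctx context.Context, tasks []func(), limit int64) error {
	sem := NewWeighted(limit)
	for _, task := range tasks {
		if err := sem.Acquire(ctx, 1); err != nil {
			return err // ctx was canceled or its deadline passed
		}
		go func(run func()) {
			defer sem.Release(1)
			run()
		}(task)
	}
	return sem.Acquire(ctx, limit) // blocks until every started task has released its token
}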
// TryAcquire acquires the semaphore with a weight of n without blocking.
// On success, returns true. On failure, returns false and leaves the semaphore unchanged.
func (s *Weighted) TryAcquire(n int64) bool {
s.mu.Lock()
success := s.size-s.cur >= n && s.waiters.Len() == 0
if success {
s.cur += n
}
s.mu.Unlock()
return success
}
// Release releases the semaphore with a weight of n.
func (s *Weighted) Release(n int64) {
s.mu.Lock()
s.cur -= n
if s.cur < 0 {
s.mu.Unlock()
panic("semaphore: released more than held")
}
s.notifyWaiters()
s.mu.Unlock()
}
func (s *Weighted) notifyWaiters() {
for {
next := s.waiters.Front()
if next == nil {
break // No more waiters blocked.
}
w := next.Value.(waiter)
if s.size-s.cur < w.n {
// Not enough tokens for the next waiter. We could keep going (to try to
// find a waiter with a smaller request), but under load that could cause
// starvation for large requests; instead, we leave all remaining waiters
// blocked.
//
// Consider a semaphore used as a read-write lock, with N tokens, N
// readers, and one writer. Each reader can Acquire(1) to obtain a read
// lock. The writer can Acquire(N) to obtain a write lock, excluding all
// of the readers. If we allow the readers to jump ahead in the queue,
// the writer will starve — there is always one token available for every
// reader.
break
}
s.cur += w.n
s.waiters.Remove(next)
close(w.ready)
}
}
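// exampleRWLock is an editor-added sketch, not part of the upstream x/sync
// file: it spells out the read-write-lock analogy from the comment in
// notifyWaiters. Readers take one token each, the writer takes all n tokens,
// and FIFO handling of waiters keeps the writer from starving behind a steady
// stream of readers.
type exampleRWLock struct {
	sem *Weighted
	n   int64
}

func newExampleRWLock(n int64) *exampleRWLock {
	return &exampleRWLock{sem: NewWeighted(n), n: n}
}

func (l *exampleRWLock) RLock(ctx context.Context) error { return l.sem.Acquire(ctx, 1) }
func (l *exampleRWLock) RUnlock()                        { l.sem.Release(1) }
func (l *exampleRWLock) Lock(ctx context.Context) error  { return l.sem.Acquire(ctx, l.n) }
func (l *exampleRWLock) Unlock()                         { l.sem.Release(l.n) }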

10
vendor/modules.txt vendored
View file

@@ -315,7 +315,7 @@ github.com/jackc/pgpassfile
# github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a
## explicit; go 1.14
github.com/jackc/pgservicefile
# github.com/jackc/pgx/v5 v5.4.3
# github.com/jackc/pgx/v5 v5.5.0
## explicit; go 1.19
github.com/jackc/pgx/v5
github.com/jackc/pgx/v5/internal/anynil
@@ -328,7 +328,12 @@ github.com/jackc/pgx/v5/pgconn/internal/bgreader
github.com/jackc/pgx/v5/pgconn/internal/ctxwatch
github.com/jackc/pgx/v5/pgproto3
github.com/jackc/pgx/v5/pgtype
github.com/jackc/pgx/v5/pgxpool
github.com/jackc/pgx/v5/stdlib
# github.com/jackc/puddle/v2 v2.2.1
## explicit; go 1.19
github.com/jackc/puddle/v2
github.com/jackc/puddle/v2/internal/genstack
# github.com/jinzhu/inflection v1.0.0
## explicit
github.com/jinzhu/inflection
@@ -851,6 +856,9 @@ golang.org/x/net/trace
## explicit; go 1.18
golang.org/x/oauth2
golang.org/x/oauth2/internal
# golang.org/x/sync v0.3.0
## explicit; go 1.17
golang.org/x/sync/semaphore
# golang.org/x/sys v0.13.0
## explicit; go 1.17
golang.org/x/sys/cpu