[chore]: Bump github.com/jackc/pgx/v5 from 5.4.1 to 5.4.2 (#1991)

Authored by dependabot[bot] on 2023-07-21 14:22:20 +00:00; committed by GitHub
parent 83139989b5
commit fa57c699fe
17 changed files with 119 additions and 476 deletions

go.mod (2 lines changed)

@@ -33,7 +33,7 @@ require (
 	github.com/gorilla/websocket v1.5.0
 	github.com/h2non/filetype v1.1.3
 	github.com/jackc/pgconn v1.14.0
-	github.com/jackc/pgx/v5 v5.4.1
+	github.com/jackc/pgx/v5 v5.4.2
 	github.com/microcosm-cc/bluemonday v1.0.24
 	github.com/miekg/dns v1.1.55
 	github.com/minio/minio-go/v7 v7.0.60

go.sum (4 lines changed)

@@ -389,8 +389,8 @@ github.com/jackc/pgtype v0.0.0-20190828014616-a8802b16cc59/go.mod h1:MWlu30kVJrU
 github.com/jackc/pgx/v4 v4.0.0-20190420224344-cc3461e65d96/go.mod h1:mdxmSJJuR08CZQyj1PVQBHy9XOp5p8/SHH6a0psbY9Y=
 github.com/jackc/pgx/v4 v4.0.0-20190421002000-1b8f0016e912/go.mod h1:no/Y67Jkk/9WuGR0JG/JseM9irFbnEPbuWV2EELPNuM=
 github.com/jackc/pgx/v4 v4.0.0-pre1.0.20190824185557-6972a5742186/go.mod h1:X+GQnOEnf1dqHGpw7JmHqHc1NxDoalibchSk9/RWuDc=
-github.com/jackc/pgx/v5 v5.4.1 h1:oKfB/FhuVtit1bBM3zNRRsZ925ZkMN3HXL+LgLUM9lE=
-github.com/jackc/pgx/v5 v5.4.1/go.mod h1:q6iHT8uDNXWiFNOlRqJzBTaSH3+2xCXkokxHZC5qWFY=
+github.com/jackc/pgx/v5 v5.4.2 h1:u1gmGDwbdRUZiwisBm/Ky2M14uQyUP65bG8+20nnyrg=
+github.com/jackc/pgx/v5 v5.4.2/go.mod h1:q6iHT8uDNXWiFNOlRqJzBTaSH3+2xCXkokxHZC5qWFY=
 github.com/jackc/puddle v0.0.0-20190413234325-e4ced69a3a2b/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
 github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
 github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=

vendor/github.com/jackc/pgx/v5/CHANGELOG.md

@@ -1,3 +1,12 @@
+# 5.4.2 (July 11, 2023)
+
+* Fix: RowScanner errors are fatal to Rows
+* Fix: Enable failover efforts when pg_hba.conf disallows non-ssl connections (Brandon Kauffman)
+* Hstore text codec internal improvements (Evan Jones)
+* Fix: Stop timers for background reader when not in use. Fixes memory leak when closing connections (Adrian-Stefan Mares)
+* Fix: Stop background reader as soon as possible.
+* Add PgConn.SyncConn(). This combined with the above fix makes it safe to directly use the underlying net.Conn.
+
 # 5.4.1 (June 18, 2023)
 
 * Fix: concurrency bug with pgtypeDefaultMap and simple protocol (Lev Zakharov)
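The headline item above is the new PgConn.SyncConn. A minimal sketch of the intended call order (illustration only, not part of this diff; assumes an already-established connection): SyncConn stops background IO and drains buffered reads, after which the raw net.Conn can be used directly, for example to adjust socket options.

    package example

    import (
        "context"
        "net"

        "github.com/jackc/pgx/v5/pgconn"
    )

    // enableKeepAlive is a hypothetical helper; SyncConn must come first so no background
    // read is in flight when the underlying net.Conn is touched.
    func enableKeepAlive(ctx context.Context, pgConn *pgconn.PgConn) error {
        if err := pgConn.SyncConn(ctx); err != nil {
            return err
        }
        if tcp, ok := pgConn.Conn().(*net.TCPConn); ok {
            return tcp.SetKeepAlive(true)
        }
        return nil
    }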

vendor/github.com/jackc/pgx/v5/README.md

@@ -133,6 +133,7 @@ These adapters can be used with the tracelog package.
 * [github.com/jackc/pgx-zap](https://github.com/jackc/pgx-zap)
 * [github.com/jackc/pgx-zerolog](https://github.com/jackc/pgx-zerolog)
 * [github.com/mcosta74/pgx-slog](https://github.com/mcosta74/pgx-slog)
+* [github.com/kataras/pgx-golog](https://github.com/kataras/pgx-golog)
 
 ## 3rd Party Libraries with PGX Support

vendor/github.com/jackc/pgx/v5/pgconn/internal/bgreader/bgreader.go

@@ -9,18 +9,18 @@ import (
 )
 
 const (
-	bgReaderStatusStopped = iota
-	bgReaderStatusRunning
-	bgReaderStatusStopping
+	StatusStopped = iota
+	StatusRunning
+	StatusStopping
 )
 
 // BGReader is an io.Reader that can optionally buffer reads in the background. It is safe for concurrent use.
 type BGReader struct {
 	r    io.Reader
 	cond *sync.Cond
-	bgReaderStatus int32
+	status      int32
 	readResults []readResult
 }
 
 type readResult struct {
@@ -34,14 +34,14 @@ func (r *BGReader) Start() {
 	r.cond.L.Lock()
 	defer r.cond.L.Unlock()
 
-	switch r.bgReaderStatus {
-	case bgReaderStatusStopped:
-		r.bgReaderStatus = bgReaderStatusRunning
+	switch r.status {
+	case StatusStopped:
+		r.status = StatusRunning
 		go r.bgRead()
-	case bgReaderStatusRunning:
+	case StatusRunning:
 		// no-op
-	case bgReaderStatusStopping:
-		r.bgReaderStatus = bgReaderStatusRunning
+	case StatusStopping:
+		r.status = StatusRunning
 	}
 }
@@ -51,16 +51,23 @@ func (r *BGReader) Stop() {
 	r.cond.L.Lock()
 	defer r.cond.L.Unlock()
 
-	switch r.bgReaderStatus {
-	case bgReaderStatusStopped:
+	switch r.status {
+	case StatusStopped:
 		// no-op
-	case bgReaderStatusRunning:
-		r.bgReaderStatus = bgReaderStatusStopping
-	case bgReaderStatusStopping:
+	case StatusRunning:
+		r.status = StatusStopping
+	case StatusStopping:
 		// no-op
 	}
 }
 
+// Status returns the current status of the background reader.
+func (r *BGReader) Status() int32 {
+	r.cond.L.Lock()
+	defer r.cond.L.Unlock()
+	return r.status
+}
+
 func (r *BGReader) bgRead() {
 	keepReading := true
 	for keepReading {
@@ -70,8 +77,8 @@ func (r *BGReader) bgRead() {
 		r.cond.L.Lock()
 		r.readResults = append(r.readResults, readResult{buf: buf, err: err})
-		if r.bgReaderStatus == bgReaderStatusStopping || err != nil {
-			r.bgReaderStatus = bgReaderStatusStopped
+		if r.status == StatusStopping || err != nil {
+			r.status = StatusStopped
 			keepReading = false
 		}
 		r.cond.L.Unlock()
@@ -89,7 +96,7 @@ func (r *BGReader) Read(p []byte) (int, error) {
 	}
 
 	// There are no unread background read results and the background reader is stopped.
-	if r.bgReaderStatus == bgReaderStatusStopped {
+	if r.status == StatusStopped {
 		return r.r.Read(p)
 	}
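For orientation, a sketch of the lifecycle that the newly exported Status reports. The package is internal to pgx, so this reads like an in-package example rather than user code, and the printed values assume the wrapped reader delivers its five bytes in a single background read (which strings.Reader does).

    package bgreader

    import (
        "fmt"
        "strings"
    )

    func ExampleBGReader_Status() {
        r := New(strings.NewReader("hello"))
        r.Start() // begin buffering reads on a background goroutine
        r.Stop()  // request stop; the in-flight background read still completes

        buf := make([]byte, 5)
        n, err := r.Read(buf) // served from the buffered background read result
        fmt.Println(n, err, r.Status() == StatusStopped)
        // typically prints: 5 <nil> true
    }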

vendor/github.com/jackc/pgx/v5/pgconn/pgconn.go

@@ -174,7 +174,7 @@ func ConnectConfig(octx context.Context, config *Config) (pgConn *PgConn, err er
 		const ERRCODE_INVALID_CATALOG_NAME = "3D000"    // db does not exist
 		const ERRCODE_INSUFFICIENT_PRIVILEGE = "42501"  // missing connect privilege
 		if pgerr.Code == ERRCODE_INVALID_PASSWORD ||
-			pgerr.Code == ERRCODE_INVALID_AUTHORIZATION_SPECIFICATION ||
+			pgerr.Code == ERRCODE_INVALID_AUTHORIZATION_SPECIFICATION && fc.TLSConfig != nil ||
 			pgerr.Code == ERRCODE_INVALID_CATALOG_NAME ||
 			pgerr.Code == ERRCODE_INSUFFICIENT_PRIVILEGE {
 			break
@@ -263,7 +263,8 @@ func expandWithIPs(ctx context.Context, lookupFn LookupFunc, fallbacks []*Fallba
 }
 
 func connect(ctx context.Context, config *Config, fallbackConfig *FallbackConfig,
-	ignoreNotPreferredErr bool) (*PgConn, error) {
+	ignoreNotPreferredErr bool,
+) (*PgConn, error) {
 	pgConn := new(PgConn)
 	pgConn.config = config
 	pgConn.cleanupDone = make(chan struct{})
@@ -298,6 +299,7 @@ func connect(ctx context.Context, config *Config, fallbackConfig *FallbackConfig
 	pgConn.status = connStatusConnecting
 	pgConn.bgReader = bgreader.New(pgConn.conn)
 	pgConn.slowWriteTimer = time.AfterFunc(time.Duration(math.MaxInt64), pgConn.bgReader.Start)
+	pgConn.slowWriteTimer.Stop()
 	pgConn.frontend = config.BuildFrontend(pgConn.bgReader, pgConn.conn)
 
 	startupMsg := pgproto3.StartupMessage{
@@ -476,7 +478,8 @@ func (pgConn *PgConn) ReceiveMessage(ctx context.Context) (pgproto3.BackendMessa
 		err = &pgconnError{
 			msg:         "receive message failed",
 			err:         normalizeTimeoutError(ctx, err),
-			safeToRetry: true}
+			safeToRetry: true,
+		}
 	}
 	return msg, err
 }
@@ -553,7 +556,8 @@ func (pgConn *PgConn) receiveMessage() (pgproto3.BackendMessage, error) {
 	return msg, nil
 }
 
-// Conn returns the underlying net.Conn. This rarely necessary.
+// Conn returns the underlying net.Conn. This rarely necessary. If the connection will be directly used for reading or
+// writing then SyncConn should usually be called before Conn.
 func (pgConn *PgConn) Conn() net.Conn {
 	return pgConn.conn
 }
@@ -1336,7 +1340,6 @@ func (mrr *MultiResultReader) ReadAll() ([]*Result, error) {
 
 func (mrr *MultiResultReader) receiveMessage() (pgproto3.BackendMessage, error) {
 	msg, err := mrr.pgConn.receiveMessage()
-
 	if err != nil {
 		mrr.pgConn.contextWatcher.Unwatch()
 		mrr.err = normalizeTimeoutError(mrr.ctx, err)
@@ -1647,8 +1650,8 @@ func (pgConn *PgConn) ExecBatch(ctx context.Context, batch *Batch) *MultiResultR
 	batch.buf = (&pgproto3.Sync{}).Encode(batch.buf)
 
 	pgConn.enterPotentialWriteReadDeadlock()
+	defer pgConn.exitPotentialWriteReadDeadlock()
 	_, err := pgConn.conn.Write(batch.buf)
-	pgConn.exitPotentialWriteReadDeadlock()
 	if err != nil {
 		multiResult.closed = true
 		multiResult.err = err
@@ -1719,23 +1722,50 @@ func (pgConn *PgConn) enterPotentialWriteReadDeadlock() {
 	//
 	// In addition, on Windows the default timer resolution is 15.6ms. So setting the timer to less than that is
 	// ineffective.
-	pgConn.slowWriteTimer.Reset(15 * time.Millisecond)
+	if pgConn.slowWriteTimer.Reset(15 * time.Millisecond) {
+		panic("BUG: slow write timer already active")
+	}
 }
 
 // exitPotentialWriteReadDeadlock must be called after a call to enterPotentialWriteReadDeadlock.
 func (pgConn *PgConn) exitPotentialWriteReadDeadlock() {
-	if !pgConn.slowWriteTimer.Reset(time.Duration(math.MaxInt64)) {
-		pgConn.slowWriteTimer.Stop()
-	}
+	// The state of the timer is not relevant upon exiting the potential slow write. It may both
+	// fire (due to a slow write), or not fire (due to a fast write).
+	_ = pgConn.slowWriteTimer.Stop()
+	pgConn.bgReader.Stop()
 }
 
 func (pgConn *PgConn) flushWithPotentialWriteReadDeadlock() error {
 	pgConn.enterPotentialWriteReadDeadlock()
+	defer pgConn.exitPotentialWriteReadDeadlock()
 	err := pgConn.frontend.Flush()
-	pgConn.exitPotentialWriteReadDeadlock()
 	return err
 }
 
+// SyncConn prepares the underlying net.Conn for direct use. PgConn may internally buffer reads or use goroutines for
+// background IO. This means that any direct use of the underlying net.Conn may be corrupted if a read is already
+// buffered or a read is in progress. SyncConn drains read buffers and stops background IO. In some cases this may
+// require sending a ping to the server. ctx can be used to cancel this operation. This should be called before any
+// operation that will use the underlying net.Conn directly. e.g. Before Conn() or Hijack().
+//
+// This should not be confused with the PostgreSQL protocol Sync message.
+func (pgConn *PgConn) SyncConn(ctx context.Context) error {
+	for i := 0; i < 10; i++ {
+		if pgConn.bgReader.Status() == bgreader.StatusStopped && pgConn.frontend.ReadBufferLen() == 0 {
+			return nil
+		}
+
+		err := pgConn.Ping(ctx)
+		if err != nil {
+			return fmt.Errorf("SyncConn: Ping failed while syncing conn: %w", err)
+		}
+	}
+
+	// This should never happen. Only way I can imagine this occuring is if the server is constantly sending data such as
+	// LISTEN/NOTIFY or log notifications such that we never can get an empty buffer.
+	return errors.New("SyncConn: conn never synchronized")
+}
+
 // HijackedConn is the result of hijacking a connection.
 //
 // Due to the necessary exposure of internal implementation details, it is not covered by the semantic versioning
@@ -1750,9 +1780,9 @@ type HijackedConn struct {
 	Config *Config
 }
 
-// Hijack extracts the internal connection data. pgConn must be in an idle state. pgConn is unusable after hijacking.
-// Hijacking is typically only useful when using pgconn to establish a connection, but taking complete control of the
-// raw connection after that (e.g. a load balancer or proxy).
+// Hijack extracts the internal connection data. pgConn must be in an idle state. SyncConn should be called immediately
+// before Hijack. pgConn is unusable after hijacking. Hijacking is typically only useful when using pgconn to establish
+// a connection, but taking complete control of the raw connection after that (e.g. a load balancer or proxy).
 //
 // Due to the necessary exposure of internal implementation details, it is not covered by the semantic versioning
 // compatibility.
@@ -1776,6 +1806,8 @@ func (pgConn *PgConn) Hijack() (*HijackedConn, error) {
 // Construct created a PgConn from an already established connection to a PostgreSQL server. This is the inverse of
 // PgConn.Hijack. The connection must be in an idle state.
 //
+// hc.Frontend is replaced by a new pgproto3.Frontend built by hc.Config.BuildFrontend.
+//
 // Due to the necessary exposure of internal implementation details, it is not covered by the semantic versioning
 // compatibility.
 func Construct(hc *HijackedConn) (*PgConn, error) {
@@ -1796,6 +1828,8 @@ func Construct(hc *HijackedConn) (*PgConn, error) {
 	pgConn.contextWatcher = newContextWatcher(pgConn.conn)
 	pgConn.bgReader = bgreader.New(pgConn.conn)
 	pgConn.slowWriteTimer = time.AfterFunc(time.Duration(math.MaxInt64), pgConn.bgReader.Start)
+	pgConn.slowWriteTimer.Stop()
+	pgConn.frontend = hc.Config.BuildFrontend(pgConn.bgReader, pgConn.conn)
 
 	return pgConn, nil
 }
@@ -1997,7 +2031,6 @@ func (p *Pipeline) GetResults() (results any, err error) {
 		}
 	}
 }
-
 func (p *Pipeline) getResultsPrepare() (*StatementDescription, error) {
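Putting the pgconn changes together, a hypothetical hand-off helper (assumes an established connection; error handling trimmed): SyncConn first, then Hijack, and later Construct, whose rebuilt Frontend now comes from hc.Config.BuildFrontend rather than the hijacked one.

    package example

    import (
        "context"

        "github.com/jackc/pgx/v5/pgconn"
    )

    func handOff(ctx context.Context, pgConn *pgconn.PgConn) (*pgconn.PgConn, error) {
        if err := pgConn.SyncConn(ctx); err != nil { // drain buffered reads, stop background IO
            return nil, err
        }
        hc, err := pgConn.Hijack() // pgConn is unusable from here on
        if err != nil {
            return nil, err
        }
        _ = hc.Conn // the raw net.Conn may now be used directly, e.g. by a proxy or load balancer
        return pgconn.Construct(hc) // hc.Frontend is replaced via hc.Config.BuildFrontend
    }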

vendor/github.com/jackc/pgx/v5/pgproto3/frontend.go

@@ -361,3 +361,7 @@ func (f *Frontend) findAuthenticationMessageType(src []byte) (BackendMessage, er
 func (f *Frontend) GetAuthType() uint32 {
 	return f.authType
 }
+
+func (f *Frontend) ReadBufferLen() int {
+	return f.cr.wp - f.cr.rp
+}
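ReadBufferLen simply reports how many bytes are sitting unread in the Frontend's chunk reader; SyncConn (above) combines it with the background reader status to decide whether the connection is quiescent. A tiny self-contained illustration:

    package main

    import (
        "bytes"
        "fmt"

        "github.com/jackc/pgx/v5/pgproto3"
    )

    func main() {
        // A Frontend over an in-memory reader starts with an empty read buffer, which is
        // exactly the condition SyncConn waits for.
        f := pgproto3.NewFrontend(bytes.NewReader(nil), nil)
        fmt.Println(f.ReadBufferLen()) // 0
    }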

vendor/github.com/jackc/pgx/v5/pgtype/array.go

@@ -5,7 +5,6 @@ import (
 	"encoding/binary"
 	"fmt"
 	"io"
-	"reflect"
 	"strconv"
 	"strings"
 	"unicode"
@@ -375,27 +374,6 @@ func quoteArrayElementIfNeeded(src string) string {
 	return src
 }
 
-func findDimensionsFromValue(value reflect.Value, dimensions []ArrayDimension, elementsLength int) ([]ArrayDimension, int, bool) {
-	switch value.Kind() {
-	case reflect.Array:
-		fallthrough
-	case reflect.Slice:
-		length := value.Len()
-		if 0 == elementsLength {
-			elementsLength = length
-		} else {
-			elementsLength *= length
-		}
-		dimensions = append(dimensions, ArrayDimension{Length: int32(length), LowerBound: 1})
-		for i := 0; i < length; i++ {
-			if d, l, ok := findDimensionsFromValue(value.Index(i), dimensions, elementsLength); ok {
-				return d, l, true
-			}
-		}
-	}
-	return dimensions, elementsLength, true
-}
-
 // Array represents a PostgreSQL array for T. It implements the ArrayGetter and ArraySetter interfaces. It preserves
 // PostgreSQL dimensions and custom lower bounds. Use FlatArray if these are not needed.
 type Array[T any] struct {

vendor/github.com/jackc/pgx/v5/pgtype/convert.go

@@ -1,380 +1,9 @@
 package pgtype
 
 import (
-	"database/sql"
-	"fmt"
-	"math"
 	"reflect"
-	"time"
 )
const (
maxUint = ^uint(0)
maxInt = int(maxUint >> 1)
minInt = -maxInt - 1
)
// underlyingNumberType gets the underlying type that can be converted to Int2, Int4, Int8, Float4, or Float8
func underlyingNumberType(val any) (any, bool) {
refVal := reflect.ValueOf(val)
switch refVal.Kind() {
case reflect.Ptr:
if refVal.IsNil() {
return nil, false
}
convVal := refVal.Elem().Interface()
return convVal, true
case reflect.Int:
convVal := int(refVal.Int())
return convVal, reflect.TypeOf(convVal) != refVal.Type()
case reflect.Int8:
convVal := int8(refVal.Int())
return convVal, reflect.TypeOf(convVal) != refVal.Type()
case reflect.Int16:
convVal := int16(refVal.Int())
return convVal, reflect.TypeOf(convVal) != refVal.Type()
case reflect.Int32:
convVal := int32(refVal.Int())
return convVal, reflect.TypeOf(convVal) != refVal.Type()
case reflect.Int64:
convVal := int64(refVal.Int())
return convVal, reflect.TypeOf(convVal) != refVal.Type()
case reflect.Uint:
convVal := uint(refVal.Uint())
return convVal, reflect.TypeOf(convVal) != refVal.Type()
case reflect.Uint8:
convVal := uint8(refVal.Uint())
return convVal, reflect.TypeOf(convVal) != refVal.Type()
case reflect.Uint16:
convVal := uint16(refVal.Uint())
return convVal, reflect.TypeOf(convVal) != refVal.Type()
case reflect.Uint32:
convVal := uint32(refVal.Uint())
return convVal, reflect.TypeOf(convVal) != refVal.Type()
case reflect.Uint64:
convVal := uint64(refVal.Uint())
return convVal, reflect.TypeOf(convVal) != refVal.Type()
case reflect.Float32:
convVal := float32(refVal.Float())
return convVal, reflect.TypeOf(convVal) != refVal.Type()
case reflect.Float64:
convVal := refVal.Float()
return convVal, reflect.TypeOf(convVal) != refVal.Type()
case reflect.String:
convVal := refVal.String()
return convVal, reflect.TypeOf(convVal) != refVal.Type()
case reflect.Bool:
convVal := refVal.Bool()
return convVal, reflect.TypeOf(convVal) != refVal.Type()
}
return nil, false
}
// underlyingBoolType gets the underlying type that can be converted to Bool
func underlyingBoolType(val any) (any, bool) {
refVal := reflect.ValueOf(val)
switch refVal.Kind() {
case reflect.Ptr:
if refVal.IsNil() {
return nil, false
}
convVal := refVal.Elem().Interface()
return convVal, true
case reflect.Bool:
convVal := refVal.Bool()
return convVal, reflect.TypeOf(convVal) != refVal.Type()
}
return nil, false
}
// underlyingBytesType gets the underlying type that can be converted to []byte
func underlyingBytesType(val any) (any, bool) {
refVal := reflect.ValueOf(val)
switch refVal.Kind() {
case reflect.Ptr:
if refVal.IsNil() {
return nil, false
}
convVal := refVal.Elem().Interface()
return convVal, true
case reflect.Slice:
if refVal.Type().Elem().Kind() == reflect.Uint8 {
convVal := refVal.Bytes()
return convVal, reflect.TypeOf(convVal) != refVal.Type()
}
}
return nil, false
}
// underlyingStringType gets the underlying type that can be converted to String
func underlyingStringType(val any) (any, bool) {
refVal := reflect.ValueOf(val)
switch refVal.Kind() {
case reflect.Ptr:
if refVal.IsNil() {
return nil, false
}
convVal := refVal.Elem().Interface()
return convVal, true
case reflect.String:
convVal := refVal.String()
return convVal, reflect.TypeOf(convVal) != refVal.Type()
}
return nil, false
}
// underlyingPtrType dereferences a pointer
func underlyingPtrType(val any) (any, bool) {
refVal := reflect.ValueOf(val)
switch refVal.Kind() {
case reflect.Ptr:
if refVal.IsNil() {
return nil, false
}
convVal := refVal.Elem().Interface()
return convVal, true
}
return nil, false
}
// underlyingTimeType gets the underlying type that can be converted to time.Time
func underlyingTimeType(val any) (any, bool) {
refVal := reflect.ValueOf(val)
switch refVal.Kind() {
case reflect.Ptr:
if refVal.IsNil() {
return nil, false
}
convVal := refVal.Elem().Interface()
return convVal, true
}
timeType := reflect.TypeOf(time.Time{})
if refVal.Type().ConvertibleTo(timeType) {
return refVal.Convert(timeType).Interface(), true
}
return nil, false
}
// underlyingUUIDType gets the underlying type that can be converted to [16]byte
func underlyingUUIDType(val any) (any, bool) {
refVal := reflect.ValueOf(val)
switch refVal.Kind() {
case reflect.Ptr:
if refVal.IsNil() {
return time.Time{}, false
}
convVal := refVal.Elem().Interface()
return convVal, true
}
uuidType := reflect.TypeOf([16]byte{})
if refVal.Type().ConvertibleTo(uuidType) {
return refVal.Convert(uuidType).Interface(), true
}
return nil, false
}
// underlyingSliceType gets the underlying slice type
func underlyingSliceType(val any) (any, bool) {
refVal := reflect.ValueOf(val)
switch refVal.Kind() {
case reflect.Ptr:
if refVal.IsNil() {
return nil, false
}
convVal := refVal.Elem().Interface()
return convVal, true
case reflect.Slice:
baseSliceType := reflect.SliceOf(refVal.Type().Elem())
if refVal.Type().ConvertibleTo(baseSliceType) {
convVal := refVal.Convert(baseSliceType)
return convVal.Interface(), reflect.TypeOf(convVal.Interface()) != refVal.Type()
}
}
return nil, false
}
func int64AssignTo(srcVal int64, srcValid bool, dst any) error {
if srcValid {
switch v := dst.(type) {
case *int:
if srcVal < int64(minInt) {
return fmt.Errorf("%d is less than minimum value for int", srcVal)
} else if srcVal > int64(maxInt) {
return fmt.Errorf("%d is greater than maximum value for int", srcVal)
}
*v = int(srcVal)
case *int8:
if srcVal < math.MinInt8 {
return fmt.Errorf("%d is less than minimum value for int8", srcVal)
} else if srcVal > math.MaxInt8 {
return fmt.Errorf("%d is greater than maximum value for int8", srcVal)
}
*v = int8(srcVal)
case *int16:
if srcVal < math.MinInt16 {
return fmt.Errorf("%d is less than minimum value for int16", srcVal)
} else if srcVal > math.MaxInt16 {
return fmt.Errorf("%d is greater than maximum value for int16", srcVal)
}
*v = int16(srcVal)
case *int32:
if srcVal < math.MinInt32 {
return fmt.Errorf("%d is less than minimum value for int32", srcVal)
} else if srcVal > math.MaxInt32 {
return fmt.Errorf("%d is greater than maximum value for int32", srcVal)
}
*v = int32(srcVal)
case *int64:
if srcVal < math.MinInt64 {
return fmt.Errorf("%d is less than minimum value for int64", srcVal)
} else if srcVal > math.MaxInt64 {
return fmt.Errorf("%d is greater than maximum value for int64", srcVal)
}
*v = int64(srcVal)
case *uint:
if srcVal < 0 {
return fmt.Errorf("%d is less than zero for uint", srcVal)
} else if uint64(srcVal) > uint64(maxUint) {
return fmt.Errorf("%d is greater than maximum value for uint", srcVal)
}
*v = uint(srcVal)
case *uint8:
if srcVal < 0 {
return fmt.Errorf("%d is less than zero for uint8", srcVal)
} else if srcVal > math.MaxUint8 {
return fmt.Errorf("%d is greater than maximum value for uint8", srcVal)
}
*v = uint8(srcVal)
case *uint16:
if srcVal < 0 {
return fmt.Errorf("%d is less than zero for uint16", srcVal)
} else if srcVal > math.MaxUint16 {
return fmt.Errorf("%d is greater than maximum value for uint16", srcVal)
}
*v = uint16(srcVal)
case *uint32:
if srcVal < 0 {
return fmt.Errorf("%d is less than zero for uint32", srcVal)
} else if srcVal > math.MaxUint32 {
return fmt.Errorf("%d is greater than maximum value for uint32", srcVal)
}
*v = uint32(srcVal)
case *uint64:
if srcVal < 0 {
return fmt.Errorf("%d is less than zero for uint64", srcVal)
}
*v = uint64(srcVal)
case sql.Scanner:
return v.Scan(srcVal)
default:
if v := reflect.ValueOf(dst); v.Kind() == reflect.Ptr {
el := v.Elem()
switch el.Kind() {
// if dst is a pointer to pointer, strip the pointer and try again
case reflect.Ptr:
if el.IsNil() {
// allocate destination
el.Set(reflect.New(el.Type().Elem()))
}
return int64AssignTo(srcVal, srcValid, el.Interface())
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
if el.OverflowInt(int64(srcVal)) {
return fmt.Errorf("cannot put %d into %T", srcVal, dst)
}
el.SetInt(int64(srcVal))
return nil
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
if srcVal < 0 {
return fmt.Errorf("%d is less than zero for %T", srcVal, dst)
}
if el.OverflowUint(uint64(srcVal)) {
return fmt.Errorf("cannot put %d into %T", srcVal, dst)
}
el.SetUint(uint64(srcVal))
return nil
}
}
return fmt.Errorf("cannot assign %v into %T", srcVal, dst)
}
return nil
}
// if dst is a pointer to pointer and srcStatus is not Valid, nil it out
if v := reflect.ValueOf(dst); v.Kind() == reflect.Ptr {
el := v.Elem()
if el.Kind() == reflect.Ptr {
el.Set(reflect.Zero(el.Type()))
return nil
}
}
return fmt.Errorf("cannot assign %v %v into %T", srcVal, srcValid, dst)
}
func float64AssignTo(srcVal float64, srcValid bool, dst any) error {
if srcValid {
switch v := dst.(type) {
case *float32:
*v = float32(srcVal)
case *float64:
*v = srcVal
default:
if v := reflect.ValueOf(dst); v.Kind() == reflect.Ptr {
el := v.Elem()
switch el.Kind() {
// if dst is a type alias of a float32 or 64, set dst val
case reflect.Float32, reflect.Float64:
el.SetFloat(srcVal)
return nil
// if dst is a pointer to pointer, strip the pointer and try again
case reflect.Ptr:
if el.IsNil() {
// allocate destination
el.Set(reflect.New(el.Type().Elem()))
}
return float64AssignTo(srcVal, srcValid, el.Interface())
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
i64 := int64(srcVal)
if float64(i64) == srcVal {
return int64AssignTo(i64, srcValid, dst)
}
}
}
return fmt.Errorf("cannot assign %v into %T", srcVal, dst)
}
return nil
}
// if dst is a pointer to pointer and srcStatus is not Valid, nil it out
if v := reflect.ValueOf(dst); v.Kind() == reflect.Ptr {
el := v.Elem()
if el.Kind() == reflect.Ptr {
el.Set(reflect.Zero(el.Type()))
return nil
}
}
return fmt.Errorf("cannot assign %v %v into %T", srcVal, srcValid, dst)
}
func NullAssignTo(dst any) error {
	dstPtr := reflect.ValueOf(dst)

vendor/github.com/jackc/pgx/v5/pgtype/hstore.go

@@ -121,8 +121,15 @@ func (encodePlanHstoreCodecText) Encode(value any, buf []byte) (newBuf []byte, e
 		return nil, err
 	}
 
-	if hstore == nil {
-		return nil, nil
+	if len(hstore) == 0 {
+		// distinguish between empty and nil: Not strictly required by Postgres, since its protocol
+		// explicitly marks NULL column values separately. However, the Binary codec does this, and
+		// this means we can "round trip" Encode and Scan without data loss.
+		// nil: []byte(nil); empty: []byte{}
+		if hstore == nil {
+			return nil, nil
+		}
+		return []byte{}, nil
 	}
 
 	firstPair := true
@@ -131,7 +138,7 @@ func (encodePlanHstoreCodecText) Encode(value any, buf []byte) (newBuf []byte, e
 		if firstPair {
 			firstPair = false
 		} else {
-			buf = append(buf, ',')
+			buf = append(buf, ',', ' ')
 		}
 
 		// unconditionally quote hstore keys/values like Postgres does
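A self-contained sketch of what the reworked text encoder now produces for the three interesting inputs. hstore has no fixed OID, so the codec is exercised directly here rather than through a registered type; a nil Hstore still becomes SQL NULL, an empty Hstore becomes a zero-length (non-nil) value, and pairs are now joined with ", ".

    package main

    import (
        "fmt"

        "github.com/jackc/pgx/v5/pgtype"
    )

    func main() {
        m := pgtype.NewMap()
        codec := pgtype.HstoreCodec{}
        v := "1"

        cases := []struct {
            name string
            h    pgtype.Hstore
        }{
            {"nil", nil},
            {"empty", pgtype.Hstore{}},
            {"pairs", pgtype.Hstore{"a": &v, "b": nil}},
        }
        for _, c := range cases {
            plan := codec.PlanEncode(m, 0, pgtype.TextFormatCode, c.h)
            buf, err := plan.Encode(c.h, nil)
            fmt.Printf("%-5s -> %q (SQL NULL: %v, err: %v)\n", c.name, buf, buf == nil, err)
        }
    }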

vendor/github.com/jackc/pgx/v5/pgtype/json.go

@@ -25,6 +25,13 @@ func (c JSONCodec) PlanEncode(m *Map, oid uint32, format int16, value any) Encod
 	case []byte:
 		return encodePlanJSONCodecEitherFormatByteSlice{}
 
+	// Must come before trying wrap encode plans because a pointer to a struct may be unwrapped to a struct that can be
+	// marshalled.
+	//
+	// https://github.com/jackc/pgx/issues/1681
+	case json.Marshaler:
+		return encodePlanJSONCodecEitherFormatMarshal{}
+
 	// Cannot rely on driver.Valuer being handled later because anything can be marshalled.
 	//
 	// https://github.com/jackc/pgx/issues/1430
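A sketch of the effect of the new json.Marshaler case (issue 1681 referenced above). Temperature is a hypothetical type whose pointer implements json.Marshaler; with this change the custom MarshalJSON output is what gets encoded, instead of the pointer being unwrapped and the struct marshalled field by field.

    package main

    import (
        "fmt"

        "github.com/jackc/pgx/v5/pgtype"
    )

    type Temperature struct{ Celsius float64 }

    func (t *Temperature) MarshalJSON() ([]byte, error) {
        return []byte(fmt.Sprintf(`{"celsius":%.1f}`, t.Celsius)), nil
    }

    func main() {
        m := pgtype.NewMap()
        t := &Temperature{Celsius: 21.5}

        plan := m.PlanEncode(pgtype.JSONOID, pgtype.TextFormatCode, t)
        buf, err := plan.Encode(t, nil)
        fmt.Println(string(buf), err) // {"celsius":21.5} <nil>
    }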

vendor/github.com/jackc/pgx/v5/pgtype/numeric.go

@@ -33,23 +33,6 @@ var big10 *big.Int = big.NewInt(10)
 var big100 *big.Int = big.NewInt(100)
 var big1000 *big.Int = big.NewInt(1000)
 
-var bigMaxInt8 *big.Int = big.NewInt(math.MaxInt8)
-var bigMinInt8 *big.Int = big.NewInt(math.MinInt8)
-var bigMaxInt16 *big.Int = big.NewInt(math.MaxInt16)
-var bigMinInt16 *big.Int = big.NewInt(math.MinInt16)
-var bigMaxInt32 *big.Int = big.NewInt(math.MaxInt32)
-var bigMinInt32 *big.Int = big.NewInt(math.MinInt32)
-var bigMaxInt64 *big.Int = big.NewInt(math.MaxInt64)
-var bigMinInt64 *big.Int = big.NewInt(math.MinInt64)
-var bigMaxInt *big.Int = big.NewInt(int64(maxInt))
-var bigMinInt *big.Int = big.NewInt(int64(minInt))
-var bigMaxUint8 *big.Int = big.NewInt(math.MaxUint8)
-var bigMaxUint16 *big.Int = big.NewInt(math.MaxUint16)
-var bigMaxUint32 *big.Int = big.NewInt(math.MaxUint32)
-var bigMaxUint64 *big.Int = (&big.Int{}).SetUint64(uint64(math.MaxUint64))
-var bigMaxUint *big.Int = (&big.Int{}).SetUint64(uint64(maxUint))
-
 var bigNBase *big.Int = big.NewInt(nbase)
 var bigNBaseX2 *big.Int = big.NewInt(nbase * nbase)
 var bigNBaseX3 *big.Int = big.NewInt(nbase * nbase * nbase)
@@ -241,11 +224,11 @@ func (n Numeric) MarshalJSON() ([]byte, error) {
 }
 
 func (n *Numeric) UnmarshalJSON(src []byte) error {
-	if bytes.Compare(src, []byte(`null`)) == 0 {
+	if bytes.Equal(src, []byte(`null`)) {
 		*n = Numeric{}
 		return nil
 	}
-	if bytes.Compare(src, []byte(`"NaN"`)) == 0 {
+	if bytes.Equal(src, []byte(`"NaN"`)) {
 		*n = Numeric{NaN: true, Valid: true}
 		return nil
 	}

vendor/github.com/jackc/pgx/v5/pgtype/pgtype.go

@@ -1140,25 +1140,6 @@ func (m *Map) Scan(oid uint32, formatCode int16, src []byte, dst any) error {
 	return plan.Scan(src, dst)
 }
 
-func scanUnknownType(oid uint32, formatCode int16, buf []byte, dest any) error {
-	switch dest := dest.(type) {
-	case *string:
-		if formatCode == BinaryFormatCode {
-			return fmt.Errorf("unknown oid %d in binary format cannot be scanned into %T", oid, dest)
-		}
-		*dest = string(buf)
-		return nil
-	case *[]byte:
-		*dest = buf
-		return nil
-	default:
-		if nextDst, retry := GetAssignToDstType(dest); retry {
-			return scanUnknownType(oid, formatCode, buf, nextDst)
-		}
-		return fmt.Errorf("unknown oid %d cannot be scanned into %T", oid, dest)
-	}
-}
-
 var ErrScanTargetTypeChanged = errors.New("scan target type changed")
 
 func codecScan(codec Codec, m *Map, oid uint32, format int16, src []byte, dst any) error {

vendor/github.com/jackc/pgx/v5/pgtype/point.go

@@ -40,7 +40,7 @@ func (p Point) PointValue() (Point, error) {
 }
 
 func parsePoint(src []byte) (*Point, error) {
-	if src == nil || bytes.Compare(src, []byte("null")) == 0 {
+	if src == nil || bytes.Equal(src, []byte("null")) {
 		return &Point{}, nil
 	}

vendor/github.com/jackc/pgx/v5/pgtype/uuid.go

@@ -97,7 +97,7 @@ func (src UUID) MarshalJSON() ([]byte, error) {
 }
 
 func (dst *UUID) UnmarshalJSON(src []byte) error {
-	if bytes.Compare(src, []byte("null")) == 0 {
+	if bytes.Equal(src, []byte("null")) {
 		*dst = UUID{}
 		return nil
 	}

vendor/github.com/jackc/pgx/v5/rows.go

@@ -231,7 +231,11 @@ func (rows *baseRows) Scan(dest ...any) error {
 
 	if len(dest) == 1 {
 		if rc, ok := dest[0].(RowScanner); ok {
-			return rc.ScanRow(rows)
+			err := rc.ScanRow(rows)
+			if err != nil {
+				rows.fatal(err)
+			}
+			return err
 		}
 	}
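A sketch of how this surfaces to callers: a RowScanner whose ScanRow fails now also marks the Rows as failed, so the loop stops and rows.Err() reports the error (user and the users table are hypothetical).

    package example

    import (
        "context"
        "log"

        "github.com/jackc/pgx/v5"
    )

    type user struct {
        ID   int64
        Name string
    }

    // ScanRow makes user a pgx.RowScanner, so rows.Scan(&u) delegates to it.
    func (u *user) ScanRow(rows pgx.Rows) error {
        return rows.Scan(&u.ID, &u.Name)
    }

    func printUsers(ctx context.Context, conn *pgx.Conn) error {
        rows, err := conn.Query(ctx, `select id, name from users`)
        if err != nil {
            return err
        }
        defer rows.Close()

        for rows.Next() {
            var u user
            if err := rows.Scan(&u); err != nil {
                break // the error is now also recorded on rows
            }
            log.Printf("user %d: %s", u.ID, u.Name)
        }
        return rows.Err() // includes a ScanRow failure since v5.4.2
    }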

vendor/modules.txt (2 lines changed)

@@ -334,7 +334,7 @@ github.com/jackc/pgproto3/v2
 # github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a
 ## explicit; go 1.14
 github.com/jackc/pgservicefile
-# github.com/jackc/pgx/v5 v5.4.1
+# github.com/jackc/pgx/v5 v5.4.2
 ## explicit; go 1.19
 github.com/jackc/pgx/v5
 github.com/jackc/pgx/v5/internal/anynil
github.com/jackc/pgx/v5/internal/anynil github.com/jackc/pgx/v5/internal/anynil