Vendor Update (#16121)

* update github.com/PuerkitoBio/goquery

* update github.com/alecthomas/chroma

* update github.com/blevesearch/bleve/v2

* update github.com/caddyserver/certmagic

* update github.com/go-enry/go-enry/v2

* update github.com/go-git/go-billy/v5

* update github.com/go-git/go-git/v5

* update github.com/go-redis/redis/v8

* update github.com/go-testfixtures/testfixtures/v3

* update github.com/jaytaylor/html2text

* update github.com/json-iterator/go

* update github.com/klauspost/compress

* update github.com/markbates/goth

* update github.com/mattn/go-isatty

* update github.com/mholt/archiver/v3

* update github.com/microcosm-cc/bluemonday

* update github.com/minio/minio-go/v7

* update github.com/prometheus/client_golang

* update github.com/unrolled/render

* update github.com/xanzy/go-gitlab

* update github.com/yuin/goldmark

* update github.com/yuin/goldmark-highlighting

Co-authored-by: techknowlogick <techknowlogick@gitea.io>
6543 2021-06-10 16:44:25 +02:00 committed by GitHub
parent f088dc4ea1
commit 86e2789960
819 changed files with 38072 additions and 34969 deletions


@ -3,6 +3,24 @@
> :heart:
> [**Uptrace.dev** - All-in-one tool to optimize performance and monitor errors & logs](https://uptrace.dev)
## v8.10
- Removed extra OpenTelemetry spans from go-redis core. Now go-redis instrumentation only adds a
single span with a Redis command (instead of 4 spans). There are multiple reasons behind this
decision:
- Traces become smaller and less noisy.
- It may be costly to process those 3 extra spans for each query.
- go-redis no longer depends on OpenTelemetry.
Eventually we hope to replace the information that we no longer collect with OpenTelemetry
Metrics.
## v8.9
- Changed `PubSub.Channel` to only rely on `Ping` result. You can now use `WithChannelSize`,
`WithChannelHealthCheckInterval`, and `WithChannelSendTimeout` to override default settings.
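As a rough illustration of the new options, here is a minimal sketch of subscribing with the v8.9 channel settings; the address, channel name, and chosen values are placeholders, not part of this update.

```
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/go-redis/redis/v8"
)

func main() {
	ctx := context.Background()
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"})

	pubsub := rdb.Subscribe(ctx, "news")
	defer pubsub.Close()

	// Hypothetical settings: buffer 500 messages, health-check every 5s,
	// drop a message after 10s of back-pressure.
	ch := pubsub.Channel(
		redis.WithChannelSize(500),
		redis.WithChannelHealthCheckInterval(5*time.Second),
		redis.WithChannelSendTimeout(10*time.Second),
	)

	for msg := range ch {
		fmt.Println(msg.Channel, msg.Payload)
	}
}
```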
## v8.8
- To make updating easier, extra modules now have the same version as go-redis does. That means that


@ -138,7 +138,7 @@ res, err := rdb.Do(ctx, "set", "key", "value").Result()
go-redis will start a redis-server and run the test cases.
The paths of redis-server bin file and redis config file are definded in `main_test.go`:
The paths of redis-server bin file and redis config file are defined in `main_test.go`:
```
var (
@ -163,7 +163,5 @@ go test
## See also
- [Fast and flexible HTTP router](https://github.com/vmihailenco/treemux)
- [Golang PostgreSQL ORM](https://github.com/go-pg/pg)
- [Golang msgpack](https://github.com/vmihailenco/msgpack)
- [Golang message task queue](https://github.com/vmihailenco/taskq)
- [Fast and flexible ORM](https://github.com/uptrace/bun)
- [msgpack for Go](https://github.com/vmihailenco/msgpack)


@ -2,6 +2,7 @@ package redis
import (
"context"
"sync"
"sync/atomic"
)
@ -23,3 +24,76 @@ func (c *ClusterClient) DBSize(ctx context.Context) *IntCmd {
cmd.val = size
return cmd
}
func (c *ClusterClient) ScriptLoad(ctx context.Context, script string) *StringCmd {
cmd := NewStringCmd(ctx, "script", "load", script)
mu := &sync.Mutex{}
err := c.ForEachShard(ctx, func(ctx context.Context, shard *Client) error {
val, err := shard.ScriptLoad(ctx, script).Result()
if err != nil {
return err
}
mu.Lock()
if cmd.Val() == "" {
cmd.val = val
}
mu.Unlock()
return nil
})
if err != nil {
cmd.SetErr(err)
}
return cmd
}
func (c *ClusterClient) ScriptFlush(ctx context.Context) *StatusCmd {
cmd := NewStatusCmd(ctx, "script", "flush")
_ = c.ForEachShard(ctx, func(ctx context.Context, shard *Client) error {
shard.ScriptFlush(ctx)
return nil
})
return cmd
}
func (c *ClusterClient) ScriptExists(ctx context.Context, hashes ...string) *BoolSliceCmd {
args := make([]interface{}, 2+len(hashes))
args[0] = "script"
args[1] = "exists"
for i, hash := range hashes {
args[2+i] = hash
}
cmd := NewBoolSliceCmd(ctx, args...)
result := make([]bool, len(hashes))
for i := range result {
result[i] = true
}
mu := &sync.Mutex{}
err := c.ForEachShard(ctx, func(ctx context.Context, shard *Client) error {
val, err := shard.ScriptExists(ctx, hashes...).Result()
if err != nil {
return err
}
mu.Lock()
for i, v := range val {
result[i] = result[i] && v
}
mu.Unlock()
return nil
})
if err != nil {
cmd.SetErr(err)
}
cmd.val = result
return cmd
}
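For context on these new ClusterClient helpers, a minimal usage sketch follows; the cluster addresses, key name, and script are illustrative assumptions. As the code above shows, ScriptExists ANDs the per-shard results, so it reports true only once every shard has the script cached.

```
package main

import (
	"context"
	"fmt"

	"github.com/go-redis/redis/v8"
)

func main() {
	ctx := context.Background()
	rdb := redis.NewClusterClient(&redis.ClusterOptions{
		Addrs: []string{"localhost:7000", "localhost:7001", "localhost:7002"},
	})

	// Fan the script out to every shard and keep the returned SHA1.
	const script = `return redis.call("incrby", KEYS[1], ARGV[1])`
	sha, err := rdb.ScriptLoad(ctx, script).Result()
	if err != nil {
		panic(err)
	}

	// True only if every shard has the script cached.
	exists, err := rdb.ScriptExists(ctx, sha).Result()
	if err != nil {
		panic(err)
	}
	fmt.Println(exists[0])

	// EvalSha then runs on whichever shard owns the key.
	n, err := rdb.EvalSha(ctx, sha, []string{"counter"}, 5).Int64()
	if err != nil {
		panic(err)
	}
	fmt.Println(n)
}
```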


@ -1512,7 +1512,7 @@ type XInfoConsumer struct {
Idle int64
}
var _ Cmder = (*XInfoGroupsCmd)(nil)
var _ Cmder = (*XInfoConsumersCmd)(nil)
func NewXInfoConsumersCmd(ctx context.Context, stream string, group string) *XInfoConsumersCmd {
return &XInfoConsumersCmd{
@ -1769,8 +1769,14 @@ func xStreamInfoParser(rd *proto.Reader, n int64) (interface{}, error) {
info.LastGeneratedID, err = rd.ReadString()
case "first-entry":
info.FirstEntry, err = readXMessage(rd)
if err == Nil {
err = nil
}
case "last-entry":
info.LastEntry, err = readXMessage(rd)
if err == Nil {
err = nil
}
default:
return nil, fmt.Errorf("redis: unexpected content %s "+
"in XINFO STREAM reply", key)
@ -1784,6 +1790,302 @@ func xStreamInfoParser(rd *proto.Reader, n int64) (interface{}, error) {
//------------------------------------------------------------------------------
type XInfoStreamFullCmd struct {
baseCmd
val *XInfoStreamFull
}
type XInfoStreamFull struct {
Length int64
RadixTreeKeys int64
RadixTreeNodes int64
LastGeneratedID string
Entries []XMessage
Groups []XInfoStreamGroup
}
type XInfoStreamGroup struct {
Name string
LastDeliveredID string
PelCount int64
Pending []XInfoStreamGroupPending
Consumers []XInfoStreamConsumer
}
type XInfoStreamGroupPending struct {
ID string
Consumer string
DeliveryTime time.Time
DeliveryCount int64
}
type XInfoStreamConsumer struct {
Name string
SeenTime time.Time
PelCount int64
Pending []XInfoStreamConsumerPending
}
type XInfoStreamConsumerPending struct {
ID string
DeliveryTime time.Time
DeliveryCount int64
}
var _ Cmder = (*XInfoStreamFullCmd)(nil)
func NewXInfoStreamFullCmd(ctx context.Context, args ...interface{}) *XInfoStreamFullCmd {
return &XInfoStreamFullCmd{
baseCmd: baseCmd{
ctx: ctx,
args: args,
},
}
}
func (cmd *XInfoStreamFullCmd) Val() *XInfoStreamFull {
return cmd.val
}
func (cmd *XInfoStreamFullCmd) Result() (*XInfoStreamFull, error) {
return cmd.val, cmd.err
}
func (cmd *XInfoStreamFullCmd) String() string {
return cmdString(cmd, cmd.val)
}
func (cmd *XInfoStreamFullCmd) readReply(rd *proto.Reader) error {
n, err := rd.ReadArrayLen()
if err != nil {
return err
}
if n != 12 {
return fmt.Errorf("redis: got %d elements in XINFO STREAM FULL reply,"+
"wanted 12", n)
}
cmd.val = &XInfoStreamFull{}
for i := 0; i < 6; i++ {
key, err := rd.ReadString()
if err != nil {
return err
}
switch key {
case "length":
cmd.val.Length, err = rd.ReadIntReply()
case "radix-tree-keys":
cmd.val.RadixTreeKeys, err = rd.ReadIntReply()
case "radix-tree-nodes":
cmd.val.RadixTreeNodes, err = rd.ReadIntReply()
case "last-generated-id":
cmd.val.LastGeneratedID, err = rd.ReadString()
case "entries":
cmd.val.Entries, err = readXMessageSlice(rd)
case "groups":
cmd.val.Groups, err = readStreamGroups(rd)
default:
return fmt.Errorf("redis: unexpected content %s "+
"in XINFO STREAM reply", key)
}
if err != nil {
return err
}
}
return nil
}
func readStreamGroups(rd *proto.Reader) ([]XInfoStreamGroup, error) {
n, err := rd.ReadArrayLen()
if err != nil {
return nil, err
}
groups := make([]XInfoStreamGroup, 0, n)
for i := 0; i < n; i++ {
nn, err := rd.ReadArrayLen()
if err != nil {
return nil, err
}
if nn != 10 {
return nil, fmt.Errorf("redis: got %d elements in XINFO STREAM FULL reply,"+
"wanted 10", nn)
}
group := XInfoStreamGroup{}
for f := 0; f < 5; f++ {
key, err := rd.ReadString()
if err != nil {
return nil, err
}
switch key {
case "name":
group.Name, err = rd.ReadString()
case "last-delivered-id":
group.LastDeliveredID, err = rd.ReadString()
case "pel-count":
group.PelCount, err = rd.ReadIntReply()
case "pending":
group.Pending, err = readXInfoStreamGroupPending(rd)
case "consumers":
group.Consumers, err = readXInfoStreamConsumers(rd)
default:
return nil, fmt.Errorf("redis: unexpected content %s "+
"in XINFO STREAM reply", key)
}
if err != nil {
return nil, err
}
}
groups = append(groups, group)
}
return groups, nil
}
func readXInfoStreamGroupPending(rd *proto.Reader) ([]XInfoStreamGroupPending, error) {
n, err := rd.ReadArrayLen()
if err != nil {
return nil, err
}
pending := make([]XInfoStreamGroupPending, 0, n)
for i := 0; i < n; i++ {
nn, err := rd.ReadArrayLen()
if err != nil {
return nil, err
}
if nn != 4 {
return nil, fmt.Errorf("redis: got %d elements in XINFO STREAM FULL reply,"+
"wanted 4", nn)
}
p := XInfoStreamGroupPending{}
p.ID, err = rd.ReadString()
if err != nil {
return nil, err
}
p.Consumer, err = rd.ReadString()
if err != nil {
return nil, err
}
delivery, err := rd.ReadIntReply()
if err != nil {
return nil, err
}
p.DeliveryTime = time.Unix(delivery/1000, delivery%1000*int64(time.Millisecond))
p.DeliveryCount, err = rd.ReadIntReply()
if err != nil {
return nil, err
}
pending = append(pending, p)
}
return pending, nil
}
func readXInfoStreamConsumers(rd *proto.Reader) ([]XInfoStreamConsumer, error) {
n, err := rd.ReadArrayLen()
if err != nil {
return nil, err
}
consumers := make([]XInfoStreamConsumer, 0, n)
for i := 0; i < n; i++ {
nn, err := rd.ReadArrayLen()
if err != nil {
return nil, err
}
if nn != 8 {
return nil, fmt.Errorf("redis: got %d elements in XINFO STREAM FULL reply,"+
"wanted 8", nn)
}
c := XInfoStreamConsumer{}
for f := 0; f < 4; f++ {
cKey, err := rd.ReadString()
if err != nil {
return nil, err
}
switch cKey {
case "name":
c.Name, err = rd.ReadString()
case "seen-time":
seen, err := rd.ReadIntReply()
if err != nil {
return nil, err
}
c.SeenTime = time.Unix(seen/1000, seen%1000*int64(time.Millisecond))
case "pel-count":
c.PelCount, err = rd.ReadIntReply()
case "pending":
pendingNumber, err := rd.ReadArrayLen()
if err != nil {
return nil, err
}
c.Pending = make([]XInfoStreamConsumerPending, 0, pendingNumber)
for pn := 0; pn < pendingNumber; pn++ {
nn, err := rd.ReadArrayLen()
if err != nil {
return nil, err
}
if nn != 3 {
return nil, fmt.Errorf("redis: got %d elements in XINFO STREAM reply,"+
"wanted 3", nn)
}
p := XInfoStreamConsumerPending{}
p.ID, err = rd.ReadString()
if err != nil {
return nil, err
}
delivery, err := rd.ReadIntReply()
if err != nil {
return nil, err
}
p.DeliveryTime = time.Unix(delivery/1000, delivery%1000*int64(time.Millisecond))
p.DeliveryCount, err = rd.ReadIntReply()
if err != nil {
return nil, err
}
c.Pending = append(c.Pending, p)
}
default:
return nil, fmt.Errorf("redis: unexpected content %s "+
"in XINFO STREAM reply", cKey)
}
if err != nil {
return nil, err
}
}
consumers = append(consumers, c)
}
return consumers, nil
}
//------------------------------------------------------------------------------
type ZSliceCmd struct {
baseCmd


@ -179,6 +179,7 @@ type Cmdable interface {
LInsertAfter(ctx context.Context, key string, pivot, value interface{}) *IntCmd
LLen(ctx context.Context, key string) *IntCmd
LPop(ctx context.Context, key string) *StringCmd
LPopCount(ctx context.Context, key string, count int) *StringSliceCmd
LPos(ctx context.Context, key string, value string, args LPosArgs) *IntCmd
LPosCount(ctx context.Context, key string, value string, count int64, args LPosArgs) *IntSliceCmd
LPush(ctx context.Context, key string, values ...interface{}) *IntCmd
@ -200,6 +201,7 @@ type Cmdable interface {
SInter(ctx context.Context, keys ...string) *StringSliceCmd
SInterStore(ctx context.Context, destination string, keys ...string) *IntCmd
SIsMember(ctx context.Context, key string, member interface{}) *BoolCmd
SMIsMember(ctx context.Context, key string, members ...interface{}) *BoolSliceCmd
SMembers(ctx context.Context, key string) *StringSliceCmd
SMembersMap(ctx context.Context, key string) *StringStructMapCmd
SMove(ctx context.Context, source, destination string, member interface{}) *BoolCmd
@ -252,6 +254,8 @@ type Cmdable interface {
ZCount(ctx context.Context, key, min, max string) *IntCmd
ZLexCount(ctx context.Context, key, min, max string) *IntCmd
ZIncrBy(ctx context.Context, key string, increment float64, member string) *FloatCmd
ZInter(ctx context.Context, store *ZStore) *StringSliceCmd
ZInterWithScores(ctx context.Context, store *ZStore) *ZSliceCmd
ZInterStore(ctx context.Context, destination string, store *ZStore) *IntCmd
ZMScore(ctx context.Context, key string, members ...string) *FloatSliceCmd
ZPopMax(ctx context.Context, key string, count ...int64) *ZSliceCmd
@ -275,6 +279,9 @@ type Cmdable interface {
ZScore(ctx context.Context, key, member string) *FloatCmd
ZUnionStore(ctx context.Context, dest string, store *ZStore) *IntCmd
ZRandMember(ctx context.Context, key string, count int, withScores bool) *StringSliceCmd
ZDiff(ctx context.Context, keys ...string) *StringSliceCmd
ZDiffWithScores(ctx context.Context, keys ...string) *ZSliceCmd
ZDiffStore(ctx context.Context, destination string, keys ...string) *IntCmd
PFAdd(ctx context.Context, key string, els ...interface{}) *IntCmd
PFCount(ctx context.Context, keys ...string) *IntCmd
@ -1313,6 +1320,12 @@ func (c cmdable) LPop(ctx context.Context, key string) *StringCmd {
return cmd
}
func (c cmdable) LPopCount(ctx context.Context, key string, count int) *StringSliceCmd {
cmd := NewStringSliceCmd(ctx, "lpop", key, count)
_ = c(ctx, cmd)
return cmd
}
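A short, hedged example of the new LPopCount helper (LPOP with a count requires redis-server >= 6.2); the key and values are placeholders.

```
package main

import (
	"context"
	"fmt"

	"github.com/go-redis/redis/v8"
)

func main() {
	ctx := context.Background()
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"})

	rdb.RPush(ctx, "queue", "a", "b", "c", "d")

	// Pop the two oldest elements in one call.
	vals, err := rdb.LPopCount(ctx, "queue", 2).Result()
	if err != nil {
		panic(err)
	}
	fmt.Println(vals) // [a b]
}
```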
type LPosArgs struct {
Rank, MaxLen int64
}
@ -1508,6 +1521,17 @@ func (c cmdable) SIsMember(ctx context.Context, key string, member interface{})
return cmd
}
// Redis `SMISMEMBER key member [member ...]` command.
func (c cmdable) SMIsMember(ctx context.Context, key string, members ...interface{}) *BoolSliceCmd {
args := make([]interface{}, 2, 2+len(members))
args[0] = "smismember"
args[1] = key
args = appendArgs(args, members)
cmd := NewBoolSliceCmd(ctx, args...)
_ = c(ctx, cmd)
return cmd
}
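Likewise, a minimal sketch of SMIsMember, which checks several members in a single SMISMEMBER round trip (redis-server >= 6.2); the key and members are illustrative.

```
package main

import (
	"context"
	"fmt"

	"github.com/go-redis/redis/v8"
)

func main() {
	ctx := context.Background()
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"})

	rdb.SAdd(ctx, "langs", "go", "rust")

	// One reply slot per queried member, in the same order.
	flags, err := rdb.SMIsMember(ctx, "langs", "go", "zig").Result()
	if err != nil {
		panic(err)
	}
	fmt.Println(flags) // [true false]
}
```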
// Redis `SMEMBERS key` command output as a slice.
func (c cmdable) SMembers(ctx context.Context, key string) *StringSliceCmd {
cmd := NewStringSliceCmd(ctx, "smembers", key)
@ -1752,7 +1776,7 @@ func (c cmdable) XReadGroup(ctx context.Context, a *XReadGroupArgs) *XStreamSlic
args := make([]interface{}, 0, 8+len(a.Streams))
args = append(args, "xreadgroup", "group", a.Group, a.Consumer)
keyPos := int8(1)
keyPos := int8(4)
if a.Count > 0 {
args = append(args, "count", a.Count)
keyPos += 2
@ -1799,6 +1823,7 @@ func (c cmdable) XPending(ctx context.Context, stream, group string) *XPendingCm
type XPendingExtArgs struct {
Stream string
Group string
Idle time.Duration
Start string
End string
Count int64
@ -1806,8 +1831,12 @@ type XPendingExtArgs struct {
}
func (c cmdable) XPendingExt(ctx context.Context, a *XPendingExtArgs) *XPendingExtCmd {
args := make([]interface{}, 0, 7)
args = append(args, "xpending", a.Stream, a.Group, a.Start, a.End, a.Count)
args := make([]interface{}, 0, 9)
args = append(args, "xpending", a.Stream, a.Group)
if a.Idle != 0 {
args = append(args, "idle", formatMs(ctx, a.Idle))
}
args = append(args, a.Start, a.End, a.Count)
if a.Consumer != "" {
args = append(args, a.Consumer)
}
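To show how the new Idle filter might be used, a hedged sketch follows; the stream and group names are hypothetical, and the IDLE argument itself requires redis-server >= 6.2.

```
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/go-redis/redis/v8"
)

func main() {
	ctx := context.Background()
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"})

	// List up to 10 pending entries that have been idle for at least a minute.
	pending, err := rdb.XPendingExt(ctx, &redis.XPendingExtArgs{
		Stream: "events",
		Group:  "workers",
		Idle:   time.Minute,
		Start:  "-",
		End:    "+",
		Count:  10,
	}).Result()
	if err != nil {
		panic(err)
	}
	for _, p := range pending {
		fmt.Println(p.ID, p.Consumer, p.RetryCount)
	}
}
```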
@ -1882,6 +1911,19 @@ func (c cmdable) XInfoStream(ctx context.Context, key string) *XInfoStreamCmd {
return cmd
}
// XInfoStreamFull XINFO STREAM FULL [COUNT count]
// redis-server >= 6.0.
func (c cmdable) XInfoStreamFull(ctx context.Context, key string, count int) *XInfoStreamFullCmd {
args := make([]interface{}, 0, 6)
args = append(args, "xinfo", "stream", key, "full")
if count > 0 {
args = append(args, "count", count)
}
cmd := NewXInfoStreamFullCmd(ctx, args...)
_ = c(ctx, cmd)
return cmd
}
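A minimal sketch of calling the new XInfoStreamFull helper; the stream name and COUNT value are illustrative.

```
package main

import (
	"context"
	"fmt"

	"github.com/go-redis/redis/v8"
)

func main() {
	ctx := context.Background()
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"})

	// COUNT limits the entries and PEL items returned per group; 0 keeps the server default.
	info, err := rdb.XInfoStreamFull(ctx, "events", 20).Result()
	if err != nil {
		panic(err)
	}
	fmt.Println("length:", info.Length, "last id:", info.LastGeneratedID)
	for _, g := range info.Groups {
		fmt.Println("group:", g.Name, "pending:", g.PelCount, "consumers:", len(g.Consumers))
	}
}
```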
//------------------------------------------------------------------------------
// Z represents sorted set member.
@ -1904,6 +1946,17 @@ type ZStore struct {
Aggregate string
}
func (z *ZStore) len() (n int) {
n = len(z.Keys)
if len(z.Weights) > 0 {
n += 1 + len(z.Weights)
}
if z.Aggregate != "" {
n += 2
}
return n
}
// Redis `BZPOPMAX key [key ...] timeout` command.
func (c cmdable) BZPopMax(ctx context.Context, timeout time.Duration, keys ...string) *ZWithKeyCmd {
args := make([]interface{}, 1+len(keys)+1)
@ -2049,7 +2102,7 @@ func (c cmdable) ZIncrBy(ctx context.Context, key string, increment float64, mem
}
func (c cmdable) ZInterStore(ctx context.Context, destination string, store *ZStore) *IntCmd {
args := make([]interface{}, 0, 3+len(store.Keys))
args := make([]interface{}, 0, 3+store.len())
args = append(args, "zinterstore", destination, len(store.Keys))
for _, key := range store.Keys {
args = append(args, key)
@ -2069,6 +2122,50 @@ func (c cmdable) ZInterStore(ctx context.Context, destination string, store *ZSt
return cmd
}
func (c cmdable) ZInter(ctx context.Context, store *ZStore) *StringSliceCmd {
args := make([]interface{}, 0, 2+store.len())
args = append(args, "zinter", len(store.Keys))
for _, key := range store.Keys {
args = append(args, key)
}
if len(store.Weights) > 0 {
args = append(args, "weights")
for _, weights := range store.Weights {
args = append(args, weights)
}
}
if store.Aggregate != "" {
args = append(args, "aggregate", store.Aggregate)
}
cmd := NewStringSliceCmd(ctx, args...)
cmd.setFirstKeyPos(2)
_ = c(ctx, cmd)
return cmd
}
func (c cmdable) ZInterWithScores(ctx context.Context, store *ZStore) *ZSliceCmd {
args := make([]interface{}, 0, 3+store.len())
args = append(args, "zinter", len(store.Keys))
for _, key := range store.Keys {
args = append(args, key)
}
if len(store.Weights) > 0 {
args = append(args, "weights")
for _, weights := range store.Weights {
args = append(args, weights)
}
}
if store.Aggregate != "" {
args = append(args, "aggregate", store.Aggregate)
}
args = append(args, "withscores")
cmd := NewZSliceCmd(ctx, args...)
cmd.setFirstKeyPos(2)
_ = c(ctx, cmd)
return cmd
}
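For the non-storing ZInter variants added above, a small usage sketch (redis-server >= 6.2); the keys, scores, and weights are placeholders.

```
package main

import (
	"context"
	"fmt"

	"github.com/go-redis/redis/v8"
)

func main() {
	ctx := context.Background()
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"})

	rdb.ZAdd(ctx, "zset1", &redis.Z{Score: 1, Member: "a"}, &redis.Z{Score: 2, Member: "b"})
	rdb.ZAdd(ctx, "zset2", &redis.Z{Score: 3, Member: "b"}, &redis.Z{Score: 4, Member: "c"})

	// Intersection with scores, aggregated by SUM.
	members, err := rdb.ZInterWithScores(ctx, &redis.ZStore{
		Keys:      []string{"zset1", "zset2"},
		Weights:   []float64{1, 1},
		Aggregate: "sum",
	}).Result()
	if err != nil {
		panic(err)
	}
	for _, z := range members {
		fmt.Println(z.Member, z.Score) // b 5
	}
}
```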
func (c cmdable) ZMScore(ctx context.Context, key string, members ...string) *FloatSliceCmd {
args := make([]interface{}, 2+len(members))
args[0] = "zmscore"
@ -2295,7 +2392,7 @@ func (c cmdable) ZScore(ctx context.Context, key, member string) *FloatCmd {
}
func (c cmdable) ZUnionStore(ctx context.Context, dest string, store *ZStore) *IntCmd {
args := make([]interface{}, 0, 3+len(store.Keys))
args := make([]interface{}, 0, 3+store.len())
args = append(args, "zunionstore", dest, len(store.Keys))
for _, key := range store.Keys {
args = append(args, key)
@ -2331,6 +2428,49 @@ func (c cmdable) ZRandMember(ctx context.Context, key string, count int, withSco
return cmd
}
// redis-server version >= 6.2.0.
func (c cmdable) ZDiff(ctx context.Context, keys ...string) *StringSliceCmd {
args := make([]interface{}, 2+len(keys))
args[0] = "zdiff"
args[1] = len(keys)
for i, key := range keys {
args[i+2] = key
}
cmd := NewStringSliceCmd(ctx, args...)
cmd.setFirstKeyPos(2)
_ = c(ctx, cmd)
return cmd
}
// redis-server version >= 6.2.0.
func (c cmdable) ZDiffWithScores(ctx context.Context, keys ...string) *ZSliceCmd {
args := make([]interface{}, 3+len(keys))
args[0] = "zdiff"
args[1] = len(keys)
for i, key := range keys {
args[i+2] = key
}
args[len(keys)+2] = "withscores"
cmd := NewZSliceCmd(ctx, args...)
cmd.setFirstKeyPos(2)
_ = c(ctx, cmd)
return cmd
}
// redis-server version >= 6.2.0.
func (c cmdable) ZDiffStore(ctx context.Context, destination string, keys ...string) *IntCmd {
args := make([]interface{}, 0, 3+len(keys))
args = append(args, "zdiffstore", destination, len(keys))
for _, key := range keys {
args = append(args, key)
}
cmd := NewIntCmd(ctx, args...)
_ = c(ctx, cmd)
return cmd
}
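And a brief sketch of the new ZDiff family (redis-server >= 6.2); again, the keys and members are illustrative only.

```
package main

import (
	"context"
	"fmt"

	"github.com/go-redis/redis/v8"
)

func main() {
	ctx := context.Background()
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"})

	rdb.ZAdd(ctx, "zsetA", &redis.Z{Score: 1, Member: "a"}, &redis.Z{Score: 2, Member: "b"})
	rdb.ZAdd(ctx, "zsetB", &redis.Z{Score: 1, Member: "a"})

	// Members of zsetA that are not in zsetB.
	diff, err := rdb.ZDiff(ctx, "zsetA", "zsetB").Result()
	if err != nil {
		panic(err)
	}
	fmt.Println(diff) // [b]

	// Store the same difference under a new key and report its cardinality.
	n, err := rdb.ZDiffStore(ctx, "zsetDiff", "zsetA", "zsetB").Result()
	if err != nil {
		panic(err)
	}
	fmt.Println(n) // 1
}
```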
//------------------------------------------------------------------------------
func (c cmdable) PFAdd(ctx context.Context, key string, els ...interface{}) *IntCmd {
@ -2590,6 +2730,7 @@ func (c cmdable) MemoryUsage(ctx context.Context, key string, samples ...int) *I
args = append(args, "SAMPLES", samples[0])
}
cmd := NewIntCmd(ctx, args...)
cmd.setFirstKeyPos(2)
_ = c(ctx, cmd)
return cmd
}
@ -2606,6 +2747,7 @@ func (c cmdable) Eval(ctx context.Context, script string, keys []string, args ..
}
cmdArgs = appendArgs(cmdArgs, args)
cmd := NewCmd(ctx, cmdArgs...)
cmd.setFirstKeyPos(3)
_ = c(ctx, cmd)
return cmd
}
@ -2620,6 +2762,7 @@ func (c cmdable) EvalSha(ctx context.Context, sha1 string, keys []string, args .
}
cmdArgs = appendArgs(cmdArgs, args)
cmd := NewCmd(ctx, cmdArgs...)
cmd.setFirstKeyPos(3)
_ = c(ctx, cmd)
return cmd
}


@ -10,6 +10,7 @@ import (
"github.com/go-redis/redis/v8/internal/proto"
)
// ErrClosed is returned when any operation is performed on a closed client.
var ErrClosed = pool.ErrClosed
type Error interface {


@ -7,7 +7,5 @@ require (
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f
github.com/onsi/ginkgo v1.15.0
github.com/onsi/gomega v1.10.5
go.opentelemetry.io/otel v0.19.0
go.opentelemetry.io/otel/metric v0.19.0
go.opentelemetry.io/otel/trace v0.19.0
go.opentelemetry.io/otel/metric v0.20.0
)


@ -37,14 +37,14 @@ github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+
github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
go.opentelemetry.io/otel v0.19.0 h1:Lenfy7QHRXPZVsw/12CWpxX6d/JkrX8wrx2vO8G80Ng=
go.opentelemetry.io/otel v0.19.0/go.mod h1:j9bF567N9EfomkSidSfmMwIwIBuP37AMAIzVW85OxSg=
go.opentelemetry.io/otel/metric v0.19.0 h1:dtZ1Ju44gkJkYvo+3qGqVXmf88tc+a42edOywypengg=
go.opentelemetry.io/otel/metric v0.19.0/go.mod h1:8f9fglJPRnXuskQmKpnad31lcLJ2VmNNqIsx/uIwBSc=
go.opentelemetry.io/otel/oteltest v0.19.0 h1:YVfA0ByROYqTwOxqHVZYZExzEpfZor+MU1rU+ip2v9Q=
go.opentelemetry.io/otel/oteltest v0.19.0/go.mod h1:tI4yxwh8U21v7JD6R3BcA/2+RBoTKFexE/PJ/nSO7IA=
go.opentelemetry.io/otel/trace v0.19.0 h1:1ucYlenXIDA1OlHVLDZKX0ObXV5RLaq06DtUKz5e5zc=
go.opentelemetry.io/otel/trace v0.19.0/go.mod h1:4IXiNextNOpPnRlI4ryK69mn5iC84bjBWZQA5DXz/qg=
go.opentelemetry.io/otel v0.20.0 h1:eaP0Fqu7SXHwvjiqDq83zImeehOHX8doTvU9AwXON8g=
go.opentelemetry.io/otel v0.20.0/go.mod h1:Y3ugLH2oa81t5QO+Lty+zXf8zC9L26ax4Nzoxm/dooo=
go.opentelemetry.io/otel/metric v0.20.0 h1:4kzhXFP+btKm4jwxpjIqjs41A7MakRFUS86bqLHTIw8=
go.opentelemetry.io/otel/metric v0.20.0/go.mod h1:598I5tYlH1vzBjn+BTuhzTCSb/9debfNp6R3s7Pr1eU=
go.opentelemetry.io/otel/oteltest v0.20.0 h1:HiITxCawalo5vQzdHfKeZurV8x7ljcqAgiWzF6Vaeaw=
go.opentelemetry.io/otel/oteltest v0.20.0/go.mod h1:L7bgKf9ZB7qCwT9Up7i9/pn0PWIa9FqQ2IQ8LoxiGnw=
go.opentelemetry.io/otel/trace v0.20.0 h1:1DL6EXUdcg95gukhuRRvLDO/4X5THh/5dIV52lqtnbw=
go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=


@ -60,7 +60,7 @@ func RandomSlot() int {
return rand.Intn(slotNumber)
}
// hashSlot returns a consistent slot number between 0 and 16383
// Slot returns a consistent slot number between 0 and 16383
// for any given string key.
func Slot(key string) int {
if key == "" {


@ -19,6 +19,8 @@ func (l *logger) Printf(ctx context.Context, format string, v ...interface{}) {
_ = l.log.Output(2, fmt.Sprintf(format, v...))
}
// Logger calls Output to print to stderr.
// Arguments are handled in the manner of fmt.Print.
var Logger Logging = &logger{
log: log.New(os.Stderr, "redis: ", log.LstdFlags|log.Lshortfile),
}


@ -65,26 +65,17 @@ func (cn *Conn) RemoteAddr() net.Addr {
}
func (cn *Conn) WithReader(ctx context.Context, timeout time.Duration, fn func(rd *proto.Reader) error) error {
ctx, span := internal.StartSpan(ctx, "redis.with_reader")
defer span.End()
if err := cn.netConn.SetReadDeadline(cn.deadline(ctx, timeout)); err != nil {
return internal.RecordError(ctx, span, err)
return err
}
if err := fn(cn.rd); err != nil {
return internal.RecordError(ctx, span, err)
}
return nil
return fn(cn.rd)
}
func (cn *Conn) WithWriter(
ctx context.Context, timeout time.Duration, fn func(wr *proto.Writer) error,
) error {
ctx, span := internal.StartSpan(ctx, "redis.with_writer")
defer span.End()
if err := cn.netConn.SetWriteDeadline(cn.deadline(ctx, timeout)); err != nil {
return internal.RecordError(ctx, span, err)
return err
}
if cn.bw.Buffered() > 0 {
@ -92,11 +83,11 @@ func (cn *Conn) WithWriter(
}
if err := fn(cn.wr); err != nil {
return internal.RecordError(ctx, span, err)
return err
}
if err := cn.bw.Flush(); err != nil {
return internal.RecordError(ctx, span, err)
return err
}
internal.WritesCounter.Add(ctx, 1)


@ -12,7 +12,10 @@ import (
)
var (
ErrClosed = errors.New("redis: client is closed")
// ErrClosed is returned when any operation is performed on a closed client.
ErrClosed = errors.New("redis: client is closed")
// ErrPoolTimeout is returned when the connection pool times out waiting for a free connection.
ErrPoolTimeout = errors.New("redis: connection pool timeout")
)


@ -8,6 +8,7 @@ import (
"github.com/go-redis/redis/v8/internal/util"
)
// Redis RESP protocol data types.
const (
ErrorReply = '-'
StatusReply = '+'


@ -4,16 +4,10 @@ import (
"context"
"time"
"github.com/go-redis/redis/v8/internal/proto"
"github.com/go-redis/redis/v8/internal/util"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/trace"
)
func Sleep(ctx context.Context, dur time.Duration) error {
_, span := StartSpan(ctx, "time.Sleep")
defer span.End()
t := time.NewTimer(dur)
defer t.Stop()
@ -50,21 +44,3 @@ func isLower(s string) bool {
}
return true
}
//------------------------------------------------------------------------------
var tracer = otel.Tracer("github.com/go-redis/redis")
func StartSpan(ctx context.Context, name string) (context.Context, trace.Span) {
if span := trace.SpanFromContext(ctx); !span.IsRecording() {
return ctx, span
}
return tracer.Start(ctx, name)
}
func RecordError(ctx context.Context, span trace.Span, err error) error {
if err != proto.Nil {
span.RecordError(err)
}
return err
}


@ -12,9 +12,7 @@ import (
"strings"
"time"
"github.com/go-redis/redis/v8/internal"
"github.com/go-redis/redis/v8/internal/pool"
"go.opentelemetry.io/otel/attribute"
)
// Limiter is the interface of a rate limiter or a circuit breaker.
@ -291,21 +289,7 @@ func getUserPassword(u *url.URL) (string, string) {
func newConnPool(opt *Options) *pool.ConnPool {
return pool.NewConnPool(&pool.Options{
Dialer: func(ctx context.Context) (net.Conn, error) {
ctx, span := internal.StartSpan(ctx, "redis.dial")
defer span.End()
if span.IsRecording() {
span.SetAttributes(
attribute.String("db.connection_string", opt.Addr),
)
}
cn, err := opt.Dialer(ctx, opt.Network, opt.Addr)
if err != nil {
return nil, internal.RecordError(ctx, span, err)
}
return cn, nil
return opt.Dialer(ctx, opt.Network, opt.Addr)
},
PoolSize: opt.PoolSize,
MinIdleConns: opt.MinIdleConns,


@ -2,7 +2,6 @@ package redis
import (
"context"
"errors"
"fmt"
"strings"
"sync"
@ -13,13 +12,6 @@ import (
"github.com/go-redis/redis/v8/internal/proto"
)
const (
pingTimeout = time.Second
chanSendTimeout = time.Minute
)
var errPingTimeout = errors.New("redis: ping timeout")
// PubSub implements Pub/Sub commands as described in
// http://redis.io/topics/pubsub. Message receiving is NOT safe
// for concurrent use by multiple goroutines.
@ -43,9 +35,12 @@ type PubSub struct {
cmd *Cmd
chOnce sync.Once
msgCh chan *Message
allCh chan interface{}
ping chan struct{}
msgCh *channel
allCh *channel
}
func (c *PubSub) init() {
c.exit = make(chan struct{})
}
func (c *PubSub) String() string {
@ -54,10 +49,6 @@ func (c *PubSub) String() string {
return fmt.Sprintf("PubSub(%s)", strings.Join(channels, ", "))
}
func (c *PubSub) init() {
c.exit = make(chan struct{})
}
func (c *PubSub) connWithLock(ctx context.Context) (*pool.Conn, error) {
c.mu.Lock()
cn, err := c.conn(ctx, nil)
@ -418,56 +409,6 @@ func (c *PubSub) ReceiveMessage(ctx context.Context) (*Message, error) {
}
}
// Channel returns a Go channel for concurrently receiving messages.
// The channel is closed together with the PubSub. If the Go channel
// stays full for 30 seconds, the message is dropped.
// Receive* APIs can not be used after channel is created.
//
// go-redis periodically sends ping messages to test connection health
// and re-subscribes if a ping cannot be received for 30 seconds.
func (c *PubSub) Channel() <-chan *Message {
return c.ChannelSize(100)
}
// ChannelSize is like Channel, but creates a Go channel
// with specified buffer size.
func (c *PubSub) ChannelSize(size int) <-chan *Message {
c.chOnce.Do(func() {
c.initPing()
c.initMsgChan(size)
})
if c.msgCh == nil {
err := fmt.Errorf("redis: Channel can't be called after ChannelWithSubscriptions")
panic(err)
}
if cap(c.msgCh) != size {
err := fmt.Errorf("redis: PubSub.Channel size can not be changed once created")
panic(err)
}
return c.msgCh
}
// ChannelWithSubscriptions is like Channel, but message type can be either
// *Subscription or *Message. Subscription messages can be used to detect
// reconnections.
//
// ChannelWithSubscriptions can not be used together with Channel or ChannelSize.
func (c *PubSub) ChannelWithSubscriptions(ctx context.Context, size int) <-chan interface{} {
c.chOnce.Do(func() {
c.initPing()
c.initAllChan(size)
})
if c.allCh == nil {
err := fmt.Errorf("redis: ChannelWithSubscriptions can't be called after Channel")
panic(err)
}
if cap(c.allCh) != size {
err := fmt.Errorf("redis: PubSub.Channel size can not be changed once created")
panic(err)
}
return c.allCh
}
func (c *PubSub) getContext() context.Context {
if c.cmd != nil {
return c.cmd.ctx
@ -475,36 +416,135 @@ func (c *PubSub) getContext() context.Context {
return context.Background()
}
func (c *PubSub) initPing() {
//------------------------------------------------------------------------------
// Channel returns a Go channel for concurrently receiving messages.
// The channel is closed together with the PubSub. If the Go channel
// stays full for 30 seconds, the message is dropped.
// Receive* APIs can not be used after channel is created.
//
// go-redis periodically sends ping messages to test connection health
// and re-subscribes if a ping cannot be received for 30 seconds.
func (c *PubSub) Channel(opts ...ChannelOption) <-chan *Message {
c.chOnce.Do(func() {
c.msgCh = newChannel(c, opts...)
c.msgCh.initMsgChan()
})
if c.msgCh == nil {
err := fmt.Errorf("redis: Channel can't be called after ChannelWithSubscriptions")
panic(err)
}
return c.msgCh.msgCh
}
// ChannelSize is like Channel, but creates a Go channel
// with specified buffer size.
//
// Deprecated: use Channel(WithChannelSize(size)), remove in v9.
func (c *PubSub) ChannelSize(size int) <-chan *Message {
return c.Channel(WithChannelSize(size))
}
// ChannelWithSubscriptions is like Channel, but message type can be either
// *Subscription or *Message. Subscription messages can be used to detect
// reconnections.
//
// ChannelWithSubscriptions can not be used together with Channel or ChannelSize.
func (c *PubSub) ChannelWithSubscriptions(_ context.Context, size int) <-chan interface{} {
c.chOnce.Do(func() {
c.allCh = newChannel(c, WithChannelSize(size))
c.allCh.initAllChan()
})
if c.allCh == nil {
err := fmt.Errorf("redis: ChannelWithSubscriptions can't be called after Channel")
panic(err)
}
return c.allCh.allCh
}
type ChannelOption func(c *channel)
// WithChannelSize specifies the Go chan size that is used to buffer incoming messages.
//
// The default is 100 messages.
func WithChannelSize(size int) ChannelOption {
return func(c *channel) {
c.chanSize = size
}
}
// WithChannelHealthCheckInterval specifies the health check interval.
// PubSub will ping Redis Server if it does not receive any messages within the interval.
// To disable health check, use zero interval.
//
// The default is 3 seconds.
func WithChannelHealthCheckInterval(d time.Duration) ChannelOption {
return func(c *channel) {
c.checkInterval = d
}
}
// WithChannelSendTimeout specifies the channel send timeout after which
// the message is dropped.
//
// The default is 60 seconds.
func WithChannelSendTimeout(d time.Duration) ChannelOption {
return func(c *channel) {
c.chanSendTimeout = d
}
}
type channel struct {
pubSub *PubSub
msgCh chan *Message
allCh chan interface{}
ping chan struct{}
chanSize int
chanSendTimeout time.Duration
checkInterval time.Duration
}
func newChannel(pubSub *PubSub, opts ...ChannelOption) *channel {
c := &channel{
pubSub: pubSub,
chanSize: 100,
chanSendTimeout: time.Minute,
checkInterval: 3 * time.Second,
}
for _, opt := range opts {
opt(c)
}
if c.checkInterval > 0 {
c.initHealthCheck()
}
return c
}
func (c *channel) initHealthCheck() {
ctx := context.TODO()
c.ping = make(chan struct{}, 1)
go func() {
timer := time.NewTimer(time.Minute)
timer.Stop()
healthy := true
for {
timer.Reset(pingTimeout)
timer.Reset(c.checkInterval)
select {
case <-c.ping:
healthy = true
if !timer.Stop() {
<-timer.C
}
case <-timer.C:
pingErr := c.Ping(ctx)
if healthy {
healthy = false
} else {
if pingErr == nil {
pingErr = errPingTimeout
}
c.mu.Lock()
c.reconnect(ctx, pingErr)
healthy = true
c.mu.Unlock()
if pingErr := c.pubSub.Ping(ctx); pingErr != nil {
c.pubSub.mu.Lock()
c.pubSub.reconnect(ctx, pingErr)
c.pubSub.mu.Unlock()
}
case <-c.exit:
case <-c.pubSub.exit:
return
}
}
@ -512,16 +552,17 @@ func (c *PubSub) initPing() {
}
// initMsgChan must be in sync with initAllChan.
func (c *PubSub) initMsgChan(size int) {
func (c *channel) initMsgChan() {
ctx := context.TODO()
c.msgCh = make(chan *Message, size)
c.msgCh = make(chan *Message, c.chanSize)
go func() {
timer := time.NewTimer(time.Minute)
timer.Stop()
var errCount int
for {
msg, err := c.Receive(ctx)
msg, err := c.pubSub.Receive(ctx)
if err != nil {
if err == pool.ErrClosed {
close(c.msgCh)
@ -548,7 +589,7 @@ func (c *PubSub) initMsgChan(size int) {
case *Pong:
// Ignore.
case *Message:
timer.Reset(chanSendTimeout)
timer.Reset(c.chanSendTimeout)
select {
case c.msgCh <- msg:
if !timer.Stop() {
@ -556,30 +597,28 @@ func (c *PubSub) initMsgChan(size int) {
}
case <-timer.C:
internal.Logger.Printf(
c.getContext(),
"redis: %s channel is full for %s (message is dropped)",
c,
chanSendTimeout,
)
ctx, "redis: %s channel is full for %s (message is dropped)",
c, c.chanSendTimeout)
}
default:
internal.Logger.Printf(c.getContext(), "redis: unknown message type: %T", msg)
internal.Logger.Printf(ctx, "redis: unknown message type: %T", msg)
}
}
}()
}
// initAllChan must be in sync with initMsgChan.
func (c *PubSub) initAllChan(size int) {
func (c *channel) initAllChan() {
ctx := context.TODO()
c.allCh = make(chan interface{}, size)
c.allCh = make(chan interface{}, c.chanSize)
go func() {
timer := time.NewTimer(pingTimeout)
timer := time.NewTimer(time.Minute)
timer.Stop()
var errCount int
for {
msg, err := c.Receive(ctx)
msg, err := c.pubSub.Receive(ctx)
if err != nil {
if err == pool.ErrClosed {
close(c.allCh)
@ -601,29 +640,23 @@ func (c *PubSub) initAllChan(size int) {
}
switch msg := msg.(type) {
case *Subscription:
c.sendMessage(msg, timer)
case *Pong:
// Ignore.
case *Message:
c.sendMessage(msg, timer)
case *Subscription, *Message:
timer.Reset(c.chanSendTimeout)
select {
case c.allCh <- msg:
if !timer.Stop() {
<-timer.C
}
case <-timer.C:
internal.Logger.Printf(
ctx, "redis: %s channel is full for %s (message is dropped)",
c, c.chanSendTimeout)
}
default:
internal.Logger.Printf(c.getContext(), "redis: unknown message type: %T", msg)
internal.Logger.Printf(ctx, "redis: unknown message type: %T", msg)
}
}
}()
}
func (c *PubSub) sendMessage(msg interface{}, timer *time.Timer) {
timer.Reset(pingTimeout)
select {
case c.allCh <- msg:
if !timer.Stop() {
<-timer.C
}
case <-timer.C:
internal.Logger.Printf(
c.getContext(),
"redis: %s channel is full for %s (message is dropped)", c, pingTimeout)
}
}


@ -10,7 +10,6 @@ import (
"github.com/go-redis/redis/v8/internal"
"github.com/go-redis/redis/v8/internal/pool"
"github.com/go-redis/redis/v8/internal/proto"
"go.opentelemetry.io/otel/attribute"
)
// Nil reply returned by Redis when key does not exist.
@ -237,9 +236,6 @@ func (c *baseClient) initConn(ctx context.Context, cn *pool.Conn) error {
return nil
}
ctx, span := internal.StartSpan(ctx, "redis.init_conn")
defer span.End()
connPool := pool.NewSingleConnPool(c.connPool, cn)
conn := newConn(ctx, c.opt, connPool)
@ -287,20 +283,11 @@ func (c *baseClient) releaseConn(ctx context.Context, cn *pool.Conn, err error)
func (c *baseClient) withConn(
ctx context.Context, fn func(context.Context, *pool.Conn) error,
) error {
ctx, span := internal.StartSpan(ctx, "redis.with_conn")
defer span.End()
cn, err := c.getConn(ctx)
if err != nil {
return err
}
if span.IsRecording() {
if remoteAddr := cn.RemoteAddr(); remoteAddr != nil {
span.SetAttributes(attribute.String("net.peer.ip", remoteAddr.String()))
}
}
defer func() {
c.releaseConn(ctx, cn, err)
}()


@ -40,8 +40,6 @@ type FailoverOptions struct {
// Now, this option only works in RandomSlaveAddr function.
UseDisconnectedSlaves bool
// Client queries sentinels in a random order
QuerySentinelRandomly bool
// Following options are copied from Options struct.
Dialer func(ctx context.Context, network, addr string) (net.Conn, error)
@ -221,14 +219,21 @@ func masterSlaveDialer(
failover.trySwitchMaster(ctx, addr)
}
}
if err != nil {
return nil, err
}
if failover.opt.Dialer != nil {
return failover.opt.Dialer(ctx, network, addr)
}
return net.DialTimeout("tcp", addr, failover.opt.DialTimeout)
netDialer := &net.Dialer{
Timeout: failover.opt.DialTimeout,
KeepAlive: 5 * time.Minute,
}
if failover.opt.TLSConfig == nil {
return netDialer.DialContext(ctx, network, addr)
}
return tls.DialWithDialer(netDialer, network, addr, failover.opt.TLSConfig)
}
}