mirror of https://github.com/aljazceru/kata-containers.git (synced 2025-12-23 17:24:18 +01:00)
vendor: update agent to support memory update

Fixes #671

agent Shortlog:
  7e8e20b agent: add GetGuestDetails gRPC function
  5936600 grpc: grpc.Code is deprecated
  2d3b9ac release: Kata Containers 1.3.0-rc0
  a6e27d6 client: fix dialer after vendor update
  cd03e0c vendor: update grpc-go dependency
  1d559a7 channel: add serial yamux channel close timeout
  fcf6fa7 agent: update resources list with the right device major-minor number

Signed-off-by: Zichang Lin <linzichang@huawei.com>
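The practical effect of this vendor bump is that the runtime can now ask the guest agent for its memory block size before hotplugging memory, via the new GetGuestDetails RPC added to the vendored agent.pb.go below. A minimal caller sketch against the generated API, assuming an already-established gRPC connection to the agent (the helper function and its name are illustrative, not part of this commit):

package example

import (
	"context"

	pb "github.com/kata-containers/agent/protocols/grpc"
	"google.golang.org/grpc"
)

// queryGuestMemBlockSize shows how a caller could use the new GetGuestDetails
// RPC to read the guest memory block size and use it for hotplug alignment.
// It assumes conn is an already-established gRPC connection to the agent.
func queryGuestMemBlockSize(ctx context.Context, conn *grpc.ClientConn) (uint64, error) {
	cli := pb.NewAgentServiceClient(conn)
	resp, err := cli.GetGuestDetails(ctx, &pb.GuestDetailsRequest{
		// Ask the agent for /sys/devices/system/memory/block_size_bytes.
		MemBlockSize: true,
	})
	if err != nil {
		return 0, err
	}
	return resp.MemBlockSizeBytes, nil
}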
Gopkg.lock (generated): 10 lines changed
@@ -130,14 +130,14 @@
   revision = "032705ba6aae05a9bf41e296cf89c8529cffb822"

 [[projects]]
-  digest = "1:aec1ed6dbfffe247e5a8a9e3102206fe97552077e10ea44e3e35dce98ab6e5aa"
+  digest = "1:672470f31bc4e50f9ba09a1af7ab6035bf8b1452db64dfd79b1a22614bb30710"
   name = "github.com/kata-containers/agent"
   packages = [
     "protocols/client",
     "protocols/grpc",
   ]
   pruneopts = "NUT"
-  revision = "7c95a50ef97052bf7f5566dcca53d6611f7458ac"
+  revision = "7e8e20b10b71fe3044a24175b8a686421e9d2c24"

 [[projects]]
   digest = "1:04054595e5c5a35d1553a7f3464d18577caf597445d643992998643df56d4afd"

@@ -361,16 +361,18 @@
   revision = "2c5e7ac708aaa719366570dd82bda44541ca2a63"

 [[projects]]
-  digest = "1:abbcaa84ed484e328cef0cd0bc0130641edc52b4bc6bfb0090dadfd2c1033b6f"
+  digest = "1:3d43152515ea791363eb0d1d21378fbf70e7df4a3954fd315898532cf5e64a8c"
   name = "google.golang.org/grpc"
   packages = [
     ".",
     "balancer",
+    "balancer/base",
     "balancer/roundrobin",
     "codes",
     "connectivity",
     "credentials",
     "encoding",
+    "encoding/proto",
     "grpclb/grpc_lb_v1/messages",
     "grpclog",
     "internal",

@@ -387,7 +389,7 @@
     "transport",
   ]
   pruneopts = "NUT"
-  revision = "5a9f7b402fe85096d2e1d0383435ee1876e863d0"
+  revision = "d11072e7ca9811b1100b80ca0269ac831f06d024"

 [solve-meta]
   analyzer-name = "dep"
Gopkg.toml

@@ -56,7 +56,7 @@

 [[constraint]]
   name = "github.com/kata-containers/agent"
-  revision = "7c95a50ef97052bf7f5566dcca53d6611f7458ac"
+  revision = "7e8e20b10b71fe3044a24175b8a686421e9d2c24"

 [[constraint]]
   name = "github.com/containerd/cri-containerd"
vendor/github.com/kata-containers/agent/protocols/client/client.go (generated, vendored): 7 lines changed
@@ -217,6 +217,10 @@ func agentDialer(addr *url.URL, enableYamux bool) dialer {
 }

 func unixDialer(sock string, timeout time.Duration) (net.Conn, error) {
+	if strings.HasPrefix(sock, "unix:") {
+		sock = strings.Trim(sock, "unix:")
+	}
+
 	dialFunc := func() (net.Conn, error) {
 		return net.DialTimeout("unix", sock, timeout)
 	}

@@ -285,6 +289,9 @@ func commonDialer(timeout time.Duration, dialFunc func() (net.Conn, error), time
 		if !ok {
 			return nil, timeoutErrMsg
 		}
+	case <-t.C:
+		cancel <- true
+		return nil, timeoutErrMsg
 	}

 	return conn, nil
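The second client.go hunk completes the dial timeout handling: the select that waits on the dialer goroutine now also waits on a timer and signals cancellation when it fires. The same pattern in isolation, as a generic sketch rather than the vendored code itself:

package example

import (
	"errors"
	"net"
	"time"
)

// dialWithTimeout sketches the select pattern used in commonDialer: the dial
// runs in a goroutine, and the caller gives up when the timer fires, telling
// the goroutine to discard its result via the cancel channel.
func dialWithTimeout(dial func() (net.Conn, error), timeout time.Duration) (net.Conn, error) {
	ch := make(chan net.Conn)
	cancel := make(chan bool, 1) // buffered so the timeout path never blocks
	t := time.NewTimer(timeout)
	defer t.Stop()

	go func() {
		conn, err := dial()
		if err != nil {
			close(ch)
			return
		}
		select {
		case ch <- conn:
		case <-cancel:
			conn.Close() // caller timed out; drop the connection
		}
	}()

	select {
	case conn, ok := <-ch:
		if !ok {
			return nil, errors.New("dial failed")
		}
		return conn, nil
	case <-t.C:
		cancel <- true
		return nil, errors.New("timed out dialing")
	}
}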
vendor/github.com/kata-containers/agent/protocols/grpc/agent.pb.go (generated, vendored): 592 lines changed
@@ -55,6 +55,8 @@
 	ListRoutesRequest
 	OnlineCPUMemRequest
 	ReseedRandomDevRequest
+	GuestDetailsRequest
+	GuestDetailsResponse
 	Storage
 	Device
 	StringUser
@@ -1440,6 +1442,42 @@ func (m *ReseedRandomDevRequest) GetData() []byte {
 	return nil
 }

+type GuestDetailsRequest struct {
+	// MemBlockSize asks server to return the system memory block size that can be used
+	// for memory hotplug alignment. Typically the server returns what's in
+	// /sys/devices/system/memory/block_size_bytes.
+	MemBlockSize bool `protobuf:"varint,1,opt,name=mem_block_size,json=memBlockSize,proto3" json:"mem_block_size,omitempty"`
+}
+
+func (m *GuestDetailsRequest) Reset()         { *m = GuestDetailsRequest{} }
+func (m *GuestDetailsRequest) String() string { return proto.CompactTextString(m) }
+func (*GuestDetailsRequest) ProtoMessage()    {}
+func (*GuestDetailsRequest) Descriptor() ([]byte, []int) { return fileDescriptorAgent, []int{45} }
+
+func (m *GuestDetailsRequest) GetMemBlockSize() bool {
+	if m != nil {
+		return m.MemBlockSize
+	}
+	return false
+}
+
+type GuestDetailsResponse struct {
+	// MemBlockSizeBytes returns the system memory block size in bytes.
+	MemBlockSizeBytes uint64 `protobuf:"varint,1,opt,name=mem_block_size_bytes,json=memBlockSizeBytes,proto3" json:"mem_block_size_bytes,omitempty"`
+}
+
+func (m *GuestDetailsResponse) Reset()         { *m = GuestDetailsResponse{} }
+func (m *GuestDetailsResponse) String() string { return proto.CompactTextString(m) }
+func (*GuestDetailsResponse) ProtoMessage()    {}
+func (*GuestDetailsResponse) Descriptor() ([]byte, []int) { return fileDescriptorAgent, []int{46} }
+
+func (m *GuestDetailsResponse) GetMemBlockSizeBytes() uint64 {
+	if m != nil {
+		return m.MemBlockSizeBytes
+	}
+	return 0
+}
+
 // Storage represents both the rootfs of the container, and any volume that
 // could have been defined through the Mount list of the OCI specification.
 type Storage struct {
@@ -1473,7 +1511,7 @@ type Storage struct {
 func (m *Storage) Reset()         { *m = Storage{} }
 func (m *Storage) String() string { return proto.CompactTextString(m) }
 func (*Storage) ProtoMessage()    {}
-func (*Storage) Descriptor() ([]byte, []int) { return fileDescriptorAgent, []int{45} }
+func (*Storage) Descriptor() ([]byte, []int) { return fileDescriptorAgent, []int{47} }

 func (m *Storage) GetDriver() string {
 	if m != nil {

@@ -1556,7 +1594,7 @@ type Device struct {
 func (m *Device) Reset()         { *m = Device{} }
 func (m *Device) String() string { return proto.CompactTextString(m) }
 func (*Device) ProtoMessage()    {}
-func (*Device) Descriptor() ([]byte, []int) { return fileDescriptorAgent, []int{46} }
+func (*Device) Descriptor() ([]byte, []int) { return fileDescriptorAgent, []int{48} }

 func (m *Device) GetId() string {
 	if m != nil {

@@ -1602,7 +1640,7 @@ type StringUser struct {
 func (m *StringUser) Reset()         { *m = StringUser{} }
 func (m *StringUser) String() string { return proto.CompactTextString(m) }
 func (*StringUser) ProtoMessage()    {}
-func (*StringUser) Descriptor() ([]byte, []int) { return fileDescriptorAgent, []int{47} }
+func (*StringUser) Descriptor() ([]byte, []int) { return fileDescriptorAgent, []int{49} }

 func (m *StringUser) GetUid() string {
 	if m != nil {
@@ -1671,6 +1709,8 @@ func init() {
 	proto.RegisterType((*ListRoutesRequest)(nil), "grpc.ListRoutesRequest")
 	proto.RegisterType((*OnlineCPUMemRequest)(nil), "grpc.OnlineCPUMemRequest")
 	proto.RegisterType((*ReseedRandomDevRequest)(nil), "grpc.ReseedRandomDevRequest")
+	proto.RegisterType((*GuestDetailsRequest)(nil), "grpc.GuestDetailsRequest")
+	proto.RegisterType((*GuestDetailsResponse)(nil), "grpc.GuestDetailsResponse")
 	proto.RegisterType((*Storage)(nil), "grpc.Storage")
 	proto.RegisterType((*Device)(nil), "grpc.Device")
 	proto.RegisterType((*StringUser)(nil), "grpc.StringUser")
@@ -1724,6 +1764,7 @@ type AgentServiceClient interface {
 	DestroySandbox(ctx context.Context, in *DestroySandboxRequest, opts ...grpc1.CallOption) (*google_protobuf2.Empty, error)
 	OnlineCPUMem(ctx context.Context, in *OnlineCPUMemRequest, opts ...grpc1.CallOption) (*google_protobuf2.Empty, error)
 	ReseedRandomDev(ctx context.Context, in *ReseedRandomDevRequest, opts ...grpc1.CallOption) (*google_protobuf2.Empty, error)
+	GetGuestDetails(ctx context.Context, in *GuestDetailsRequest, opts ...grpc1.CallOption) (*GuestDetailsResponse, error)
 }

 type agentServiceClient struct {
@@ -1968,6 +2009,15 @@ func (c *agentServiceClient) ReseedRandomDev(ctx context.Context, in *ReseedRand
 	return out, nil
 }

+func (c *agentServiceClient) GetGuestDetails(ctx context.Context, in *GuestDetailsRequest, opts ...grpc1.CallOption) (*GuestDetailsResponse, error) {
+	out := new(GuestDetailsResponse)
+	err := grpc1.Invoke(ctx, "/grpc.AgentService/GetGuestDetails", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
 // Server API for AgentService service

 type AgentServiceServer interface {
@@ -2007,6 +2057,7 @@ type AgentServiceServer interface {
 	DestroySandbox(context.Context, *DestroySandboxRequest) (*google_protobuf2.Empty, error)
 	OnlineCPUMem(context.Context, *OnlineCPUMemRequest) (*google_protobuf2.Empty, error)
 	ReseedRandomDev(context.Context, *ReseedRandomDevRequest) (*google_protobuf2.Empty, error)
+	GetGuestDetails(context.Context, *GuestDetailsRequest) (*GuestDetailsResponse, error)
 }

 func RegisterAgentServiceServer(s *grpc1.Server, srv AgentServiceServer) {
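For context, a server-side implementation of the new AgentServiceServer method typically just reports what the guest kernel exposes in /sys/devices/system/memory/block_size_bytes, as the GuestDetailsRequest comment above describes. A hedged sketch of such a handler (the agentServer type and this implementation are illustrative; the real handler lives in the agent repository, not in this vendored file):

package example

import (
	"context"
	"io/ioutil"
	"strconv"
	"strings"

	pb "github.com/kata-containers/agent/protocols/grpc"
)

// agentServer is a hypothetical type standing in for the agent's real
// AgentServiceServer implementation.
type agentServer struct{}

// GetGuestDetails reads the guest memory block size from sysfs and returns
// it in bytes, matching the GuestDetailsResponse field added by this commit.
func (s *agentServer) GetGuestDetails(ctx context.Context, req *pb.GuestDetailsRequest) (*pb.GuestDetailsResponse, error) {
	resp := &pb.GuestDetailsResponse{}
	if req.MemBlockSize {
		data, err := ioutil.ReadFile("/sys/devices/system/memory/block_size_bytes")
		if err != nil {
			return nil, err
		}
		// The kernel exposes this value as a hex string (no 0x prefix).
		size, err := strconv.ParseUint(strings.TrimSpace(string(data)), 16, 64)
		if err != nil {
			return nil, err
		}
		resp.MemBlockSizeBytes = size
	}
	return resp, nil
}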
@@ -2481,6 +2532,24 @@ func _AgentService_ReseedRandomDev_Handler(srv interface{}, ctx context.Context,
 	return interceptor(ctx, in, info, handler)
 }

+func _AgentService_GetGuestDetails_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc1.UnaryServerInterceptor) (interface{}, error) {
+	in := new(GuestDetailsRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(AgentServiceServer).GetGuestDetails(ctx, in)
+	}
+	info := &grpc1.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/grpc.AgentService/GetGuestDetails",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(AgentServiceServer).GetGuestDetails(ctx, req.(*GuestDetailsRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
 var _AgentService_serviceDesc = grpc1.ServiceDesc{
 	ServiceName: "grpc.AgentService",
 	HandlerType: (*AgentServiceServer)(nil),

@@ -2589,6 +2658,10 @@ var _AgentService_serviceDesc = grpc1.ServiceDesc{
 			MethodName: "ReseedRandomDev",
 			Handler:    _AgentService_ReseedRandomDev_Handler,
 		},
+		{
+			MethodName: "GetGuestDetails",
+			Handler:    _AgentService_GetGuestDetails_Handler,
+		},
 	},
 	Streams:  []grpc1.StreamDesc{},
 	Metadata: "agent.proto",
@@ -4299,6 +4372,57 @@ func (m *ReseedRandomDevRequest) MarshalTo(dAtA []byte) (int, error) {
 	return i, nil
 }

+func (m *GuestDetailsRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *GuestDetailsRequest) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if m.MemBlockSize {
+		dAtA[i] = 0x8
+		i++
+		if m.MemBlockSize {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i++
+	}
+	return i, nil
+}
+
+func (m *GuestDetailsResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *GuestDetailsResponse) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if m.MemBlockSizeBytes != 0 {
+		dAtA[i] = 0x8
+		i++
+		i = encodeVarintAgent(dAtA, i, uint64(m.MemBlockSizeBytes))
+	}
+	return i, nil
+}
+
 func (m *Storage) Marshal() (dAtA []byte, err error) {
 	size := m.Size()
 	dAtA = make([]byte, size)
@@ -5221,6 +5345,24 @@ func (m *ReseedRandomDevRequest) Size() (n int) {
 	return n
 }

+func (m *GuestDetailsRequest) Size() (n int) {
+	var l int
+	_ = l
+	if m.MemBlockSize {
+		n += 2
+	}
+	return n
+}
+
+func (m *GuestDetailsResponse) Size() (n int) {
+	var l int
+	_ = l
+	if m.MemBlockSizeBytes != 0 {
+		n += 1 + sovAgent(uint64(m.MemBlockSizeBytes))
+	}
+	return n
+}
+
 func (m *Storage) Size() (n int) {
 	var l int
 	_ = l
@@ -10844,6 +10986,145 @@ func (m *ReseedRandomDevRequest) Unmarshal(dAtA []byte) error {
 	}
 	return nil
 }
+func (m *GuestDetailsRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowAgent
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: GuestDetailsRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: GuestDetailsRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field MemBlockSize", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowAgent
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.MemBlockSize = bool(v != 0)
+		default:
+			iNdEx = preIndex
+			skippy, err := skipAgent(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthAgent
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *GuestDetailsResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowAgent
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: GuestDetailsResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: GuestDetailsResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field MemBlockSizeBytes", wireType)
+			}
+			m.MemBlockSizeBytes = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowAgent
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.MemBlockSizeBytes |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipAgent(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthAgent
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
 func (m *Storage) Unmarshal(dAtA []byte) error {
 	l := len(dAtA)
 	iNdEx := 0
@@ -11508,154 +11789,159 @@ var (
 func init() { proto.RegisterFile("agent.proto", fileDescriptorAgent) }

 var fileDescriptorAgent = []byte{
-	// 2381 bytes of a gzipped FileDescriptorProto
+	// 2454 bytes of a gzipped FileDescriptorProto
 	[about 150 lines of regenerated gzipped descriptor bytes omitted]
 }
vendor/google.golang.org/grpc/balancer.go (generated, vendored): 3 lines changed
@@ -28,6 +28,7 @@ import (
 	"google.golang.org/grpc/credentials"
 	"google.golang.org/grpc/grpclog"
 	"google.golang.org/grpc/naming"
+	"google.golang.org/grpc/status"
 )

 // Address represents a server the client connects to.

@@ -310,7 +311,7 @@ func (rr *roundRobin) Get(ctx context.Context, opts BalancerGetOptions) (addr Ad
 	if !opts.BlockingWait {
 		if len(rr.addrs) == 0 {
 			rr.mu.Unlock()
-			err = Errorf(codes.Unavailable, "there is no address available")
+			err = status.Errorf(codes.Unavailable, "there is no address available")
 			return
 		}
 		// Returns the next addr on rr.addrs for failfast RPCs.
vendor/google.golang.org/grpc/balancer/balancer.go (generated, vendored): 26 lines changed
@@ -23,6 +23,7 @@ package balancer
|
|||||||
import (
|
import (
|
||||||
"errors"
|
"errors"
|
||||||
"net"
|
"net"
|
||||||
|
"strings"
|
||||||
|
|
||||||
"golang.org/x/net/context"
|
"golang.org/x/net/context"
|
||||||
"google.golang.org/grpc/connectivity"
|
"google.golang.org/grpc/connectivity"
|
||||||
@@ -36,15 +37,17 @@ var (
|
|||||||
)
|
)
|
||||||
|
|
||||||
// Register registers the balancer builder to the balancer map.
|
// Register registers the balancer builder to the balancer map.
|
||||||
// b.Name will be used as the name registered with this builder.
|
// b.Name (lowercased) will be used as the name registered with
|
||||||
|
// this builder.
|
||||||
func Register(b Builder) {
|
func Register(b Builder) {
|
||||||
m[b.Name()] = b
|
m[strings.ToLower(b.Name())] = b
|
||||||
}
|
}
|
||||||
|
|
||||||
// Get returns the resolver builder registered with the given name.
|
// Get returns the resolver builder registered with the given name.
|
||||||
|
// Note that the compare is done in a case-insenstive fashion.
|
||||||
// If no builder is register with the name, nil will be returned.
|
// If no builder is register with the name, nil will be returned.
|
||||||
func Get(name string) Builder {
|
func Get(name string) Builder {
|
||||||
if b, ok := m[name]; ok {
|
if b, ok := m[strings.ToLower(name)]; ok {
|
||||||
return b
|
return b
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
@@ -63,6 +66,11 @@ func Get(name string) Builder {
|
|||||||
// When the connection encounters an error, it will reconnect immediately.
|
// When the connection encounters an error, it will reconnect immediately.
|
||||||
// When the connection becomes IDLE, it will not reconnect unless Connect is
|
// When the connection becomes IDLE, it will not reconnect unless Connect is
|
||||||
// called.
|
// called.
|
||||||
|
//
|
||||||
|
// This interface is to be implemented by gRPC. Users should not need a
|
||||||
|
// brand new implementation of this interface. For the situations like
|
||||||
|
// testing, the new implementation should embed this interface. This allows
|
||||||
|
// gRPC to add new methods to this interface.
|
||||||
type SubConn interface {
|
type SubConn interface {
|
||||||
// UpdateAddresses updates the addresses used in this SubConn.
|
// UpdateAddresses updates the addresses used in this SubConn.
|
||||||
// gRPC checks if currently-connected address is still in the new list.
|
// gRPC checks if currently-connected address is still in the new list.
|
||||||
@@ -80,6 +88,11 @@ type SubConn interface {
|
|||||||
type NewSubConnOptions struct{}
|
type NewSubConnOptions struct{}
|
||||||
|
|
||||||
// ClientConn represents a gRPC ClientConn.
|
// ClientConn represents a gRPC ClientConn.
|
||||||
|
//
|
||||||
|
// This interface is to be implemented by gRPC. Users should not need a
|
||||||
|
// brand new implementation of this interface. For the situations like
|
||||||
|
// testing, the new implementation should embed this interface. This allows
|
||||||
|
// gRPC to add new methods to this interface.
|
||||||
type ClientConn interface {
|
type ClientConn interface {
|
||||||
// NewSubConn is called by balancer to create a new SubConn.
|
// NewSubConn is called by balancer to create a new SubConn.
|
||||||
// It doesn't block and wait for the connections to be established.
|
// It doesn't block and wait for the connections to be established.
|
||||||
@@ -96,6 +109,9 @@ type ClientConn interface {
|
|||||||
// on the new picker to pick new SubConn.
|
// on the new picker to pick new SubConn.
|
||||||
UpdateBalancerState(s connectivity.State, p Picker)
|
UpdateBalancerState(s connectivity.State, p Picker)
|
||||||
|
|
||||||
|
// ResolveNow is called by balancer to notify gRPC to do a name resolving.
|
||||||
|
ResolveNow(resolver.ResolveNowOption)
|
||||||
|
|
||||||
// Target returns the dial target for this ClientConn.
|
// Target returns the dial target for this ClientConn.
|
||||||
Target() string
|
Target() string
|
||||||
}
|
}
|
||||||
@@ -128,6 +144,10 @@ type PickOptions struct{}
|
|||||||
type DoneInfo struct {
|
type DoneInfo struct {
|
||||||
// Err is the rpc error the RPC finished with. It could be nil.
|
// Err is the rpc error the RPC finished with. It could be nil.
|
||||||
Err error
|
Err error
|
||||||
|
// BytesSent indicates if any bytes have been sent to the server.
|
||||||
|
BytesSent bool
|
||||||
|
// BytesReceived indicates if any byte has been received from the server.
|
||||||
|
BytesReceived bool
|
||||||
}
|
}
|
||||||
|
|
||||||
var (
|
var (
|
||||||
|
|||||||
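Note: with both Register and Get lowercasing the key, balancer lookup becomes case-insensitive at this revision. A small illustrative sketch (round_robin registers itself through its init function, as the roundrobin diff further down shows):

package main

import (
	"fmt"

	"google.golang.org/grpc/balancer"
	_ "google.golang.org/grpc/balancer/roundrobin" // registers the "round_robin" builder in init()
)

func main() {
	// After this patch, any casing resolves to the same registered builder.
	fmt.Println(balancer.Get("round_robin") != nil) // true
	fmt.Println(balancer.Get("Round_Robin") != nil) // true as well, lookup is lowercased
}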
209 vendor/google.golang.org/grpc/balancer/base/balancer.go generated vendored Normal file
@@ -0,0 +1,209 @@ (new file, all lines added)
/*
 *
 * Copyright 2017 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package base

import (
	"golang.org/x/net/context"

	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/connectivity"
	"google.golang.org/grpc/grpclog"
	"google.golang.org/grpc/resolver"
)

type baseBuilder struct {
	name          string
	pickerBuilder PickerBuilder
}

func (bb *baseBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer {
	return &baseBalancer{
		cc:            cc,
		pickerBuilder: bb.pickerBuilder,

		subConns: make(map[resolver.Address]balancer.SubConn),
		scStates: make(map[balancer.SubConn]connectivity.State),
		csEvltr:  &connectivityStateEvaluator{},
		// Initialize picker to a picker that always return
		// ErrNoSubConnAvailable, because when state of a SubConn changes, we
		// may call UpdateBalancerState with this picker.
		picker: NewErrPicker(balancer.ErrNoSubConnAvailable),
	}
}

func (bb *baseBuilder) Name() string {
	return bb.name
}

type baseBalancer struct {
	cc            balancer.ClientConn
	pickerBuilder PickerBuilder

	csEvltr *connectivityStateEvaluator
	state   connectivity.State

	subConns map[resolver.Address]balancer.SubConn
	scStates map[balancer.SubConn]connectivity.State
	picker   balancer.Picker
}

func (b *baseBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) {
	if err != nil {
		grpclog.Infof("base.baseBalancer: HandleResolvedAddrs called with error %v", err)
		return
	}
	grpclog.Infoln("base.baseBalancer: got new resolved addresses: ", addrs)
	// addrsSet is the set converted from addrs, it's used for quick lookup of an address.
	addrsSet := make(map[resolver.Address]struct{})
	for _, a := range addrs {
		addrsSet[a] = struct{}{}
		if _, ok := b.subConns[a]; !ok {
			// a is a new address (not existing in b.subConns).
			sc, err := b.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{})
			if err != nil {
				grpclog.Warningf("base.baseBalancer: failed to create new SubConn: %v", err)
				continue
			}
			b.subConns[a] = sc
			b.scStates[sc] = connectivity.Idle
			sc.Connect()
		}
	}
	for a, sc := range b.subConns {
		// a was removed by resolver.
		if _, ok := addrsSet[a]; !ok {
			b.cc.RemoveSubConn(sc)
			delete(b.subConns, a)
			// Keep the state of this sc in b.scStates until sc's state becomes Shutdown.
			// The entry will be deleted in HandleSubConnStateChange.
		}
	}
}

// regeneratePicker takes a snapshot of the balancer, and generates a picker
// from it. The picker is
//  - errPicker with ErrTransientFailure if the balancer is in TransientFailure,
//  - built by the pickerBuilder with all READY SubConns otherwise.
func (b *baseBalancer) regeneratePicker() {
	if b.state == connectivity.TransientFailure {
		b.picker = NewErrPicker(balancer.ErrTransientFailure)
		return
	}
	readySCs := make(map[resolver.Address]balancer.SubConn)

	// Filter out all ready SCs from full subConn map.
	for addr, sc := range b.subConns {
		if st, ok := b.scStates[sc]; ok && st == connectivity.Ready {
			readySCs[addr] = sc
		}
	}
	b.picker = b.pickerBuilder.Build(readySCs)
}

func (b *baseBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) {
	grpclog.Infof("base.baseBalancer: handle SubConn state change: %p, %v", sc, s)
	oldS, ok := b.scStates[sc]
	if !ok {
		grpclog.Infof("base.baseBalancer: got state changes for an unknown SubConn: %p, %v", sc, s)
		return
	}
	b.scStates[sc] = s
	switch s {
	case connectivity.Idle:
		sc.Connect()
	case connectivity.Shutdown:
		// When an address was removed by resolver, b called RemoveSubConn but
		// kept the sc's state in scStates. Remove state for this sc here.
		delete(b.scStates, sc)
	}

	oldAggrState := b.state
	b.state = b.csEvltr.recordTransition(oldS, s)

	// Regenerate picker when one of the following happens:
	//  - this sc became ready from not-ready
	//  - this sc became not-ready from ready
	//  - the aggregated state of balancer became TransientFailure from non-TransientFailure
	//  - the aggregated state of balancer became non-TransientFailure from TransientFailure
	if (s == connectivity.Ready) != (oldS == connectivity.Ready) ||
		(b.state == connectivity.TransientFailure) != (oldAggrState == connectivity.TransientFailure) {
		b.regeneratePicker()
	}

	b.cc.UpdateBalancerState(b.state, b.picker)
	return
}

// Close is a nop because base balancer doesn't have internal state to clean up,
// and it doesn't need to call RemoveSubConn for the SubConns.
func (b *baseBalancer) Close() {
}

// NewErrPicker returns a picker that always returns err on Pick().
func NewErrPicker(err error) balancer.Picker {
	return &errPicker{err: err}
}

type errPicker struct {
	err error // Pick() always returns this err.
}

func (p *errPicker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) {
	return nil, nil, p.err
}

// connectivityStateEvaluator gets updated by addrConns when their
// states transition, based on which it evaluates the state of
// ClientConn.
type connectivityStateEvaluator struct {
	numReady            uint64 // Number of addrConns in ready state.
	numConnecting       uint64 // Number of addrConns in connecting state.
	numTransientFailure uint64 // Number of addrConns in transientFailure.
}

// recordTransition records state change happening in every subConn and based on
// that it evaluates what aggregated state should be.
// It can only transition between Ready, Connecting and TransientFailure. Other states,
// Idle and Shutdown are transitioned into by ClientConn; in the beginning of the connection
// before any subConn is created ClientConn is in idle state. In the end when ClientConn
// closes it is in Shutdown state.
//
// recordTransition should only be called synchronously from the same goroutine.
func (cse *connectivityStateEvaluator) recordTransition(oldState, newState connectivity.State) connectivity.State {
	// Update counters.
	for idx, state := range []connectivity.State{oldState, newState} {
		updateVal := 2*uint64(idx) - 1 // -1 for oldState and +1 for new.
		switch state {
		case connectivity.Ready:
			cse.numReady += updateVal
		case connectivity.Connecting:
			cse.numConnecting += updateVal
		case connectivity.TransientFailure:
			cse.numTransientFailure += updateVal
		}
	}

	// Evaluate.
	if cse.numReady > 0 {
		return connectivity.Ready
	}
	if cse.numConnecting > 0 {
		return connectivity.Connecting
	}
	return connectivity.TransientFailure
}
52 vendor/google.golang.org/grpc/balancer/base/base.go generated vendored Normal file
@@ -0,0 +1,52 @@ (new file, all lines added)
/*
 *
 * Copyright 2017 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

// Package base defines a balancer base that can be used to build balancers with
// different picking algorithms.
//
// The base balancer creates a new SubConn for each resolved address. The
// provided picker will only be notified about READY SubConns.
//
// This package is the base of round_robin balancer, its purpose is to be used
// to build round_robin like balancers with complex picking algorithms.
// Balancers with more complicated logic should try to implement a balancer
// builder from scratch.
//
// All APIs in this package are experimental.
package base

import (
	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/resolver"
)

// PickerBuilder creates balancer.Picker.
type PickerBuilder interface {
	// Build takes a slice of ready SubConns, and returns a picker that will be
	// used by gRPC to pick a SubConn.
	Build(readySCs map[resolver.Address]balancer.SubConn) balancer.Picker
}

// NewBalancerBuilder returns a balancer builder. The balancers
// built by this builder will use the picker builder to build pickers.
func NewBalancerBuilder(name string, pb PickerBuilder) balancer.Builder {
	return &baseBuilder{
		name:          name,
		pickerBuilder: pb,
	}
}
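Note: the new base package factors the SubConn bookkeeping out of round_robin so that other picking policies only need to supply a PickerBuilder. A minimal sketch of a custom policy built on it; the "first_ready" name and the firstPickerBuilder/firstPicker types are hypothetical, not part of this diff:

package firstready

import (
	"golang.org/x/net/context"

	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/balancer/base"
	"google.golang.org/grpc/resolver"
)

// firstPickerBuilder builds a picker that always returns the first READY SubConn it sees.
type firstPickerBuilder struct{}

func (*firstPickerBuilder) Build(readySCs map[resolver.Address]balancer.SubConn) balancer.Picker {
	var scs []balancer.SubConn
	for _, sc := range readySCs {
		scs = append(scs, sc)
	}
	return &firstPicker{subConns: scs}
}

type firstPicker struct {
	subConns []balancer.SubConn
}

func (p *firstPicker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) {
	if len(p.subConns) == 0 {
		return nil, nil, balancer.ErrNoSubConnAvailable
	}
	return p.subConns[0], nil, nil
}

func init() {
	// Register under a (lowercased) name, mirroring what roundrobin does below.
	balancer.Register(base.NewBalancerBuilder("first_ready", &firstPickerBuilder{}))
}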
194 vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go generated vendored
@@ -26,145 +26,37 @@ import (
 	"golang.org/x/net/context"
 	"google.golang.org/grpc/balancer"
-	"google.golang.org/grpc/connectivity"
+	"google.golang.org/grpc/balancer/base"
 	"google.golang.org/grpc/grpclog"
 	"google.golang.org/grpc/resolver"
 )
 
+// Name is the name of round_robin balancer.
+const Name = "round_robin"
+
 // newBuilder creates a new roundrobin balancer builder.
 func newBuilder() balancer.Builder {
-	return &rrBuilder{}
+	return base.NewBalancerBuilder(Name, &rrPickerBuilder{})
 }
 
 func init() {
 	balancer.Register(newBuilder())
 }
 
-type rrBuilder struct{}
+type rrPickerBuilder struct{}
 
-func (*rrBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer {
-	return &rrBalancer{
-		cc:       cc,
-		subConns: make(map[resolver.Address]balancer.SubConn),
-		scStates: make(map[balancer.SubConn]connectivity.State),
-		csEvltr:  &connectivityStateEvaluator{},
-		// Initialize picker to a picker that always return
-		// ErrNoSubConnAvailable, because when state of a SubConn changes, we
-		// may call UpdateBalancerState with this picker.
-		picker: newPicker([]balancer.SubConn{}, nil),
+func (*rrPickerBuilder) Build(readySCs map[resolver.Address]balancer.SubConn) balancer.Picker {
+	grpclog.Infof("roundrobinPicker: newPicker called with readySCs: %v", readySCs)
+	var scs []balancer.SubConn
+	for _, sc := range readySCs {
+		scs = append(scs, sc)
+	}
+	return &rrPicker{
+		subConns: scs,
 	}
 }
 
-func (*rrBuilder) Name() string {
-	return "round_robin"
-}
-
-type rrBalancer struct {
-	cc balancer.ClientConn
-
-	csEvltr *connectivityStateEvaluator
-	state   connectivity.State
-
-	subConns map[resolver.Address]balancer.SubConn
-	scStates map[balancer.SubConn]connectivity.State
-	picker   *picker
-}
-
-func (b *rrBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) {
-	if err != nil {
-		grpclog.Infof("roundrobin.rrBalancer: HandleResolvedAddrs called with error %v", err)
-		return
-	}
-	grpclog.Infoln("roundrobin.rrBalancer: got new resolved addresses: ", addrs)
-	// addrsSet is the set converted from addrs, it's used for quick lookup of an address.
-	addrsSet := make(map[resolver.Address]struct{})
-	for _, a := range addrs {
-		addrsSet[a] = struct{}{}
-		if _, ok := b.subConns[a]; !ok {
-			// a is a new address (not existing in b.subConns).
-			sc, err := b.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{})
-			if err != nil {
-				grpclog.Warningf("roundrobin.rrBalancer: failed to create new SubConn: %v", err)
-				continue
-			}
-			b.subConns[a] = sc
-			b.scStates[sc] = connectivity.Idle
-			sc.Connect()
-		}
-	}
-	for a, sc := range b.subConns {
-		// a was removed by resolver.
-		if _, ok := addrsSet[a]; !ok {
-			b.cc.RemoveSubConn(sc)
-			delete(b.subConns, a)
-			// Keep the state of this sc in b.scStates until sc's state becomes Shutdown.
-			// The entry will be deleted in HandleSubConnStateChange.
-		}
-	}
-}
-
-// regeneratePicker takes a snapshot of the balancer, and generates a picker
-// from it. The picker
-//  - always returns ErrTransientFailure if the balancer is in TransientFailure,
-//  - or does round robin selection of all READY SubConns otherwise.
-func (b *rrBalancer) regeneratePicker() {
-	if b.state == connectivity.TransientFailure {
-		b.picker = newPicker(nil, balancer.ErrTransientFailure)
-		return
-	}
-	var readySCs []balancer.SubConn
-	for sc, st := range b.scStates {
-		if st == connectivity.Ready {
-			readySCs = append(readySCs, sc)
-		}
-	}
-	b.picker = newPicker(readySCs, nil)
-}
-
-func (b *rrBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) {
-	grpclog.Infof("roundrobin.rrBalancer: handle SubConn state change: %p, %v", sc, s)
-	oldS, ok := b.scStates[sc]
-	if !ok {
-		grpclog.Infof("roundrobin.rrBalancer: got state changes for an unknown SubConn: %p, %v", sc, s)
-		return
-	}
-	b.scStates[sc] = s
-	switch s {
-	case connectivity.Idle:
-		sc.Connect()
-	case connectivity.Shutdown:
-		// When an address was removed by resolver, b called RemoveSubConn but
-		// kept the sc's state in scStates. Remove state for this sc here.
-		delete(b.scStates, sc)
-	}
-
-	oldAggrState := b.state
-	b.state = b.csEvltr.recordTransition(oldS, s)
-
-	// Regenerate picker when one of the following happens:
-	//  - this sc became ready from not-ready
-	//  - this sc became not-ready from ready
-	//  - the aggregated state of balancer became TransientFailure from non-TransientFailure
-	//  - the aggregated state of balancer became non-TransientFailure from TransientFailure
-	if (s == connectivity.Ready) != (oldS == connectivity.Ready) ||
-		(b.state == connectivity.TransientFailure) != (oldAggrState == connectivity.TransientFailure) {
-		b.regeneratePicker()
-	}
-
-	b.cc.UpdateBalancerState(b.state, b.picker)
-	return
-}
-
-// Close is a nop because roundrobin balancer doesn't internal state to clean
-// up, and it doesn't need to call RemoveSubConn for the SubConns.
-func (b *rrBalancer) Close() {
-}
-
-type picker struct {
-	// If err is not nil, Pick always returns this err. It's immutable after
-	// picker is created.
-	err error
-
+type rrPicker struct {
 	// subConns is the snapshot of the roundrobin balancer when this picker was
 	// created. The slice is immutable. Each Get() will do a round robin
 	// selection from it and return the selected SubConn.
@@ -174,20 +66,7 @@ type picker struct {
 	next int
 }
 
-func newPicker(scs []balancer.SubConn, err error) *picker {
-	grpclog.Infof("roundrobinPicker: newPicker called with scs: %v, %v", scs, err)
-	if err != nil {
-		return &picker{err: err}
-	}
-	return &picker{
-		subConns: scs,
-	}
-}
-
-func (p *picker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) {
-	if p.err != nil {
-		return nil, nil, p.err
-	}
+func (p *rrPicker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) {
 	if len(p.subConns) <= 0 {
 		return nil, nil, balancer.ErrNoSubConnAvailable
 	}
@@ -198,44 +77,3 @@ func (p *picker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.
 	p.mu.Unlock()
 	return sc, nil, nil
 }
-
-// connectivityStateEvaluator gets updated by addrConns when their
-// states transition, based on which it evaluates the state of
-// ClientConn.
-type connectivityStateEvaluator struct {
-	numReady            uint64 // Number of addrConns in ready state.
-	numConnecting       uint64 // Number of addrConns in connecting state.
-	numTransientFailure uint64 // Number of addrConns in transientFailure.
-}
-
-// recordTransition records state change happening in every subConn and based on
-// that it evaluates what aggregated state should be.
-// It can only transition between Ready, Connecting and TransientFailure. Other states,
-// Idle and Shutdown are transitioned into by ClientConn; in the beginning of the connection
-// before any subConn is created ClientConn is in idle state. In the end when ClientConn
-// closes it is in Shutdown state.
-//
-// recordTransition should only be called synchronously from the same goroutine.
-func (cse *connectivityStateEvaluator) recordTransition(oldState, newState connectivity.State) connectivity.State {
-	// Update counters.
-	for idx, state := range []connectivity.State{oldState, newState} {
-		updateVal := 2*uint64(idx) - 1 // -1 for oldState and +1 for new.
-		switch state {
-		case connectivity.Ready:
-			cse.numReady += updateVal
-		case connectivity.Connecting:
-			cse.numConnecting += updateVal
-		case connectivity.TransientFailure:
-			cse.numTransientFailure += updateVal
-		}
-	}
-
-	// Evaluate.
-	if cse.numReady > 0 {
-		return connectivity.Ready
-	}
-	if cse.numConnecting > 0 {
-		return connectivity.Connecting
-	}
-	return connectivity.TransientFailure
-}
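Note: with round_robin registered under a name, a client can select it at dial time through the WithBalancerName option that this same vendor bump adds to clientconn.go further down. A minimal sketch; the target address is illustrative, and any resolver that returns multiple addresses works:

package main

import (
	"log"

	"google.golang.org/grpc"
	_ "google.golang.org/grpc/balancer/roundrobin" // ensure the "round_robin" builder is registered
)

func main() {
	conn, err := grpc.Dial(
		"dns:///my-service.example.com:50051",
		grpc.WithInsecure(),
		grpc.WithBalancerName("round_robin"), // added by the clientconn.go change below
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}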
34 vendor/google.golang.org/grpc/balancer_conn_wrappers.go generated vendored
@@ -19,6 +19,7 @@
 package grpc
 
 import (
+	"fmt"
 	"sync"
 
 	"google.golang.org/grpc/balancer"
@@ -97,6 +98,7 @@ type ccBalancerWrapper struct {
 	resolverUpdateCh chan *resolverUpdate
 	done             chan struct{}
 
+	mu       sync.Mutex
 	subConns map[*acBalancerWrapper]struct{}
 }
 
@@ -141,7 +143,11 @@ func (ccb *ccBalancerWrapper) watcher() {
 		select {
 		case <-ccb.done:
 			ccb.balancer.Close()
-			for acbw := range ccb.subConns {
+			ccb.mu.Lock()
+			scs := ccb.subConns
+			ccb.subConns = nil
+			ccb.mu.Unlock()
+			for acbw := range scs {
 				ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain)
 			}
 			return
@@ -183,6 +189,14 @@ func (ccb *ccBalancerWrapper) handleResolvedAddrs(addrs []resolver.Address, err
 }
 
 func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) {
+	if len(addrs) <= 0 {
+		return nil, fmt.Errorf("grpc: cannot create SubConn with empty address list")
+	}
+	ccb.mu.Lock()
+	defer ccb.mu.Unlock()
+	if ccb.subConns == nil {
+		return nil, fmt.Errorf("grpc: ClientConn balancer wrapper was closed")
+	}
 	ac, err := ccb.cc.newAddrConn(addrs)
 	if err != nil {
 		return nil, err
@@ -200,15 +214,29 @@ func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) {
 	if !ok {
 		return
 	}
+	ccb.mu.Lock()
+	defer ccb.mu.Unlock()
+	if ccb.subConns == nil {
+		return
+	}
 	delete(ccb.subConns, acbw)
 	ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain)
 }
 
 func (ccb *ccBalancerWrapper) UpdateBalancerState(s connectivity.State, p balancer.Picker) {
+	ccb.mu.Lock()
+	defer ccb.mu.Unlock()
+	if ccb.subConns == nil {
+		return
+	}
 	ccb.cc.csMgr.updateState(s)
 	ccb.cc.blockingpicker.updatePicker(p)
 }
 
+func (ccb *ccBalancerWrapper) ResolveNow(o resolver.ResolveNowOption) {
+	ccb.cc.resolveNow(o)
+}
+
 func (ccb *ccBalancerWrapper) Target() string {
 	return ccb.cc.target
 }
@@ -223,6 +251,10 @@ type acBalancerWrapper struct {
 func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) {
 	acbw.mu.Lock()
 	defer acbw.mu.Unlock()
+	if len(addrs) <= 0 {
+		acbw.ac.tearDown(errConnDrain)
+		return
+	}
 	if !acbw.ac.tryUpdateAddrs(addrs) {
 		cc := acbw.ac.cc
 		acbw.ac.mu.Lock()
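Note: the wrapper now nils out its subConns map under a mutex when it shuts down, and every other method treats a nil map as "closed" and returns early. A generic, standalone sketch of that close-guard pattern; the registry type is hypothetical and not the gRPC code itself:

package main

import (
	"fmt"
	"sync"
)

// registry sketches the pattern above: Close nils the map under the lock,
// and every other method checks for nil before touching it.
type registry struct {
	mu    sync.Mutex
	items map[string]struct{}
}

func newRegistry() *registry {
	return &registry{items: make(map[string]struct{})}
}

func (r *registry) Add(name string) error {
	r.mu.Lock()
	defer r.mu.Unlock()
	if r.items == nil {
		return fmt.Errorf("registry was closed")
	}
	r.items[name] = struct{}{}
	return nil
}

func (r *registry) Close() {
	r.mu.Lock()
	r.items = nil
	r.mu.Unlock()
}

func main() {
	r := newRegistry()
	fmt.Println(r.Add("a")) // <nil>
	r.Close()
	fmt.Println(r.Add("b")) // registry was closed
}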
7 vendor/google.golang.org/grpc/balancer_v1_wrapper.go generated vendored
@@ -28,6 +28,7 @@ import (
 	"google.golang.org/grpc/connectivity"
 	"google.golang.org/grpc/grpclog"
 	"google.golang.org/grpc/resolver"
+	"google.golang.org/grpc/status"
 )
 
 type balancerWrapperBuilder struct {
@@ -173,10 +174,10 @@ func (bw *balancerWrapper) lbWatcher() {
 				sc.Connect()
 			}
 		} else {
-			oldSC.UpdateAddresses(newAddrs)
 			bw.mu.Lock()
 			bw.connSt[oldSC].addr = addrs[0]
 			bw.mu.Unlock()
+			oldSC.UpdateAddresses(newAddrs)
 		}
 	} else {
 		var (
@@ -317,12 +318,12 @@ func (bw *balancerWrapper) Pick(ctx context.Context, opts balancer.PickOptions)
 		Metadata: a.Metadata,
 	}]
 	if !ok && failfast {
-		return nil, nil, Errorf(codes.Unavailable, "there is no connection available")
+		return nil, nil, status.Errorf(codes.Unavailable, "there is no connection available")
 	}
 	if s, ok := bw.connSt[sc]; failfast && (!ok || s.s != connectivity.Ready) {
 		// If the returned sc is not ready and RPC is failfast,
 		// return error, and this RPC will fail.
-		return nil, nil, Errorf(codes.Unavailable, "there is no connection available")
+		return nil, nil, status.Errorf(codes.Unavailable, "there is no connection available")
 	}
 }
 
319 vendor/google.golang.org/grpc/call.go generated vendored
@@ -19,137 +19,39 @@
 package grpc
 
 import (
-	"io"
-	"time"
-
 	"golang.org/x/net/context"
-	"golang.org/x/net/trace"
-	"google.golang.org/grpc/balancer"
-	"google.golang.org/grpc/codes"
-	"google.golang.org/grpc/encoding"
-	"google.golang.org/grpc/peer"
-	"google.golang.org/grpc/stats"
-	"google.golang.org/grpc/transport"
 )
 
-// recvResponse receives and parses an RPC response.
-// On error, it returns the error and indicates whether the call should be retried.
-//
-// TODO(zhaoq): Check whether the received message sequence is valid.
-// TODO ctx is used for stats collection and processing. It is the context passed from the application.
-func recvResponse(ctx context.Context, dopts dialOptions, t transport.ClientTransport, c *callInfo, stream *transport.Stream, reply interface{}) (err error) {
-	// Try to acquire header metadata from the server if there is any.
-	defer func() {
-		if err != nil {
-			if _, ok := err.(transport.ConnectionError); !ok {
-				t.CloseStream(stream, err)
-			}
-		}
-	}()
-	c.headerMD, err = stream.Header()
-	if err != nil {
-		return
-	}
-	p := &parser{r: stream}
-	var inPayload *stats.InPayload
-	if dopts.copts.StatsHandler != nil {
-		inPayload = &stats.InPayload{
-			Client: true,
-		}
-	}
-	for {
-		if c.maxReceiveMessageSize == nil {
-			return Errorf(codes.Internal, "callInfo maxReceiveMessageSize field uninitialized(nil)")
-		}
-
-		// Set dc if it exists and matches the message compression type used,
-		// otherwise set comp if a registered compressor exists for it.
-		var comp encoding.Compressor
-		var dc Decompressor
-		if rc := stream.RecvCompress(); dopts.dc != nil && dopts.dc.Type() == rc {
-			dc = dopts.dc
-		} else if rc != "" && rc != encoding.Identity {
-			comp = encoding.GetCompressor(rc)
-		}
-		if err = recv(p, dopts.codec, stream, dc, reply, *c.maxReceiveMessageSize, inPayload, comp); err != nil {
-			if err == io.EOF {
-				break
-			}
-			return
-		}
-	}
-	if inPayload != nil && err == io.EOF && stream.Status().Code() == codes.OK {
-		// TODO in the current implementation, inTrailer may be handled before inPayload in some cases.
-		// Fix the order if necessary.
-		dopts.copts.StatsHandler.HandleRPC(ctx, inPayload)
-	}
-	c.trailerMD = stream.Trailer()
-	return nil
-}
-
-// sendRequest writes out various information of an RPC such as Context and Message.
-func sendRequest(ctx context.Context, dopts dialOptions, compressor Compressor, c *callInfo, callHdr *transport.CallHdr, stream *transport.Stream, t transport.ClientTransport, args interface{}, opts *transport.Options) (err error) {
-	defer func() {
-		if err != nil {
-			// If err is connection error, t will be closed, no need to close stream here.
-			if _, ok := err.(transport.ConnectionError); !ok {
-				t.CloseStream(stream, err)
-			}
-		}
-	}()
-	var (
-		outPayload *stats.OutPayload
-	)
-	if dopts.copts.StatsHandler != nil {
-		outPayload = &stats.OutPayload{
-			Client: true,
-		}
-	}
-	// Set comp and clear compressor if a registered compressor matches the type
-	// specified via UseCompressor. (And error if a matching compressor is not
-	// registered.)
-	var comp encoding.Compressor
-	if ct := c.compressorType; ct != "" && ct != encoding.Identity {
-		compressor = nil // Disable the legacy compressor.
-		comp = encoding.GetCompressor(ct)
-		if comp == nil {
-			return Errorf(codes.Internal, "grpc: Compressor is not installed for grpc-encoding %q", ct)
-		}
-	}
-	hdr, data, err := encode(dopts.codec, args, compressor, outPayload, comp)
-	if err != nil {
-		return err
-	}
-	if c.maxSendMessageSize == nil {
-		return Errorf(codes.Internal, "callInfo maxSendMessageSize field uninitialized(nil)")
-	}
-	if len(data) > *c.maxSendMessageSize {
-		return Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. %d)", len(data), *c.maxSendMessageSize)
-	}
-	err = t.Write(stream, hdr, data, opts)
-	if err == nil && outPayload != nil {
-		outPayload.SentTime = time.Now()
-		dopts.copts.StatsHandler.HandleRPC(ctx, outPayload)
-	}
-	// t.NewStream(...) could lead to an early rejection of the RPC (e.g., the service/method
-	// does not exist.) so that t.Write could get io.EOF from wait(...). Leave the following
-	// recvResponse to get the final status.
-	if err != nil && err != io.EOF {
-		return err
-	}
-	// Sent successfully.
-	return nil
-}
-
 // Invoke sends the RPC request on the wire and returns after response is
 // received. This is typically called by generated code.
+//
+// All errors returned by Invoke are compatible with the status package.
 func (cc *ClientConn) Invoke(ctx context.Context, method string, args, reply interface{}, opts ...CallOption) error {
+	// allow interceptor to see all applicable call options, which means those
+	// configured as defaults from dial option as well as per-call options
+	opts = combine(cc.dopts.callOptions, opts)
+
 	if cc.dopts.unaryInt != nil {
 		return cc.dopts.unaryInt(ctx, method, args, reply, cc, invoke, opts...)
 	}
 	return invoke(ctx, method, args, reply, cc, opts...)
 }
 
+func combine(o1 []CallOption, o2 []CallOption) []CallOption {
+	// we don't use append because o1 could have extra capacity whose
+	// elements would be overwritten, which could cause inadvertent
+	// sharing (and race connditions) between concurrent calls
+	if len(o1) == 0 {
+		return o2
+	} else if len(o2) == 0 {
+		return o1
+	}
+	ret := make([]CallOption, len(o1)+len(o2))
+	copy(ret, o1)
+	copy(ret[len(o1):], o2)
+	return ret
+}
+
 // Invoke sends the RPC request on the wire and returns after response is
 // received. This is typically called by generated code.
 //
@@ -158,187 +60,34 @@ func Invoke(ctx context.Context, method string, args, reply interface{}, cc *Cli
 	return cc.Invoke(ctx, method, args, reply, opts...)
 }
 
-func invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) (e error) {
-	c := defaultCallInfo()
-	mc := cc.GetMethodConfig(method)
-	if mc.WaitForReady != nil {
-		c.failFast = !*mc.WaitForReady
-	}
-
-	if mc.Timeout != nil && *mc.Timeout >= 0 {
-		var cancel context.CancelFunc
-		ctx, cancel = context.WithTimeout(ctx, *mc.Timeout)
-		defer cancel()
-	}
-
-	opts = append(cc.dopts.callOptions, opts...)
-	for _, o := range opts {
-		if err := o.before(c); err != nil {
-			return toRPCErr(err)
-		}
-	}
-	defer func() {
-		for _, o := range opts {
-			o.after(c)
-		}
-	}()
-
-	c.maxSendMessageSize = getMaxSize(mc.MaxReqSize, c.maxSendMessageSize, defaultClientMaxSendMessageSize)
-	c.maxReceiveMessageSize = getMaxSize(mc.MaxRespSize, c.maxReceiveMessageSize, defaultClientMaxReceiveMessageSize)
-
-	if EnableTracing {
-		c.traceInfo.tr = trace.New("grpc.Sent."+methodFamily(method), method)
-		defer c.traceInfo.tr.Finish()
-		c.traceInfo.firstLine.client = true
-		if deadline, ok := ctx.Deadline(); ok {
-			c.traceInfo.firstLine.deadline = deadline.Sub(time.Now())
-		}
-		c.traceInfo.tr.LazyLog(&c.traceInfo.firstLine, false)
-		// TODO(dsymonds): Arrange for c.traceInfo.firstLine.remoteAddr to be set.
-		defer func() {
-			if e != nil {
-				c.traceInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{e}}, true)
-				c.traceInfo.tr.SetError()
-			}
-		}()
-	}
-	ctx = newContextWithRPCInfo(ctx, c.failFast)
-	sh := cc.dopts.copts.StatsHandler
-	if sh != nil {
-		ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method, FailFast: c.failFast})
-		begin := &stats.Begin{
-			Client:    true,
-			BeginTime: time.Now(),
-			FailFast:  c.failFast,
-		}
-		sh.HandleRPC(ctx, begin)
-		defer func() {
-			end := &stats.End{
-				Client:  true,
-				EndTime: time.Now(),
-				Error:   e,
-			}
-			sh.HandleRPC(ctx, end)
-		}()
-	}
-	topts := &transport.Options{
-		Last:  true,
-		Delay: false,
-	}
-	callHdr := &transport.CallHdr{
-		Host:   cc.authority,
-		Method: method,
-	}
-	if c.creds != nil {
-		callHdr.Creds = c.creds
-	}
-	if c.compressorType != "" {
-		callHdr.SendCompress = c.compressorType
-	} else if cc.dopts.cp != nil {
-		callHdr.SendCompress = cc.dopts.cp.Type()
-	}
+var unaryStreamDesc = &StreamDesc{ServerStreams: false, ClientStreams: false}
+
+func invoke(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error {
+	// TODO: implement retries in clientStream and make this simply
+	// newClientStream, SendMsg, RecvMsg.
 	firstAttempt := true
 
 	for {
-		// Check to make sure the context has expired. This will prevent us from
-		// looping forever if an error occurs for wait-for-ready RPCs where no data
-		// is sent on the wire.
-		select {
-		case <-ctx.Done():
-			return toRPCErr(ctx.Err())
-		default:
-		}
-
-		// Record the done handler from Balancer.Get(...). It is called once the
-		// RPC has completed or failed.
-		t, done, err := cc.getTransport(ctx, c.failFast)
+		csInt, err := newClientStream(ctx, unaryStreamDesc, cc, method, opts...)
 		if err != nil {
 			return err
 		}
-		stream, err := t.NewStream(ctx, callHdr)
-		if err != nil {
-			if done != nil {
-				done(balancer.DoneInfo{Err: err})
-			}
-			// In the event of any error from NewStream, we never attempted to write
-			// anything to the wire, so we can retry indefinitely for non-fail-fast
-			// RPCs.
-			if !c.failFast {
-				continue
-			}
-			return toRPCErr(err)
-		}
-		if peer, ok := peer.FromContext(stream.Context()); ok {
-			c.peer = peer
-		}
-		if c.traceInfo.tr != nil {
-			c.traceInfo.tr.LazyLog(&payload{sent: true, msg: args}, true)
-		}
-		err = sendRequest(ctx, cc.dopts, cc.dopts.cp, c, callHdr, stream, t, args, topts)
-		if err != nil {
-			if done != nil {
-				updateRPCInfoInContext(ctx, rpcInfo{
-					bytesSent:     true,
-					bytesReceived: stream.BytesReceived(),
-				})
-				done(balancer.DoneInfo{Err: err})
-			}
-			// Retry a non-failfast RPC when
-			// i) the server started to drain before this RPC was initiated.
-			// ii) the server refused the stream.
-			if !c.failFast && stream.Unprocessed() {
-				// In this case, the server did not receive the data, but we still
-				// created wire traffic, so we should not retry indefinitely.
-				if firstAttempt {
+		cs := csInt.(*clientStream)
+		if err := cs.SendMsg(req); err != nil {
+			if !cs.c.failFast && cs.attempt.s.Unprocessed() && firstAttempt {
				// TODO: Add a field to header for grpc-transparent-retry-attempts
				firstAttempt = false
				continue
			}
-			// Otherwise, give up and return an error anyway.
-			}
-			return toRPCErr(err)
+			return err
 		}
-		err = recvResponse(ctx, cc.dopts, t, c, stream, reply)
-		if err != nil {
-			if done != nil {
-				updateRPCInfoInContext(ctx, rpcInfo{
-					bytesSent:     true,
-					bytesReceived: stream.BytesReceived(),
-				})
-				done(balancer.DoneInfo{Err: err})
-			}
-			if !c.failFast && stream.Unprocessed() {
-				// In these cases, the server did not receive the data, but we still
-				// created wire traffic, so we should not retry indefinitely.
-				if firstAttempt {
+		if err := cs.RecvMsg(reply); err != nil {
+			if !cs.c.failFast && cs.attempt.s.Unprocessed() && firstAttempt {
				// TODO: Add a field to header for grpc-transparent-retry-attempts
				firstAttempt = false
				continue
			}
-			// Otherwise, give up and return an error anyway.
-			}
-			return toRPCErr(err)
+			return err
 		}
-		if c.traceInfo.tr != nil {
-			c.traceInfo.tr.LazyLog(&payload{sent: false, msg: reply}, true)
-		}
-		t.CloseStream(stream, nil)
-		if done != nil {
-			updateRPCInfoInContext(ctx, rpcInfo{
-				bytesSent:     true,
-				bytesReceived: stream.BytesReceived(),
-			})
-			done(balancer.DoneInfo{Err: err})
-		}
-		if !c.failFast && stream.Unprocessed() {
-			// In these cases, the server did not receive the data, but we still
-			// created wire traffic, so we should not retry indefinitely.
-			if firstAttempt {
-				// TODO: Add a field to header for grpc-transparent-retry-attempts
-				firstAttempt = false
-				continue
-			}
-		}
-		return stream.Status().Err()
+		return nil
 	}
 }
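Note: the new combine helper copies both option slices into a fresh backing array instead of appending; the comment in the diff explains that appending to a slice with spare capacity could let two concurrent calls share, and silently overwrite, the same elements. A standalone sketch of that aliasing hazard, using plain strings rather than the gRPC types:

package main

import "fmt"

func main() {
	defaults := make([]string, 0, 4)
	defaults = append(defaults, "a", "b")

	// Two "calls" each append their own per-call option to the shared defaults.
	call1 := append(defaults, "x")
	call2 := append(defaults, "y")

	// Because defaults had spare capacity, call1 and call2 share a backing
	// array: the second append overwrote the element the first one added.
	fmt.Println(call1, call2) // [a b y] [a b y]

	// Copying into a freshly allocated slice, as combine does, avoids the sharing.
	merged := make([]string, len(defaults)+1)
	copy(merged, defaults)
	merged[len(defaults)] = "x"
	fmt.Println(merged) // [a b x]
}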
435
vendor/google.golang.org/grpc/clientconn.go
generated
vendored
435
vendor/google.golang.org/grpc/clientconn.go
generated
vendored
@@ -32,6 +32,7 @@ import (
|
|||||||
"golang.org/x/net/trace"
|
"golang.org/x/net/trace"
|
||||||
"google.golang.org/grpc/balancer"
|
"google.golang.org/grpc/balancer"
|
||||||
_ "google.golang.org/grpc/balancer/roundrobin" // To register roundrobin.
|
_ "google.golang.org/grpc/balancer/roundrobin" // To register roundrobin.
|
||||||
|
"google.golang.org/grpc/codes"
|
||||||
"google.golang.org/grpc/connectivity"
|
"google.golang.org/grpc/connectivity"
|
||||||
"google.golang.org/grpc/credentials"
|
"google.golang.org/grpc/credentials"
|
||||||
"google.golang.org/grpc/grpclog"
|
"google.golang.org/grpc/grpclog"
|
||||||
@@ -40,17 +41,22 @@ import (
|
|||||||
_ "google.golang.org/grpc/resolver/dns" // To register dns resolver.
|
_ "google.golang.org/grpc/resolver/dns" // To register dns resolver.
|
||||||
_ "google.golang.org/grpc/resolver/passthrough" // To register passthrough resolver.
|
_ "google.golang.org/grpc/resolver/passthrough" // To register passthrough resolver.
|
||||||
"google.golang.org/grpc/stats"
|
"google.golang.org/grpc/stats"
|
||||||
|
"google.golang.org/grpc/status"
|
||||||
"google.golang.org/grpc/transport"
|
"google.golang.org/grpc/transport"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
// minimum time to give a connection to complete
|
||||||
|
minConnectTimeout = 20 * time.Second
|
||||||
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
// ErrClientConnClosing indicates that the operation is illegal because
|
// ErrClientConnClosing indicates that the operation is illegal because
|
||||||
// the ClientConn is closing.
|
// the ClientConn is closing.
|
||||||
ErrClientConnClosing = errors.New("grpc: the client connection is closing")
|
//
|
||||||
// ErrClientConnTimeout indicates that the ClientConn cannot establish the
|
// Deprecated: this error should not be relied upon by users; use the status
|
||||||
// underlying connections within the specified timeout.
|
// code of Canceled instead.
|
||||||
// DEPRECATED: Please use context.DeadlineExceeded instead.
|
ErrClientConnClosing = status.Error(codes.Canceled, "grpc: the client connection is closing")
|
||||||
ErrClientConnTimeout = errors.New("grpc: timed out when dialing")
|
|
||||||
// errConnDrain indicates that the connection starts to be drained and does not accept any new RPCs.
|
// errConnDrain indicates that the connection starts to be drained and does not accept any new RPCs.
|
||||||
errConnDrain = errors.New("grpc: the connection is drained")
|
errConnDrain = errors.New("grpc: the connection is drained")
|
||||||
// errConnClosing indicates that the connection is closing.
|
// errConnClosing indicates that the connection is closing.
|
||||||
@@ -59,8 +65,11 @@ var (
|
|||||||
errConnUnavailable = errors.New("grpc: the connection is unavailable")
|
errConnUnavailable = errors.New("grpc: the connection is unavailable")
|
||||||
// errBalancerClosed indicates that the balancer is closed.
|
// errBalancerClosed indicates that the balancer is closed.
|
||||||
errBalancerClosed = errors.New("grpc: balancer is closed")
|
errBalancerClosed = errors.New("grpc: balancer is closed")
|
||||||
// minimum time to give a connection to complete
|
// We use an accessor so that minConnectTimeout can be
|
||||||
minConnectTimeout = 20 * time.Second
|
// atomically read and updated while testing.
|
||||||
|
getMinConnectTimeout = func() time.Duration {
|
||||||
|
return minConnectTimeout
|
||||||
|
}
|
||||||
)
|
)
|
||||||
|
|
||||||
// The following errors are returned from Dial and DialContext
|
// The following errors are returned from Dial and DialContext
|
||||||
@@ -85,7 +94,6 @@ var (
|
|||||||
type dialOptions struct {
|
type dialOptions struct {
|
||||||
unaryInt UnaryClientInterceptor
|
unaryInt UnaryClientInterceptor
|
||||||
streamInt StreamClientInterceptor
|
streamInt StreamClientInterceptor
|
||||||
codec Codec
|
|
||||||
cp Compressor
|
cp Compressor
|
||||||
dc Decompressor
|
dc Decompressor
|
||||||
bs backoffStrategy
|
bs backoffStrategy
|
||||||
@@ -95,8 +103,12 @@ type dialOptions struct {
|
|||||||
scChan <-chan ServiceConfig
|
scChan <-chan ServiceConfig
|
||||||
copts transport.ConnectOptions
|
copts transport.ConnectOptions
|
||||||
callOptions []CallOption
|
callOptions []CallOption
|
||||||
// This is to support v1 balancer.
|
// This is used by v1 balancer dial option WithBalancer to support v1
|
||||||
|
// balancer, and also by WithBalancerName dial option.
|
||||||
balancerBuilder balancer.Builder
|
balancerBuilder balancer.Builder
|
||||||
|
// This is to support grpclb.
|
||||||
|
resolverBuilder resolver.Builder
|
||||||
|
waitForHandshake bool
|
||||||
}
|
}
|
||||||
|
|
||||||
const (
|
const (
|
||||||
@@ -107,6 +119,15 @@ const (
|
|||||||
// DialOption configures how we set up the connection.
|
// DialOption configures how we set up the connection.
|
||||||
type DialOption func(*dialOptions)
|
type DialOption func(*dialOptions)
|
||||||
|
|
||||||
|
// WithWaitForHandshake blocks until the initial settings frame is received from the
|
||||||
|
// server before assigning RPCs to the connection.
|
||||||
|
// Experimental API.
|
||||||
|
func WithWaitForHandshake() DialOption {
|
||||||
|
return func(o *dialOptions) {
|
||||||
|
o.waitForHandshake = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// WithWriteBufferSize lets you set the size of write buffer, this determines how much data can be batched
|
// WithWriteBufferSize lets you set the size of write buffer, this determines how much data can be batched
|
||||||
// before doing a write on the wire.
|
// before doing a write on the wire.
|
||||||
func WithWriteBufferSize(s int) DialOption {
|
func WithWriteBufferSize(s int) DialOption {
|
||||||
@@ -152,10 +173,10 @@ func WithDefaultCallOptions(cos ...CallOption) DialOption {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// WithCodec returns a DialOption which sets a codec for message marshaling and unmarshaling.
|
// WithCodec returns a DialOption which sets a codec for message marshaling and unmarshaling.
|
||||||
|
//
|
||||||
|
// Deprecated: use WithDefaultCallOptions(CallCustomCodec(c)) instead.
|
||||||
func WithCodec(c Codec) DialOption {
|
func WithCodec(c Codec) DialOption {
|
||||||
return func(o *dialOptions) {
|
return WithDefaultCallOptions(CallCustomCodec(c))
|
||||||
o.codec = c
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// WithCompressor returns a DialOption which sets a Compressor to use for
|
// WithCompressor returns a DialOption which sets a Compressor to use for
|
||||||
@@ -186,7 +207,8 @@ func WithDecompressor(dc Decompressor) DialOption {
|
|||||||
|
|
||||||
// WithBalancer returns a DialOption which sets a load balancer with the v1 API.
|
// WithBalancer returns a DialOption which sets a load balancer with the v1 API.
|
||||||
// Name resolver will be ignored if this DialOption is specified.
|
// Name resolver will be ignored if this DialOption is specified.
|
||||||
// Deprecated: use the new balancer APIs in balancer package instead.
|
//
|
||||||
|
// Deprecated: use the new balancer APIs in balancer package and WithBalancerName.
|
||||||
func WithBalancer(b Balancer) DialOption {
|
func WithBalancer(b Balancer) DialOption {
|
||||||
return func(o *dialOptions) {
|
return func(o *dialOptions) {
|
||||||
o.balancerBuilder = &balancerWrapperBuilder{
|
o.balancerBuilder = &balancerWrapperBuilder{
|
||||||
@@ -195,12 +217,28 @@ func WithBalancer(b Balancer) DialOption {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// WithBalancerBuilder is for testing only. Users using custom balancers should
|
// WithBalancerName sets the balancer that the ClientConn will be initialized
|
||||||
// register their balancer and use service config to choose the balancer to use.
|
// with. Balancer registered with balancerName will be used. This function
|
||||||
func WithBalancerBuilder(b balancer.Builder) DialOption {
|
// panics if no balancer was registered by balancerName.
|
||||||
// TODO(bar) remove this when switching balancer is done.
|
//
|
||||||
|
// The balancer cannot be overridden by balancer option specified by service
|
||||||
|
// config.
|
||||||
|
//
|
||||||
|
// This is an EXPERIMENTAL API.
|
||||||
|
func WithBalancerName(balancerName string) DialOption {
|
||||||
|
builder := balancer.Get(balancerName)
|
||||||
|
if builder == nil {
|
||||||
|
panic(fmt.Sprintf("grpc.WithBalancerName: no balancer is registered for name %v", balancerName))
|
||||||
|
}
|
||||||
return func(o *dialOptions) {
|
return func(o *dialOptions) {
|
||||||
o.balancerBuilder = b
|
o.balancerBuilder = builder
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// withResolverBuilder is only for grpclb.
|
||||||
|
func withResolverBuilder(b resolver.Builder) DialOption {
|
||||||
|
return func(o *dialOptions) {
|
||||||
|
o.resolverBuilder = b
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -231,7 +269,7 @@ func WithBackoffConfig(b BackoffConfig) DialOption {
|
|||||||
return withBackoff(b)
|
return withBackoff(b)
|
||||||
}
|
}
|
||||||
|
|
||||||
// withBackoff sets the backoff strategy used for retries after a
|
// withBackoff sets the backoff strategy used for connectRetryNum after a
|
||||||
// failed connection attempt.
|
// failed connection attempt.
|
||||||
//
|
//
|
||||||
// This can be exported if arbitrary backoff strategies are allowed by gRPC.
|
// This can be exported if arbitrary backoff strategies are allowed by gRPC.
|
||||||
@@ -283,18 +321,23 @@ func WithTimeout(d time.Duration) DialOption {
 	}
 }
 
+func withContextDialer(f func(context.Context, string) (net.Conn, error)) DialOption {
+	return func(o *dialOptions) {
+		o.copts.Dialer = f
+	}
+}
+
 // WithDialer returns a DialOption that specifies a function to use for dialing network addresses.
 // If FailOnNonTempDialError() is set to true, and an error is returned by f, gRPC checks the error's
 // Temporary() method to decide if it should try to reconnect to the network address.
 func WithDialer(f func(string, time.Duration) (net.Conn, error)) DialOption {
-	return func(o *dialOptions) {
-		o.copts.Dialer = func(ctx context.Context, addr string) (net.Conn, error) {
+	return withContextDialer(
+		func(ctx context.Context, addr string) (net.Conn, error) {
 			if deadline, ok := ctx.Deadline(); ok {
 				return f(addr, deadline.Sub(time.Now()))
 			}
 			return f(addr, 0)
-		}
-	}
+		})
 }
 
 // WithStatsHandler returns a DialOption that specifies the stats handler
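Callers that need a non-TCP carrier (for example a Unix or vsock socket, as an agent client typically does) pass their own dialer through WithDialer. A hedged illustration of the deadline handling described above; the socket path and helper are invented for this sketch:

package main

import (
	"log"
	"net"
	"time"

	"google.golang.org/grpc"
)

// unixDialer dials a Unix socket within the timeout that gRPC derives from the
// context deadline via the deadline.Sub(time.Now()) conversion shown above.
func unixDialer(addr string, timeout time.Duration) (net.Conn, error) {
	return net.DialTimeout("unix", addr, timeout)
}

func main() {
	// "/run/example/agent.sock" is a made-up path used only for illustration.
	conn, err := grpc.Dial("/run/example/agent.sock",
		grpc.WithInsecure(),
		grpc.WithDialer(unixDialer))
	if err != nil {
		log.Fatalf("dial failed: %v", err)
	}
	defer conn.Close()
}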
@@ -362,6 +405,10 @@ func Dial(target string, opts ...DialOption) (*ClientConn, error) {
 // cancel or expire the pending connection. Once this function returns, the
 // cancellation and expiration of ctx will be noop. Users should call ClientConn.Close
 // to terminate all the pending operations after this function returns.
+//
+// The target name syntax is defined in
+// https://github.com/grpc/grpc/blob/master/doc/naming.md.
+// e.g. to use dns resolver, a "dns:///" prefix should be applied to the target.
 func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *ClientConn, err error) {
 	cc := &ClientConn{
 		target: target,
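A brief hedged example of the scheme-qualified target syntax mentioned in the new doc lines; the host and port are placeholders and this is not code from the diff:

package main

import (
	"log"

	"golang.org/x/net/context"
	"google.golang.org/grpc"
)

func main() {
	// "dns:///" names the resolver scheme explicitly per the naming.md syntax.
	conn, err := grpc.DialContext(context.Background(),
		"dns:///my-service.example.com:50051", grpc.WithInsecure())
	if err != nil {
		log.Fatalf("dial failed: %v", err)
	}
	defer conn.Close()
}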
@@ -396,7 +443,8 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
 	if cc.dopts.copts.Dialer == nil {
 		cc.dopts.copts.Dialer = newProxyDialer(
 			func(ctx context.Context, addr string) (net.Conn, error) {
-				return dialContext(ctx, "tcp", addr)
+				network, addr := parseDialTarget(addr)
+				return dialContext(ctx, network, addr)
 			},
 		)
 	}
@@ -437,14 +485,28 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
 		default:
 		}
 	}
-	// Set defaults.
-	if cc.dopts.codec == nil {
-		cc.dopts.codec = protoCodec{}
-	}
 	if cc.dopts.bs == nil {
 		cc.dopts.bs = DefaultBackoffConfig
 	}
-	cc.parsedTarget = parseTarget(cc.target)
+	if cc.dopts.resolverBuilder == nil {
+		// Only try to parse target when resolver builder is not already set.
+		cc.parsedTarget = parseTarget(cc.target)
+		grpclog.Infof("parsed scheme: %q", cc.parsedTarget.Scheme)
+		cc.dopts.resolverBuilder = resolver.Get(cc.parsedTarget.Scheme)
+		if cc.dopts.resolverBuilder == nil {
+			// If resolver builder is still nil, the parse target's scheme is
+			// not registered. Fallback to default resolver and set Endpoint to
+			// the original unparsed target.
+			grpclog.Infof("scheme %q not registered, fallback to default scheme", cc.parsedTarget.Scheme)
+			cc.parsedTarget = resolver.Target{
+				Scheme:   resolver.GetDefaultScheme(),
+				Endpoint: target,
+			}
+			cc.dopts.resolverBuilder = resolver.Get(cc.parsedTarget.Scheme)
+		}
+	} else {
+		cc.parsedTarget = resolver.Target{Endpoint: target}
+	}
 	creds := cc.dopts.copts.TransportCredentials
 	if creds != nil && creds.Info().ServerName != "" {
 		cc.authority = creds.Info().ServerName
@@ -480,17 +542,19 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
 		Dialer: cc.dopts.copts.Dialer,
 	}
 
-	if cc.dopts.balancerBuilder != nil {
-		cc.customBalancer = true
-		// Build should not take long time. So it's ok to not have a goroutine for it.
-		cc.balancerWrapper = newCCBalancerWrapper(cc, cc.dopts.balancerBuilder, cc.balancerBuildOpts)
-	}
-
 	// Build the resolver.
 	cc.resolverWrapper, err = newCCResolverWrapper(cc)
 	if err != nil {
 		return nil, fmt.Errorf("failed to build resolver: %v", err)
 	}
+	// Start the resolver wrapper goroutine after resolverWrapper is created.
+	//
+	// If the goroutine is started before resolverWrapper is ready, the
+	// following may happen: The goroutine sends updates to cc. cc forwards
+	// those to balancer. Balancer creates new addrConn. addrConn fails to
+	// connect, and calls resolveNow(). resolveNow() tries to use the non-ready
+	// resolverWrapper.
+	cc.resolverWrapper.start()
 
 	// A blocking dial blocks until the clientConn is ready.
 	if cc.dopts.block {
@@ -563,7 +627,6 @@ type ClientConn struct {
 	dopts dialOptions
 	csMgr *connectivityStateManager
 
-	customBalancer    bool // If this is true, switching balancer will be disabled.
 	balancerBuildOpts balancer.BuildOptions
 	resolverWrapper   *ccResolverWrapper
 	blockingpicker    *pickerWrapper
@@ -575,6 +638,7 @@ type ClientConn struct {
 	// Keepalive parameter can be updated if a GoAway is received.
 	mkp             keepalive.ClientParameters
 	curBalancerName string
+	preBalancerName string // previous balancer name.
 	curAddresses    []resolver.Address
 	balancerWrapper *ccBalancerWrapper
 }
@@ -624,51 +688,92 @@ func (cc *ClientConn) handleResolvedAddrs(addrs []resolver.Address, err error) {
|
|||||||
cc.mu.Lock()
|
cc.mu.Lock()
|
||||||
defer cc.mu.Unlock()
|
defer cc.mu.Unlock()
|
||||||
if cc.conns == nil {
|
if cc.conns == nil {
|
||||||
|
// cc was closed.
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
// TODO(bar switching) when grpclb is submitted, check address type and start grpclb.
|
if reflect.DeepEqual(cc.curAddresses, addrs) {
|
||||||
if !cc.customBalancer && cc.balancerWrapper == nil {
|
return
|
||||||
// No customBalancer was specified by DialOption, and this is the first
|
|
||||||
// time handling resolved addresses, create a pickfirst balancer.
|
|
||||||
builder := newPickfirstBuilder()
|
|
||||||
cc.curBalancerName = builder.Name()
|
|
||||||
cc.balancerWrapper = newCCBalancerWrapper(cc, builder, cc.balancerBuildOpts)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// TODO(bar switching) compare addresses, if there's no update, don't notify balancer.
|
|
||||||
cc.curAddresses = addrs
|
cc.curAddresses = addrs
|
||||||
|
|
||||||
|
if cc.dopts.balancerBuilder == nil {
|
||||||
|
// Only look at balancer types and switch balancer if balancer dial
|
||||||
|
// option is not set.
|
||||||
|
var isGRPCLB bool
|
||||||
|
for _, a := range addrs {
|
||||||
|
if a.Type == resolver.GRPCLB {
|
||||||
|
isGRPCLB = true
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
var newBalancerName string
|
||||||
|
if isGRPCLB {
|
||||||
|
newBalancerName = grpclbName
|
||||||
|
} else {
|
||||||
|
// Address list doesn't contain grpclb address. Try to pick a
|
||||||
|
// non-grpclb balancer.
|
||||||
|
newBalancerName = cc.curBalancerName
|
||||||
|
// If current balancer is grpclb, switch to the previous one.
|
||||||
|
if newBalancerName == grpclbName {
|
||||||
|
newBalancerName = cc.preBalancerName
|
||||||
|
}
|
||||||
|
// The following could be true in two cases:
|
||||||
|
// - the first time handling resolved addresses
|
||||||
|
// (curBalancerName="")
|
||||||
|
// - the first time handling non-grpclb addresses
|
||||||
|
// (curBalancerName="grpclb", preBalancerName="")
|
||||||
|
if newBalancerName == "" {
|
||||||
|
newBalancerName = PickFirstBalancerName
|
||||||
|
}
|
||||||
|
}
|
||||||
|
cc.switchBalancer(newBalancerName)
|
||||||
|
} else if cc.balancerWrapper == nil {
|
||||||
|
// Balancer dial option was set, and this is the first time handling
|
||||||
|
// resolved addresses. Build a balancer with dopts.balancerBuilder.
|
||||||
|
cc.balancerWrapper = newCCBalancerWrapper(cc, cc.dopts.balancerBuilder, cc.balancerBuildOpts)
|
||||||
|
}
|
||||||
|
|
||||||
cc.balancerWrapper.handleResolvedAddrs(addrs, nil)
|
cc.balancerWrapper.handleResolvedAddrs(addrs, nil)
|
||||||
}
|
}
|
||||||
|
|
||||||
// switchBalancer starts the switching from current balancer to the balancer with name.
|
// switchBalancer starts the switching from current balancer to the balancer
|
||||||
|
// with the given name.
|
||||||
|
//
|
||||||
|
// It will NOT send the current address list to the new balancer. If needed,
|
||||||
|
// caller of this function should send address list to the new balancer after
|
||||||
|
// this function returns.
|
||||||
|
//
|
||||||
|
// Caller must hold cc.mu.
|
||||||
func (cc *ClientConn) switchBalancer(name string) {
|
func (cc *ClientConn) switchBalancer(name string) {
|
||||||
if cc.conns == nil {
|
if cc.conns == nil {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if strings.ToLower(cc.curBalancerName) == strings.ToLower(name) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
grpclog.Infof("ClientConn switching balancer to %q", name)
|
grpclog.Infof("ClientConn switching balancer to %q", name)
|
||||||
|
if cc.dopts.balancerBuilder != nil {
|
||||||
if cc.customBalancer {
|
grpclog.Infoln("ignoring balancer switching: Balancer DialOption used instead")
|
||||||
grpclog.Infoln("ignoring service config balancer configuration: WithBalancer DialOption used instead")
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
if cc.curBalancerName == name {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// TODO(bar switching) change this to two steps: drain and close.
|
// TODO(bar switching) change this to two steps: drain and close.
|
||||||
// Keep track of sc in wrapper.
|
// Keep track of sc in wrapper.
|
||||||
|
if cc.balancerWrapper != nil {
|
||||||
cc.balancerWrapper.close()
|
cc.balancerWrapper.close()
|
||||||
|
}
|
||||||
|
|
||||||
builder := balancer.Get(name)
|
builder := balancer.Get(name)
|
||||||
if builder == nil {
|
if builder == nil {
|
||||||
grpclog.Infof("failed to get balancer builder for: %v (this should never happen...)", name)
|
grpclog.Infof("failed to get balancer builder for: %v, using pick_first instead", name)
|
||||||
builder = newPickfirstBuilder()
|
builder = newPickfirstBuilder()
|
||||||
}
|
}
|
||||||
|
cc.preBalancerName = cc.curBalancerName
|
||||||
cc.curBalancerName = builder.Name()
|
cc.curBalancerName = builder.Name()
|
||||||
cc.balancerWrapper = newCCBalancerWrapper(cc, builder, cc.balancerBuildOpts)
|
cc.balancerWrapper = newCCBalancerWrapper(cc, builder, cc.balancerBuildOpts)
|
||||||
cc.balancerWrapper.handleResolvedAddrs(cc.curAddresses, nil)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (cc *ClientConn) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State) {
|
func (cc *ClientConn) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State) {
|
||||||
@@ -684,6 +789,8 @@ func (cc *ClientConn) handleSubConnStateChange(sc balancer.SubConn, s connectivi
 }
 
 // newAddrConn creates an addrConn for addrs and adds it to cc.conns.
+//
+// Caller needs to make sure len(addrs) > 0.
 func (cc *ClientConn) newAddrConn(addrs []resolver.Address) (*addrConn, error) {
 	ac := &addrConn{
 		cc: cc,
@@ -774,6 +881,7 @@ func (ac *addrConn) tryUpdateAddrs(addrs []resolver.Address) bool {
 	grpclog.Infof("addrConn: tryUpdateAddrs curAddrFound: %v", curAddrFound)
 	if curAddrFound {
 		ac.addrs = addrs
+		ac.reconnectIdx = 0 // Start reconnecting from beginning in the new list.
 	}
 
 	return curAddrFound
@@ -784,7 +892,7 @@ func (ac *addrConn) tryUpdateAddrs(addrs []resolver.Address) bool {
 // the corresponding MethodConfig.
 // If there isn't an exact match for the input method, we look for the default config
 // under the service (i.e /service/). If there is a default MethodConfig for
-// the serivce, we return it.
+// the service, we return it.
 // Otherwise, we return an empty MethodConfig.
 func (cc *ClientConn) GetMethodConfig(method string) MethodConfig {
 	// TODO: Avoid the locking here.
@@ -816,16 +924,36 @@ func (cc *ClientConn) handleServiceConfig(js string) error {
|
|||||||
cc.mu.Lock()
|
cc.mu.Lock()
|
||||||
cc.scRaw = js
|
cc.scRaw = js
|
||||||
cc.sc = sc
|
cc.sc = sc
|
||||||
if sc.LB != nil {
|
if sc.LB != nil && *sc.LB != grpclbName { // "grpclb" is not a valid balancer option in service config.
|
||||||
|
if cc.curBalancerName == grpclbName {
|
||||||
|
// If current balancer is grpclb, there's at least one grpclb
|
||||||
|
// balancer address in the resolved list. Don't switch the balancer,
|
||||||
|
// but change the previous balancer name, so if a new resolved
|
||||||
|
// address list doesn't contain grpclb address, balancer will be
|
||||||
|
// switched to *sc.LB.
|
||||||
|
cc.preBalancerName = *sc.LB
|
||||||
|
} else {
|
||||||
cc.switchBalancer(*sc.LB)
|
cc.switchBalancer(*sc.LB)
|
||||||
|
cc.balancerWrapper.handleResolvedAddrs(cc.curAddresses, nil)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
cc.mu.Unlock()
|
cc.mu.Unlock()
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (cc *ClientConn) resolveNow(o resolver.ResolveNowOption) {
|
||||||
|
cc.mu.Lock()
|
||||||
|
r := cc.resolverWrapper
|
||||||
|
cc.mu.Unlock()
|
||||||
|
if r == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
go r.resolveNow(o)
|
||||||
|
}
|
||||||
|
|
||||||
// Close tears down the ClientConn and all underlying connections.
|
// Close tears down the ClientConn and all underlying connections.
|
||||||
func (cc *ClientConn) Close() error {
|
func (cc *ClientConn) Close() error {
|
||||||
cc.cancel()
|
defer cc.cancel()
|
||||||
|
|
||||||
cc.mu.Lock()
|
cc.mu.Lock()
|
||||||
if cc.conns == nil {
|
if cc.conns == nil {
|
||||||
@@ -860,13 +988,14 @@ type addrConn struct {
|
|||||||
cancel context.CancelFunc
|
cancel context.CancelFunc
|
||||||
|
|
||||||
cc *ClientConn
|
cc *ClientConn
|
||||||
curAddr resolver.Address
|
|
||||||
addrs []resolver.Address
|
addrs []resolver.Address
|
||||||
dopts dialOptions
|
dopts dialOptions
|
||||||
events trace.EventLog
|
events trace.EventLog
|
||||||
acbw balancer.SubConn
|
acbw balancer.SubConn
|
||||||
|
|
||||||
mu sync.Mutex
|
mu sync.Mutex
|
||||||
|
curAddr resolver.Address
|
||||||
|
reconnectIdx int // The index in addrs list to start reconnecting from.
|
||||||
state connectivity.State
|
state connectivity.State
|
||||||
// ready is closed and becomes nil when a new transport is up or failed
|
// ready is closed and becomes nil when a new transport is up or failed
|
||||||
// due to timeout.
|
// due to timeout.
|
||||||
@@ -875,6 +1004,14 @@ type addrConn struct {
|
|||||||
|
|
||||||
// The reason this addrConn is torn down.
|
// The reason this addrConn is torn down.
|
||||||
tearDownErr error
|
tearDownErr error
|
||||||
|
|
||||||
|
connectRetryNum int
|
||||||
|
// backoffDeadline is the time until which resetTransport needs to
|
||||||
|
// wait before increasing connectRetryNum count.
|
||||||
|
backoffDeadline time.Time
|
||||||
|
// connectDeadline is the time by which all connection
|
||||||
|
// negotiations must complete.
|
||||||
|
connectDeadline time.Time
|
||||||
}
|
}
|
||||||
|
|
||||||
// adjustParams updates parameters used to create transports upon
|
// adjustParams updates parameters used to create transports upon
|
||||||
@@ -909,6 +1046,15 @@ func (ac *addrConn) errorf(format string, a ...interface{}) {
|
|||||||
|
|
||||||
// resetTransport recreates a transport to the address for ac. The old
|
// resetTransport recreates a transport to the address for ac. The old
|
||||||
// transport will close itself on error or when the clientconn is closed.
|
// transport will close itself on error or when the clientconn is closed.
|
||||||
|
// The created transport must receive initial settings frame from the server.
|
||||||
|
// In case that doesnt happen, transportMonitor will kill the newly created
|
||||||
|
// transport after connectDeadline has expired.
|
||||||
|
// In case there was an error on the transport before the settings frame was
|
||||||
|
// received, resetTransport resumes connecting to backends after the one that
|
||||||
|
// was previously connected to. In case end of the list is reached, resetTransport
|
||||||
|
// backs off until the original deadline.
|
||||||
|
// If the DialOption WithWaitForHandshake was set, resetTrasport returns
|
||||||
|
// successfully only after server settings are received.
|
||||||
//
|
//
|
||||||
// TODO(bar) make sure all state transitions are valid.
|
// TODO(bar) make sure all state transitions are valid.
|
||||||
func (ac *addrConn) resetTransport() error {
|
func (ac *addrConn) resetTransport() error {
|
||||||
@@ -922,19 +1068,38 @@ func (ac *addrConn) resetTransport() error {
|
|||||||
ac.ready = nil
|
ac.ready = nil
|
||||||
}
|
}
|
||||||
ac.transport = nil
|
ac.transport = nil
|
||||||
ac.curAddr = resolver.Address{}
|
ridx := ac.reconnectIdx
|
||||||
ac.mu.Unlock()
|
ac.mu.Unlock()
|
||||||
ac.cc.mu.RLock()
|
ac.cc.mu.RLock()
|
||||||
ac.dopts.copts.KeepaliveParams = ac.cc.mkp
|
ac.dopts.copts.KeepaliveParams = ac.cc.mkp
|
||||||
ac.cc.mu.RUnlock()
|
ac.cc.mu.RUnlock()
|
||||||
for retries := 0; ; retries++ {
|
var backoffDeadline, connectDeadline time.Time
|
||||||
sleepTime := ac.dopts.bs.backoff(retries)
|
for connectRetryNum := 0; ; connectRetryNum++ {
|
||||||
timeout := minConnectTimeout
|
|
||||||
ac.mu.Lock()
|
ac.mu.Lock()
|
||||||
if timeout < time.Duration(int(sleepTime)/len(ac.addrs)) {
|
if ac.backoffDeadline.IsZero() {
|
||||||
timeout = time.Duration(int(sleepTime) / len(ac.addrs))
|
// This means either a successful HTTP2 connection was established
|
||||||
|
// or this is the first time this addrConn is trying to establish a
|
||||||
|
// connection.
|
||||||
|
backoffFor := ac.dopts.bs.backoff(connectRetryNum) // time.Duration.
|
||||||
|
// This will be the duration that dial gets to finish.
|
||||||
|
dialDuration := getMinConnectTimeout()
|
||||||
|
if backoffFor > dialDuration {
|
||||||
|
// Give dial more time as we keep failing to connect.
|
||||||
|
dialDuration = backoffFor
|
||||||
|
}
|
||||||
|
start := time.Now()
|
||||||
|
backoffDeadline = start.Add(backoffFor)
|
||||||
|
connectDeadline = start.Add(dialDuration)
|
||||||
|
ridx = 0 // Start connecting from the beginning.
|
||||||
|
} else {
|
||||||
|
// Continue trying to conect with the same deadlines.
|
||||||
|
connectRetryNum = ac.connectRetryNum
|
||||||
|
backoffDeadline = ac.backoffDeadline
|
||||||
|
connectDeadline = ac.connectDeadline
|
||||||
|
ac.backoffDeadline = time.Time{}
|
||||||
|
ac.connectDeadline = time.Time{}
|
||||||
|
ac.connectRetryNum = 0
|
||||||
}
|
}
|
||||||
connectTime := time.Now()
|
|
||||||
if ac.state == connectivity.Shutdown {
|
if ac.state == connectivity.Shutdown {
|
||||||
ac.mu.Unlock()
|
ac.mu.Unlock()
|
||||||
return errConnClosing
|
return errConnClosing
|
||||||
@@ -949,93 +1114,151 @@ func (ac *addrConn) resetTransport() error {
|
|||||||
copy(addrsIter, ac.addrs)
|
copy(addrsIter, ac.addrs)
|
||||||
copts := ac.dopts.copts
|
copts := ac.dopts.copts
|
||||||
ac.mu.Unlock()
|
ac.mu.Unlock()
|
||||||
for _, addr := range addrsIter {
|
connected, err := ac.createTransport(connectRetryNum, ridx, backoffDeadline, connectDeadline, addrsIter, copts)
|
||||||
ac.mu.Lock()
|
if err != nil {
|
||||||
if ac.state == connectivity.Shutdown {
|
return err
|
||||||
// ac.tearDown(...) has been invoked.
|
|
||||||
ac.mu.Unlock()
|
|
||||||
return errConnClosing
|
|
||||||
}
|
}
|
||||||
ac.mu.Unlock()
|
if connected {
|
||||||
sinfo := transport.TargetInfo{
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// createTransport creates a connection to one of the backends in addrs.
|
||||||
|
// It returns true if a connection was established.
|
||||||
|
func (ac *addrConn) createTransport(connectRetryNum, ridx int, backoffDeadline, connectDeadline time.Time, addrs []resolver.Address, copts transport.ConnectOptions) (bool, error) {
|
||||||
|
for i := ridx; i < len(addrs); i++ {
|
||||||
|
addr := addrs[i]
|
||||||
|
target := transport.TargetInfo{
|
||||||
Addr: addr.Addr,
|
Addr: addr.Addr,
|
||||||
Metadata: addr.Metadata,
|
Metadata: addr.Metadata,
|
||||||
Authority: ac.cc.authority,
|
Authority: ac.cc.authority,
|
||||||
}
|
}
|
||||||
newTransport, err := transport.NewClientTransport(ac.cc.ctx, sinfo, copts, timeout)
|
done := make(chan struct{})
|
||||||
if err != nil {
|
onPrefaceReceipt := func() {
|
||||||
if e, ok := err.(transport.ConnectionError); ok && !e.Temporary() {
|
|
||||||
ac.mu.Lock()
|
ac.mu.Lock()
|
||||||
if ac.state != connectivity.Shutdown {
|
close(done)
|
||||||
ac.state = connectivity.TransientFailure
|
if !ac.backoffDeadline.IsZero() {
|
||||||
ac.cc.handleSubConnStateChange(ac.acbw, ac.state)
|
// If we haven't already started reconnecting to
|
||||||
|
// other backends.
|
||||||
|
// Note, this can happen when writer notices an error
|
||||||
|
// and triggers resetTransport while at the same time
|
||||||
|
// reader receives the preface and invokes this closure.
|
||||||
|
ac.backoffDeadline = time.Time{}
|
||||||
|
ac.connectDeadline = time.Time{}
|
||||||
|
ac.connectRetryNum = 0
|
||||||
}
|
}
|
||||||
ac.mu.Unlock()
|
ac.mu.Unlock()
|
||||||
return err
|
|
||||||
}
|
}
|
||||||
grpclog.Warningf("grpc: addrConn.resetTransport failed to create client transport: %v; Reconnecting to %v", err, addr)
|
// Do not cancel in the success path because of
|
||||||
|
// this issue in Go1.6: https://github.com/golang/go/issues/15078.
|
||||||
|
connectCtx, cancel := context.WithDeadline(ac.ctx, connectDeadline)
|
||||||
|
newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, target, copts, onPrefaceReceipt)
|
||||||
|
if err != nil {
|
||||||
|
cancel()
|
||||||
|
ac.cc.blockingpicker.updateConnectionError(err)
|
||||||
ac.mu.Lock()
|
ac.mu.Lock()
|
||||||
if ac.state == connectivity.Shutdown {
|
if ac.state == connectivity.Shutdown {
|
||||||
// ac.tearDown(...) has been invoked.
|
// ac.tearDown(...) has been invoked.
|
||||||
ac.mu.Unlock()
|
ac.mu.Unlock()
|
||||||
return errConnClosing
|
return false, errConnClosing
|
||||||
}
|
}
|
||||||
ac.mu.Unlock()
|
ac.mu.Unlock()
|
||||||
|
grpclog.Warningf("grpc: addrConn.createTransport failed to connect to %v. Err :%v. Reconnecting...", addr, err)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
ac.mu.Lock()
|
if ac.dopts.waitForHandshake {
|
||||||
ac.printf("ready")
|
select {
|
||||||
if ac.state == connectivity.Shutdown {
|
case <-done:
|
||||||
// ac.tearDown(...) has been invoked.
|
case <-connectCtx.Done():
|
||||||
ac.mu.Unlock()
|
// Didn't receive server preface, must kill this new transport now.
|
||||||
newTransport.Close()
|
grpclog.Warningf("grpc: addrConn.createTransport failed to receive server preface before deadline.")
|
||||||
return errConnClosing
|
newTr.Close()
|
||||||
|
break
|
||||||
|
case <-ac.ctx.Done():
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
ac.mu.Lock()
|
||||||
|
if ac.state == connectivity.Shutdown {
|
||||||
|
ac.mu.Unlock()
|
||||||
|
// ac.tearDonn(...) has been invoked.
|
||||||
|
newTr.Close()
|
||||||
|
return false, errConnClosing
|
||||||
|
}
|
||||||
|
ac.printf("ready")
|
||||||
ac.state = connectivity.Ready
|
ac.state = connectivity.Ready
|
||||||
ac.cc.handleSubConnStateChange(ac.acbw, ac.state)
|
ac.cc.handleSubConnStateChange(ac.acbw, ac.state)
|
||||||
t := ac.transport
|
ac.transport = newTr
|
||||||
ac.transport = newTransport
|
|
||||||
if t != nil {
|
|
||||||
t.Close()
|
|
||||||
}
|
|
||||||
ac.curAddr = addr
|
ac.curAddr = addr
|
||||||
if ac.ready != nil {
|
if ac.ready != nil {
|
||||||
close(ac.ready)
|
close(ac.ready)
|
||||||
ac.ready = nil
|
ac.ready = nil
|
||||||
}
|
}
|
||||||
|
select {
|
||||||
|
case <-done:
|
||||||
|
// If the server has responded back with preface already,
|
||||||
|
// don't set the reconnect parameters.
|
||||||
|
default:
|
||||||
|
ac.connectRetryNum = connectRetryNum
|
||||||
|
ac.backoffDeadline = backoffDeadline
|
||||||
|
ac.connectDeadline = connectDeadline
|
||||||
|
ac.reconnectIdx = i + 1 // Start reconnecting from the next backend in the list.
|
||||||
|
}
|
||||||
ac.mu.Unlock()
|
ac.mu.Unlock()
|
||||||
return nil
|
return true, nil
|
||||||
}
|
}
|
||||||
ac.mu.Lock()
|
ac.mu.Lock()
|
||||||
ac.state = connectivity.TransientFailure
|
ac.state = connectivity.TransientFailure
|
||||||
ac.cc.handleSubConnStateChange(ac.acbw, ac.state)
|
ac.cc.handleSubConnStateChange(ac.acbw, ac.state)
|
||||||
|
ac.cc.resolveNow(resolver.ResolveNowOption{})
|
||||||
if ac.ready != nil {
|
if ac.ready != nil {
|
||||||
close(ac.ready)
|
close(ac.ready)
|
||||||
ac.ready = nil
|
ac.ready = nil
|
||||||
}
|
}
|
||||||
ac.mu.Unlock()
|
ac.mu.Unlock()
|
||||||
timer := time.NewTimer(sleepTime - time.Since(connectTime))
|
timer := time.NewTimer(backoffDeadline.Sub(time.Now()))
|
||||||
select {
|
select {
|
||||||
case <-timer.C:
|
case <-timer.C:
|
||||||
case <-ac.ctx.Done():
|
case <-ac.ctx.Done():
|
||||||
timer.Stop()
|
timer.Stop()
|
||||||
return ac.ctx.Err()
|
return false, ac.ctx.Err()
|
||||||
}
|
|
||||||
timer.Stop()
|
|
||||||
}
|
}
|
||||||
|
return false, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Run in a goroutine to track the error in transport and create the
|
// Run in a goroutine to track the error in transport and create the
|
||||||
// new transport if an error happens. It returns when the channel is closing.
|
// new transport if an error happens. It returns when the channel is closing.
|
||||||
func (ac *addrConn) transportMonitor() {
|
func (ac *addrConn) transportMonitor() {
|
||||||
for {
|
for {
|
||||||
|
var timer *time.Timer
|
||||||
|
var cdeadline <-chan time.Time
|
||||||
ac.mu.Lock()
|
ac.mu.Lock()
|
||||||
t := ac.transport
|
t := ac.transport
|
||||||
|
if !ac.connectDeadline.IsZero() {
|
||||||
|
timer = time.NewTimer(ac.connectDeadline.Sub(time.Now()))
|
||||||
|
cdeadline = timer.C
|
||||||
|
}
|
||||||
ac.mu.Unlock()
|
ac.mu.Unlock()
|
||||||
// Block until we receive a goaway or an error occurs.
|
// Block until we receive a goaway or an error occurs.
|
||||||
select {
|
select {
|
||||||
case <-t.GoAway():
|
case <-t.GoAway():
|
||||||
case <-t.Error():
|
case <-t.Error():
|
||||||
|
case <-cdeadline:
|
||||||
|
ac.mu.Lock()
|
||||||
|
// This implies that client received server preface.
|
||||||
|
if ac.backoffDeadline.IsZero() {
|
||||||
|
ac.mu.Unlock()
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
ac.mu.Unlock()
|
||||||
|
timer = nil
|
||||||
|
// No server preface received until deadline.
|
||||||
|
// Kill the connection.
|
||||||
|
grpclog.Warningf("grpc: addrConn.transportMonitor didn't get server preface after waiting. Closing the new transport now.")
|
||||||
|
t.Close()
|
||||||
|
}
|
||||||
|
if timer != nil {
|
||||||
|
timer.Stop()
|
||||||
}
|
}
|
||||||
// If a GoAway happened, regardless of error, adjust our keepalive
|
// If a GoAway happened, regardless of error, adjust our keepalive
|
||||||
// parameters as appropriate.
|
// parameters as appropriate.
|
||||||
@@ -1053,6 +1276,7 @@ func (ac *addrConn) transportMonitor() {
 		// resetTransport. Transition READY->CONNECTING is not valid.
 		ac.state = connectivity.TransientFailure
 		ac.cc.handleSubConnStateChange(ac.acbw, ac.state)
+		ac.cc.resolveNow(resolver.ResolveNowOption{})
 		ac.curAddr = resolver.Address{}
 		ac.mu.Unlock()
 		if err := ac.resetTransport(); err != nil {
@@ -1140,6 +1364,9 @@ func (ac *addrConn) tearDown(err error) {
 	ac.cancel()
 	ac.mu.Lock()
 	defer ac.mu.Unlock()
+	if ac.state == connectivity.Shutdown {
+		return
+	}
 	ac.curAddr = resolver.Address{}
 	if err == errConnDrain && ac.transport != nil {
 		// GracefulClose(...) may be executed multiple times when
@@ -1148,9 +1375,6 @@ func (ac *addrConn) tearDown(err error) {
 		// address removal and GoAway.
 		ac.transport.GracefulClose()
 	}
-	if ac.state == connectivity.Shutdown {
-		return
-	}
 	ac.state = connectivity.Shutdown
 	ac.tearDownErr = err
 	ac.cc.handleSubConnStateChange(ac.acbw, ac.state)
@@ -1170,3 +1394,10 @@ func (ac *addrConn) getState() connectivity.State {
 	defer ac.mu.Unlock()
 	return ac.state
 }
+
+// ErrClientConnTimeout indicates that the ClientConn cannot establish the
+// underlying connections within the specified timeout.
+//
+// Deprecated: This error is never returned by grpc and should not be
+// referenced by users.
+var ErrClientConnTimeout = errors.New("grpc: timed out when dialing")
86 vendor/google.golang.org/grpc/codec.go generated vendored
@@ -19,84 +19,32 @@
|
|||||||
package grpc
|
package grpc
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"math"
|
"google.golang.org/grpc/encoding"
|
||||||
"sync"
|
_ "google.golang.org/grpc/encoding/proto" // to register the Codec for "proto"
|
||||||
|
|
||||||
"github.com/golang/protobuf/proto"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// baseCodec contains the functionality of both Codec and encoding.Codec, but
|
||||||
|
// omits the name/string, which vary between the two and are not needed for
|
||||||
|
// anything besides the registry in the encoding package.
|
||||||
|
type baseCodec interface {
|
||||||
|
Marshal(v interface{}) ([]byte, error)
|
||||||
|
Unmarshal(data []byte, v interface{}) error
|
||||||
|
}
|
||||||
|
|
||||||
|
var _ baseCodec = Codec(nil)
|
||||||
|
var _ baseCodec = encoding.Codec(nil)
|
||||||
|
|
||||||
// Codec defines the interface gRPC uses to encode and decode messages.
|
// Codec defines the interface gRPC uses to encode and decode messages.
|
||||||
// Note that implementations of this interface must be thread safe;
|
// Note that implementations of this interface must be thread safe;
|
||||||
// a Codec's methods can be called from concurrent goroutines.
|
// a Codec's methods can be called from concurrent goroutines.
|
||||||
|
//
|
||||||
|
// Deprecated: use encoding.Codec instead.
|
||||||
type Codec interface {
|
type Codec interface {
|
||||||
// Marshal returns the wire format of v.
|
// Marshal returns the wire format of v.
|
||||||
Marshal(v interface{}) ([]byte, error)
|
Marshal(v interface{}) ([]byte, error)
|
||||||
// Unmarshal parses the wire format into v.
|
// Unmarshal parses the wire format into v.
|
||||||
Unmarshal(data []byte, v interface{}) error
|
Unmarshal(data []byte, v interface{}) error
|
||||||
// String returns the name of the Codec implementation. The returned
|
// String returns the name of the Codec implementation. This is unused by
|
||||||
// string will be used as part of content type in transmission.
|
// gRPC.
|
||||||
String() string
|
String() string
|
||||||
}
|
}
|
||||||
|
|
||||||
// protoCodec is a Codec implementation with protobuf. It is the default codec for gRPC.
|
|
||||||
type protoCodec struct {
|
|
||||||
}
|
|
||||||
|
|
||||||
type cachedProtoBuffer struct {
|
|
||||||
lastMarshaledSize uint32
|
|
||||||
proto.Buffer
|
|
||||||
}
|
|
||||||
|
|
||||||
func capToMaxInt32(val int) uint32 {
|
|
||||||
if val > math.MaxInt32 {
|
|
||||||
return uint32(math.MaxInt32)
|
|
||||||
}
|
|
||||||
return uint32(val)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p protoCodec) marshal(v interface{}, cb *cachedProtoBuffer) ([]byte, error) {
|
|
||||||
protoMsg := v.(proto.Message)
|
|
||||||
newSlice := make([]byte, 0, cb.lastMarshaledSize)
|
|
||||||
|
|
||||||
cb.SetBuf(newSlice)
|
|
||||||
cb.Reset()
|
|
||||||
if err := cb.Marshal(protoMsg); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
out := cb.Bytes()
|
|
||||||
cb.lastMarshaledSize = capToMaxInt32(len(out))
|
|
||||||
return out, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p protoCodec) Marshal(v interface{}) ([]byte, error) {
|
|
||||||
cb := protoBufferPool.Get().(*cachedProtoBuffer)
|
|
||||||
out, err := p.marshal(v, cb)
|
|
||||||
|
|
||||||
// put back buffer and lose the ref to the slice
|
|
||||||
cb.SetBuf(nil)
|
|
||||||
protoBufferPool.Put(cb)
|
|
||||||
return out, err
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p protoCodec) Unmarshal(data []byte, v interface{}) error {
|
|
||||||
cb := protoBufferPool.Get().(*cachedProtoBuffer)
|
|
||||||
cb.SetBuf(data)
|
|
||||||
v.(proto.Message).Reset()
|
|
||||||
err := cb.Unmarshal(v.(proto.Message))
|
|
||||||
cb.SetBuf(nil)
|
|
||||||
protoBufferPool.Put(cb)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
func (protoCodec) String() string {
|
|
||||||
return "proto"
|
|
||||||
}
|
|
||||||
|
|
||||||
var protoBufferPool = &sync.Pool{
|
|
||||||
New: func() interface{} {
|
|
||||||
return &cachedProtoBuffer{
|
|
||||||
Buffer: proto.Buffer{},
|
|
||||||
lastMarshaledSize: 16,
|
|
||||||
}
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|||||||
64 vendor/google.golang.org/grpc/codes/code_string.go generated vendored
@@ -1,16 +1,62 @@
|
|||||||
// Code generated by "stringer -type=Code"; DO NOT EDIT.
|
/*
|
||||||
|
*
|
||||||
|
* Copyright 2017 gRPC authors.
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
|
||||||
package codes
|
package codes
|
||||||
|
|
||||||
import "strconv"
|
import "strconv"
|
||||||
|
|
||||||
const _Code_name = "OKCanceledUnknownInvalidArgumentDeadlineExceededNotFoundAlreadyExistsPermissionDeniedResourceExhaustedFailedPreconditionAbortedOutOfRangeUnimplementedInternalUnavailableDataLossUnauthenticated"
|
func (c Code) String() string {
|
||||||
|
switch c {
|
||||||
var _Code_index = [...]uint8{0, 2, 10, 17, 32, 48, 56, 69, 85, 102, 120, 127, 137, 150, 158, 169, 177, 192}
|
case OK:
|
||||||
|
return "OK"
|
||||||
func (i Code) String() string {
|
case Canceled:
|
||||||
if i >= Code(len(_Code_index)-1) {
|
return "Canceled"
|
||||||
return "Code(" + strconv.FormatInt(int64(i), 10) + ")"
|
case Unknown:
|
||||||
|
return "Unknown"
|
||||||
|
case InvalidArgument:
|
||||||
|
return "InvalidArgument"
|
||||||
|
case DeadlineExceeded:
|
||||||
|
return "DeadlineExceeded"
|
||||||
|
case NotFound:
|
||||||
|
return "NotFound"
|
||||||
|
case AlreadyExists:
|
||||||
|
return "AlreadyExists"
|
||||||
|
case PermissionDenied:
|
||||||
|
return "PermissionDenied"
|
||||||
|
case ResourceExhausted:
|
||||||
|
return "ResourceExhausted"
|
||||||
|
case FailedPrecondition:
|
||||||
|
return "FailedPrecondition"
|
||||||
|
case Aborted:
|
||||||
|
return "Aborted"
|
||||||
|
case OutOfRange:
|
||||||
|
return "OutOfRange"
|
||||||
|
case Unimplemented:
|
||||||
|
return "Unimplemented"
|
||||||
|
case Internal:
|
||||||
|
return "Internal"
|
||||||
|
case Unavailable:
|
||||||
|
return "Unavailable"
|
||||||
|
case DataLoss:
|
||||||
|
return "DataLoss"
|
||||||
|
case Unauthenticated:
|
||||||
|
return "Unauthenticated"
|
||||||
|
default:
|
||||||
|
return "Code(" + strconv.FormatInt(int64(c), 10) + ")"
|
||||||
}
|
}
|
||||||
return _Code_name[_Code_index[i]:_Code_index[i+1]]
|
|
||||||
}
|
}
|
||||||
|
|||||||
52 vendor/google.golang.org/grpc/codes/codes.go generated vendored
@@ -20,11 +20,13 @@
 // consistent across various languages.
 package codes // import "google.golang.org/grpc/codes"
 
+import (
+	"fmt"
+)
+
 // A Code is an unsigned 32-bit error code as defined in the gRPC spec.
 type Code uint32
 
-//go:generate stringer -type=Code
-
 const (
 	// OK is returned on success.
 	OK Code = 0
@@ -68,10 +70,6 @@ const (
 	// instead for those errors).
 	PermissionDenied Code = 7
 
-	// Unauthenticated indicates the request does not have valid
-	// authentication credentials for the operation.
-	Unauthenticated Code = 16
-
 	// ResourceExhausted indicates some resource has been exhausted, perhaps
 	// a per-user quota, or perhaps the entire file system is out of space.
 	ResourceExhausted Code = 8
@@ -141,4 +139,46 @@ const (
|
|||||||
|
|
||||||
// DataLoss indicates unrecoverable data loss or corruption.
|
// DataLoss indicates unrecoverable data loss or corruption.
|
||||||
DataLoss Code = 15
|
DataLoss Code = 15
|
||||||
|
|
||||||
|
// Unauthenticated indicates the request does not have valid
|
||||||
|
// authentication credentials for the operation.
|
||||||
|
Unauthenticated Code = 16
|
||||||
)
|
)
|
||||||
|
|
||||||
|
var strToCode = map[string]Code{
|
||||||
|
`"OK"`: OK,
|
||||||
|
`"CANCELLED"`:/* [sic] */ Canceled,
|
||||||
|
`"UNKNOWN"`: Unknown,
|
||||||
|
`"INVALID_ARGUMENT"`: InvalidArgument,
|
||||||
|
`"DEADLINE_EXCEEDED"`: DeadlineExceeded,
|
||||||
|
`"NOT_FOUND"`: NotFound,
|
||||||
|
`"ALREADY_EXISTS"`: AlreadyExists,
|
||||||
|
`"PERMISSION_DENIED"`: PermissionDenied,
|
||||||
|
`"RESOURCE_EXHAUSTED"`: ResourceExhausted,
|
||||||
|
`"FAILED_PRECONDITION"`: FailedPrecondition,
|
||||||
|
`"ABORTED"`: Aborted,
|
||||||
|
`"OUT_OF_RANGE"`: OutOfRange,
|
||||||
|
`"UNIMPLEMENTED"`: Unimplemented,
|
||||||
|
`"INTERNAL"`: Internal,
|
||||||
|
`"UNAVAILABLE"`: Unavailable,
|
||||||
|
`"DATA_LOSS"`: DataLoss,
|
||||||
|
`"UNAUTHENTICATED"`: Unauthenticated,
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnmarshalJSON unmarshals b into the Code.
|
||||||
|
func (c *Code) UnmarshalJSON(b []byte) error {
|
||||||
|
// From json.Unmarshaler: By convention, to approximate the behavior of
|
||||||
|
// Unmarshal itself, Unmarshalers implement UnmarshalJSON([]byte("null")) as
|
||||||
|
// a no-op.
|
||||||
|
if string(b) == "null" {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
if c == nil {
|
||||||
|
return fmt.Errorf("nil receiver passed to UnmarshalJSON")
|
||||||
|
}
|
||||||
|
if jc, ok := strToCode[string(b)]; ok {
|
||||||
|
*c = jc
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return fmt.Errorf("invalid code: %q", string(b))
|
||||||
|
}
|
||||||
|
|||||||
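The UnmarshalJSON method added above is what lets JSON configuration name status codes symbolically. A hedged sketch of decoding such a field; the struct layout and input are invented for illustration and are not the real grpc service config schema:

package main

import (
	"encoding/json"
	"fmt"
	"log"

	"google.golang.org/grpc/codes"
)

func main() {
	var policy struct {
		RetryableStatusCodes []codes.Code `json:"retryableStatusCodes"`
	}
	in := `{"retryableStatusCodes": ["UNAVAILABLE", "RESOURCE_EXHAUSTED"]}`
	// Each quoted name is matched against the strToCode table shown above.
	if err := json.Unmarshal([]byte(in), &policy); err != nil {
		log.Fatalf("unmarshal failed: %v", err)
	}
	fmt.Println(policy.RetryableStatusCodes) // [Unavailable ResourceExhausted]
}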
5 vendor/google.golang.org/grpc/credentials/credentials.go generated vendored
@@ -43,8 +43,9 @@ type PerRPCCredentials interface {
 	// GetRequestMetadata gets the current request metadata, refreshing
 	// tokens if required. This should be called by the transport layer on
 	// each request, and the data should be populated in headers or other
-	// context. uri is the URI of the entry point for the request. When
-	// supported by the underlying implementation, ctx can be used for
+	// context. If a status code is returned, it will be used as the status
+	// for the RPC. uri is the URI of the entry point for the request.
+	// When supported by the underlying implementation, ctx can be used for
 	// timeout and cancellation.
 	// TODO(zhaoq): Define the set of the qualified keys instead of leaving
 	// it as an arbitrary string.
123 vendor/google.golang.org/grpc/encoding/encoding.go generated vendored
@@ -16,46 +16,103 @@
|
|||||||
*
|
*
|
||||||
*/
|
*/
|
||||||
|
|
||||||
// Package encoding defines the interface for the compressor and the functions
|
// Package encoding defines the interface for the compressor and codec, and
|
||||||
// to register and get the compossor.
|
// functions to register and retrieve compressors and codecs.
|
||||||
|
//
|
||||||
// This package is EXPERIMENTAL.
|
// This package is EXPERIMENTAL.
|
||||||
package encoding
|
package encoding
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"io"
|
"io"
|
||||||
|
"strings"
|
||||||
)
|
)
|
||||||
|
|
||||||
var registerCompressor = make(map[string]Compressor)
|
|
||||||
|
|
||||||
// Compressor is used for compressing and decompressing when sending or receiving messages.
|
|
||||||
type Compressor interface {
|
|
||||||
// Compress writes the data written to wc to w after compressing it. If an error
|
|
||||||
// occurs while initializing the compressor, that error is returned instead.
|
|
||||||
Compress(w io.Writer) (io.WriteCloser, error)
|
|
||||||
// Decompress reads data from r, decompresses it, and provides the uncompressed data
|
|
||||||
// via the returned io.Reader. If an error occurs while initializing the decompressor, that error
|
|
||||||
// is returned instead.
|
|
||||||
Decompress(r io.Reader) (io.Reader, error)
|
|
||||||
// Name is the name of the compression codec and is used to set the content coding header.
|
|
||||||
Name() string
|
|
||||||
}
|
|
||||||
|
|
||||||
// RegisterCompressor registers the compressor with gRPC by its name. It can be activated when
|
|
||||||
// sending an RPC via grpc.UseCompressor(). It will be automatically accessed when receiving a
|
|
||||||
// message based on the content coding header. Servers also use it to send a response with the
|
|
||||||
// same encoding as the request.
|
|
||||||
//
|
|
||||||
// NOTE: this function must only be called during initialization time (i.e. in an init() function). If
|
|
||||||
// multiple Compressors are registered with the same name, the one registered last will take effect.
|
|
||||||
func RegisterCompressor(c Compressor) {
|
|
||||||
registerCompressor[c.Name()] = c
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetCompressor returns Compressor for the given compressor name.
|
|
||||||
func GetCompressor(name string) Compressor {
|
|
||||||
return registerCompressor[name]
|
|
||||||
}
|
|
||||||
|
|
||||||
// Identity specifies the optional encoding for uncompressed streams.
|
// Identity specifies the optional encoding for uncompressed streams.
|
||||||
// It is intended for grpc internal use only.
|
// It is intended for grpc internal use only.
|
||||||
const Identity = "identity"
|
const Identity = "identity"
|
||||||
|
|
||||||
|
// Compressor is used for compressing and decompressing when sending or
|
||||||
|
// receiving messages.
|
||||||
|
type Compressor interface {
|
||||||
|
// Compress writes the data written to wc to w after compressing it. If an
|
||||||
|
// error occurs while initializing the compressor, that error is returned
|
||||||
|
// instead.
|
||||||
|
Compress(w io.Writer) (io.WriteCloser, error)
|
||||||
|
// Decompress reads data from r, decompresses it, and provides the
|
||||||
|
// uncompressed data via the returned io.Reader. If an error occurs while
|
||||||
|
// initializing the decompressor, that error is returned instead.
|
||||||
|
Decompress(r io.Reader) (io.Reader, error)
|
||||||
|
// Name is the name of the compression codec and is used to set the content
|
||||||
|
// coding header. The result must be static; the result cannot change
|
||||||
|
// between calls.
|
||||||
|
Name() string
|
||||||
|
}
|
||||||
|
|
||||||
|
var registeredCompressor = make(map[string]Compressor)
|
||||||
|
|
||||||
|
// RegisterCompressor registers the compressor with gRPC by its name. It can
|
||||||
|
// be activated when sending an RPC via grpc.UseCompressor(). It will be
|
||||||
|
// automatically accessed when receiving a message based on the content coding
|
||||||
|
// header. Servers also use it to send a response with the same encoding as
|
||||||
|
// the request.
|
||||||
|
//
|
||||||
|
// NOTE: this function must only be called during initialization time (i.e. in
|
||||||
|
// an init() function), and is not thread-safe. If multiple Compressors are
|
||||||
|
// registered with the same name, the one registered last will take effect.
|
||||||
|
func RegisterCompressor(c Compressor) {
|
||||||
|
registeredCompressor[c.Name()] = c
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetCompressor returns Compressor for the given compressor name.
|
||||||
|
func GetCompressor(name string) Compressor {
|
||||||
|
return registeredCompressor[name]
|
||||||
|
}
|
||||||
|
|
||||||
|
// Codec defines the interface gRPC uses to encode and decode messages. Note
|
||||||
|
// that implementations of this interface must be thread safe; a Codec's
|
||||||
|
// methods can be called from concurrent goroutines.
|
||||||
|
type Codec interface {
|
||||||
|
// Marshal returns the wire format of v.
|
||||||
|
Marshal(v interface{}) ([]byte, error)
|
||||||
|
// Unmarshal parses the wire format into v.
|
||||||
|
Unmarshal(data []byte, v interface{}) error
|
||||||
|
// Name returns the name of the Codec implementation. The returned string
|
||||||
|
// will be used as part of content type in transmission. The result must be
|
||||||
|
// static; the result cannot change between calls.
|
||||||
|
Name() string
|
||||||
|
}
|
||||||
|
|
||||||
|
var registeredCodecs = make(map[string]Codec, 0)
|
||||||
|
|
||||||
|
// RegisterCodec registers the provided Codec for use with all gRPC clients and
|
||||||
|
// servers.
|
||||||
|
//
|
||||||
|
// The Codec will be stored and looked up by result of its Name() method, which
|
||||||
|
// should match the content-subtype of the encoding handled by the Codec. This
|
||||||
|
// is case-insensitive, and is stored and looked up as lowercase. If the
|
||||||
|
// result of calling Name() is an empty string, RegisterCodec will panic. See
|
||||||
|
// Content-Type on
|
||||||
|
// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
|
||||||
|
// more details.
|
||||||
|
//
|
||||||
|
// NOTE: this function must only be called during initialization time (i.e. in
|
||||||
|
// an init() function), and is not thread-safe. If multiple Compressors are
|
||||||
|
// registered with the same name, the one registered last will take effect.
|
||||||
|
func RegisterCodec(codec Codec) {
|
||||||
|
if codec == nil {
|
||||||
|
panic("cannot register a nil Codec")
|
||||||
|
}
|
||||||
|
contentSubtype := strings.ToLower(codec.Name())
|
||||||
|
if contentSubtype == "" {
|
||||||
|
panic("cannot register Codec with empty string result for String()")
|
||||||
|
}
|
||||||
|
registeredCodecs[contentSubtype] = codec
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetCodec gets a registered Codec by content-subtype, or nil if no Codec is
|
||||||
|
// registered for the content-subtype.
|
||||||
|
//
|
||||||
|
// The content-subtype is expected to be lowercase.
|
||||||
|
func GetCodec(contentSubtype string) Codec {
|
||||||
|
return registeredCodecs[contentSubtype]
|
||||||
|
}
|
||||||
|
|||||||
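RegisterCodec and GetCodec above form the new codec registry. A minimal hedged sketch of plugging in a custom codec; the package, the "json" name, and the JSON codec itself are invented for this example and are not part of the diff:

package jsoncodec

import (
	"encoding/json"

	"google.golang.org/grpc/encoding"
)

// Name is the content-subtype this codec is registered under; "json" is an
// arbitrary choice for this sketch.
const Name = "json"

func init() {
	// Must run at init time; the registry is not thread-safe afterwards.
	encoding.RegisterCodec(codec{})
}

type codec struct{}

func (codec) Marshal(v interface{}) ([]byte, error)      { return json.Marshal(v) }
func (codec) Unmarshal(data []byte, v interface{}) error { return json.Unmarshal(data, v) }
func (codec) Name() string                               { return Name }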
110 vendor/google.golang.org/grpc/encoding/proto/proto.go generated vendored Normal file
@@ -0,0 +1,110 @@
|
|||||||
|
/*
|
||||||
|
*
|
||||||
|
* Copyright 2018 gRPC authors.
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
|
||||||
|
// Package proto defines the protobuf codec. Importing this package will
|
||||||
|
// register the codec.
|
||||||
|
package proto
|
||||||
|
|
||||||
|
import (
|
||||||
|
"math"
|
||||||
|
"sync"
|
||||||
|
|
||||||
|
"github.com/golang/protobuf/proto"
|
||||||
|
"google.golang.org/grpc/encoding"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Name is the name registered for the proto compressor.
|
||||||
|
const Name = "proto"
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
encoding.RegisterCodec(codec{})
|
||||||
|
}
|
||||||
|
|
||||||
|
// codec is a Codec implementation with protobuf. It is the default codec for gRPC.
|
||||||
|
type codec struct{}
|
||||||
|
|
||||||
|
type cachedProtoBuffer struct {
|
||||||
|
lastMarshaledSize uint32
|
||||||
|
proto.Buffer
|
||||||
|
}
|
||||||
|
|
||||||
|
func capToMaxInt32(val int) uint32 {
|
||||||
|
if val > math.MaxInt32 {
|
||||||
|
return uint32(math.MaxInt32)
|
||||||
|
}
|
||||||
|
return uint32(val)
|
||||||
|
}
|
||||||
|
|
||||||
|
func marshal(v interface{}, cb *cachedProtoBuffer) ([]byte, error) {
|
||||||
|
protoMsg := v.(proto.Message)
|
||||||
|
newSlice := make([]byte, 0, cb.lastMarshaledSize)
|
||||||
|
|
||||||
|
cb.SetBuf(newSlice)
|
||||||
|
cb.Reset()
|
||||||
|
if err := cb.Marshal(protoMsg); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
out := cb.Bytes()
|
||||||
|
cb.lastMarshaledSize = capToMaxInt32(len(out))
|
||||||
|
return out, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (codec) Marshal(v interface{}) ([]byte, error) {
|
||||||
|
if pm, ok := v.(proto.Marshaler); ok {
|
||||||
|
// object can marshal itself, no need for buffer
|
||||||
|
return pm.Marshal()
|
||||||
|
}
|
||||||
|
|
||||||
|
cb := protoBufferPool.Get().(*cachedProtoBuffer)
|
||||||
|
out, err := marshal(v, cb)
|
||||||
|
|
||||||
|
// put back buffer and lose the ref to the slice
|
||||||
|
cb.SetBuf(nil)
|
||||||
|
protoBufferPool.Put(cb)
|
||||||
|
return out, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (codec) Unmarshal(data []byte, v interface{}) error {
|
||||||
|
protoMsg := v.(proto.Message)
|
||||||
|
protoMsg.Reset()
|
||||||
|
|
||||||
|
if pu, ok := protoMsg.(proto.Unmarshaler); ok {
|
||||||
|
// object can unmarshal itself, no need for buffer
|
||||||
|
return pu.Unmarshal(data)
|
||||||
|
}
|
||||||
|
|
||||||
|
cb := protoBufferPool.Get().(*cachedProtoBuffer)
|
||||||
|
cb.SetBuf(data)
|
||||||
|
err := cb.Unmarshal(protoMsg)
|
||||||
|
cb.SetBuf(nil)
|
||||||
|
protoBufferPool.Put(cb)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (codec) Name() string {
|
||||||
|
return Name
|
||||||
|
}
|
||||||
|
|
||||||
|
var protoBufferPool = &sync.Pool{
|
||||||
|
New: func() interface{} {
|
||||||
|
return &cachedProtoBuffer{
|
||||||
|
Buffer: proto.Buffer{},
|
||||||
|
lastMarshaledSize: 16,
|
||||||
|
}
|
||||||
|
},
|
||||||
|
}
|
||||||
34
vendor/google.golang.org/grpc/go16.go
generated
vendored
@@ -25,7 +25,6 @@ import (
 	"io"
 	"net"
 	"net/http"
-	"os"
 
 	"golang.org/x/net/context"
 	"google.golang.org/grpc/codes"
@@ -48,6 +47,9 @@ func sendHTTPRequest(ctx context.Context, req *http.Request, conn net.Conn) erro
 
 // toRPCErr converts an error into an error from the status package.
 func toRPCErr(err error) error {
+	if err == nil || err == io.EOF {
+		return err
+	}
 	if _, ok := status.FromError(err); ok {
 		return err
 	}
@@ -62,37 +64,7 @@ func toRPCErr(err error) error {
 			return status.Error(codes.DeadlineExceeded, err.Error())
 		case context.Canceled:
 			return status.Error(codes.Canceled, err.Error())
-		case ErrClientConnClosing:
-			return status.Error(codes.FailedPrecondition, err.Error())
 		}
 	}
 	return status.Error(codes.Unknown, err.Error())
 }
 
-// convertCode converts a standard Go error into its canonical code. Note that
-// this is only used to translate the error returned by the server applications.
-func convertCode(err error) codes.Code {
-	switch err {
-	case nil:
-		return codes.OK
-	case io.EOF:
-		return codes.OutOfRange
-	case io.ErrClosedPipe, io.ErrNoProgress, io.ErrShortBuffer, io.ErrShortWrite, io.ErrUnexpectedEOF:
-		return codes.FailedPrecondition
-	case os.ErrInvalid:
-		return codes.InvalidArgument
-	case context.Canceled:
-		return codes.Canceled
-	case context.DeadlineExceeded:
-		return codes.DeadlineExceeded
-	}
-	switch {
-	case os.IsExist(err):
-		return codes.AlreadyExists
-	case os.IsNotExist(err):
-		return codes.NotFound
-	case os.IsPermission(err):
-		return codes.PermissionDenied
-	}
-	return codes.Unknown
-}
34
vendor/google.golang.org/grpc/go17.go
generated
vendored
@@ -26,7 +26,6 @@ import (
 	"io"
 	"net"
 	"net/http"
-	"os"
 
 	netctx "golang.org/x/net/context"
 	"google.golang.org/grpc/codes"
@@ -49,6 +48,9 @@ func sendHTTPRequest(ctx context.Context, req *http.Request, conn net.Conn) erro
 
 // toRPCErr converts an error into an error from the status package.
 func toRPCErr(err error) error {
+	if err == nil || err == io.EOF {
+		return err
+	}
 	if _, ok := status.FromError(err); ok {
 		return err
 	}
@@ -63,37 +65,7 @@ func toRPCErr(err error) error {
 			return status.Error(codes.DeadlineExceeded, err.Error())
 		case context.Canceled, netctx.Canceled:
 			return status.Error(codes.Canceled, err.Error())
-		case ErrClientConnClosing:
-			return status.Error(codes.FailedPrecondition, err.Error())
 		}
 	}
 	return status.Error(codes.Unknown, err.Error())
 }
 
-// convertCode converts a standard Go error into its canonical code. Note that
-// this is only used to translate the error returned by the server applications.
-func convertCode(err error) codes.Code {
-	switch err {
-	case nil:
-		return codes.OK
-	case io.EOF:
-		return codes.OutOfRange
-	case io.ErrClosedPipe, io.ErrNoProgress, io.ErrShortBuffer, io.ErrShortWrite, io.ErrUnexpectedEOF:
-		return codes.FailedPrecondition
-	case os.ErrInvalid:
-		return codes.InvalidArgument
-	case context.Canceled, netctx.Canceled:
-		return codes.Canceled
-	case context.DeadlineExceeded, netctx.DeadlineExceeded:
-		return codes.DeadlineExceeded
-	}
-	switch {
-	case os.IsExist(err):
-		return codes.AlreadyExists
-	case os.IsNotExist(err):
-		return codes.NotFound
-	case os.IsPermission(err):
-		return codes.PermissionDenied
-	}
-	return codes.Unknown
-}
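For context, the error-to-status translation that these hunks touch can be exercised with a small example. This is an illustrative sketch of the public codes/status API only (the error message text is an assumption, not part of the diff):

package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func main() {
	// Wrap a plain error as a gRPC status, the way toRPCErr does for
	// context.Canceled and friends.
	err := status.Error(codes.Canceled, "operation was canceled")

	// status.FromError recovers the code, so callers can branch on it.
	if st, ok := status.FromError(err); ok {
		fmt.Println(st.Code(), st.Message())
	}
}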
874
vendor/google.golang.org/grpc/grpclb.go
generated
vendored
@@ -19,21 +19,32 @@
 package grpc
 
 import (
-	"errors"
-	"fmt"
-	"math/rand"
-	"net"
+	"strconv"
+	"strings"
 	"sync"
 	"time"
 
 	"golang.org/x/net/context"
-	"google.golang.org/grpc/codes"
-	lbmpb "google.golang.org/grpc/grpclb/grpc_lb_v1/messages"
+	"google.golang.org/grpc/balancer"
+	"google.golang.org/grpc/connectivity"
+	lbpb "google.golang.org/grpc/grpclb/grpc_lb_v1/messages"
 	"google.golang.org/grpc/grpclog"
-	"google.golang.org/grpc/metadata"
-	"google.golang.org/grpc/naming"
+	"google.golang.org/grpc/resolver"
 )
 
+const (
+	lbTokeyKey             = "lb-token"
+	defaultFallbackTimeout = 10 * time.Second
+	grpclbName             = "grpclb"
+)
+
+func convertDuration(d *lbpb.Duration) time.Duration {
+	if d == nil {
+		return 0
+	}
+	return time.Duration(d.Seconds)*time.Second + time.Duration(d.Nanos)*time.Nanosecond
+}
+
 // Client API for LoadBalancer service.
 // Mostly copied from generated pb.go file.
 // To avoid circular dependency.
@@ -59,646 +70,273 @@ type balanceLoadClientStream struct {
 	ClientStream
 }
 
-func (x *balanceLoadClientStream) Send(m *lbmpb.LoadBalanceRequest) error {
+func (x *balanceLoadClientStream) Send(m *lbpb.LoadBalanceRequest) error {
 	return x.ClientStream.SendMsg(m)
 }
 
-func (x *balanceLoadClientStream) Recv() (*lbmpb.LoadBalanceResponse, error) {
-	m := new(lbmpb.LoadBalanceResponse)
+func (x *balanceLoadClientStream) Recv() (*lbpb.LoadBalanceResponse, error) {
+	m := new(lbpb.LoadBalanceResponse)
 	if err := x.ClientStream.RecvMsg(m); err != nil {
 		return nil, err
 	}
 	return m, nil
 }
 
-// NewGRPCLBBalancer creates a grpclb load balancer.
-func NewGRPCLBBalancer(r naming.Resolver) Balancer {
-type remoteBalancerInfo struct {
-// grpclbAddrInfo consists of the information of a backend server.
-type grpclbAddrInfo struct {
-type grpclbBalancer struct {
-func (b *grpclbBalancer) watchAddrUpdates(w naming.Watcher, ch chan []remoteBalancerInfo) error {
-func convertDuration(d *lbmpb.Duration) time.Duration {
-func (b *grpclbBalancer) processServerList(l *lbmpb.ServerList, seq int) {
-func (b *grpclbBalancer) sendLoadReport(s *balanceLoadClientStream, interval time.Duration, done <-chan struct{}) {
-func (b *grpclbBalancer) callRemoteBalancer(lbc *loadBalancerClient, seq int) (retry bool) {
-func (b *grpclbBalancer) Start(target string, config BalancerConfig) error {
-func (b *grpclbBalancer) down(addr Address, err error) {
-func (b *grpclbBalancer) Up(addr Address) func(error) {
-func (b *grpclbBalancer) Get(ctx context.Context, opts BalancerGetOptions) (addr Address, put func(), err error) {
-func (b *grpclbBalancer) Notify() <-chan []Address {
-func (b *grpclbBalancer) Close() error {
+func init() {
+	balancer.Register(newLBBuilder())
+}
+
+// newLBBuilder creates a builder for grpclb.
+func newLBBuilder() balancer.Builder {
+	return NewLBBuilderWithFallbackTimeout(defaultFallbackTimeout)
+}
+
+// NewLBBuilderWithFallbackTimeout creates a grpclb builder with the given
+// fallbackTimeout. If no response is received from the remote balancer within
+// fallbackTimeout, the backend addresses from the resolved address list will be
+// used.
+//
+// Only call this function when a non-default fallback timeout is needed.
+func NewLBBuilderWithFallbackTimeout(fallbackTimeout time.Duration) balancer.Builder {
+	return &lbBuilder{
+		fallbackTimeout: fallbackTimeout,
+	}
+}
+
+type lbBuilder struct {
+	fallbackTimeout time.Duration
+}
+
+func (b *lbBuilder) Name() string {
+	return grpclbName
+}
+
+func (b *lbBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer {
+	// This generates a manual resolver builder with a random scheme. This
+	// scheme will be used to dial to remote LB, so we can send filtered address
+	// updates to remote LB ClientConn using this manual resolver.
+	scheme := "grpclb_internal_" + strconv.FormatInt(time.Now().UnixNano(), 36)
+	r := &lbManualResolver{scheme: scheme, ccb: cc}
+
+	var target string
+	targetSplitted := strings.Split(cc.Target(), ":///")
+	if len(targetSplitted) < 2 {
+		target = cc.Target()
+	} else {
+		target = targetSplitted[1]
+	}
+
+	lb := &lbBalancer{
+		cc:              cc,
+		target:          target,
+		opt:             opt,
+		fallbackTimeout: b.fallbackTimeout,
+		doneCh:          make(chan struct{}),
+
+		manualResolver: r,
+		csEvltr:        &connectivityStateEvaluator{},
+		subConns:       make(map[resolver.Address]balancer.SubConn),
+		scStates:       make(map[balancer.SubConn]connectivity.State),
+		picker:         &errPicker{err: balancer.ErrNoSubConnAvailable},
+		clientStats:    &rpcStats{},
+	}
+
+	return lb
+}
+
+type lbBalancer struct {
+	cc              balancer.ClientConn
+	target          string
+	opt             balancer.BuildOptions
+	fallbackTimeout time.Duration
+	doneCh          chan struct{}
+
+	// manualResolver is used in the remote LB ClientConn inside grpclb. When
+	// resolved address updates are received by grpclb, filtered updates will be
+	// send to remote LB ClientConn through this resolver.
+	manualResolver *lbManualResolver
+	// The ClientConn to talk to the remote balancer.
+	ccRemoteLB *ClientConn
+
+	// Support client side load reporting. Each picker gets a reference to this,
+	// and will update its content.
+	clientStats *rpcStats
+
+	mu sync.Mutex // guards everything following.
+	// The full server list including drops, used to check if the newly received
+	// serverList contains anything new. Each generate picker will also have
+	// reference to this list to do the first layer pick.
+	fullServerList []*lbpb.Server
+	// All backends addresses, with metadata set to nil. This list contains all
+	// backend addresses in the same order and with the same duplicates as in
+	// serverlist. When generating picker, a SubConn slice with the same order
+	// but with only READY SCs will be gerenated.
+	backendAddrs []resolver.Address
+	// Roundrobin functionalities.
+	csEvltr  *connectivityStateEvaluator
+	state    connectivity.State
+	subConns map[resolver.Address]balancer.SubConn   // Used to new/remove SubConn.
+	scStates map[balancer.SubConn]connectivity.State // Used to filter READY SubConns.
+	picker   balancer.Picker
+	// Support fallback to resolved backend addresses if there's no response
+	// from remote balancer within fallbackTimeout.
+	fallbackTimerExpired bool
+	serverListReceived   bool
+	// resolvedBackendAddrs is resolvedAddrs minus remote balancers. It's set
+	// when resolved address updates are received, and read in the goroutine
+	// handling fallback.
+	resolvedBackendAddrs []resolver.Address
+}
+
+// regeneratePicker takes a snapshot of the balancer, and generates a picker from
+// it. The picker
+// - always returns ErrTransientFailure if the balancer is in TransientFailure,
+// - does two layer roundrobin pick otherwise.
+// Caller must hold lb.mu.
+func (lb *lbBalancer) regeneratePicker() {
+	if lb.state == connectivity.TransientFailure {
+		lb.picker = &errPicker{err: balancer.ErrTransientFailure}
+		return
+	}
+	var readySCs []balancer.SubConn
+	for _, a := range lb.backendAddrs {
+		if sc, ok := lb.subConns[a]; ok {
+			if st, ok := lb.scStates[sc]; ok && st == connectivity.Ready {
+				readySCs = append(readySCs, sc)
+			}
+		}
+	}
+
+	if len(lb.fullServerList) <= 0 {
+		if len(readySCs) <= 0 {
+			lb.picker = &errPicker{err: balancer.ErrNoSubConnAvailable}
+			return
+		}
+		lb.picker = &rrPicker{subConns: readySCs}
+		return
+	}
+	lb.picker = &lbPicker{
+		serverList: lb.fullServerList,
+		subConns:   readySCs,
+		stats:      lb.clientStats,
+	}
+	return
+}
+
+func (lb *lbBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) {
+	grpclog.Infof("lbBalancer: handle SubConn state change: %p, %v", sc, s)
+	lb.mu.Lock()
+	defer lb.mu.Unlock()
+
+	oldS, ok := lb.scStates[sc]
+	if !ok {
+		grpclog.Infof("lbBalancer: got state changes for an unknown SubConn: %p, %v", sc, s)
+		return
+	}
+	lb.scStates[sc] = s
+	switch s {
+	case connectivity.Idle:
+		sc.Connect()
+	case connectivity.Shutdown:
+		// When an address was removed by resolver, b called RemoveSubConn but
+		// kept the sc's state in scStates. Remove state for this sc here.
+		delete(lb.scStates, sc)
+	}
+
+	oldAggrState := lb.state
+	lb.state = lb.csEvltr.recordTransition(oldS, s)
+
+	// Regenerate picker when one of the following happens:
+	//  - this sc became ready from not-ready
+	//  - this sc became not-ready from ready
+	//  - the aggregated state of balancer became TransientFailure from non-TransientFailure
+	//  - the aggregated state of balancer became non-TransientFailure from TransientFailure
+	if (oldS == connectivity.Ready) != (s == connectivity.Ready) ||
+		(lb.state == connectivity.TransientFailure) != (oldAggrState == connectivity.TransientFailure) {
+		lb.regeneratePicker()
+	}
+
+	lb.cc.UpdateBalancerState(lb.state, lb.picker)
+	return
+}
+
+// fallbackToBackendsAfter blocks for fallbackTimeout and falls back to use
+// resolved backends (backends received from resolver, not from remote balancer)
+// if no connection to remote balancers was successful.
+func (lb *lbBalancer) fallbackToBackendsAfter(fallbackTimeout time.Duration) {
+	timer := time.NewTimer(fallbackTimeout)
+	defer timer.Stop()
+	select {
+	case <-timer.C:
+	case <-lb.doneCh:
+		return
+	}
+	lb.mu.Lock()
+	if lb.serverListReceived {
+		lb.mu.Unlock()
+		return
+	}
+	lb.fallbackTimerExpired = true
+	lb.refreshSubConns(lb.resolvedBackendAddrs)
+	lb.mu.Unlock()
+}
+
+// HandleResolvedAddrs sends the updated remoteLB addresses to remoteLB
+// clientConn. The remoteLB clientConn will handle creating/removing remoteLB
+// connections.
+func (lb *lbBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) {
+	grpclog.Infof("lbBalancer: handleResolvedResult: %+v", addrs)
+	if len(addrs) <= 0 {
+		return
+	}
+
+	var remoteBalancerAddrs, backendAddrs []resolver.Address
+	for _, a := range addrs {
+		if a.Type == resolver.GRPCLB {
+			remoteBalancerAddrs = append(remoteBalancerAddrs, a)
+		} else {
+			backendAddrs = append(backendAddrs, a)
+		}
+	}
+
+	if lb.ccRemoteLB == nil {
+		if len(remoteBalancerAddrs) <= 0 {
+			grpclog.Errorf("grpclb: no remote balancer address is available, should never happen")
+			return
+		}
+		// First time receiving resolved addresses, create a cc to remote
+		// balancers.
+		lb.dialRemoteLB(remoteBalancerAddrs[0].ServerName)
+		// Start the fallback goroutine.
+		go lb.fallbackToBackendsAfter(lb.fallbackTimeout)
+	}
+
+	// cc to remote balancers uses lb.manualResolver. Send the updated remote
+	// balancer addresses to it through manualResolver.
+	lb.manualResolver.NewAddress(remoteBalancerAddrs)
+
+	lb.mu.Lock()
+	lb.resolvedBackendAddrs = backendAddrs
+	// If serverListReceived is true, connection to remote balancer was
+	// successful and there's no need to do fallback anymore.
+	// If fallbackTimerExpired is false, fallback hasn't happened yet.
+	if !lb.serverListReceived && lb.fallbackTimerExpired {
+		// This means we received a new list of resolved backends, and we are
+		// still in fallback mode. Need to update the list of backends we are
+		// using to the new list of backends.
+		lb.refreshSubConns(lb.resolvedBackendAddrs)
+	}
+	lb.mu.Unlock()
+}
+
+func (lb *lbBalancer) Close() {
+	select {
+	case <-lb.doneCh:
+		return
+	default:
+	}
+	close(lb.doneCh)
+	if lb.ccRemoteLB != nil {
+		lb.ccRemoteLB.Close()
+	}
+}
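The "two layer roundrobin pick" mentioned in the comments above (first over the full server list including drop entries, then over ready backends) can be illustrated with a small, dependency-free sketch; the types and names here are illustrative only, not the vendored implementation:

package main

import (
	"errors"
	"fmt"
)

// server mirrors the shape of a grpclb server-list entry: either a drop
// marker or a routable backend.
type server struct {
	addr string
	drop bool
}

// picker round-robins over the full server list; a drop fails the call
// immediately, anything else round-robins over the ready backends.
type picker struct {
	servers  []server
	sNext    int
	backends []string
	bNext    int
}

func (p *picker) pick() (string, error) {
	s := p.servers[p.sNext]
	p.sNext = (p.sNext + 1) % len(p.servers)
	if s.drop {
		return "", errors.New("request dropped by balancer")
	}
	if len(p.backends) == 0 {
		return "", errors.New("no ready backend")
	}
	b := p.backends[p.bNext]
	p.bNext = (p.bNext + 1) % len(p.backends)
	return b, nil
}

func main() {
	p := &picker{
		servers:  []server{{addr: "10.0.0.1:80"}, {drop: true}, {addr: "10.0.0.2:80"}},
		backends: []string{"10.0.0.1:80", "10.0.0.2:80"},
	}
	for i := 0; i < 4; i++ {
		fmt.Println(p.pick())
	}
}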
159
vendor/google.golang.org/grpc/grpclb_picker.go
generated
vendored
Normal file
@@ -0,0 +1,159 @@
/*
 *
 * Copyright 2017 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package grpc

import (
	"sync"
	"sync/atomic"

	"golang.org/x/net/context"
	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/codes"
	lbpb "google.golang.org/grpc/grpclb/grpc_lb_v1/messages"
	"google.golang.org/grpc/status"
)

type rpcStats struct {
	NumCallsStarted                          int64
	NumCallsFinished                         int64
	NumCallsFinishedWithDropForRateLimiting  int64
	NumCallsFinishedWithDropForLoadBalancing int64
	NumCallsFinishedWithClientFailedToSend   int64
	NumCallsFinishedKnownReceived            int64
}

// toClientStats converts rpcStats to lbpb.ClientStats, and clears rpcStats.
func (s *rpcStats) toClientStats() *lbpb.ClientStats {
	stats := &lbpb.ClientStats{
		NumCallsStarted:                          atomic.SwapInt64(&s.NumCallsStarted, 0),
		NumCallsFinished:                         atomic.SwapInt64(&s.NumCallsFinished, 0),
		NumCallsFinishedWithDropForRateLimiting:  atomic.SwapInt64(&s.NumCallsFinishedWithDropForRateLimiting, 0),
		NumCallsFinishedWithDropForLoadBalancing: atomic.SwapInt64(&s.NumCallsFinishedWithDropForLoadBalancing, 0),
		NumCallsFinishedWithClientFailedToSend:   atomic.SwapInt64(&s.NumCallsFinishedWithClientFailedToSend, 0),
		NumCallsFinishedKnownReceived:            atomic.SwapInt64(&s.NumCallsFinishedKnownReceived, 0),
	}
	return stats
}

func (s *rpcStats) dropForRateLimiting() {
	atomic.AddInt64(&s.NumCallsStarted, 1)
	atomic.AddInt64(&s.NumCallsFinishedWithDropForRateLimiting, 1)
	atomic.AddInt64(&s.NumCallsFinished, 1)
}

func (s *rpcStats) dropForLoadBalancing() {
	atomic.AddInt64(&s.NumCallsStarted, 1)
	atomic.AddInt64(&s.NumCallsFinishedWithDropForLoadBalancing, 1)
	atomic.AddInt64(&s.NumCallsFinished, 1)
}

func (s *rpcStats) failedToSend() {
	atomic.AddInt64(&s.NumCallsStarted, 1)
	atomic.AddInt64(&s.NumCallsFinishedWithClientFailedToSend, 1)
	atomic.AddInt64(&s.NumCallsFinished, 1)
}

func (s *rpcStats) knownReceived() {
	atomic.AddInt64(&s.NumCallsStarted, 1)
	atomic.AddInt64(&s.NumCallsFinishedKnownReceived, 1)
	atomic.AddInt64(&s.NumCallsFinished, 1)
}

type errPicker struct {
	// Pick always returns this err.
	err error
}

func (p *errPicker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) {
	return nil, nil, p.err
}

// rrPicker does roundrobin on subConns. It's typically used when there's no
// response from remote balancer, and grpclb falls back to the resolved
// backends.
//
// It guaranteed that len(subConns) > 0.
type rrPicker struct {
	mu           sync.Mutex
	subConns     []balancer.SubConn // The subConns that were READY when taking the snapshot.
	subConnsNext int
}

func (p *rrPicker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) {
	p.mu.Lock()
	defer p.mu.Unlock()
	sc := p.subConns[p.subConnsNext]
	p.subConnsNext = (p.subConnsNext + 1) % len(p.subConns)
	return sc, nil, nil
}

// lbPicker does two layers of picks:
//
// First layer: roundrobin on all servers in serverList, including drops and backends.
// - If it picks a drop, the RPC will fail as being dropped.
// - If it picks a backend, do a second layer pick to pick the real backend.
//
// Second layer: roundrobin on all READY backends.
//
// It's guaranteed that len(serverList) > 0.
type lbPicker struct {
	mu             sync.Mutex
	serverList     []*lbpb.Server
	serverListNext int
	subConns       []balancer.SubConn // The subConns that were READY when taking the snapshot.
	subConnsNext   int

	stats *rpcStats
}

func (p *lbPicker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) {
	p.mu.Lock()
	defer p.mu.Unlock()

	// Layer one roundrobin on serverList.
	s := p.serverList[p.serverListNext]
	p.serverListNext = (p.serverListNext + 1) % len(p.serverList)

	// If it's a drop, return an error and fail the RPC.
	if s.DropForRateLimiting {
		p.stats.dropForRateLimiting()
		return nil, nil, status.Errorf(codes.Unavailable, "request dropped by grpclb")
	}
	if s.DropForLoadBalancing {
		p.stats.dropForLoadBalancing()
		return nil, nil, status.Errorf(codes.Unavailable, "request dropped by grpclb")
	}

	// If not a drop but there's no ready subConns.
	if len(p.subConns) <= 0 {
		return nil, nil, balancer.ErrNoSubConnAvailable
	}

	// Return the next ready subConn in the list, also collect rpc stats.
	sc := p.subConns[p.subConnsNext]
	p.subConnsNext = (p.subConnsNext + 1) % len(p.subConns)
	done := func(info balancer.DoneInfo) {
		if !info.BytesSent {
			p.stats.failedToSend()
		} else if info.BytesReceived {
			p.stats.knownReceived()
		}
	}
	return sc, done, nil
}
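The rpcStats type above leans on atomic.SwapInt64 to snapshot and reset each counter in one step, so every load report covers only the interval since the previous one. A standalone sketch of that pattern (names here are illustrative only):

package main

import (
	"fmt"
	"sync/atomic"
)

// counters is a cut-down stand-in for rpcStats: callers bump the fields
// concurrently, and a reporter periodically snapshots them.
type counters struct {
	started  int64
	finished int64
}

// snapshot atomically reads each counter and resets it to zero.
func (c *counters) snapshot() (started, finished int64) {
	return atomic.SwapInt64(&c.started, 0), atomic.SwapInt64(&c.finished, 0)
}

func main() {
	var c counters
	atomic.AddInt64(&c.started, 3)
	atomic.AddInt64(&c.finished, 2)

	s, f := c.snapshot()
	fmt.Println(s, f) // 3 2
	s, f = c.snapshot()
	fmt.Println(s, f) // 0 0
}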
254
vendor/google.golang.org/grpc/grpclb_remote_balancer.go
generated
vendored
Normal file
@@ -0,0 +1,254 @@
/*
 *
 * Copyright 2017 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package grpc

import (
	"fmt"
	"net"
	"reflect"
	"time"

	"golang.org/x/net/context"
	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/connectivity"
	lbpb "google.golang.org/grpc/grpclb/grpc_lb_v1/messages"
	"google.golang.org/grpc/grpclog"
	"google.golang.org/grpc/metadata"
	"google.golang.org/grpc/resolver"
)

// processServerList updates balaner's internal state, create/remove SubConns
// and regenerates picker using the received serverList.
func (lb *lbBalancer) processServerList(l *lbpb.ServerList) {
	grpclog.Infof("lbBalancer: processing server list: %+v", l)
	lb.mu.Lock()
	defer lb.mu.Unlock()

	// Set serverListReceived to true so fallback will not take effect if it has
	// not hit timeout.
	lb.serverListReceived = true

	// If the new server list == old server list, do nothing.
	if reflect.DeepEqual(lb.fullServerList, l.Servers) {
		grpclog.Infof("lbBalancer: new serverlist same as the previous one, ignoring")
		return
	}
	lb.fullServerList = l.Servers

	var backendAddrs []resolver.Address
	for _, s := range l.Servers {
		if s.DropForLoadBalancing || s.DropForRateLimiting {
			continue
		}

		md := metadata.Pairs(lbTokeyKey, s.LoadBalanceToken)
		ip := net.IP(s.IpAddress)
		ipStr := ip.String()
		if ip.To4() == nil {
			// Add square brackets to ipv6 addresses, otherwise net.Dial() and
			// net.SplitHostPort() will return too many colons error.
			ipStr = fmt.Sprintf("[%s]", ipStr)
		}
		addr := resolver.Address{
			Addr:     fmt.Sprintf("%s:%d", ipStr, s.Port),
			Metadata: &md,
		}

		backendAddrs = append(backendAddrs, addr)
	}

	// Call refreshSubConns to create/remove SubConns.
	backendsUpdated := lb.refreshSubConns(backendAddrs)
	// If no backend was updated, no SubConn will be newed/removed. But since
	// the full serverList was different, there might be updates in drops or
	// pick weights(different number of duplicates). We need to update picker
	// with the fulllist.
	if !backendsUpdated {
		lb.regeneratePicker()
		lb.cc.UpdateBalancerState(lb.state, lb.picker)
	}
}

// refreshSubConns creates/removes SubConns with backendAddrs. It returns a bool
// indicating whether the backendAddrs are different from the cached
// backendAddrs (whether any SubConn was newed/removed).
// Caller must hold lb.mu.
func (lb *lbBalancer) refreshSubConns(backendAddrs []resolver.Address) bool {
	lb.backendAddrs = nil
	var backendsUpdated bool
	// addrsSet is the set converted from backendAddrs, it's used to quick
	// lookup for an address.
	addrsSet := make(map[resolver.Address]struct{})
	// Create new SubConns.
	for _, addr := range backendAddrs {
		addrWithoutMD := addr
		addrWithoutMD.Metadata = nil
		addrsSet[addrWithoutMD] = struct{}{}
		lb.backendAddrs = append(lb.backendAddrs, addrWithoutMD)

		if _, ok := lb.subConns[addrWithoutMD]; !ok {
			backendsUpdated = true

			// Use addrWithMD to create the SubConn.
			sc, err := lb.cc.NewSubConn([]resolver.Address{addr}, balancer.NewSubConnOptions{})
			if err != nil {
				grpclog.Warningf("roundrobinBalancer: failed to create new SubConn: %v", err)
				continue
			}
			lb.subConns[addrWithoutMD] = sc // Use the addr without MD as key for the map.
			lb.scStates[sc] = connectivity.Idle
			sc.Connect()
		}
	}

	for a, sc := range lb.subConns {
		// a was removed by resolver.
		if _, ok := addrsSet[a]; !ok {
			backendsUpdated = true

			lb.cc.RemoveSubConn(sc)
			delete(lb.subConns, a)
			// Keep the state of this sc in b.scStates until sc's state becomes Shutdown.
			// The entry will be deleted in HandleSubConnStateChange.
		}
	}

	return backendsUpdated
}

func (lb *lbBalancer) readServerList(s *balanceLoadClientStream) error {
	for {
		reply, err := s.Recv()
		if err != nil {
			return fmt.Errorf("grpclb: failed to recv server list: %v", err)
		}
		if serverList := reply.GetServerList(); serverList != nil {
			lb.processServerList(serverList)
		}
	}
}

func (lb *lbBalancer) sendLoadReport(s *balanceLoadClientStream, interval time.Duration) {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
		case <-s.Context().Done():
			return
		}
		stats := lb.clientStats.toClientStats()
		t := time.Now()
		stats.Timestamp = &lbpb.Timestamp{
			Seconds: t.Unix(),
			Nanos:   int32(t.Nanosecond()),
		}
		if err := s.Send(&lbpb.LoadBalanceRequest{
			LoadBalanceRequestType: &lbpb.LoadBalanceRequest_ClientStats{
				ClientStats: stats,
			},
		}); err != nil {
			return
		}
	}
}

func (lb *lbBalancer) callRemoteBalancer() error {
	lbClient := &loadBalancerClient{cc: lb.ccRemoteLB}
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	stream, err := lbClient.BalanceLoad(ctx, FailFast(false))
	if err != nil {
		return fmt.Errorf("grpclb: failed to perform RPC to the remote balancer %v", err)
	}

	// grpclb handshake on the stream.
	initReq := &lbpb.LoadBalanceRequest{
		LoadBalanceRequestType: &lbpb.LoadBalanceRequest_InitialRequest{
			InitialRequest: &lbpb.InitialLoadBalanceRequest{
				Name: lb.target,
			},
		},
	}
	if err := stream.Send(initReq); err != nil {
		return fmt.Errorf("grpclb: failed to send init request: %v", err)
	}
	reply, err := stream.Recv()
	if err != nil {
		return fmt.Errorf("grpclb: failed to recv init response: %v", err)
	}
	initResp := reply.GetInitialResponse()
	if initResp == nil {
		return fmt.Errorf("grpclb: reply from remote balancer did not include initial response")
	}
	if initResp.LoadBalancerDelegate != "" {
		return fmt.Errorf("grpclb: Delegation is not supported")
	}

	go func() {
		if d := convertDuration(initResp.ClientStatsReportInterval); d > 0 {
			lb.sendLoadReport(stream, d)
		}
	}()
	return lb.readServerList(stream)
}

func (lb *lbBalancer) watchRemoteBalancer() {
	for {
		err := lb.callRemoteBalancer()
		select {
		case <-lb.doneCh:
			return
		default:
			if err != nil {
				grpclog.Error(err)
			}
		}

	}
}

func (lb *lbBalancer) dialRemoteLB(remoteLBName string) {
	var dopts []DialOption
	if creds := lb.opt.DialCreds; creds != nil {
		if err := creds.OverrideServerName(remoteLBName); err == nil {
			dopts = append(dopts, WithTransportCredentials(creds))
		} else {
			grpclog.Warningf("grpclb: failed to override the server name in the credentials: %v, using Insecure", err)
			dopts = append(dopts, WithInsecure())
		}
	} else {
		dopts = append(dopts, WithInsecure())
	}
	if lb.opt.Dialer != nil {
		// WithDialer takes a different type of function, so we instead use a
		// special DialOption here.
		dopts = append(dopts, withContextDialer(lb.opt.Dialer))
	}
	// Explicitly set pickfirst as the balancer.
	dopts = append(dopts, WithBalancerName(PickFirstBalancerName))
	dopts = append(dopts, withResolverBuilder(lb.manualResolver))
	// Dial using manualResolver.Scheme, which is a random scheme generated
	// when init grpclb. The target name is not important.
	cc, err := Dial("grpclb:///grpclb.server", dopts...)
	if err != nil {
		grpclog.Fatalf("failed to dial: %v", err)
	}
	lb.ccRemoteLB = cc
	go lb.watchRemoteBalancer()
}
90
vendor/google.golang.org/grpc/grpclb_util.go
generated
vendored
Normal file
@@ -0,0 +1,90 @@
/*
 *
 * Copyright 2016 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package grpc

import (
	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/resolver"
)

// The parent ClientConn should re-resolve when grpclb loses connection to the
// remote balancer. When the ClientConn inside grpclb gets a TransientFailure,
// it calls lbManualResolver.ResolveNow(), which calls parent ClientConn's
// ResolveNow, and eventually results in re-resolve happening in parent
// ClientConn's resolver (DNS for example).
//
// parent
// ClientConn
// +-----------------------------------------------------------------+
// | parent +---------------------------------+ |
// | DNS ClientConn | grpclb | |
// | resolver balancerWrapper | | |
// | + + | grpclb grpclb | |
// | | | | ManualResolver ClientConn | |
// | | | | + + | |
// | | | | | | Transient | |
// | | | | | | Failure | |
// | | | | | <--------- | | |
// | | | <--------------- | ResolveNow | | |
// | | <--------- | ResolveNow | | | | |
// | | ResolveNow | | | | | |
// | | | | | | | |
// | + + | + + | |
// | +---------------------------------+ |
// +-----------------------------------------------------------------+

// lbManualResolver is used by the ClientConn inside grpclb. It's a manual
// resolver with a special ResolveNow() function.
//
// When ResolveNow() is called, it calls ResolveNow() on the parent ClientConn,
// so when grpclb client lose contact with remote balancers, the parent
// ClientConn's resolver will re-resolve.
type lbManualResolver struct {
	scheme string
	ccr    resolver.ClientConn

	ccb balancer.ClientConn
}

func (r *lbManualResolver) Build(_ resolver.Target, cc resolver.ClientConn, _ resolver.BuildOption) (resolver.Resolver, error) {
	r.ccr = cc
	return r, nil
}

func (r *lbManualResolver) Scheme() string {
	return r.scheme
}

// ResolveNow calls resolveNow on the parent ClientConn.
func (r *lbManualResolver) ResolveNow(o resolver.ResolveNowOption) {
	r.ccb.ResolveNow(o)
}

// Close is a noop for Resolver.
func (*lbManualResolver) Close() {}

// NewAddress calls cc.NewAddress.
func (r *lbManualResolver) NewAddress(addrs []resolver.Address) {
	r.ccr.NewAddress(addrs)
}

// NewServiceConfig calls cc.NewServiceConfig.
func (r *lbManualResolver) NewServiceConfig(sc string) {
	r.ccr.NewServiceConfig(sc)
}
4 vendor/google.golang.org/grpc/interceptor.go generated vendored
@@ -48,7 +48,9 @@ type UnaryServerInfo struct {
 }
 
 // UnaryHandler defines the handler invoked by UnaryServerInterceptor to complete the normal
-// execution of a unary RPC.
+// execution of a unary RPC. If a UnaryHandler returns an error, it should be produced by the
+// status package, or else gRPC will use codes.Unknown as the status code and err.Error() as
+// the status message of the RPC.
 type UnaryHandler func(ctx context.Context, req interface{}) (interface{}, error)
 
 // UnaryServerInterceptor provides a hook to intercept the execution of a unary RPC on the server. info
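For reference, a minimal sketch (not part of the vendored diff) of a unary server interceptor that follows the clarified contract above by returning errors built with the status package. The package name, the allow-listed method string, and the permission check itself are illustrative assumptions.

package interceptors

import (
	"context"

	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// authInterceptor rejects every method except an assumed allow-listed one.
// Real checks would inspect metadata or peer credentials instead.
func authInterceptor(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
	if info.FullMethod != "/grpc.health.v1.Health/Check" {
		// Produced by the status package, so the client sees PermissionDenied
		// rather than codes.Unknown.
		return nil, status.Errorf(codes.PermissionDenied, "method %q is not allowed", info.FullMethod)
	}
	return handler(ctx, req)
}

// newServer installs the interceptor on a gRPC server.
func newServer() *grpc.Server {
	return grpc.NewServer(grpc.UnaryInterceptor(authInterceptor))
}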
61 vendor/google.golang.org/grpc/metadata/metadata.go generated vendored
@@ -17,7 +17,8 @@
 */
 
 // Package metadata define the structure of the metadata supported by gRPC library.
-// Please refer to https://grpc.io/docs/guides/wire.html for more information about custom-metadata.
+// Please refer to https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md
+// for more information about custom-metadata.
 package metadata // import "google.golang.org/grpc/metadata"
 
 import (
@@ -115,9 +116,26 @@ func NewIncomingContext(ctx context.Context, md MD) context.Context {
 	return context.WithValue(ctx, mdIncomingKey{}, md)
 }
 
-// NewOutgoingContext creates a new context with outgoing md attached.
+// NewOutgoingContext creates a new context with outgoing md attached. If used
+// in conjunction with AppendToOutgoingContext, NewOutgoingContext will
+// overwrite any previously-appended metadata.
 func NewOutgoingContext(ctx context.Context, md MD) context.Context {
-	return context.WithValue(ctx, mdOutgoingKey{}, md)
+	return context.WithValue(ctx, mdOutgoingKey{}, rawMD{md: md})
+}
+
+// AppendToOutgoingContext returns a new context with the provided kv merged
+// with any existing metadata in the context. Please refer to the
+// documentation of Pairs for a description of kv.
+func AppendToOutgoingContext(ctx context.Context, kv ...string) context.Context {
+	if len(kv)%2 == 1 {
+		panic(fmt.Sprintf("metadata: AppendToOutgoingContext got an odd number of input pairs for metadata: %d", len(kv)))
+	}
+	md, _ := ctx.Value(mdOutgoingKey{}).(rawMD)
+	added := make([][]string, len(md.added)+1)
+	copy(added, md.added)
+	added[len(added)-1] = make([]string, len(kv))
+	copy(added[len(added)-1], kv)
+	return context.WithValue(ctx, mdOutgoingKey{}, rawMD{md: md.md, added: added})
 }
 
 // FromIncomingContext returns the incoming metadata in ctx if it exists. The
@@ -128,10 +146,39 @@ func FromIncomingContext(ctx context.Context) (md MD, ok bool) {
 	return
 }
 
+// FromOutgoingContextRaw returns the un-merged, intermediary contents
+// of rawMD. Remember to perform strings.ToLower on the keys. The returned
+// MD should not be modified. Writing to it may cause races. Modification
+// should be made to copies of the returned MD.
+//
+// This is intended for gRPC-internal use ONLY.
+func FromOutgoingContextRaw(ctx context.Context) (MD, [][]string, bool) {
+	raw, ok := ctx.Value(mdOutgoingKey{}).(rawMD)
+	if !ok {
+		return nil, nil, false
+	}
+
+	return raw.md, raw.added, true
+}
+
 // FromOutgoingContext returns the outgoing metadata in ctx if it exists. The
 // returned MD should not be modified. Writing to it may cause races.
-// Modification should be made to the copies of the returned MD.
-func FromOutgoingContext(ctx context.Context) (md MD, ok bool) {
-	md, ok = ctx.Value(mdOutgoingKey{}).(MD)
-	return
+// Modification should be made to copies of the returned MD.
+func FromOutgoingContext(ctx context.Context) (MD, bool) {
+	raw, ok := ctx.Value(mdOutgoingKey{}).(rawMD)
+	if !ok {
+		return nil, false
+	}
+
+	mds := make([]MD, 0, len(raw.added)+1)
+	mds = append(mds, raw.md)
+	for _, vv := range raw.added {
+		mds = append(mds, Pairs(vv...))
+	}
+	return Join(mds...), ok
+}
+
+type rawMD struct {
+	md    MD
+	added [][]string
 }
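A minimal sketch (not part of the vendored diff) of how the new outgoing-metadata helpers fit together, assuming client code calls them before issuing an RPC. The header names and values are placeholders.

package metadataexample

import (
	"context"

	"google.golang.org/grpc/metadata"
)

// outgoingContext builds the outgoing metadata for a call.
func outgoingContext(ctx context.Context) context.Context {
	// NewOutgoingContext stores the base MD as rawMD{md: md}.
	ctx = metadata.NewOutgoingContext(ctx, metadata.Pairs("x-request-id", "42"))

	// AppendToOutgoingContext merges extra key/value pairs without copying the
	// existing MD; the pieces are only joined when FromOutgoingContext runs.
	ctx = metadata.AppendToOutgoingContext(ctx, "authorization", "Bearer placeholder-token")

	// Note: calling NewOutgoingContext again here would overwrite both the base
	// MD and anything appended so far, per the updated doc comment.
	return ctx
}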
2 vendor/google.golang.org/grpc/naming/go17.go generated vendored
@@ -1,4 +1,4 @@
-// +build go1.6, !go1.8
+// +build go1.6,!go1.8
 
 /*
  *
23 vendor/google.golang.org/grpc/picker_wrapper.go generated vendored
@@ -36,6 +36,10 @@ type pickerWrapper struct {
 	done       bool
 	blockingCh chan struct{}
 	picker     balancer.Picker
+
+	// The latest connection happened.
+	connErrMu sync.Mutex
+	connErr   error
 }
 
 func newPickerWrapper() *pickerWrapper {
@@ -43,6 +47,19 @@ func newPickerWrapper() *pickerWrapper {
 	return bp
 }
 
+func (bp *pickerWrapper) updateConnectionError(err error) {
+	bp.connErrMu.Lock()
+	bp.connErr = err
+	bp.connErrMu.Unlock()
+}
+
+func (bp *pickerWrapper) connectionError() error {
+	bp.connErrMu.Lock()
+	err := bp.connErr
+	bp.connErrMu.Unlock()
+	return err
+}
+
 // updatePicker is called by UpdateBalancerState. It unblocks all blocked pick.
 func (bp *pickerWrapper) updatePicker(p balancer.Picker) {
 	bp.mu.Lock()
@@ -97,7 +114,7 @@ func (bp *pickerWrapper) pick(ctx context.Context, failfast bool, opts balancer.
 		p = bp.picker
 		bp.mu.Unlock()
 
-		subConn, put, err := p.Pick(ctx, opts)
+		subConn, done, err := p.Pick(ctx, opts)
 
 		if err != nil {
 			switch err {
@@ -107,7 +124,7 @@ func (bp *pickerWrapper) pick(ctx context.Context, failfast bool, opts balancer.
 				if !failfast {
 					continue
 				}
-				return nil, nil, status.Errorf(codes.Unavailable, "%v", err)
+				return nil, nil, status.Errorf(codes.Unavailable, "%v, latest connection error: %v", err, bp.connectionError())
 			default:
 				// err is some other error.
 				return nil, nil, toRPCErr(err)
@@ -120,7 +137,7 @@ func (bp *pickerWrapper) pick(ctx context.Context, failfast bool, opts balancer.
 			continue
 		}
 		if t, ok := acw.getAddrConn().getReadyTransport(); ok {
-			return t, put, nil
+			return t, done, nil
 		}
 		grpclog.Infof("blockingPicker: the picked transport is not ready, loop back to repick")
 		// If ok == false, ac.state is not READY.
5 vendor/google.golang.org/grpc/pickfirst.go generated vendored
@@ -26,6 +26,9 @@ import (
 	"google.golang.org/grpc/resolver"
 )
 
+// PickFirstBalancerName is the name of the pick_first balancer.
+const PickFirstBalancerName = "pick_first"
+
 func newPickfirstBuilder() balancer.Builder {
 	return &pickfirstBuilder{}
 }
@@ -37,7 +40,7 @@ func (*pickfirstBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions
 }
 
 func (*pickfirstBuilder) Name() string {
-	return "pick_first"
+	return PickFirstBalancerName
 }
 
 type pickfirstBalancer struct {
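An illustrative sketch (not part of the vendored diff) of selecting the pick_first balancer through the newly exported constant, the same way the grpclb code above wires its internal connection. The target address and the use of WithInsecure are placeholder assumptions.

package balancerexample

import "google.golang.org/grpc"

// dialPickFirst dials a target with the pick_first balancer selected
// explicitly via the exported constant instead of the raw "pick_first" string.
func dialPickFirst(target string) (*grpc.ClientConn, error) {
	return grpc.Dial(target,
		grpc.WithInsecure(),
		grpc.WithBalancerName(grpc.PickFirstBalancerName),
	)
}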
35 vendor/google.golang.org/grpc/resolver/resolver.go generated vendored
@@ -36,30 +36,26 @@ func Register(b Builder) {
 }
 
 // Get returns the resolver builder registered with the given scheme.
-// If no builder is register with the scheme, the default scheme will
-// be used.
-// If the default scheme is not modified, "dns" will be the default
-// scheme, and the preinstalled dns resolver will be used.
-// If the default scheme is modified, and a resolver is registered with
-// the scheme, that resolver will be returned.
-// If the default scheme is modified, and no resolver is registered with
-// the scheme, nil will be returned.
+//
+// If no builder is register with the scheme, nil will be returned.
 func Get(scheme string) Builder {
 	if b, ok := m[scheme]; ok {
 		return b
 	}
-	if b, ok := m[defaultScheme]; ok {
-		return b
-	}
 	return nil
 }
 
 // SetDefaultScheme sets the default scheme that will be used.
-// The default default scheme is "dns".
+// The default default scheme is "passthrough".
 func SetDefaultScheme(scheme string) {
 	defaultScheme = scheme
 }
 
+// GetDefaultScheme gets the default scheme that will be used.
+func GetDefaultScheme() string {
+	return defaultScheme
+}
+
 // AddressType indicates the address type returned by name resolution.
 type AddressType uint8
 
@@ -78,7 +74,9 @@ type Address struct {
 	// Type is the type of this address.
 	Type AddressType
 	// ServerName is the name of this address.
-	// It's the name of the grpc load balancer, which will be used for authentication.
+	//
+	// e.g. if Type is GRPCLB, ServerName should be the name of the remote load
+	// balancer, not the name of the backend.
 	ServerName string
 	// Metadata is the information associated with Addr, which may be used
 	// to make load balancing decision.
@@ -92,6 +90,11 @@ type BuildOption struct {
 
 // ClientConn contains the callbacks for resolver to notify any updates
 // to the gRPC ClientConn.
+//
+// This interface is to be implemented by gRPC. Users should not need a
+// brand new implementation of this interface. For the situations like
+// testing, the new implementation should embed this interface. This allows
+// gRPC to add new methods to this interface.
 type ClientConn interface {
 	// NewAddress is called by resolver to notify ClientConn a new list
 	// of resolved addresses.
@@ -128,8 +131,10 @@ type ResolveNowOption struct{}
 // Resolver watches for the updates on the specified target.
 // Updates include address updates and service config updates.
 type Resolver interface {
-	// ResolveNow will be called by gRPC to try to resolve the target name again.
-	// It's just a hint, resolver can ignore this if it's not necessary.
+	// ResolveNow will be called by gRPC to try to resolve the target name
+	// again. It's just a hint, resolver can ignore this if it's not necessary.
+	//
+	// It could be called multiple times concurrently.
 	ResolveNow(ResolveNowOption)
 	// Close closes the resolver.
 	Close()
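A hedged sketch (not part of the vendored diff) of how the revised Get/SetDefaultScheme surface behaves: Get no longer falls back to the default scheme, so an unregistered scheme now yields nil. The exampleBuilder type, scheme name, and address are purely illustrative.

package resolverexample

import "google.golang.org/grpc/resolver"

type exampleBuilder struct{}

// Build reports a single fixed address to the ClientConn and returns a no-op resolver.
func (*exampleBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOption) (resolver.Resolver, error) {
	cc.NewAddress([]resolver.Address{{Addr: "127.0.0.1:50051"}})
	return nopResolver{}, nil
}

func (*exampleBuilder) Scheme() string { return "example" }

type nopResolver struct{}

func (nopResolver) ResolveNow(resolver.ResolveNowOption) {}
func (nopResolver) Close()                               {}

func init() {
	resolver.Register(&exampleBuilder{})

	_ = resolver.Get("example")      // returns the registered *exampleBuilder
	_ = resolver.Get("unknown")      // now returns nil instead of the default-scheme builder
	_ = resolver.GetDefaultScheme()  // newly exported accessor for the default scheme
}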
24 vendor/google.golang.org/grpc/resolver_conn_wrapper.go generated vendored
@@ -48,23 +48,30 @@ func split2(s, sep string) (string, string, bool) {
 
 // parseTarget splits target into a struct containing scheme, authority and
 // endpoint.
+//
+// If target is not a valid scheme://authority/endpoint, it returns {Endpoint:
+// target}.
 func parseTarget(target string) (ret resolver.Target) {
 	var ok bool
 	ret.Scheme, ret.Endpoint, ok = split2(target, "://")
 	if !ok {
 		return resolver.Target{Endpoint: target}
 	}
-	ret.Authority, ret.Endpoint, _ = split2(ret.Endpoint, "/")
+	ret.Authority, ret.Endpoint, ok = split2(ret.Endpoint, "/")
+	if !ok {
+		return resolver.Target{Endpoint: target}
+	}
 	return ret
 }
 
 // newCCResolverWrapper parses cc.target for scheme and gets the resolver
 // builder for this scheme. It then builds the resolver and starts the
 // monitoring goroutine for it.
+//
+// If withResolverBuilder dial option is set, the specified resolver will be
+// used instead.
 func newCCResolverWrapper(cc *ClientConn) (*ccResolverWrapper, error) {
-	grpclog.Infof("dialing to target with scheme: %q", cc.parsedTarget.Scheme)
-
-	rb := resolver.Get(cc.parsedTarget.Scheme)
+	rb := cc.dopts.resolverBuilder
 	if rb == nil {
 		return nil, fmt.Errorf("could not get resolver for scheme: %q", cc.parsedTarget.Scheme)
 	}
@@ -81,10 +88,13 @@ func newCCResolverWrapper(cc *ClientConn) (*ccResolverWrapper, error) {
 	if err != nil {
 		return nil, err
 	}
-	go ccr.watcher()
 	return ccr, nil
 }
 
+func (ccr *ccResolverWrapper) start() {
+	go ccr.watcher()
+}
+
 // watcher processes address updates and service config updates sequencially.
 // Otherwise, we need to resolve possible races between address and service
 // config (e.g. they specify different balancer types).
@@ -119,6 +129,10 @@ func (ccr *ccResolverWrapper) watcher() {
 	}
 }
+
+func (ccr *ccResolverWrapper) resolveNow(o resolver.ResolveNowOption) {
+	ccr.resolver.ResolveNow(o)
+}
 
 func (ccr *ccResolverWrapper) close() {
 	ccr.resolver.Close()
 	close(ccr.done)
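For clarity, a standalone restatement (not the vendored code itself) of the parseTarget rules shown above; the real function and its split2 helper are unexported inside the grpc package, so the names here are only illustrative.

package targetexample

import "strings"

type target struct{ Scheme, Authority, Endpoint string }

// parseTarget mirrors the behavior in the diff: a string that is not a valid
// scheme://authority/endpoint is treated as an opaque endpoint.
func parseTarget(s string) target {
	scheme, rest, ok := split2(s, "://")
	if !ok {
		return target{Endpoint: s}
	}
	authority, endpoint, ok := split2(rest, "/")
	if !ok {
		// New in this revision: "scheme://authority" without a trailing
		// "/endpoint" also falls back to the opaque-endpoint form.
		return target{Endpoint: s}
	}
	return target{Scheme: scheme, Authority: authority, Endpoint: endpoint}
}

func split2(s, sep string) (string, string, bool) {
	i := strings.Index(s, sep)
	if i < 0 {
		return "", "", false
	}
	return s[:i], s[i+len(sep):], true
}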
354 vendor/google.golang.org/grpc/rpc_util.go generated vendored
@@ -22,9 +22,12 @@ import (
 	"bytes"
 	"compress/gzip"
 	"encoding/binary"
+	"fmt"
 	"io"
 	"io/ioutil"
 	"math"
+	"net/url"
+	"strings"
 	"sync"
 	"time"
 
@@ -32,6 +35,7 @@ import (
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/credentials"
 	"google.golang.org/grpc/encoding"
+	"google.golang.org/grpc/encoding/proto"
 	"google.golang.org/grpc/metadata"
 	"google.golang.org/grpc/peer"
 	"google.golang.org/grpc/stats"
@@ -53,13 +57,29 @@ type gzipCompressor struct {
 
 // NewGZIPCompressor creates a Compressor based on GZIP.
 func NewGZIPCompressor() Compressor {
+	c, _ := NewGZIPCompressorWithLevel(gzip.DefaultCompression)
+	return c
+}
+
+// NewGZIPCompressorWithLevel is like NewGZIPCompressor but specifies the gzip compression level instead
+// of assuming DefaultCompression.
+//
+// The error returned will be nil if the level is valid.
+func NewGZIPCompressorWithLevel(level int) (Compressor, error) {
+	if level < gzip.DefaultCompression || level > gzip.BestCompression {
+		return nil, fmt.Errorf("grpc: invalid compression level: %d", level)
+	}
 	return &gzipCompressor{
 		pool: sync.Pool{
 			New: func() interface{} {
-				return gzip.NewWriter(ioutil.Discard)
+				w, err := gzip.NewWriterLevel(ioutil.Discard, level)
+				if err != nil {
+					panic(err)
+				}
+				return w
 			},
 		},
-	}
+	}, nil
 }
 
 func (c *gzipCompressor) Do(w io.Writer, p []byte) error {
@@ -125,13 +145,13 @@ func (d *gzipDecompressor) Type() string {
 type callInfo struct {
 	compressorType        string
 	failFast              bool
-	headerMD              metadata.MD
-	trailerMD             metadata.MD
-	peer                  *peer.Peer
+	stream                *clientStream
 	traceInfo             traceInfo // in trace.go
 	maxReceiveMessageSize *int
 	maxSendMessageSize    *int
 	creds                 credentials.PerRPCCredentials
+	contentSubtype        string
+	codec                 baseCodec
 }
 
 func defaultCallInfo() *callInfo {
@@ -158,40 +178,66 @@ type EmptyCallOption struct{}
 func (EmptyCallOption) before(*callInfo) error { return nil }
 func (EmptyCallOption) after(*callInfo)        {}
 
-type beforeCall func(c *callInfo) error
-
-func (o beforeCall) before(c *callInfo) error { return o(c) }
-func (o beforeCall) after(c *callInfo)        {}
-
-type afterCall func(c *callInfo)
-
-func (o afterCall) before(c *callInfo) error { return nil }
-func (o afterCall) after(c *callInfo)        { o(c) }
-
 // Header returns a CallOptions that retrieves the header metadata
 // for a unary RPC.
 func Header(md *metadata.MD) CallOption {
-	return afterCall(func(c *callInfo) {
-		*md = c.headerMD
-	})
+	return HeaderCallOption{HeaderAddr: md}
 }
 
+// HeaderCallOption is a CallOption for collecting response header metadata.
+// The metadata field will be populated *after* the RPC completes.
+// This is an EXPERIMENTAL API.
+type HeaderCallOption struct {
+	HeaderAddr *metadata.MD
+}
+
+func (o HeaderCallOption) before(c *callInfo) error { return nil }
+func (o HeaderCallOption) after(c *callInfo) {
+	if c.stream != nil {
+		*o.HeaderAddr, _ = c.stream.Header()
+	}
+}
+
 // Trailer returns a CallOptions that retrieves the trailer metadata
 // for a unary RPC.
 func Trailer(md *metadata.MD) CallOption {
-	return afterCall(func(c *callInfo) {
-		*md = c.trailerMD
-	})
+	return TrailerCallOption{TrailerAddr: md}
 }
 
+// TrailerCallOption is a CallOption for collecting response trailer metadata.
+// The metadata field will be populated *after* the RPC completes.
+// This is an EXPERIMENTAL API.
+type TrailerCallOption struct {
+	TrailerAddr *metadata.MD
+}
+
+func (o TrailerCallOption) before(c *callInfo) error { return nil }
+func (o TrailerCallOption) after(c *callInfo) {
+	if c.stream != nil {
+		*o.TrailerAddr = c.stream.Trailer()
+	}
+}
+
 // Peer returns a CallOption that retrieves peer information for a
 // unary RPC.
-func Peer(peer *peer.Peer) CallOption {
-	return afterCall(func(c *callInfo) {
-		if c.peer != nil {
-			*peer = *c.peer
-		}
-	})
+func Peer(p *peer.Peer) CallOption {
+	return PeerCallOption{PeerAddr: p}
 }
 
+// PeerCallOption is a CallOption for collecting the identity of the remote
+// peer. The peer field will be populated *after* the RPC completes.
+// This is an EXPERIMENTAL API.
+type PeerCallOption struct {
+	PeerAddr *peer.Peer
+}
+
+func (o PeerCallOption) before(c *callInfo) error { return nil }
+func (o PeerCallOption) after(c *callInfo) {
+	if c.stream != nil {
+		if x, ok := peer.FromContext(c.stream.Context()); ok {
+			*o.PeerAddr = *x
+		}
+	}
+}
+
 // FailFast configures the action to take when an RPC is attempted on broken
@@ -205,49 +251,160 @@ func Peer(peer *peer.Peer) CallOption {
 //
 // By default, RPCs are "Fail Fast".
 func FailFast(failFast bool) CallOption {
-	return beforeCall(func(c *callInfo) error {
-		c.failFast = failFast
-		return nil
-	})
+	return FailFastCallOption{FailFast: failFast}
 }
 
+// FailFastCallOption is a CallOption for indicating whether an RPC should fail
+// fast or not.
+// This is an EXPERIMENTAL API.
+type FailFastCallOption struct {
+	FailFast bool
+}
+
+func (o FailFastCallOption) before(c *callInfo) error {
+	c.failFast = o.FailFast
+	return nil
+}
+func (o FailFastCallOption) after(c *callInfo) { return }
+
 // MaxCallRecvMsgSize returns a CallOption which sets the maximum message size the client can receive.
 func MaxCallRecvMsgSize(s int) CallOption {
-	return beforeCall(func(o *callInfo) error {
-		o.maxReceiveMessageSize = &s
-		return nil
-	})
+	return MaxRecvMsgSizeCallOption{MaxRecvMsgSize: s}
 }
 
+// MaxRecvMsgSizeCallOption is a CallOption that indicates the maximum message
+// size the client can receive.
+// This is an EXPERIMENTAL API.
+type MaxRecvMsgSizeCallOption struct {
+	MaxRecvMsgSize int
+}
+
+func (o MaxRecvMsgSizeCallOption) before(c *callInfo) error {
+	c.maxReceiveMessageSize = &o.MaxRecvMsgSize
+	return nil
+}
+func (o MaxRecvMsgSizeCallOption) after(c *callInfo) { return }
+
 // MaxCallSendMsgSize returns a CallOption which sets the maximum message size the client can send.
 func MaxCallSendMsgSize(s int) CallOption {
-	return beforeCall(func(o *callInfo) error {
-		o.maxSendMessageSize = &s
-		return nil
-	})
+	return MaxSendMsgSizeCallOption{MaxSendMsgSize: s}
 }
 
+// MaxSendMsgSizeCallOption is a CallOption that indicates the maximum message
+// size the client can send.
+// This is an EXPERIMENTAL API.
+type MaxSendMsgSizeCallOption struct {
+	MaxSendMsgSize int
+}
+
+func (o MaxSendMsgSizeCallOption) before(c *callInfo) error {
+	c.maxSendMessageSize = &o.MaxSendMsgSize
+	return nil
+}
+func (o MaxSendMsgSizeCallOption) after(c *callInfo) { return }
+
 // PerRPCCredentials returns a CallOption that sets credentials.PerRPCCredentials
 // for a call.
 func PerRPCCredentials(creds credentials.PerRPCCredentials) CallOption {
-	return beforeCall(func(c *callInfo) error {
-		c.creds = creds
-		return nil
-	})
+	return PerRPCCredsCallOption{Creds: creds}
 }
 
+// PerRPCCredsCallOption is a CallOption that indicates the per-RPC
+// credentials to use for the call.
+// This is an EXPERIMENTAL API.
+type PerRPCCredsCallOption struct {
+	Creds credentials.PerRPCCredentials
+}
+
+func (o PerRPCCredsCallOption) before(c *callInfo) error {
+	c.creds = o.Creds
+	return nil
+}
+func (o PerRPCCredsCallOption) after(c *callInfo) { return }
+
 // UseCompressor returns a CallOption which sets the compressor used when
 // sending the request. If WithCompressor is also set, UseCompressor has
 // higher priority.
 //
 // This API is EXPERIMENTAL.
 func UseCompressor(name string) CallOption {
-	return beforeCall(func(c *callInfo) error {
-		c.compressorType = name
-		return nil
-	})
+	return CompressorCallOption{CompressorType: name}
 }
 
+// CompressorCallOption is a CallOption that indicates the compressor to use.
+// This is an EXPERIMENTAL API.
+type CompressorCallOption struct {
+	CompressorType string
+}
+
+func (o CompressorCallOption) before(c *callInfo) error {
+	c.compressorType = o.CompressorType
+	return nil
+}
+func (o CompressorCallOption) after(c *callInfo) { return }
+
+// CallContentSubtype returns a CallOption that will set the content-subtype
+// for a call. For example, if content-subtype is "json", the Content-Type over
+// the wire will be "application/grpc+json". The content-subtype is converted
+// to lowercase before being included in Content-Type. See Content-Type on
+// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
+// more details.
+//
+// If CallCustomCodec is not also used, the content-subtype will be used to
+// look up the Codec to use in the registry controlled by RegisterCodec. See
+// the documention on RegisterCodec for details on registration. The lookup
+// of content-subtype is case-insensitive. If no such Codec is found, the call
+// will result in an error with code codes.Internal.
+//
+// If CallCustomCodec is also used, that Codec will be used for all request and
+// response messages, with the content-subtype set to the given contentSubtype
+// here for requests.
+func CallContentSubtype(contentSubtype string) CallOption {
+	return ContentSubtypeCallOption{ContentSubtype: strings.ToLower(contentSubtype)}
+}
+
+// ContentSubtypeCallOption is a CallOption that indicates the content-subtype
+// used for marshaling messages.
+// This is an EXPERIMENTAL API.
+type ContentSubtypeCallOption struct {
+	ContentSubtype string
+}
+
+func (o ContentSubtypeCallOption) before(c *callInfo) error {
+	c.contentSubtype = o.ContentSubtype
+	return nil
+}
+func (o ContentSubtypeCallOption) after(c *callInfo) { return }
+
+// CallCustomCodec returns a CallOption that will set the given Codec to be
+// used for all request and response messages for a call. The result of calling
+// String() will be used as the content-subtype in a case-insensitive manner.
+//
+// See Content-Type on
+// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
+// more details. Also see the documentation on RegisterCodec and
+// CallContentSubtype for more details on the interaction between Codec and
+// content-subtype.
+//
+// This function is provided for advanced users; prefer to use only
+// CallContentSubtype to select a registered codec instead.
+func CallCustomCodec(codec Codec) CallOption {
+	return CustomCodecCallOption{Codec: codec}
+}
+
+// CustomCodecCallOption is a CallOption that indicates the codec used for
+// marshaling messages.
+// This is an EXPERIMENTAL API.
+type CustomCodecCallOption struct {
+	Codec Codec
+}
+
+func (o CustomCodecCallOption) before(c *callInfo) error {
+	c.codec = o.Codec
+	return nil
+}
+func (o CustomCodecCallOption) after(c *callInfo) { return }
+
 // The format of the payload: compressed or not?
 type payloadFormat uint8
 
@@ -263,8 +420,8 @@ type parser struct {
 	// error types.
 	r io.Reader
 
-	// The header of a gRPC message. Find more detail
-	// at https://grpc.io/docs/guides/wire.html.
+	// The header of a gRPC message. Find more detail at
+	// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md
 	header [5]byte
 }
 
@@ -293,10 +450,10 @@ func (p *parser) recvMsg(maxReceiveMessageSize int) (pf payloadFormat, msg []byt
 		return pf, nil, nil
 	}
 	if int64(length) > int64(maxInt) {
-		return 0, nil, Errorf(codes.ResourceExhausted, "grpc: received message larger than max length allowed on current machine (%d vs. %d)", length, maxInt)
+		return 0, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max length allowed on current machine (%d vs. %d)", length, maxInt)
 	}
 	if int(length) > maxReceiveMessageSize {
-		return 0, nil, Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", length, maxReceiveMessageSize)
+		return 0, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", length, maxReceiveMessageSize)
 	}
 	// TODO(bradfitz,zhaoq): garbage. reuse buffer after proto decoding instead
 	// of making it for each message:
@@ -313,7 +470,7 @@ func (p *parser) recvMsg(maxReceiveMessageSize int) (pf payloadFormat, msg []byt
 // encode serializes msg and returns a buffer of message header and a buffer of msg.
 // If msg is nil, it generates the message header and an empty msg buffer.
 // TODO(ddyihai): eliminate extra Compressor parameter.
-func encode(c Codec, msg interface{}, cp Compressor, outPayload *stats.OutPayload, compressor encoding.Compressor) ([]byte, []byte, error) {
+func encode(c baseCodec, msg interface{}, cp Compressor, outPayload *stats.OutPayload, compressor encoding.Compressor) ([]byte, []byte, error) {
 	var (
 		b    []byte
 		cbuf *bytes.Buffer
@@ -326,7 +483,7 @@ func encode(c Codec, msg interface{}, cp Compressor, outPayload *stats.OutPayloa
 		var err error
 		b, err = c.Marshal(msg)
 		if err != nil {
-			return nil, nil, Errorf(codes.Internal, "grpc: error while marshaling: %v", err.Error())
+			return nil, nil, status.Errorf(codes.Internal, "grpc: error while marshaling: %v", err.Error())
 		}
 		if outPayload != nil {
 			outPayload.Payload = msg
@@ -340,20 +497,20 @@ func encode(c Codec, msg interface{}, cp Compressor, outPayload *stats.OutPayloa
 			if compressor != nil {
 				z, _ := compressor.Compress(cbuf)
 				if _, err := z.Write(b); err != nil {
-					return nil, nil, Errorf(codes.Internal, "grpc: error while compressing: %v", err.Error())
+					return nil, nil, status.Errorf(codes.Internal, "grpc: error while compressing: %v", err.Error())
 				}
 				z.Close()
 			} else {
 				// If Compressor is not set by UseCompressor, use default Compressor
 				if err := cp.Do(cbuf, b); err != nil {
-					return nil, nil, Errorf(codes.Internal, "grpc: error while compressing: %v", err.Error())
+					return nil, nil, status.Errorf(codes.Internal, "grpc: error while compressing: %v", err.Error())
 				}
 			}
 			b = cbuf.Bytes()
 		}
 	}
 	if uint(len(b)) > math.MaxUint32 {
-		return nil, nil, Errorf(codes.ResourceExhausted, "grpc: message too large (%d bytes)", len(b))
+		return nil, nil, status.Errorf(codes.ResourceExhausted, "grpc: message too large (%d bytes)", len(b))
 	}
 
 	bufHeader := make([]byte, payloadLen+sizeLen)
@@ -390,7 +547,7 @@ func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool
 // For the two compressor parameters, both should not be set, but if they are,
 // dc takes precedence over compressor.
 // TODO(dfawley): wrap the old compressor/decompressor using the new API?
-func recv(p *parser, c Codec, s *transport.Stream, dc Decompressor, m interface{}, maxReceiveMessageSize int, inPayload *stats.InPayload, compressor encoding.Compressor) error {
+func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m interface{}, maxReceiveMessageSize int, inPayload *stats.InPayload, compressor encoding.Compressor) error {
 	pf, d, err := p.recvMsg(maxReceiveMessageSize)
 	if err != nil {
 		return err
@@ -409,26 +566,26 @@ func recv(p *parser, c Codec, s *transport.Stream, dc Decompressor, m interface{
 		if dc != nil {
 			d, err = dc.Do(bytes.NewReader(d))
 			if err != nil {
-				return Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err)
+				return status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err)
 			}
 		} else {
 			dcReader, err := compressor.Decompress(bytes.NewReader(d))
 			if err != nil {
-				return Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err)
+				return status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err)
 			}
 			d, err = ioutil.ReadAll(dcReader)
 			if err != nil {
-				return Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err)
+				return status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err)
 			}
 		}
 	}
 	if len(d) > maxReceiveMessageSize {
 		// TODO: Revisit the error code. Currently keep it consistent with java
 		// implementation.
-		return Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", len(d), maxReceiveMessageSize)
+		return status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", len(d), maxReceiveMessageSize)
 	}
 	if err := c.Unmarshal(d, m); err != nil {
-		return Errorf(codes.Internal, "grpc: failed to unmarshal the received message %v", err)
+		return status.Errorf(codes.Internal, "grpc: failed to unmarshal the received message %v", err)
 	}
 	if inPayload != nil {
 		inPayload.RecvTime = time.Now()
@@ -442,8 +599,6 @@ func recv(p *parser, c Codec, s *transport.Stream, dc Decompressor, m interface{
 
 type rpcInfo struct {
 	failfast      bool
-	bytesSent     bool
-	bytesReceived bool
 }
 
 type rpcInfoContextKey struct{}
@@ -457,18 +612,10 @@ func rpcInfoFromContext(ctx context.Context) (s *rpcInfo, ok bool) {
 	return
 }
 
-func updateRPCInfoInContext(ctx context.Context, s rpcInfo) {
-	if ss, ok := rpcInfoFromContext(ctx); ok {
-		ss.bytesReceived = s.bytesReceived
-		ss.bytesSent = s.bytesSent
-	}
-	return
-}
-
 // Code returns the error code for err if it was produced by the rpc system.
 // Otherwise, it returns codes.Unknown.
 //
-// Deprecated; use status.FromError and Code method instead.
+// Deprecated: use status.FromError and Code method instead.
 func Code(err error) codes.Code {
 	if s, ok := status.FromError(err); ok {
 		return s.Code()
@@ -479,7 +626,7 @@ func Code(err error) codes.Code {
 // ErrorDesc returns the error description of err if it was produced by the rpc system.
 // Otherwise, it returns err.Error() or empty string when err is nil.
 //
-// Deprecated; use status.FromError and Message method instead.
+// Deprecated: use status.FromError and Message method instead.
 func ErrorDesc(err error) string {
 	if s, ok := status.FromError(err); ok {
 		return s.Message()
@@ -490,11 +637,66 @@ func ErrorDesc(err error) string {
 // Errorf returns an error containing an error code and a description;
 // Errorf returns nil if c is OK.
 //
-// Deprecated; use status.Errorf instead.
+// Deprecated: use status.Errorf instead.
 func Errorf(c codes.Code, format string, a ...interface{}) error {
 	return status.Errorf(c, format, a...)
 }
 
+// setCallInfoCodec should only be called after CallOptions have been applied.
+func setCallInfoCodec(c *callInfo) error {
+	if c.codec != nil {
+		// codec was already set by a CallOption; use it.
+		return nil
+	}
+
+	if c.contentSubtype == "" {
+		// No codec specified in CallOptions; use proto by default.
+		c.codec = encoding.GetCodec(proto.Name)
+		return nil
+	}
+
+	// c.contentSubtype is already lowercased in CallContentSubtype
+	c.codec = encoding.GetCodec(c.contentSubtype)
+	if c.codec == nil {
+		return status.Errorf(codes.Internal, "no codec registered for content-subtype %s", c.contentSubtype)
+	}
+	return nil
+}
+
+// parseDialTarget returns the network and address to pass to dialer
+func parseDialTarget(target string) (net string, addr string) {
+	net = "tcp"
+
+	m1 := strings.Index(target, ":")
+	m2 := strings.Index(target, ":/")
+
+	// handle unix:addr which will fail with url.Parse
+	if m1 >= 0 && m2 < 0 {
+		if n := target[0:m1]; n == "unix" {
+			net = n
+			addr = target[m1+1:]
+			return net, addr
+		}
+	}
+	if m2 >= 0 {
+		t, err := url.Parse(target)
+		if err != nil {
+			return net, target
+		}
+		scheme := t.Scheme
+		addr = t.Path
+		if scheme == "unix" {
+			net = scheme
+			if addr == "" {
+				addr = t.Host
+			}
+			return net, addr
+		}
+	}
+
+	return net, target
+}
+
 // The SupportPackageIsVersion variables are referenced from generated protocol
 // buffer files to ensure compatibility with the gRPC version used. The latest
 // support package version is 5.
@@ -510,6 +712,6 @@ const (
 )
 
 // Version is the current grpc version.
-const Version = "1.8.0"
+const Version = "1.11.3"
 
 const grpcUA = "grpc-go/" + Version
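An illustrative client-side sketch (not part of the vendored diff) of the call options that are now backed by exported structs, plus the new gzip-level constructor. The method name, the use of grpc.Invoke with opaque request/response values, and the size limit are assumptions for the example only.

package callopts

import (
	"compress/gzip"
	"context"

	"google.golang.org/grpc"
	"google.golang.org/grpc/metadata"
)

// invokeWithOptions issues a unary RPC and collects header/trailer metadata.
func invokeWithOptions(ctx context.Context, cc *grpc.ClientConn, req, resp interface{}) (metadata.MD, metadata.MD, error) {
	var header, trailer metadata.MD
	err := grpc.Invoke(ctx, "/example.Service/Method", req, resp, cc,
		grpc.Header(&header),   // HeaderCallOption under the hood; filled after the RPC completes
		grpc.Trailer(&trailer), // TrailerCallOption under the hood
		grpc.FailFast(false),   // FailFastCallOption
		grpc.MaxCallRecvMsgSize(4<<20),
	)
	return header, trailer, err
}

// newFastCompressor uses the new constructor to pick an explicit gzip level;
// the returned Compressor would be installed at dial time via grpc.WithCompressor.
func newFastCompressor() (grpc.Compressor, error) {
	return grpc.NewGZIPCompressorWithLevel(gzip.BestSpeed)
}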
221 vendor/google.golang.org/grpc/server.go generated vendored
@@ -40,6 +40,7 @@ import (
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/credentials"
 	"google.golang.org/grpc/encoding"
+	"google.golang.org/grpc/encoding/proto"
 	"google.golang.org/grpc/grpclog"
 	"google.golang.org/grpc/internal"
 	"google.golang.org/grpc/keepalive"
@@ -92,11 +93,7 @@ type Server struct {
 	conns  map[io.Closer]bool
 	serve  bool
 	drain  bool
-	ctx    context.Context
-	cancel context.CancelFunc
-	// A CondVar to let GracefulStop() blocks until all the pending RPCs are finished
-	// and all the transport goes away.
-	cv     *sync.Cond
+	cv     *sync.Cond // signaled when connections close for GracefulStop
 	m      map[string]*service // service name -> service info
 	events trace.EventLog
 
@@ -104,11 +101,12 @@ type Server struct {
 	done     chan struct{}
 	quitOnce sync.Once
 	doneOnce sync.Once
+	serveWG  sync.WaitGroup // counts active Serve goroutines for GracefulStop
 }
 
 type options struct {
 	creds    credentials.TransportCredentials
-	codec    Codec
+	codec    baseCodec
 	cp       Compressor
 	dc       Decompressor
 	unaryInt UnaryServerInterceptor
@@ -185,6 +183,8 @@ func KeepaliveEnforcementPolicy(kep keepalive.EnforcementPolicy) ServerOption {
 }
 
 // CustomCodec returns a ServerOption that sets a codec for message marshaling and unmarshaling.
+//
+// This will override any lookups by content-subtype for Codecs registered with RegisterCodec.
 func CustomCodec(codec Codec) ServerOption {
 	return func(o *options) {
 		o.codec = codec
@@ -330,10 +330,6 @@ func NewServer(opt ...ServerOption) *Server {
 	for _, o := range opt {
 		o(&opts)
 	}
-	if opts.codec == nil {
-		// Set the default codec.
-		opts.codec = protoCodec{}
-	}
 	s := &Server{
 		lis:   make(map[net.Listener]bool),
 		opts:  opts,
@@ -343,7 +339,6 @@ func NewServer(opt ...ServerOption) *Server {
 		done:  make(chan struct{}),
 	}
 	s.cv = sync.NewCond(&s.mu)
-	s.ctx, s.cancel = context.WithCancel(context.Background())
 	if EnableTracing {
 		_, file, line, _ := runtime.Caller(1)
 		s.events = trace.NewEventLog("grpc.Server", fmt.Sprintf("%s:%d", file, line))
@@ -474,10 +469,23 @@ func (s *Server) Serve(lis net.Listener) error {
 	s.printf("serving")
 	s.serve = true
 	if s.lis == nil {
+		// Serve called after Stop or GracefulStop.
 		s.mu.Unlock()
 		lis.Close()
 		return ErrServerStopped
 	}
+
+	s.serveWG.Add(1)
+	defer func() {
+		s.serveWG.Done()
+		select {
+		// Stop or GracefulStop called; block until done and return nil.
+		case <-s.quit:
+			<-s.done
+		default:
+		}
+	}()
+
 	s.lis[lis] = true
 	s.mu.Unlock()
 	defer func() {
@@ -511,33 +519,39 @@ func (s *Server) Serve(lis net.Listener) error {
 			timer := time.NewTimer(tempDelay)
 			select {
 			case <-timer.C:
-			case <-s.ctx.Done():
-			}
+			case <-s.quit:
 				timer.Stop()
+				return nil
+			}
 			continue
 		}
 		s.mu.Lock()
 		s.printf("done serving; Accept = %v", err)
 		s.mu.Unlock()
+
+		// If Stop or GracefulStop is called, block until they are done and return nil
 		select {
 		case <-s.quit:
+			<-s.done
 			return nil
 		default:
 		}
 		return err
 		}
 		tempDelay = 0
-		// Start a new goroutine to deal with rawConn
-		// so we don't stall this Accept loop goroutine.
-		go s.handleRawConn(rawConn)
+		// Start a new goroutine to deal with rawConn so we don't stall this Accept
+		// loop goroutine.
+		//
+		// Make sure we account for the goroutine so GracefulStop doesn't nil out
+		// s.conns before this conn can be added.
+		s.serveWG.Add(1)
+		go func() {
+			s.handleRawConn(rawConn)
+			s.serveWG.Done()
+		}()
 	}
 }
 
-// handleRawConn is run in its own goroutine and handles a just-accepted
-// connection that has not had any I/O performed on it yet.
+// handleRawConn forks a goroutine to handle a just-accepted connection that
+// has not had any I/O performed on it yet.
 func (s *Server) handleRawConn(rawConn net.Conn) {
 	rawConn.SetDeadline(time.Now().Add(s.opts.connectionTimeout))
 	conn, authInfo, err := s.useTransportAuthenticator(rawConn)
@@ -562,17 +576,28 @@ func (s *Server) handleRawConn(rawConn net.Conn) {
 	}
 	s.mu.Unlock()
 
+	var serve func()
+	c := conn.(io.Closer)
 	if s.opts.useHandlerImpl {
-		rawConn.SetDeadline(time.Time{})
-		s.serveUsingHandler(conn)
+		serve = func() { s.serveUsingHandler(conn) }
 	} else {
+		// Finish handshaking (HTTP2)
 		st := s.newHTTP2Transport(conn, authInfo)
 		if st == nil {
 			return
 		}
-		rawConn.SetDeadline(time.Time{})
-		s.serveStreams(st)
+		c = st
+		serve = func() { s.serveStreams(st) }
 	}
+
+	rawConn.SetDeadline(time.Time{})
+	if !s.addConn(c) {
+		return
+	}
+	go func() {
+		serve()
+		s.removeConn(c)
+	}()
 }
 
 // newHTTP2Transport sets up a http/2 transport (using the
@@ -599,15 +624,10 @@ func (s *Server) newHTTP2Transport(c net.Conn, authInfo credentials.AuthInfo) tr
 		grpclog.Warningln("grpc: Server.Serve failed to create ServerTransport: ", err)
 		return nil
 	}
-	if !s.addConn(st) {
-		st.Close()
-		return nil
-	}
 	return st
 }
 
 func (s *Server) serveStreams(st transport.ServerTransport) {
-	defer s.removeConn(st)
 	defer st.Close()
 	var wg sync.WaitGroup
 	st.HandleStreams(func(stream *transport.Stream) {
@@ -641,11 +661,6 @@ var _ http.Handler = (*Server)(nil)
 //
 // conn is the *tls.Conn that's already been authenticated.
 func (s *Server) serveUsingHandler(conn net.Conn) {
-	if !s.addConn(conn) {
-		conn.Close()
-		return
-	}
-	defer s.removeConn(conn)
 	h2s := &http2.Server{
 		MaxConcurrentStreams: s.opts.maxConcurrentStreams,
 	}
@@ -679,13 +694,12 @@ func (s *Server) serveUsingHandler(conn net.Conn) {
 // available through grpc-go's HTTP/2 server, and it is currently EXPERIMENTAL
 // and subject to change.
 func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
-	st, err := transport.NewServerHandlerTransport(w, r)
+	st, err := transport.NewServerHandlerTransport(w, r, s.opts.statsHandler)
 	if err != nil {
 		http.Error(w, err.Error(), http.StatusInternalServerError)
 		return
 	}
 	if !s.addConn(st) {
-		st.Close()
 		return
 	}
 	defer s.removeConn(st)
@@ -715,9 +729,15 @@ func (s *Server) traceInfo(st transport.ServerTransport, stream *transport.Strea
 func (s *Server) addConn(c io.Closer) bool {
 	s.mu.Lock()
 	defer s.mu.Unlock()
-	if s.conns == nil || s.drain {
+	if s.conns == nil {
+		c.Close()
 		return false
 	}
+	if s.drain {
+		// Transport added after we drained our existing conns: drain it
+		// immediately.
+		c.(transport.ServerTransport).Drain()
+	}
 	s.conns[c] = true
 	return true
 }
@@ -738,7 +758,7 @@ func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Str
 	if s.opts.statsHandler != nil {
 		outPayload = &stats.OutPayload{}
 	}
-	hdr, data, err := encode(s.opts.codec, msg, cp, outPayload, comp)
+	hdr, data, err := encode(s.getCodec(stream.ContentSubtype()), msg, cp, outPayload, comp)
 	if err != nil {
 		grpclog.Errorln("grpc: server failed to encode response: ", err)
|
grpclog.Errorln("grpc: server failed to encode response: ", err)
|
||||||
return err
|
return err
|
||||||
@@ -757,12 +777,14 @@ func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Str
|
|||||||
func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, md *MethodDesc, trInfo *traceInfo) (err error) {
|
func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, md *MethodDesc, trInfo *traceInfo) (err error) {
|
||||||
sh := s.opts.statsHandler
|
sh := s.opts.statsHandler
|
||||||
if sh != nil {
|
if sh != nil {
|
||||||
|
beginTime := time.Now()
|
||||||
begin := &stats.Begin{
|
begin := &stats.Begin{
|
||||||
BeginTime: time.Now(),
|
BeginTime: beginTime,
|
||||||
}
|
}
|
||||||
sh.HandleRPC(stream.Context(), begin)
|
sh.HandleRPC(stream.Context(), begin)
|
||||||
defer func() {
|
defer func() {
|
||||||
end := &stats.End{
|
end := &stats.End{
|
||||||
|
BeginTime: beginTime,
|
||||||
EndTime: time.Now(),
|
EndTime: time.Now(),
|
||||||
}
|
}
|
||||||
if err != nil && err != io.EOF {
|
if err != nil && err != io.EOF {
|
||||||
@@ -826,7 +848,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if err == io.ErrUnexpectedEOF {
|
if err == io.ErrUnexpectedEOF {
|
||||||
err = Errorf(codes.Internal, io.ErrUnexpectedEOF.Error())
|
err = status.Errorf(codes.Internal, io.ErrUnexpectedEOF.Error())
|
||||||
}
|
}
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if st, ok := status.FromError(err); ok {
|
if st, ok := status.FromError(err); ok {
|
||||||
@@ -868,13 +890,13 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
|
|||||||
if dc != nil {
|
if dc != nil {
|
||||||
req, err = dc.Do(bytes.NewReader(req))
|
req, err = dc.Do(bytes.NewReader(req))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return Errorf(codes.Internal, err.Error())
|
return status.Errorf(codes.Internal, err.Error())
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
tmp, _ := decomp.Decompress(bytes.NewReader(req))
|
tmp, _ := decomp.Decompress(bytes.NewReader(req))
|
||||||
req, err = ioutil.ReadAll(tmp)
|
req, err = ioutil.ReadAll(tmp)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err)
|
return status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -883,7 +905,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
|
|||||||
// java implementation.
|
// java implementation.
|
||||||
return status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", len(req), s.opts.maxReceiveMessageSize)
|
return status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", len(req), s.opts.maxReceiveMessageSize)
|
||||||
}
|
}
|
||||||
if err := s.opts.codec.Unmarshal(req, v); err != nil {
|
if err := s.getCodec(stream.ContentSubtype()).Unmarshal(req, v); err != nil {
|
||||||
return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err)
|
return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err)
|
||||||
}
|
}
|
||||||
if inPayload != nil {
|
if inPayload != nil {
|
||||||
@@ -897,12 +919,13 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
|
|||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
reply, appErr := md.Handler(srv.server, stream.Context(), df, s.opts.unaryInt)
|
ctx := NewContextWithServerTransportStream(stream.Context(), stream)
|
||||||
|
reply, appErr := md.Handler(srv.server, ctx, df, s.opts.unaryInt)
|
||||||
if appErr != nil {
|
if appErr != nil {
|
||||||
appStatus, ok := status.FromError(appErr)
|
appStatus, ok := status.FromError(appErr)
|
||||||
if !ok {
|
if !ok {
|
||||||
// Convert appErr if it is not a grpc status error.
|
// Convert appErr if it is not a grpc status error.
|
||||||
appErr = status.Error(convertCode(appErr), appErr.Error())
|
appErr = status.Error(codes.Unknown, appErr.Error())
|
||||||
appStatus, _ = status.FromError(appErr)
|
appStatus, _ = status.FromError(appErr)
|
||||||
}
|
}
|
||||||
if trInfo != nil {
|
if trInfo != nil {
|
||||||
@@ -957,12 +980,14 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
|
|||||||
func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, sd *StreamDesc, trInfo *traceInfo) (err error) {
|
func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, sd *StreamDesc, trInfo *traceInfo) (err error) {
|
||||||
sh := s.opts.statsHandler
|
sh := s.opts.statsHandler
|
||||||
if sh != nil {
|
if sh != nil {
|
||||||
|
beginTime := time.Now()
|
||||||
begin := &stats.Begin{
|
begin := &stats.Begin{
|
||||||
BeginTime: time.Now(),
|
BeginTime: beginTime,
|
||||||
}
|
}
|
||||||
sh.HandleRPC(stream.Context(), begin)
|
sh.HandleRPC(stream.Context(), begin)
|
||||||
defer func() {
|
defer func() {
|
||||||
end := &stats.End{
|
end := &stats.End{
|
||||||
|
BeginTime: beginTime,
|
||||||
EndTime: time.Now(),
|
EndTime: time.Now(),
|
||||||
}
|
}
|
||||||
if err != nil && err != io.EOF {
|
if err != nil && err != io.EOF {
|
||||||
@@ -971,11 +996,13 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp
|
|||||||
sh.HandleRPC(stream.Context(), end)
|
sh.HandleRPC(stream.Context(), end)
|
||||||
}()
|
}()
|
||||||
}
|
}
|
||||||
|
ctx := NewContextWithServerTransportStream(stream.Context(), stream)
|
||||||
ss := &serverStream{
|
ss := &serverStream{
|
||||||
|
ctx: ctx,
|
||||||
t: t,
|
t: t,
|
||||||
s: stream,
|
s: stream,
|
||||||
p: &parser{r: stream},
|
p: &parser{r: stream},
|
||||||
codec: s.opts.codec,
|
codec: s.getCodec(stream.ContentSubtype()),
|
||||||
maxReceiveMessageSize: s.opts.maxReceiveMessageSize,
|
maxReceiveMessageSize: s.opts.maxReceiveMessageSize,
|
||||||
maxSendMessageSize: s.opts.maxSendMessageSize,
|
maxSendMessageSize: s.opts.maxSendMessageSize,
|
||||||
trInfo: trInfo,
|
trInfo: trInfo,
|
||||||
@@ -1045,7 +1072,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp
|
|||||||
case transport.StreamError:
|
case transport.StreamError:
|
||||||
appStatus = status.New(err.Code, err.Desc)
|
appStatus = status.New(err.Code, err.Desc)
|
||||||
default:
|
default:
|
||||||
appStatus = status.New(convertCode(appErr), appErr.Error())
|
appStatus = status.New(codes.Unknown, appErr.Error())
|
||||||
}
|
}
|
||||||
appErr = appStatus.Err()
|
appErr = appStatus.Err()
|
||||||
}
|
}
|
||||||
@@ -1065,7 +1092,6 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp
|
|||||||
ss.mu.Unlock()
|
ss.mu.Unlock()
|
||||||
}
|
}
|
||||||
return t.WriteStatus(ss.s, status.New(codes.OK, ""))
|
return t.WriteStatus(ss.s, status.New(codes.OK, ""))
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream, trInfo *traceInfo) {
|
func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream, trInfo *traceInfo) {
|
||||||
@@ -1147,6 +1173,40 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// The key to save ServerTransportStream in the context.
|
||||||
|
type streamKey struct{}
|
||||||
|
|
||||||
|
// NewContextWithServerTransportStream creates a new context from ctx and
|
||||||
|
// attaches stream to it.
|
||||||
|
//
|
||||||
|
// This API is EXPERIMENTAL.
|
||||||
|
func NewContextWithServerTransportStream(ctx context.Context, stream ServerTransportStream) context.Context {
|
||||||
|
return context.WithValue(ctx, streamKey{}, stream)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ServerTransportStream is a minimal interface that a transport stream must
|
||||||
|
// implement. This can be used to mock an actual transport stream for tests of
|
||||||
|
// handler code that use, for example, grpc.SetHeader (which requires some
|
||||||
|
// stream to be in context).
|
||||||
|
//
|
||||||
|
// See also NewContextWithServerTransportStream.
|
||||||
|
//
|
||||||
|
// This API is EXPERIMENTAL.
|
||||||
|
type ServerTransportStream interface {
|
||||||
|
Method() string
|
||||||
|
SetHeader(md metadata.MD) error
|
||||||
|
SendHeader(md metadata.MD) error
|
||||||
|
SetTrailer(md metadata.MD) error
|
||||||
|
}
|
||||||
|
|
||||||
|
// serverStreamFromContext returns the server stream saved in ctx. Returns
|
||||||
|
// nil if the given context has no stream associated with it (which implies
|
||||||
|
// it is not an RPC invocation context).
|
||||||
|
func serverTransportStreamFromContext(ctx context.Context) ServerTransportStream {
|
||||||
|
s, _ := ctx.Value(streamKey{}).(ServerTransportStream)
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
// Stop stops the gRPC server. It immediately closes all open
|
// Stop stops the gRPC server. It immediately closes all open
|
||||||
// connections and listeners.
|
// connections and listeners.
|
||||||
// It cancels all active RPCs on the server side and the corresponding
|
// It cancels all active RPCs on the server side and the corresponding
|
||||||
@@ -1158,6 +1218,7 @@ func (s *Server) Stop() {
|
|||||||
})
|
})
|
||||||
|
|
||||||
defer func() {
|
defer func() {
|
||||||
|
s.serveWG.Wait()
|
||||||
s.doneOnce.Do(func() {
|
s.doneOnce.Do(func() {
|
||||||
close(s.done)
|
close(s.done)
|
||||||
})
|
})
|
||||||
@@ -1180,7 +1241,6 @@ func (s *Server) Stop() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
s.mu.Lock()
|
s.mu.Lock()
|
||||||
s.cancel()
|
|
||||||
if s.events != nil {
|
if s.events != nil {
|
||||||
s.events.Finish()
|
s.events.Finish()
|
||||||
s.events = nil
|
s.events = nil
|
||||||
@@ -1203,21 +1263,27 @@ func (s *Server) GracefulStop() {
|
|||||||
}()
|
}()
|
||||||
|
|
||||||
s.mu.Lock()
|
s.mu.Lock()
|
||||||
defer s.mu.Unlock()
|
|
||||||
if s.conns == nil {
|
if s.conns == nil {
|
||||||
|
s.mu.Unlock()
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
for lis := range s.lis {
|
for lis := range s.lis {
|
||||||
lis.Close()
|
lis.Close()
|
||||||
}
|
}
|
||||||
s.lis = nil
|
s.lis = nil
|
||||||
s.cancel()
|
|
||||||
if !s.drain {
|
if !s.drain {
|
||||||
for c := range s.conns {
|
for c := range s.conns {
|
||||||
c.(transport.ServerTransport).Drain()
|
c.(transport.ServerTransport).Drain()
|
||||||
}
|
}
|
||||||
s.drain = true
|
s.drain = true
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Wait for serving threads to be ready to exit. Only then can we be sure no
|
||||||
|
// new conns will be created.
|
||||||
|
s.mu.Unlock()
|
||||||
|
s.serveWG.Wait()
|
||||||
|
s.mu.Lock()
|
||||||
|
|
||||||
for len(s.conns) != 0 {
|
for len(s.conns) != 0 {
|
||||||
s.cv.Wait()
|
s.cv.Wait()
|
||||||
}
|
}
|
||||||
@@ -1226,6 +1292,7 @@ func (s *Server) GracefulStop() {
|
|||||||
s.events.Finish()
|
s.events.Finish()
|
||||||
s.events = nil
|
s.events = nil
|
||||||
}
|
}
|
||||||
|
s.mu.Unlock()
|
||||||
}
|
}
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
@@ -1234,6 +1301,22 @@ func init() {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// contentSubtype must be lowercase
|
||||||
|
// cannot return nil
|
||||||
|
func (s *Server) getCodec(contentSubtype string) baseCodec {
|
||||||
|
if s.opts.codec != nil {
|
||||||
|
return s.opts.codec
|
||||||
|
}
|
||||||
|
if contentSubtype == "" {
|
||||||
|
return encoding.GetCodec(proto.Name)
|
||||||
|
}
|
||||||
|
codec := encoding.GetCodec(contentSubtype)
|
||||||
|
if codec == nil {
|
||||||
|
return encoding.GetCodec(proto.Name)
|
||||||
|
}
|
||||||
|
return codec
|
||||||
|
}
|
||||||
|
|
||||||
// SetHeader sets the header metadata.
|
// SetHeader sets the header metadata.
|
||||||
// When called multiple times, all the provided metadata will be merged.
|
// When called multiple times, all the provided metadata will be merged.
|
||||||
// All the metadata will be sent out when one of the following happens:
|
// All the metadata will be sent out when one of the following happens:
|
||||||
@@ -1244,9 +1327,9 @@ func SetHeader(ctx context.Context, md metadata.MD) error {
|
|||||||
if md.Len() == 0 {
|
if md.Len() == 0 {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
stream, ok := transport.StreamFromContext(ctx)
|
stream := serverTransportStreamFromContext(ctx)
|
||||||
if !ok {
|
if stream == nil {
|
||||||
return Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx)
|
return status.Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx)
|
||||||
}
|
}
|
||||||
return stream.SetHeader(md)
|
return stream.SetHeader(md)
|
||||||
}
|
}
|
||||||
@@ -1254,15 +1337,11 @@ func SetHeader(ctx context.Context, md metadata.MD) error {
|
|||||||
// SendHeader sends header metadata. It may be called at most once.
|
// SendHeader sends header metadata. It may be called at most once.
|
||||||
// The provided md and headers set by SetHeader() will be sent.
|
// The provided md and headers set by SetHeader() will be sent.
|
||||||
func SendHeader(ctx context.Context, md metadata.MD) error {
|
func SendHeader(ctx context.Context, md metadata.MD) error {
|
||||||
stream, ok := transport.StreamFromContext(ctx)
|
stream := serverTransportStreamFromContext(ctx)
|
||||||
if !ok {
|
if stream == nil {
|
||||||
return Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx)
|
return status.Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx)
|
||||||
}
|
}
|
||||||
t := stream.ServerTransport()
|
if err := stream.SendHeader(md); err != nil {
|
||||||
if t == nil {
|
|
||||||
grpclog.Fatalf("grpc: SendHeader: %v has no ServerTransport to send header metadata.", stream)
|
|
||||||
}
|
|
||||||
if err := t.WriteHeader(stream, md); err != nil {
|
|
||||||
return toRPCErr(err)
|
return toRPCErr(err)
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
@@ -1274,9 +1353,19 @@ func SetTrailer(ctx context.Context, md metadata.MD) error {
|
|||||||
if md.Len() == 0 {
|
if md.Len() == 0 {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
stream, ok := transport.StreamFromContext(ctx)
|
stream := serverTransportStreamFromContext(ctx)
|
||||||
if !ok {
|
if stream == nil {
|
||||||
return Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx)
|
return status.Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx)
|
||||||
}
|
}
|
||||||
return stream.SetTrailer(md)
|
return stream.SetTrailer(md)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Method returns the method string for the server context. The returned
|
||||||
|
// string is in the format of "/service/method".
|
||||||
|
func Method(ctx context.Context) (string, bool) {
|
||||||
|
s := serverTransportStreamFromContext(ctx)
|
||||||
|
if s == nil {
|
||||||
|
return "", false
|
||||||
|
}
|
||||||
|
return s.Method(), true
|
||||||
|
}
|
||||||
|
|||||||
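The server.go changes above add NewContextWithServerTransportStream, the ServerTransportStream interface and grpc.Method, mainly so handler code that calls grpc.SetHeader or needs the method name can be exercised without a real transport. A minimal sketch, assuming a hypothetical mockStream type that is not part of the vendored code:

```go
package main

import (
	"fmt"

	"golang.org/x/net/context"
	"google.golang.org/grpc"
	"google.golang.org/grpc/metadata"
)

// mockStream is a hypothetical ServerTransportStream for tests; it only
// records what the handler sets.
type mockStream struct {
	method string
	header metadata.MD
}

func (m *mockStream) Method() string                  { return m.method }
func (m *mockStream) SetHeader(md metadata.MD) error  { m.header = metadata.Join(m.header, md); return nil }
func (m *mockStream) SendHeader(md metadata.MD) error { return m.SetHeader(md) }
func (m *mockStream) SetTrailer(md metadata.MD) error { return nil }

func main() {
	st := &mockStream{method: "/acme.Greeter/SayHello"}
	ctx := grpc.NewContextWithServerTransportStream(context.Background(), st)

	// Handler-side helpers now resolve the stream from the context.
	_ = grpc.SetHeader(ctx, metadata.Pairs("x-request-id", "42"))
	if m, ok := grpc.Method(ctx); ok {
		fmt.Println(m, st.header)
	}
}
```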
61 vendor/google.golang.org/grpc/service_config.go generated vendored
@@ -20,6 +20,9 @@ package grpc
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"google.golang.org/grpc/grpclog"
|
"google.golang.org/grpc/grpclog"
|
||||||
@@ -70,12 +73,48 @@ type ServiceConfig struct {
|
|||||||
Methods map[string]MethodConfig
|
Methods map[string]MethodConfig
|
||||||
}
|
}
|
||||||
|
|
||||||
func parseTimeout(t *string) (*time.Duration, error) {
|
func parseDuration(s *string) (*time.Duration, error) {
|
||||||
if t == nil {
|
if s == nil {
|
||||||
return nil, nil
|
return nil, nil
|
||||||
}
|
}
|
||||||
d, err := time.ParseDuration(*t)
|
if !strings.HasSuffix(*s, "s") {
|
||||||
return &d, err
|
return nil, fmt.Errorf("malformed duration %q", *s)
|
||||||
|
}
|
||||||
|
ss := strings.SplitN((*s)[:len(*s)-1], ".", 3)
|
||||||
|
if len(ss) > 2 {
|
||||||
|
return nil, fmt.Errorf("malformed duration %q", *s)
|
||||||
|
}
|
||||||
|
// hasDigits is set if either the whole or fractional part of the number is
|
||||||
|
// present, since both are optional but one is required.
|
||||||
|
hasDigits := false
|
||||||
|
var d time.Duration
|
||||||
|
if len(ss[0]) > 0 {
|
||||||
|
i, err := strconv.ParseInt(ss[0], 10, 32)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("malformed duration %q: %v", *s, err)
|
||||||
|
}
|
||||||
|
d = time.Duration(i) * time.Second
|
||||||
|
hasDigits = true
|
||||||
|
}
|
||||||
|
if len(ss) == 2 && len(ss[1]) > 0 {
|
||||||
|
if len(ss[1]) > 9 {
|
||||||
|
return nil, fmt.Errorf("malformed duration %q", *s)
|
||||||
|
}
|
||||||
|
f, err := strconv.ParseInt(ss[1], 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("malformed duration %q: %v", *s, err)
|
||||||
|
}
|
||||||
|
for i := 9; i > len(ss[1]); i-- {
|
||||||
|
f *= 10
|
||||||
|
}
|
||||||
|
d += time.Duration(f)
|
||||||
|
hasDigits = true
|
||||||
|
}
|
||||||
|
if !hasDigits {
|
||||||
|
return nil, fmt.Errorf("malformed duration %q", *s)
|
||||||
|
}
|
||||||
|
|
||||||
|
return &d, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
type jsonName struct {
|
type jsonName struct {
|
||||||
@@ -128,7 +167,7 @@ func parseServiceConfig(js string) (ServiceConfig, error) {
|
|||||||
if m.Name == nil {
|
if m.Name == nil {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
d, err := parseTimeout(m.Timeout)
|
d, err := parseDuration(m.Timeout)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
grpclog.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err)
|
grpclog.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err)
|
||||||
return ServiceConfig{}, err
|
return ServiceConfig{}, err
|
||||||
@@ -182,18 +221,6 @@ func getMaxSize(mcMax, doptMax *int, defaultVal int) *int {
|
|||||||
return doptMax
|
return doptMax
|
||||||
}
|
}
|
||||||
|
|
||||||
func newBool(b bool) *bool {
|
|
||||||
return &b
|
|
||||||
}
|
|
||||||
|
|
||||||
func newInt(b int) *int {
|
func newInt(b int) *int {
|
||||||
return &b
|
return &b
|
||||||
}
|
}
|
||||||
|
|
||||||
func newDuration(b time.Duration) *time.Duration {
|
|
||||||
return &b
|
|
||||||
}
|
|
||||||
|
|
||||||
func newString(b string) *string {
|
|
||||||
return &b
|
|
||||||
}
|
|
||||||
|
|||||||
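The switch from time.ParseDuration to the hand-rolled parseDuration reflects that service-config timeouts use the proto3 JSON duration form (decimal seconds with an "s" suffix, e.g. "1.5s"), not Go duration syntax. A simplified stand-in for illustration only; it is not the vendored function and uses float parsing instead of the exact integer grammar:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
	"time"
)

// parseProtoDuration accepts decimal seconds with a trailing "s",
// loosely mirroring what the vendored parseDuration allows for
// MethodConfig.Timeout values in service config JSON.
func parseProtoDuration(s string) (time.Duration, error) {
	if !strings.HasSuffix(s, "s") {
		return 0, fmt.Errorf("malformed duration %q", s)
	}
	secs, err := strconv.ParseFloat(strings.TrimSuffix(s, "s"), 64)
	if err != nil {
		return 0, fmt.Errorf("malformed duration %q: %v", s, err)
	}
	return time.Duration(secs * float64(time.Second)), nil
}

func main() {
	for _, in := range []string{"1.5s", "0.000000001s", "1500ms"} {
		d, err := parseProtoDuration(in)
		fmt.Printf("%-14s -> %v, %v\n", in, d, err) // "1500ms" is rejected; Go-style suffixes are not accepted
	}
}
```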
2 vendor/google.golang.org/grpc/stats/stats.go generated vendored
@@ -169,6 +169,8 @@ func (s *OutTrailer) isRPCStats() {}
|
|||||||
type End struct {
|
type End struct {
|
||||||
// Client is true if this End is from client side.
|
// Client is true if this End is from client side.
|
||||||
Client bool
|
Client bool
|
||||||
|
// BeginTime is the time when the RPC began.
|
||||||
|
BeginTime time.Time
|
||||||
// EndTime is the time when the RPC ends.
|
// EndTime is the time when the RPC ends.
|
||||||
EndTime time.Time
|
EndTime time.Time
|
||||||
// Error is the error the RPC ended with. It is an error generated from
|
// Error is the error the RPC ended with. It is an error generated from
|
||||||
|
|||||||
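stats.End now carries BeginTime on both the client and server side, so a stats handler can compute per-RPC latency from the End event alone. A minimal sketch of such a handler; the logging is illustrative:

```go
package main

import (
	"log"

	"golang.org/x/net/context"
	"google.golang.org/grpc"
	"google.golang.org/grpc/stats"
)

// latencyHandler reports RPC duration using the new End.BeginTime field.
type latencyHandler struct{}

func (latencyHandler) TagRPC(ctx context.Context, _ *stats.RPCTagInfo) context.Context { return ctx }
func (latencyHandler) HandleRPC(_ context.Context, s stats.RPCStats) {
	if end, ok := s.(*stats.End); ok {
		log.Printf("rpc took %v (err=%v)", end.EndTime.Sub(end.BeginTime), end.Error)
	}
}
func (latencyHandler) TagConn(ctx context.Context, _ *stats.ConnTagInfo) context.Context { return ctx }
func (latencyHandler) HandleConn(context.Context, stats.ConnStats)                       {}

func main() {
	// Attach to a server; grpc.WithStatsHandler does the same for a client.
	_ = grpc.NewServer(grpc.StatsHandler(latencyHandler{}))
}
```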
31 vendor/google.golang.org/grpc/status/status.go generated vendored
@@ -46,7 +46,7 @@ func (se *statusError) Error() string {
|
|||||||
return fmt.Sprintf("rpc error: code = %s desc = %s", codes.Code(p.GetCode()), p.GetMessage())
|
return fmt.Sprintf("rpc error: code = %s desc = %s", codes.Code(p.GetCode()), p.GetMessage())
|
||||||
}
|
}
|
||||||
|
|
||||||
func (se *statusError) status() *Status {
|
func (se *statusError) GRPCStatus() *Status {
|
||||||
return &Status{s: (*spb.Status)(se)}
|
return &Status{s: (*spb.Status)(se)}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -120,15 +120,23 @@ func FromProto(s *spb.Status) *Status {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// FromError returns a Status representing err if it was produced from this
|
// FromError returns a Status representing err if it was produced from this
|
||||||
// package, otherwise it returns nil, false.
|
// package or has a method `GRPCStatus() *Status`. Otherwise, ok is false and a
|
||||||
|
// Status is returned with codes.Unknown and the original error message.
|
||||||
func FromError(err error) (s *Status, ok bool) {
|
func FromError(err error) (s *Status, ok bool) {
|
||||||
if err == nil {
|
if err == nil {
|
||||||
return &Status{s: &spb.Status{Code: int32(codes.OK)}}, true
|
return &Status{s: &spb.Status{Code: int32(codes.OK)}}, true
|
||||||
}
|
}
|
||||||
if s, ok := err.(*statusError); ok {
|
if se, ok := err.(interface{ GRPCStatus() *Status }); ok {
|
||||||
return s.status(), true
|
return se.GRPCStatus(), true
|
||||||
}
|
}
|
||||||
return nil, false
|
return New(codes.Unknown, err.Error()), false
|
||||||
|
}
|
||||||
|
|
||||||
|
// Convert is a convenience function which removes the need to handle the
|
||||||
|
// boolean return value from FromError.
|
||||||
|
func Convert(err error) *Status {
|
||||||
|
s, _ := FromError(err)
|
||||||
|
return s
|
||||||
}
|
}
|
||||||
|
|
||||||
// WithDetails returns a new status with the provided details messages appended to the status.
|
// WithDetails returns a new status with the provided details messages appended to the status.
|
||||||
@@ -166,3 +174,16 @@ func (s *Status) Details() []interface{} {
|
|||||||
}
|
}
|
||||||
return details
|
return details
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Code returns the Code of the error if it is a Status error, codes.OK if err
|
||||||
|
// is nil, or codes.Unknown otherwise.
|
||||||
|
func Code(err error) codes.Code {
|
||||||
|
// Don't use FromError to avoid allocation of OK status.
|
||||||
|
if err == nil {
|
||||||
|
return codes.OK
|
||||||
|
}
|
||||||
|
if se, ok := err.(interface{ GRPCStatus() *Status }); ok {
|
||||||
|
return se.GRPCStatus().Code()
|
||||||
|
}
|
||||||
|
return codes.Unknown
|
||||||
|
}
|
||||||
|
|||||||
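With this update FromError no longer returns nil for foreign errors, and the new Convert and Code helpers plus the GRPCStatus() interface check make status handling less error-prone. A short sketch of the resulting call patterns:

```go
package main

import (
	"errors"
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func main() {
	// Errors built by the status package round-trip cleanly.
	err := status.Error(codes.NotFound, "no such key")
	fmt.Println(status.Code(err))              // NotFound
	fmt.Println(status.Convert(err).Message()) // no such key

	// A plain error is wrapped as codes.Unknown instead of being dropped.
	s, ok := status.FromError(errors.New("boom"))
	fmt.Println(s.Code(), s.Message(), ok) // Unknown boom false

	// nil maps to codes.OK.
	fmt.Println(status.Code(nil)) // OK
}
```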
460 vendor/google.golang.org/grpc/stream.go generated vendored
@@ -30,14 +30,16 @@ import (
|
|||||||
"google.golang.org/grpc/codes"
|
"google.golang.org/grpc/codes"
|
||||||
"google.golang.org/grpc/encoding"
|
"google.golang.org/grpc/encoding"
|
||||||
"google.golang.org/grpc/metadata"
|
"google.golang.org/grpc/metadata"
|
||||||
"google.golang.org/grpc/peer"
|
|
||||||
"google.golang.org/grpc/stats"
|
"google.golang.org/grpc/stats"
|
||||||
"google.golang.org/grpc/status"
|
"google.golang.org/grpc/status"
|
||||||
"google.golang.org/grpc/transport"
|
"google.golang.org/grpc/transport"
|
||||||
)
|
)
|
||||||
|
|
||||||
// StreamHandler defines the handler called by gRPC server to complete the
|
// StreamHandler defines the handler called by gRPC server to complete the
|
||||||
// execution of a streaming RPC.
|
// execution of a streaming RPC. If a StreamHandler returns an error, it
|
||||||
|
// should be produced by the status package, or else gRPC will use
|
||||||
|
// codes.Unknown as the status code and err.Error() as the status message
|
||||||
|
// of the RPC.
|
||||||
type StreamHandler func(srv interface{}, stream ServerStream) error
|
type StreamHandler func(srv interface{}, stream ServerStream) error
|
||||||
|
|
||||||
// StreamDesc represents a streaming RPC service's method specification.
|
// StreamDesc represents a streaming RPC service's method specification.
|
||||||
@@ -51,6 +53,8 @@ type StreamDesc struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Stream defines the common interface a client or server stream has to satisfy.
|
// Stream defines the common interface a client or server stream has to satisfy.
|
||||||
|
//
|
||||||
|
// All errors returned from Stream are compatible with the status package.
|
||||||
type Stream interface {
|
type Stream interface {
|
||||||
// Context returns the context for this stream.
|
// Context returns the context for this stream.
|
||||||
Context() context.Context
|
Context() context.Context
|
||||||
@@ -89,14 +93,19 @@ type ClientStream interface {
|
|||||||
// Stream.SendMsg() may return a non-nil error when something wrong happens sending
|
// Stream.SendMsg() may return a non-nil error when something wrong happens sending
|
||||||
// the request. The returned error indicates the status of this sending, not the final
|
// the request. The returned error indicates the status of this sending, not the final
|
||||||
// status of the RPC.
|
// status of the RPC.
|
||||||
// Always call Stream.RecvMsg() to get the final status if you care about the status of
|
//
|
||||||
// the RPC.
|
// Always call Stream.RecvMsg() to drain the stream and get the final
|
||||||
|
// status, otherwise there could be leaked resources.
|
||||||
Stream
|
Stream
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewStream creates a new Stream for the client side. This is typically
|
// NewStream creates a new Stream for the client side. This is typically
|
||||||
// called by generated code.
|
// called by generated code.
|
||||||
func (cc *ClientConn) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) {
|
func (cc *ClientConn) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) {
|
||||||
|
// allow interceptor to see all applicable call options, which means those
|
||||||
|
// configured as defaults from dial option as well as per-call options
|
||||||
|
opts = combine(cc.dopts.callOptions, opts)
|
||||||
|
|
||||||
if cc.dopts.streamInt != nil {
|
if cc.dopts.streamInt != nil {
|
||||||
return cc.dopts.streamInt(ctx, desc, cc, method, newClientStream, opts...)
|
return cc.dopts.streamInt(ctx, desc, cc, method, newClientStream, opts...)
|
||||||
}
|
}
|
||||||
@@ -112,28 +121,29 @@ func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
|
|||||||
}
|
}
|
||||||
|
|
||||||
func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) {
|
func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) {
|
||||||
var (
|
|
||||||
t transport.ClientTransport
|
|
||||||
s *transport.Stream
|
|
||||||
done func(balancer.DoneInfo)
|
|
||||||
cancel context.CancelFunc
|
|
||||||
)
|
|
||||||
c := defaultCallInfo()
|
c := defaultCallInfo()
|
||||||
mc := cc.GetMethodConfig(method)
|
mc := cc.GetMethodConfig(method)
|
||||||
if mc.WaitForReady != nil {
|
if mc.WaitForReady != nil {
|
||||||
c.failFast = !*mc.WaitForReady
|
c.failFast = !*mc.WaitForReady
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Possible context leak:
|
||||||
|
// The cancel function for the child context we create will only be called
|
||||||
|
// when RecvMsg returns a non-nil error, if the ClientConn is closed, or if
|
||||||
|
// an error is generated by SendMsg.
|
||||||
|
// https://github.com/grpc/grpc-go/issues/1818.
|
||||||
|
var cancel context.CancelFunc
|
||||||
if mc.Timeout != nil && *mc.Timeout >= 0 {
|
if mc.Timeout != nil && *mc.Timeout >= 0 {
|
||||||
ctx, cancel = context.WithTimeout(ctx, *mc.Timeout)
|
ctx, cancel = context.WithTimeout(ctx, *mc.Timeout)
|
||||||
|
} else {
|
||||||
|
ctx, cancel = context.WithCancel(ctx)
|
||||||
|
}
|
||||||
defer func() {
|
defer func() {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
cancel()
|
cancel()
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
}
|
|
||||||
|
|
||||||
opts = append(cc.dopts.callOptions, opts...)
|
|
||||||
for _, o := range opts {
|
for _, o := range opts {
|
||||||
if err := o.before(c); err != nil {
|
if err := o.before(c); err != nil {
|
||||||
return nil, toRPCErr(err)
|
return nil, toRPCErr(err)
|
||||||
@@ -141,6 +151,9 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
|
|||||||
}
|
}
|
||||||
c.maxSendMessageSize = getMaxSize(mc.MaxReqSize, c.maxSendMessageSize, defaultClientMaxSendMessageSize)
|
c.maxSendMessageSize = getMaxSize(mc.MaxReqSize, c.maxSendMessageSize, defaultClientMaxSendMessageSize)
|
||||||
c.maxReceiveMessageSize = getMaxSize(mc.MaxRespSize, c.maxReceiveMessageSize, defaultClientMaxReceiveMessageSize)
|
c.maxReceiveMessageSize = getMaxSize(mc.MaxRespSize, c.maxReceiveMessageSize, defaultClientMaxReceiveMessageSize)
|
||||||
|
if err := setCallInfoCodec(c); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
callHdr := &transport.CallHdr{
|
callHdr := &transport.CallHdr{
|
||||||
Host: cc.authority,
|
Host: cc.authority,
|
||||||
@@ -150,6 +163,7 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
|
|||||||
// If it's client streaming, the user may never send a request or send it any
|
// If it's client streaming, the user may never send a request or send it any
|
||||||
// time soon, so we ask the transport to flush the header.
|
// time soon, so we ask the transport to flush the header.
|
||||||
Flush: desc.ClientStreams,
|
Flush: desc.ClientStreams,
|
||||||
|
ContentSubtype: c.contentSubtype,
|
||||||
}
|
}
|
||||||
|
|
||||||
// Set our outgoing compression according to the UseCompressor CallOption, if
|
// Set our outgoing compression according to the UseCompressor CallOption, if
|
||||||
@@ -163,7 +177,7 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
|
|||||||
if ct != encoding.Identity {
|
if ct != encoding.Identity {
|
||||||
comp = encoding.GetCompressor(ct)
|
comp = encoding.GetCompressor(ct)
|
||||||
if comp == nil {
|
if comp == nil {
|
||||||
return nil, Errorf(codes.Internal, "grpc: Compressor is not installed for requested grpc-encoding %q", ct)
|
return nil, status.Errorf(codes.Internal, "grpc: Compressor is not installed for requested grpc-encoding %q", ct)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
} else if cc.dopts.cp != nil {
|
} else if cc.dopts.cp != nil {
|
||||||
@@ -194,11 +208,13 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
|
|||||||
}
|
}
|
||||||
ctx = newContextWithRPCInfo(ctx, c.failFast)
|
ctx = newContextWithRPCInfo(ctx, c.failFast)
|
||||||
sh := cc.dopts.copts.StatsHandler
|
sh := cc.dopts.copts.StatsHandler
|
||||||
|
var beginTime time.Time
|
||||||
if sh != nil {
|
if sh != nil {
|
||||||
ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method, FailFast: c.failFast})
|
ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method, FailFast: c.failFast})
|
||||||
|
beginTime = time.Now()
|
||||||
begin := &stats.Begin{
|
begin := &stats.Begin{
|
||||||
Client: true,
|
Client: true,
|
||||||
BeginTime: time.Now(),
|
BeginTime: beginTime,
|
||||||
FailFast: c.failFast,
|
FailFast: c.failFast,
|
||||||
}
|
}
|
||||||
sh.HandleRPC(ctx, begin)
|
sh.HandleRPC(ctx, begin)
|
||||||
@@ -208,12 +224,19 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
|
|||||||
end := &stats.End{
|
end := &stats.End{
|
||||||
Client: true,
|
Client: true,
|
||||||
Error: err,
|
Error: err,
|
||||||
|
BeginTime: beginTime,
|
||||||
|
EndTime: time.Now(),
|
||||||
}
|
}
|
||||||
sh.HandleRPC(ctx, end)
|
sh.HandleRPC(ctx, end)
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
t transport.ClientTransport
|
||||||
|
s *transport.Stream
|
||||||
|
done func(balancer.DoneInfo)
|
||||||
|
)
|
||||||
for {
|
for {
|
||||||
// Check to make sure the context has expired. This will prevent us from
|
// Check to make sure the context has expired. This will prevent us from
|
||||||
// looping forever if an error occurs for wait-for-ready RPCs where no data
|
// looping forever if an error occurs for wait-for-ready RPCs where no data
|
||||||
@@ -246,54 +269,43 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
|
|||||||
break
|
break
|
||||||
}
|
}
|
||||||
|
|
||||||
// Set callInfo.peer object from stream's context.
|
|
||||||
if peer, ok := peer.FromContext(s.Context()); ok {
|
|
||||||
c.peer = peer
|
|
||||||
}
|
|
||||||
cs := &clientStream{
|
cs := &clientStream{
|
||||||
opts: opts,
|
opts: opts,
|
||||||
c: c,
|
c: c,
|
||||||
desc: desc,
|
desc: desc,
|
||||||
codec: cc.dopts.codec,
|
codec: c.codec,
|
||||||
cp: cp,
|
cp: cp,
|
||||||
dc: cc.dopts.dc,
|
|
||||||
comp: comp,
|
comp: comp,
|
||||||
cancel: cancel,
|
cancel: cancel,
|
||||||
|
attempt: &csAttempt{
|
||||||
done: done,
|
|
||||||
t: t,
|
t: t,
|
||||||
s: s,
|
s: s,
|
||||||
p: &parser{r: s},
|
p: &parser{r: s},
|
||||||
|
done: done,
|
||||||
tracing: EnableTracing,
|
dc: cc.dopts.dc,
|
||||||
|
ctx: ctx,
|
||||||
trInfo: trInfo,
|
trInfo: trInfo,
|
||||||
|
statsHandler: sh,
|
||||||
statsCtx: ctx,
|
beginTime: beginTime,
|
||||||
statsHandler: cc.dopts.copts.StatsHandler,
|
},
|
||||||
}
|
}
|
||||||
// Listen on s.Context().Done() to detect cancellation and s.Done() to detect
|
cs.c.stream = cs
|
||||||
// normal termination when there is no pending I/O operations on this stream.
|
cs.attempt.cs = cs
|
||||||
|
if desc != unaryStreamDesc {
|
||||||
|
// Listen on cc and stream contexts to cleanup when the user closes the
|
||||||
|
// ClientConn or cancels the stream context. In all other cases, an error
|
||||||
|
// should already be injected into the recv buffer by the transport, which
|
||||||
|
// the client will eventually receive, and then we will cancel the stream's
|
||||||
|
// context in clientStream.finish.
|
||||||
go func() {
|
go func() {
|
||||||
select {
|
select {
|
||||||
case <-t.Error():
|
|
||||||
// Incur transport error, simply exit.
|
|
||||||
case <-cc.ctx.Done():
|
case <-cc.ctx.Done():
|
||||||
cs.finish(ErrClientConnClosing)
|
cs.finish(ErrClientConnClosing)
|
||||||
cs.closeTransportStream(ErrClientConnClosing)
|
case <-ctx.Done():
|
||||||
case <-s.Done():
|
cs.finish(toRPCErr(ctx.Err()))
|
||||||
// TODO: The trace of the RPC is terminated here when there is no pending
|
|
||||||
// I/O, which is probably not the optimal solution.
|
|
||||||
cs.finish(s.Status().Err())
|
|
||||||
cs.closeTransportStream(nil)
|
|
||||||
case <-s.GoAway():
|
|
||||||
cs.finish(errConnDrain)
|
|
||||||
cs.closeTransportStream(errConnDrain)
|
|
||||||
case <-s.Context().Done():
|
|
||||||
err := s.Context().Err()
|
|
||||||
cs.finish(err)
|
|
||||||
cs.closeTransportStream(transport.ContextErr(err))
|
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
|
}
|
||||||
return cs, nil
|
return cs, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -301,89 +313,143 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
|
|||||||
type clientStream struct {
|
type clientStream struct {
|
||||||
opts []CallOption
|
opts []CallOption
|
||||||
c *callInfo
|
c *callInfo
|
||||||
|
desc *StreamDesc
|
||||||
|
|
||||||
|
codec baseCodec
|
||||||
|
cp Compressor
|
||||||
|
comp encoding.Compressor
|
||||||
|
|
||||||
|
cancel context.CancelFunc // cancels all attempts
|
||||||
|
|
||||||
|
sentLast bool // sent an end stream
|
||||||
|
|
||||||
|
mu sync.Mutex // guards finished
|
||||||
|
finished bool // TODO: replace with atomic cmpxchg or sync.Once?
|
||||||
|
|
||||||
|
attempt *csAttempt // the active client stream attempt
|
||||||
|
// TODO(hedging): hedging will have multiple attempts simultaneously.
|
||||||
|
}
|
||||||
|
|
||||||
|
// csAttempt implements a single transport stream attempt within a
|
||||||
|
// clientStream.
|
||||||
|
type csAttempt struct {
|
||||||
|
cs *clientStream
|
||||||
t transport.ClientTransport
|
t transport.ClientTransport
|
||||||
s *transport.Stream
|
s *transport.Stream
|
||||||
p *parser
|
p *parser
|
||||||
desc *StreamDesc
|
done func(balancer.DoneInfo)
|
||||||
|
|
||||||
codec Codec
|
|
||||||
cp Compressor
|
|
||||||
dc Decompressor
|
dc Decompressor
|
||||||
comp encoding.Compressor
|
|
||||||
decomp encoding.Compressor
|
decomp encoding.Compressor
|
||||||
decompSet bool
|
decompSet bool
|
||||||
|
|
||||||
cancel context.CancelFunc
|
ctx context.Context // the application's context, wrapped by stats/tracing
|
||||||
|
|
||||||
tracing bool // set to EnableTracing when the clientStream is created.
|
mu sync.Mutex // guards trInfo.tr
|
||||||
|
// trInfo.tr is set when created (if EnableTracing is true),
|
||||||
mu sync.Mutex
|
// and cleared when the finish method is called.
|
||||||
done func(balancer.DoneInfo)
|
|
||||||
closed bool
|
|
||||||
finished bool
|
|
||||||
// trInfo.tr is set when the clientStream is created (if EnableTracing is true),
|
|
||||||
// and is set to nil when the clientStream's finish method is called.
|
|
||||||
trInfo traceInfo
|
trInfo traceInfo
|
||||||
|
|
||||||
// statsCtx keeps the user context for stats handling.
|
|
||||||
// All stats collection should use the statsCtx (instead of the stream context)
|
|
||||||
// so that all the generated stats for a particular RPC can be associated in the processing phase.
|
|
||||||
statsCtx context.Context
|
|
||||||
statsHandler stats.Handler
|
statsHandler stats.Handler
|
||||||
|
beginTime time.Time
|
||||||
}
|
}
|
||||||
|
|
||||||
func (cs *clientStream) Context() context.Context {
|
func (cs *clientStream) Context() context.Context {
|
||||||
return cs.s.Context()
|
// TODO(retry): commit the current attempt (the context has peer-aware data).
|
||||||
|
return cs.attempt.context()
|
||||||
}
|
}
|
||||||
|
|
||||||
func (cs *clientStream) Header() (metadata.MD, error) {
|
func (cs *clientStream) Header() (metadata.MD, error) {
|
||||||
m, err := cs.s.Header()
|
m, err := cs.attempt.header()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if _, ok := err.(transport.ConnectionError); !ok {
|
// TODO(retry): maybe retry on error or commit attempt on success.
|
||||||
cs.closeTransportStream(err)
|
err = toRPCErr(err)
|
||||||
}
|
cs.finish(err)
|
||||||
}
|
}
|
||||||
return m, err
|
return m, err
|
||||||
}
|
}
|
||||||
|
|
||||||
func (cs *clientStream) Trailer() metadata.MD {
|
func (cs *clientStream) Trailer() metadata.MD {
|
||||||
return cs.s.Trailer()
|
// TODO(retry): on error, maybe retry (trailers-only).
|
||||||
|
return cs.attempt.trailer()
|
||||||
}
|
}
|
||||||
|
|
||||||
func (cs *clientStream) SendMsg(m interface{}) (err error) {
|
func (cs *clientStream) SendMsg(m interface{}) (err error) {
|
||||||
if cs.tracing {
|
// TODO(retry): buffer message for replaying if not committed.
|
||||||
cs.mu.Lock()
|
return cs.attempt.sendMsg(m)
|
||||||
if cs.trInfo.tr != nil {
|
}
|
||||||
cs.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true)
|
|
||||||
}
|
func (cs *clientStream) RecvMsg(m interface{}) (err error) {
|
||||||
cs.mu.Unlock()
|
// TODO(retry): maybe retry on error or commit attempt on success.
|
||||||
}
|
return cs.attempt.recvMsg(m)
|
||||||
// TODO Investigate how to signal the stats handling party.
|
}
|
||||||
// generate error stats if err != nil && err != io.EOF?
|
|
||||||
defer func() {
|
func (cs *clientStream) CloseSend() error {
|
||||||
if err != nil {
|
cs.attempt.closeSend()
|
||||||
cs.finish(err)
|
return nil
|
||||||
}
|
}
|
||||||
if err == nil {
|
|
||||||
return
|
func (cs *clientStream) finish(err error) {
|
||||||
}
|
|
||||||
if err == io.EOF {
|
if err == io.EOF {
|
||||||
// Specialize the process for server streaming. SendMsg is only called
|
// Ending a stream with EOF indicates a success.
|
||||||
// once when creating the stream object. io.EOF needs to be skipped when
|
|
||||||
// the rpc is early finished (before the stream object is created.).
|
|
||||||
// TODO: It is probably better to move this into the generated code.
|
|
||||||
if !cs.desc.ClientStreams && cs.desc.ServerStreams {
|
|
||||||
err = nil
|
err = nil
|
||||||
}
|
}
|
||||||
|
cs.mu.Lock()
|
||||||
|
if cs.finished {
|
||||||
|
cs.mu.Unlock()
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
if _, ok := err.(transport.ConnectionError); !ok {
|
cs.finished = true
|
||||||
cs.closeTransportStream(err)
|
cs.mu.Unlock()
|
||||||
|
// TODO(retry): commit current attempt if necessary.
|
||||||
|
cs.attempt.finish(err)
|
||||||
|
for _, o := range cs.opts {
|
||||||
|
o.after(cs.c)
|
||||||
|
}
|
||||||
|
cs.cancel()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *csAttempt) context() context.Context {
|
||||||
|
return a.s.Context()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *csAttempt) header() (metadata.MD, error) {
|
||||||
|
return a.s.Header()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *csAttempt) trailer() metadata.MD {
|
||||||
|
return a.s.Trailer()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *csAttempt) sendMsg(m interface{}) (err error) {
|
||||||
|
// TODO Investigate how to signal the stats handling party.
|
||||||
|
// generate error stats if err != nil && err != io.EOF?
|
||||||
|
cs := a.cs
|
||||||
|
defer func() {
|
||||||
|
// For non-client-streaming RPCs, we return nil instead of EOF on success
|
||||||
|
// because the generated code requires it. finish is not called; RecvMsg()
|
||||||
|
// will call it with the stream's status independently.
|
||||||
|
if err == io.EOF && !cs.desc.ClientStreams {
|
||||||
|
err = nil
|
||||||
|
}
|
||||||
|
if err != nil && err != io.EOF {
|
||||||
|
// Call finish on the client stream for errors generated by this SendMsg
|
||||||
|
// call, as these indicate problems created by this client. (Transport
|
||||||
|
// errors are converted to an io.EOF error below; the real error will be
|
||||||
|
// returned from RecvMsg eventually in that case, or be retried.)
|
||||||
|
cs.finish(err)
|
||||||
}
|
}
|
||||||
err = toRPCErr(err)
|
|
||||||
}()
|
}()
|
||||||
|
// TODO: Check cs.sentLast and error if we already ended the stream.
|
||||||
|
if EnableTracing {
|
||||||
|
a.mu.Lock()
|
||||||
|
if a.trInfo.tr != nil {
|
||||||
|
a.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true)
|
||||||
|
}
|
||||||
|
a.mu.Unlock()
|
||||||
|
}
|
||||||
var outPayload *stats.OutPayload
|
var outPayload *stats.OutPayload
|
||||||
if cs.statsHandler != nil {
|
if a.statsHandler != nil {
|
||||||
outPayload = &stats.OutPayload{
|
outPayload = &stats.OutPayload{
|
||||||
Client: true,
|
Client: true,
|
||||||
}
|
}
|
||||||
@@ -392,174 +458,133 @@ func (cs *clientStream) SendMsg(m interface{}) (err error) {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if cs.c.maxSendMessageSize == nil {
|
|
||||||
return Errorf(codes.Internal, "callInfo maxSendMessageSize field uninitialized(nil)")
|
|
||||||
}
|
|
||||||
if len(data) > *cs.c.maxSendMessageSize {
|
if len(data) > *cs.c.maxSendMessageSize {
|
||||||
return Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(data), *cs.c.maxSendMessageSize)
|
return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(data), *cs.c.maxSendMessageSize)
|
||||||
}
|
}
|
||||||
err = cs.t.Write(cs.s, hdr, data, &transport.Options{Last: false})
|
if !cs.desc.ClientStreams {
|
||||||
if err == nil && outPayload != nil {
|
cs.sentLast = true
|
||||||
|
}
|
||||||
|
err = a.t.Write(a.s, hdr, data, &transport.Options{Last: !cs.desc.ClientStreams})
|
||||||
|
if err == nil {
|
||||||
|
if outPayload != nil {
|
||||||
outPayload.SentTime = time.Now()
|
outPayload.SentTime = time.Now()
|
||||||
cs.statsHandler.HandleRPC(cs.statsCtx, outPayload)
|
a.statsHandler.HandleRPC(a.ctx, outPayload)
|
||||||
}
|
}
|
||||||
return err
|
return nil
|
||||||
|
}
|
||||||
|
return io.EOF
|
||||||
}
|
}
|
||||||
|
|
||||||
func (cs *clientStream) RecvMsg(m interface{}) (err error) {
|
func (a *csAttempt) recvMsg(m interface{}) (err error) {
|
||||||
|
cs := a.cs
|
||||||
|
defer func() {
|
||||||
|
if err != nil || !cs.desc.ServerStreams {
|
||||||
|
// err != nil or non-server-streaming indicates end of stream.
|
||||||
|
cs.finish(err)
|
||||||
|
}
|
||||||
|
}()
|
||||||
var inPayload *stats.InPayload
|
var inPayload *stats.InPayload
|
||||||
if cs.statsHandler != nil {
|
if a.statsHandler != nil {
|
||||||
inPayload = &stats.InPayload{
|
inPayload = &stats.InPayload{
|
||||||
Client: true,
|
Client: true,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if cs.c.maxReceiveMessageSize == nil {
|
if !a.decompSet {
|
||||||
return Errorf(codes.Internal, "callInfo maxReceiveMessageSize field uninitialized(nil)")
|
|
||||||
}
|
|
||||||
if !cs.decompSet {
|
|
||||||
// Block until we receive headers containing received message encoding.
|
// Block until we receive headers containing received message encoding.
|
||||||
if ct := cs.s.RecvCompress(); ct != "" && ct != encoding.Identity {
|
if ct := a.s.RecvCompress(); ct != "" && ct != encoding.Identity {
|
||||||
if cs.dc == nil || cs.dc.Type() != ct {
|
if a.dc == nil || a.dc.Type() != ct {
|
||||||
// No configured decompressor, or it does not match the incoming
|
// No configured decompressor, or it does not match the incoming
|
||||||
// message encoding; attempt to find a registered compressor that does.
|
// message encoding; attempt to find a registered compressor that does.
|
||||||
cs.dc = nil
|
a.dc = nil
|
||||||
cs.decomp = encoding.GetCompressor(ct)
|
a.decomp = encoding.GetCompressor(ct)
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
// No compression is used; disable our decompressor.
|
// No compression is used; disable our decompressor.
|
||||||
cs.dc = nil
|
a.dc = nil
|
||||||
}
|
}
|
||||||
// Only initialize this state once per stream.
|
// Only initialize this state once per stream.
|
||||||
cs.decompSet = true
|
a.decompSet = true
|
||||||
}
|
}
|
||||||
err = recv(cs.p, cs.codec, cs.s, cs.dc, m, *cs.c.maxReceiveMessageSize, inPayload, cs.decomp)
|
err = recv(a.p, cs.codec, a.s, a.dc, m, *cs.c.maxReceiveMessageSize, inPayload, a.decomp)
|
||||||
defer func() {
|
|
||||||
// err != nil indicates the termination of the stream.
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
cs.finish(err)
|
if err == io.EOF {
|
||||||
|
if statusErr := a.s.Status().Err(); statusErr != nil {
|
||||||
|
return statusErr
|
||||||
}
|
}
|
||||||
}()
|
return io.EOF // indicates successful end of stream.
|
||||||
if err == nil {
|
|
||||||
if cs.tracing {
|
|
||||||
cs.mu.Lock()
|
|
||||||
if cs.trInfo.tr != nil {
|
|
||||||
cs.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true)
|
|
||||||
}
|
}
|
||||||
cs.mu.Unlock()
|
return toRPCErr(err)
|
||||||
|
}
|
||||||
|
if EnableTracing {
|
||||||
|
a.mu.Lock()
|
||||||
|
if a.trInfo.tr != nil {
|
||||||
|
a.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true)
|
||||||
|
}
|
||||||
|
a.mu.Unlock()
|
||||||
}
|
}
|
||||||
if inPayload != nil {
|
if inPayload != nil {
|
||||||
cs.statsHandler.HandleRPC(cs.statsCtx, inPayload)
|
a.statsHandler.HandleRPC(a.ctx, inPayload)
|
||||||
}
|
}
|
||||||
if !cs.desc.ClientStreams || cs.desc.ServerStreams {
|
if cs.desc.ServerStreams {
|
||||||
return
|
// Subsequent messages should be received by subsequent RecvMsg calls.
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
// Special handling for client streaming rpc.
|
|
||||||
|
// Special handling for non-server-stream rpcs.
|
||||||
// This recv expects EOF or errors, so we don't collect inPayload.
|
// This recv expects EOF or errors, so we don't collect inPayload.
|
||||||
if cs.c.maxReceiveMessageSize == nil {
|
err = recv(a.p, cs.codec, a.s, a.dc, m, *cs.c.maxReceiveMessageSize, nil, a.decomp)
|
||||||
return Errorf(codes.Internal, "callInfo maxReceiveMessageSize field uninitialized(nil)")
|
|
||||||
}
|
|
||||||
err = recv(cs.p, cs.codec, cs.s, cs.dc, m, *cs.c.maxReceiveMessageSize, nil, cs.decomp)
|
|
||||||
cs.closeTransportStream(err)
|
|
||||||
if err == nil {
|
if err == nil {
|
||||||
return toRPCErr(errors.New("grpc: client streaming protocol violation: get <nil>, want <EOF>"))
|
return toRPCErr(errors.New("grpc: client streaming protocol violation: get <nil>, want <EOF>"))
|
||||||
}
|
}
|
||||||
if err == io.EOF {
|
if err == io.EOF {
|
||||||
if se := cs.s.Status().Err(); se != nil {
|
return a.s.Status().Err() // non-server streaming Recv returns nil on success
|
||||||
return se
|
|
||||||
}
|
|
||||||
cs.finish(err)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return toRPCErr(err)
|
|
||||||
}
|
|
||||||
if _, ok := err.(transport.ConnectionError); !ok {
|
|
||||||
cs.closeTransportStream(err)
|
|
||||||
}
|
|
||||||
if err == io.EOF {
|
|
||||||
if statusErr := cs.s.Status().Err(); statusErr != nil {
|
|
||||||
return statusErr
|
|
||||||
}
|
|
||||||
// Returns io.EOF to indicate the end of the stream.
|
|
||||||
return
|
|
||||||
}
|
}
|
||||||
return toRPCErr(err)
|
return toRPCErr(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (cs *clientStream) CloseSend() (err error) {
|
func (a *csAttempt) closeSend() {
|
||||||
err = cs.t.Write(cs.s, nil, nil, &transport.Options{Last: true})
|
cs := a.cs
|
||||||
defer func() {
|
if cs.sentLast {
|
||||||
if err != nil {
|
|
||||||
cs.finish(err)
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
if err == nil || err == io.EOF {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
if _, ok := err.(transport.ConnectionError); !ok {
|
|
||||||
cs.closeTransportStream(err)
|
|
||||||
}
|
|
||||||
err = toRPCErr(err)
|
|
||||||
return
|
return
|
||||||
|
}
|
||||||
|
cs.sentLast = true
|
||||||
|
cs.attempt.t.Write(cs.attempt.s, nil, nil, &transport.Options{Last: true})
|
||||||
|
// We ignore errors from Write. Any error it would return would also be
|
||||||
|
// returned by a subsequent RecvMsg call, and the user is supposed to always
// finish the stream by calling RecvMsg until it returns err != nil.
}

-func (cs *clientStream) closeTransportStream(err error) {
+func (a *csAttempt) finish(err error) {
-	cs.mu.Lock()
+	a.mu.Lock()
-	if cs.closed {
+	a.t.CloseStream(a.s, err)
-		cs.mu.Unlock()
-		return
-	}
-	cs.closed = true
-	cs.mu.Unlock()
-	cs.t.CloseStream(cs.s, err)
-}
-
-func (cs *clientStream) finish(err error) {
+	if a.done != nil {
-	cs.mu.Lock()
+		a.done(balancer.DoneInfo{
-	defer cs.mu.Unlock()
+			Err:           err,
-	if cs.finished {
+			BytesSent:     true,
-		return
+			BytesReceived: a.s.BytesReceived(),
-	}
-	cs.finished = true
-	defer func() {
-		if cs.cancel != nil {
-			cs.cancel()
-		}
-	}()
-	for _, o := range cs.opts {
-		o.after(cs.c)
-	}
-	if cs.done != nil {
-		updateRPCInfoInContext(cs.s.Context(), rpcInfo{
-			bytesSent:     true,
-			bytesReceived: cs.s.BytesReceived(),
		})
-		cs.done(balancer.DoneInfo{Err: err})
-		cs.done = nil
	}
-	if cs.statsHandler != nil {
+	if a.statsHandler != nil {
		end := &stats.End{
			Client:    true,
+			BeginTime: a.beginTime,
			EndTime:   time.Now(),
+			Error:     err,
		}
-		if err != io.EOF {
+		a.statsHandler.HandleRPC(a.ctx, end)
-			// end.Error is nil if the RPC finished successfully.
-			end.Error = toRPCErr(err)
		}
-		cs.statsHandler.HandleRPC(cs.statsCtx, end)
+	if a.trInfo.tr != nil {
-	}
+		if err == nil {
-	if !cs.tracing {
+			a.trInfo.tr.LazyPrintf("RPC: [OK]")
-		return
-	}
-	if cs.trInfo.tr != nil {
-		if err == nil || err == io.EOF {
-			cs.trInfo.tr.LazyPrintf("RPC: [OK]")
		} else {
-			cs.trInfo.tr.LazyPrintf("RPC: [%v]", err)
+			a.trInfo.tr.LazyPrintf("RPC: [%v]", err)
-			cs.trInfo.tr.SetError()
+			a.trInfo.tr.SetError()
		}
-		cs.trInfo.tr.Finish()
+		a.trInfo.tr.Finish()
-		cs.trInfo.tr = nil
+		a.trInfo.tr = nil
	}
+	a.mu.Unlock()
}

// ServerStream defines the interface a server stream has to satisfy.
@@ -583,10 +608,11 @@ type ServerStream interface {

// serverStream implements a server side Stream.
type serverStream struct {
+	ctx   context.Context
	t     transport.ServerTransport
	s     *transport.Stream
	p     *parser
-	codec Codec
+	codec baseCodec

	cp Compressor
	dc Decompressor
@@ -603,7 +629,7 @@ type serverStream struct {
}

func (ss *serverStream) Context() context.Context {
-	return ss.s.Context()
+	return ss.ctx
}

func (ss *serverStream) SetHeader(md metadata.MD) error {
@@ -653,7 +679,7 @@ func (ss *serverStream) SendMsg(m interface{}) (err error) {
		return err
	}
	if len(data) > ss.maxSendMessageSize {
-		return Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(data), ss.maxSendMessageSize)
+		return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(data), ss.maxSendMessageSize)
	}
	if err := ss.t.Write(ss.s, hdr, data, &transport.Options{Last: false}); err != nil {
		return toRPCErr(err)
@@ -693,7 +719,7 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) {
		return err
	}
	if err == io.ErrUnexpectedEOF {
-		err = Errorf(codes.Internal, io.ErrUnexpectedEOF.Error())
+		err = status.Errorf(codes.Internal, io.ErrUnexpectedEOF.Error())
	}
	return toRPCErr(err)
}
@@ -706,9 +732,5 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) {
// MethodFromServerStream returns the method string for the input stream.
// The returned string is in the format of "/service/method".
func MethodFromServerStream(stream ServerStream) (string, bool) {
-	s, ok := transport.StreamFromContext(stream.Context())
+	return Method(stream.Context())
-	if !ok {
-		return "", ok
-	}
-	return s.Method(), ok
}
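The stream.go hunks above are part of the upstream refactor that moves per-call bookkeeping into a csAttempt value and routes error construction through the status package. MethodFromServerStream keeps its exported signature, so callers such as stream interceptors are unaffected by the vendor bump. A minimal sketch of that usage follows; the interceptor name and logging are illustrative, not part of this commit:

```go
package main

import (
	"log"

	"google.golang.org/grpc"
)

// loggingStreamInterceptor logs the full method name of every streaming RPC.
// MethodFromServerStream returns a string in the "/service/method" format.
func loggingStreamInterceptor(srv interface{}, ss grpc.ServerStream,
	info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
	if m, ok := grpc.MethodFromServerStream(ss); ok {
		log.Printf("streaming RPC: %s", m)
	}
	return handler(srv, ss, info)
}

func main() {
	// Registering the interceptor is unchanged by the vendor update.
	_ = grpc.NewServer(grpc.StreamInterceptor(loggingStreamInterceptor))
}
```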
1 vendor/google.golang.org/grpc/transport/control.go generated vendored
@@ -116,6 +116,7 @@ type goAway struct {
func (*goAway) item() {}

type flushIO struct {
+	closeTr bool
}

func (*flushIO) item() {}
6 vendor/google.golang.org/grpc/transport/go16.go generated vendored
@@ -22,6 +22,7 @@ package transport

import (
	"net"
+	"net/http"

	"google.golang.org/grpc/codes"

@@ -43,3 +44,8 @@ func ContextErr(err error) StreamError {
	}
	return streamErrorf(codes.Internal, "Unexpected error from context packet: %v", err)
}

+// contextFromRequest returns a background context.
+func contextFromRequest(r *http.Request) context.Context {
+	return context.Background()
+}
6 vendor/google.golang.org/grpc/transport/go17.go generated vendored
@@ -23,6 +23,7 @@ package transport

import (
	"context"
	"net"
+	"net/http"

	"google.golang.org/grpc/codes"

@@ -44,3 +45,8 @@ func ContextErr(err error) StreamError {
	}
	return streamErrorf(codes.Internal, "Unexpected error from context packet: %v", err)
}

+// contextFromRequest returns a context from the HTTP Request.
+func contextFromRequest(r *http.Request) context.Context {
+	return r.Context()
+}
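go16.go and go17.go are build-tag variants of the same helper: on Go 1.6 contextFromRequest falls back to context.Background(), while on Go 1.7 and later it returns the per-request context, so handler-based transports are cancelled when the HTTP request is. A minimal illustration of the Go 1.7+ behaviour this relies on (the handler below is illustrative):

```go
package main

import (
	"fmt"
	"net/http"
)

// The request context is cancelled when the client goes away or the request
// finishes, which is what the Go 1.7+ contextFromRequest builds on.
func handler(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	select {
	case <-ctx.Done():
		fmt.Fprintln(w, "request cancelled:", ctx.Err())
	default:
		fmt.Fprintln(w, "request context is live")
	}
}

func main() {
	http.HandleFunc("/", handler)
	_ = http.ListenAndServe(":8080", nil)
}
```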
77 vendor/google.golang.org/grpc/transport/handler_server.go generated vendored
@@ -40,20 +40,24 @@ import (
	"google.golang.org/grpc/credentials"
	"google.golang.org/grpc/metadata"
	"google.golang.org/grpc/peer"
+	"google.golang.org/grpc/stats"
	"google.golang.org/grpc/status"
)

// NewServerHandlerTransport returns a ServerTransport handling gRPC
// from inside an http.Handler. It requires that the http Server
// supports HTTP/2.
-func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request) (ServerTransport, error) {
+func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats stats.Handler) (ServerTransport, error) {
	if r.ProtoMajor != 2 {
		return nil, errors.New("gRPC requires HTTP/2")
	}
	if r.Method != "POST" {
		return nil, errors.New("invalid gRPC request method")
	}
-	if !validContentType(r.Header.Get("Content-Type")) {
+	contentType := r.Header.Get("Content-Type")
+	// TODO: do we assume contentType is lowercase? we did before
+	contentSubtype, validContentType := contentSubtype(contentType)
+	if !validContentType {
		return nil, errors.New("invalid gRPC request content-type")
	}
	if _, ok := w.(http.Flusher); !ok {
@@ -68,6 +72,9 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request) (ServerTr
		req:            r,
		closedCh:       make(chan struct{}),
		writes:         make(chan func()),
+		contentType:    contentType,
+		contentSubtype: contentSubtype,
+		stats:          stats,
	}

	if v := r.Header.Get("grpc-timeout"); v != "" {
@@ -79,7 +86,7 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request) (ServerTr
		st.timeout = to
	}

-	var metakv []string
+	metakv := []string{"content-type", contentType}
	if r.Host != "" {
		metakv = append(metakv, ":authority", r.Host)
	}
@@ -91,7 +98,7 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request) (ServerTr
		for _, v := range vv {
			v, err := decodeMetadataHeader(k, v)
			if err != nil {
-				return nil, streamErrorf(codes.InvalidArgument, "malformed binary metadata: %v", err)
+				return nil, streamErrorf(codes.Internal, "malformed binary metadata: %v", err)
			}
			metakv = append(metakv, k, v)
		}
@@ -123,10 +130,17 @@ type serverHandlerTransport struct {
	// when WriteStatus is called.
	writes chan func()

-	mu sync.Mutex
-	// streamDone indicates whether WriteStatus has been called and writes channel
-	// has been closed.
-	streamDone bool
+	// block concurrent WriteStatus calls
+	// e.g. grpc/(*serverStream).SendMsg/RecvMsg
+	writeStatusMu sync.Mutex
+
+	// we just mirror the request content-type
+	contentType string
+	// we store both contentType and contentSubtype so we don't keep recreating them
+	// TODO make sure this is consistent across handler_server and http2_server
+	contentSubtype string
+
+	stats stats.Handler
}

func (ht *serverHandlerTransport) Close() error {
@@ -177,13 +191,9 @@ func (ht *serverHandlerTransport) do(fn func()) error {
}

func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) error {
-	ht.mu.Lock()
-	if ht.streamDone {
-		ht.mu.Unlock()
-		return nil
-	}
-	ht.streamDone = true
-	ht.mu.Unlock()
+	ht.writeStatusMu.Lock()
+	defer ht.writeStatusMu.Unlock()
	err := ht.do(func() {
		ht.writeCommonHeaders(s)

@@ -222,7 +232,14 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro
			}
		}
	})

+	if err == nil { // transport has not been closed
+		if ht.stats != nil {
+			ht.stats.HandleRPC(s.Context(), &stats.OutTrailer{})
+		}
+		ht.Close()
		close(ht.writes)
+	}
	return err
}

@@ -236,7 +253,7 @@ func (ht *serverHandlerTransport) writeCommonHeaders(s *Stream) {

	h := ht.rw.Header()
	h["Date"] = nil // suppress Date to make tests happy; TODO: restore
-	h.Set("Content-Type", "application/grpc")
+	h.Set("Content-Type", ht.contentType)

	// Predeclare trailers we'll set later in WriteStatus (after the body).
	// This is a SHOULD in the HTTP RFC, and the way you add (known)
@@ -264,7 +281,7 @@ func (ht *serverHandlerTransport) Write(s *Stream, hdr []byte, data []byte, opts
}

func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error {
-	return ht.do(func() {
+	err := ht.do(func() {
		ht.writeCommonHeaders(s)
		h := ht.rw.Header()
		for k, vv := range md {
@@ -280,17 +297,24 @@ func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error {
		ht.rw.WriteHeader(200)
		ht.rw.(http.Flusher).Flush()
	})

+	if err == nil {
+		if ht.stats != nil {
+			ht.stats.HandleRPC(s.Context(), &stats.OutHeader{})
+		}
+	}
+	return err
}

func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), traceCtx func(context.Context, string) context.Context) {
	// With this transport type there will be exactly 1 stream: this HTTP request.

-	var ctx context.Context
+	ctx := contextFromRequest(ht.req)
	var cancel context.CancelFunc
	if ht.timeoutSet {
-		ctx, cancel = context.WithTimeout(context.Background(), ht.timeout)
+		ctx, cancel = context.WithTimeout(ctx, ht.timeout)
	} else {
-		ctx, cancel = context.WithCancel(context.Background())
+		ctx, cancel = context.WithCancel(ctx)
	}

	// requestOver is closed when either the request's context is done
@@ -321,6 +345,7 @@ func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), trace
		st:             ht,
		method:         req.URL.Path,
		recvCompress:   req.Header.Get("grpc-encoding"),
+		contentSubtype: ht.contentSubtype,
	}
	pr := &peer.Peer{
		Addr: ht.RemoteAddr(),
@@ -329,8 +354,16 @@ func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), trace
		pr.AuthInfo = credentials.TLSInfo{State: *req.TLS}
	}
	ctx = metadata.NewIncomingContext(ctx, ht.headerMD)
-	ctx = peer.NewContext(ctx, pr)
-	s.ctx = newContextWithStream(ctx, s)
+	s.ctx = peer.NewContext(ctx, pr)
+	if ht.stats != nil {
+		s.ctx = ht.stats.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method})
+		inHeader := &stats.InHeader{
+			FullMethod:  s.method,
+			RemoteAddr:  ht.RemoteAddr(),
+			Compression: s.recvCompress,
+		}
+		ht.stats.HandleRPC(s.ctx, inHeader)
+	}
	s.trReader = &transportReader{
		reader:        &recvBufferReader{ctx: s.ctx, recv: s.buf},
		windowHandler: func(int) {},
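With the handler_server.go hunks above, the ServeHTTP-based transport now receives a stats.Handler and reports InHeader, OutHeader and OutTrailer events to it, matching the raw HTTP/2 transport. A minimal sketch of wiring such a handler; the rpcLogger type is illustrative:

```go
package main

import (
	"log"

	"golang.org/x/net/context"
	"google.golang.org/grpc"
	"google.golang.org/grpc/stats"
)

// rpcLogger is a minimal stats.Handler implementation.
type rpcLogger struct{}

func (rpcLogger) TagRPC(ctx context.Context, info *stats.RPCTagInfo) context.Context {
	return ctx
}
func (rpcLogger) HandleRPC(ctx context.Context, s stats.RPCStats) {
	log.Printf("RPC stats event: %T", s)
}
func (rpcLogger) TagConn(ctx context.Context, info *stats.ConnTagInfo) context.Context {
	return ctx
}
func (rpcLogger) HandleConn(ctx context.Context, s stats.ConnStats) {}

func main() {
	// The same handler is now consulted whether the server runs on its own
	// listener or through ServeHTTP.
	_ = grpc.NewServer(grpc.StatsHandler(rpcLogger{}))
}
```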
105 vendor/google.golang.org/grpc/transport/http2_client.go generated vendored
@@ -20,6 +20,7 @@ package transport

import (
	"bytes"
+	"fmt"
	"io"
	"math"
	"net"
@@ -93,6 +94,11 @@ type http2Client struct {
	bdpEst          *bdpEstimator
	outQuotaVersion uint32

+	// onSuccess is a callback that client transport calls upon
+	// receiving server preface to signal that a succefull HTTP2
+	// connection was established.
+	onSuccess func()
+
	mu            sync.Mutex // guard the following variables
	state         transportState // the state of underlying connection
	activeStreams map[uint32]*Stream
@@ -115,18 +121,6 @@ func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error
}

func isTemporary(err error) bool {
-	switch err {
-	case io.EOF:
-		// Connection closures may be resolved upon retry, and are thus
-		// treated as temporary.
-		return true
-	case context.DeadlineExceeded:
-		// In Go 1.7, context.DeadlineExceeded implements Timeout(), and this
-		// special case is not needed. Until then, we need to keep this
-		// clause.
-		return true
-	}
-
	switch err := err.(type) {
	case interface {
		Temporary() bool
@@ -139,22 +133,18 @@ func isTemporary(err error) bool {
		// temporary.
		return err.Timeout()
	}
-	return false
+	return true
}

// newHTTP2Client constructs a connected ClientTransport to addr based on HTTP2
// and starts to receive messages on it. Non-nil error returns if construction
// fails.
-func newHTTP2Client(ctx context.Context, addr TargetInfo, opts ConnectOptions, timeout time.Duration) (_ ClientTransport, err error) {
+func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts ConnectOptions, onSuccess func()) (_ ClientTransport, err error) {
	scheme := "http"
	ctx, cancel := context.WithCancel(ctx)
-	connectCtx, connectCancel := context.WithTimeout(ctx, timeout)
	defer func() {
		if err != nil {
			cancel()
-			// Don't call connectCancel in success path due to a race in Go 1.6:
-			// https://github.com/golang/go/issues/15078.
-			connectCancel()
		}
	}()

@@ -179,10 +169,7 @@ func newHTTP2Client(ctx context.Context, addr TargetInfo, opts ConnectOptions, t
		scheme = "https"
		conn, authInfo, err = creds.ClientHandshake(connectCtx, addr.Authority, conn)
		if err != nil {
-			// Credentials handshake errors are typically considered permanent
-			// to avoid retrying on e.g. bad certificates.
-			temp := isTemporary(err)
-			return nil, connectionErrorf(temp, err, "transport: authentication handshake failed: %v", err)
+			return nil, connectionErrorf(isTemporary(err), err, "transport: authentication handshake failed: %v", err)
		}
		isSecure = true
	}
@@ -240,6 +227,7 @@ func newHTTP2Client(ctx context.Context, addr TargetInfo, opts ConnectOptions, t
		kp:                kp,
		statsHandler:      opts.StatsHandler,
		initialWindowSize: initialWindowSize,
+		onSuccess:         onSuccess,
	}
	if opts.InitialWindowSize >= defaultWindowSize {
		t.initialWindowSize = opts.InitialWindowSize
@@ -300,7 +288,7 @@ func newHTTP2Client(ctx context.Context, addr TargetInfo, opts ConnectOptions, t
	t.framer.writer.Flush()
	go func() {
		loopyWriter(t.ctx, t.controlBuf, t.itemHandler)
-		t.Close()
+		t.conn.Close()
	}()
	if t.kp.Time != infinity {
		go t.keepalive()
@@ -320,6 +308,7 @@ func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream {
		fc:             &inFlow{limit: uint32(t.initialWindowSize)},
		sendQuotaPool:  newQuotaPool(int(t.streamSendQuota)),
		headerChan:     make(chan struct{}),
+		contentSubtype: callHdr.ContentSubtype,
	}
	t.nextID += 2
	s.requestRead = func(n int) {
@@ -377,7 +366,11 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea
	for _, c := range t.creds {
		data, err := c.GetRequestMetadata(ctx, audience)
		if err != nil {
-			return nil, streamErrorf(codes.Internal, "transport: %v", err)
+			if _, ok := status.FromError(err); ok {
+				return nil, err
+			}
+
+			return nil, streamErrorf(codes.Unauthenticated, "transport: %v", err)
		}
		for k, v := range data {
			// Capital header names are illegal in HTTP/2.
@@ -431,7 +424,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea
	headerFields = append(headerFields, hpack.HeaderField{Name: ":scheme", Value: t.scheme})
	headerFields = append(headerFields, hpack.HeaderField{Name: ":path", Value: callHdr.Method})
	headerFields = append(headerFields, hpack.HeaderField{Name: ":authority", Value: callHdr.Host})
-	headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: "application/grpc"})
+	headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: contentType(callHdr.ContentSubtype)})
	headerFields = append(headerFields, hpack.HeaderField{Name: "user-agent", Value: t.userAgent})
	headerFields = append(headerFields, hpack.HeaderField{Name: "te", Value: "trailers"})

@@ -456,7 +449,22 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea
	if b := stats.OutgoingTrace(ctx); b != nil {
		headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-trace-bin", Value: encodeBinHeader(b)})
	}
-	if md, ok := metadata.FromOutgoingContext(ctx); ok {
+
+	if md, added, ok := metadata.FromOutgoingContextRaw(ctx); ok {
+		var k string
+		for _, vv := range added {
+			for i, v := range vv {
+				if i%2 == 0 {
+					k = v
+					continue
+				}
+				// HTTP doesn't allow you to set pseudoheaders after non pseudoheaders were set.
+				if isReservedHeader(k) {
+					continue
+				}
+				headerFields = append(headerFields, hpack.HeaderField{Name: strings.ToLower(k), Value: encodeMetadataHeader(k, v)})
+			}
+		}
		for k, vv := range md {
			// HTTP doesn't allow you to set pseudoheaders after non pseudoheaders were set.
			if isReservedHeader(k) {
@@ -573,7 +581,7 @@ func (t *http2Client) CloseStream(s *Stream, err error) {
	}
	s.state = streamDone
	s.mu.Unlock()
-	if _, ok := err.(StreamError); ok {
+	if err != nil && !rstStream {
		rstStream = true
		rstError = http2.ErrCodeCancel
	}
@@ -642,6 +650,8 @@ func (t *http2Client) Write(s *Stream, hdr []byte, data []byte, opts *Options) e
	select {
	case <-s.ctx.Done():
		return ContextErr(s.ctx.Err())
+	case <-s.done:
+		return io.EOF
	case <-t.ctx.Done():
		return ErrConnClosing
	default:
@@ -691,6 +701,8 @@ func (t *http2Client) Write(s *Stream, hdr []byte, data []byte, opts *Options) e
	}
	ltq, _, err := t.localSendQuota.get(size, s.waiters)
	if err != nil {
+		// Add the acquired quota back to transport.
+		t.sendQuotaPool.add(tq)
		return err
	}
	// even if ltq is smaller than size we don't adjust size since
@@ -1107,22 +1119,22 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
	}()

	s.mu.Lock()
-	if !endStream {
-		s.recvCompress = state.encoding
-	}
	if !s.headerDone {
-		if !endStream && len(state.mdata) > 0 {
+		if !endStream {
+			// Headers frame is not actually a trailers-only frame.
+			isHeader = true
+			s.recvCompress = state.encoding
+			if len(state.mdata) > 0 {
				s.header = state.mdata
			}
+		}
		close(s.headerChan)
		s.headerDone = true
-		isHeader = true
	}
	if !endStream || s.state == streamDone {
		s.mu.Unlock()
		return
	}

	if len(state.mdata) > 0 {
		s.trailer = state.mdata
	}
@@ -1160,6 +1172,7 @@ func (t *http2Client) reader() {
		t.Close()
		return
	}
+	t.onSuccess()
	t.handleSettings(sf, true)

	// loop to keep reading incoming messages on this transport.
@@ -1234,8 +1247,7 @@ func (t *http2Client) applySettings(ss []http2.Setting) {
// TODO(mmukhi): A lot of this code(and code in other places in the tranpsort layer)
// is duplicated between the client and the server.
// The transport layer needs to be refactored to take care of this.
-func (t *http2Client) itemHandler(i item) error {
-	var err error
+func (t *http2Client) itemHandler(i item) (err error) {
	defer func() {
		if err != nil {
			errorf(" error in itemHandler: %v", err)
@@ -1243,10 +1255,11 @@ func (t *http2Client) itemHandler(i item) error {
	}()
	switch i := i.(type) {
	case *dataFrame:
-		err = t.framer.fr.WriteData(i.streamID, i.endStream, i.d)
-		if err == nil {
-			i.f()
+		if err := t.framer.fr.WriteData(i.streamID, i.endStream, i.d); err != nil {
+			return err
		}
+		i.f()
+		return nil
	case *headerFrame:
		t.hBuf.Reset()
		for _, f := range i.hf {
@@ -1280,31 +1293,33 @@ func (t *http2Client) itemHandler(i item) error {
				return err
			}
		}
+		return nil
	case *windowUpdate:
-		err = t.framer.fr.WriteWindowUpdate(i.streamID, i.increment)
+		return t.framer.fr.WriteWindowUpdate(i.streamID, i.increment)
	case *settings:
-		err = t.framer.fr.WriteSettings(i.ss...)
+		return t.framer.fr.WriteSettings(i.ss...)
	case *settingsAck:
-		err = t.framer.fr.WriteSettingsAck()
+		return t.framer.fr.WriteSettingsAck()
	case *resetStream:
		// If the server needs to be to intimated about stream closing,
		// then we need to make sure the RST_STREAM frame is written to
		// the wire before the headers of the next stream waiting on
		// streamQuota. We ensure this by adding to the streamsQuota pool
		// only after having acquired the writableChan to send RST_STREAM.
-		err = t.framer.fr.WriteRSTStream(i.streamID, i.code)
+		err := t.framer.fr.WriteRSTStream(i.streamID, i.code)
		t.streamsQuota.add(1)
+		return err
	case *flushIO:
-		err = t.framer.writer.Flush()
+		return t.framer.writer.Flush()
	case *ping:
		if !i.ack {
			t.bdpEst.timesnap(i.data)
		}
-		err = t.framer.fr.WritePing(i.ack, i.data)
+		return t.framer.fr.WritePing(i.ack, i.data)
	default:
		errorf("transport: http2Client.controller got unexpected item type %v", i)
+		return fmt.Errorf("transport: http2Client.controller got unexpected item type %v", i)
	}
-	return err
}

// keepalive running in a separate goroutune makes sure the connection is alive by sending pings.
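One client-visible effect of the NewStream hunks above is that metadata appended with metadata.AppendToOutgoingContext is now picked up via metadata.FromOutgoingContextRaw when the HTTP/2 headers are written. A short sketch of the two ways request metadata can reach the transport; the header keys and values are illustrative:

```go
package main

import (
	"golang.org/x/net/context"
	"google.golang.org/grpc/metadata"
)

// buildCtx shows both entry points: NewOutgoingContext replaces the outgoing
// MD, while AppendToOutgoingContext adds key/value pairs that the updated
// transport reads separately when building the request headers.
func buildCtx(ctx context.Context) context.Context {
	ctx = metadata.NewOutgoingContext(ctx, metadata.Pairs("x-request-id", "123"))
	return metadata.AppendToOutgoingContext(ctx, "x-trace-id", "abc")
}

func main() {
	_ = buildCtx(context.Background())
}
```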
54 vendor/google.golang.org/grpc/transport/http2_server.go generated vendored
@@ -228,6 +228,12 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err
	}
	t.framer.writer.Flush()

+	defer func() {
+		if err != nil {
+			t.Close()
+		}
+	}()
+
	// Check the validity of client preface.
	preface := make([]byte, len(clientPreface))
	if _, err := io.ReadFull(t.conn, preface); err != nil {
@@ -239,8 +245,7 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err

	frame, err := t.framer.fr.ReadFrame()
	if err == io.EOF || err == io.ErrUnexpectedEOF {
-		t.Close()
-		return
+		return nil, err
	}
	if err != nil {
		return nil, connectionErrorf(false, err, "transport: http2Server.HandleStreams failed to read initial settings frame: %v", err)
@@ -254,7 +259,7 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err

	go func() {
		loopyWriter(t.ctx, t.controlBuf, t.itemHandler)
-		t.Close()
+		t.conn.Close()
	}()
	go t.keepalive()
	return t, nil
@@ -282,6 +287,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
		fc:             &inFlow{limit: uint32(t.initialWindowSize)},
		recvCompress:   state.encoding,
		method:         state.method,
+		contentSubtype: state.contentSubtype,
	}

	if frame.StreamEnded() {
@@ -301,10 +307,6 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
		pr.AuthInfo = t.authInfo
	}
	s.ctx = peer.NewContext(s.ctx, pr)
-	// Cache the current stream to the context so that the server application
-	// can find out. Required when the server wants to send some metadata
-	// back to the client (unary call only).
-	s.ctx = newContextWithStream(s.ctx, s)
	// Attach the received metadata to the context.
	if len(state.mdata) > 0 {
		s.ctx = metadata.NewIncomingContext(s.ctx, state.mdata)
@@ -725,7 +727,7 @@ func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error {
	// first and create a slice of that exact size.
	headerFields := make([]hpack.HeaderField, 0, 2) // at least :status, content-type will be there if none else.
	headerFields = append(headerFields, hpack.HeaderField{Name: ":status", Value: "200"})
-	headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: "application/grpc"})
+	headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: contentType(s.contentSubtype)})
	if s.sendCompress != "" {
		headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: s.sendCompress})
	}
@@ -744,9 +746,9 @@ func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error {
		endStream: false,
	})
	if t.stats != nil {
-		outHeader := &stats.OutHeader{
-			//WireLength: // TODO(mmukhi): Revisit this later, if needed.
-		}
+		// Note: WireLength is not set in outHeader.
+		// TODO(mmukhi): Revisit this later, if needed.
+		outHeader := &stats.OutHeader{}
		t.stats.HandleRPC(s.Context(), outHeader)
	}
	return nil
@@ -787,7 +789,7 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error {
	headerFields := make([]hpack.HeaderField, 0, 2) // grpc-status and grpc-message will be there if none else.
	if !headersSent {
		headerFields = append(headerFields, hpack.HeaderField{Name: ":status", Value: "200"})
-		headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: "application/grpc"})
+		headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: contentType(s.contentSubtype)})
	}
	headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status", Value: strconv.Itoa(int(st.Code()))})
	headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-message", Value: encodeGrpcMessage(st.Message())})
@@ -837,10 +839,6 @@ func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) e

	var writeHeaderFrame bool
	s.mu.Lock()
-	if s.state == streamDone {
-		s.mu.Unlock()
-		return streamErrorf(codes.Unknown, "the stream has been done")
-	}
	if !s.headerOk {
		writeHeaderFrame = true
	}
@@ -886,15 +884,14 @@ func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) e
	}
	ltq, _, err := t.localSendQuota.get(size, s.waiters)
	if err != nil {
+		// Add the acquired quota back to transport.
+		t.sendQuotaPool.add(tq)
		return err
	}
	// even if ltq is smaller than size we don't adjust size since,
	// ltq is only a soft limit.
	streamQuota -= size
	p := r[:size]
-	// Reset ping strikes when sending data since this might cause
-	// the peer to send ping.
-	atomic.StoreUint32(&t.resetPingStrikes, 1)
	success := func() {
		ltq := ltq
		t.controlBuf.put(&dataFrame{streamID: s.id, endStream: false, d: p, f: func() {
@@ -1009,6 +1006,9 @@ var goAwayPing = &ping{data: [8]byte{1, 6, 1, 8, 0, 3, 3, 9}}
func (t *http2Server) itemHandler(i item) error {
	switch i := i.(type) {
	case *dataFrame:
+		// Reset ping strikes when sending data since this might cause
+		// the peer to send ping.
+		atomic.StoreUint32(&t.resetPingStrikes, 1)
		if err := t.framer.fr.WriteData(i.streamID, i.endStream, i.d); err != nil {
			return err
		}
@@ -1069,6 +1069,9 @@ func (t *http2Server) itemHandler(i item) error {
	if !i.headsUp {
		// Stop accepting more streams now.
		t.state = draining
+		if len(t.activeStreams) == 0 {
+			i.closeConn = true
+		}
		t.mu.Unlock()
		if err := t.framer.fr.WriteGoAway(sid, i.code, i.debugData); err != nil {
			return err
@@ -1076,8 +1079,7 @@ func (t *http2Server) itemHandler(i item) error {
		if i.closeConn {
			// Abruptly close the connection following the GoAway (via
			// loopywriter). But flush out what's inside the buffer first.
-			t.framer.writer.Flush()
-			return fmt.Errorf("transport: Connection closing")
+			t.controlBuf.put(&flushIO{closeTr: true})
		}
		return nil
	}
@@ -1107,7 +1109,13 @@ func (t *http2Server) itemHandler(i item) error {
		}()
		return nil
	case *flushIO:
-		return t.framer.writer.Flush()
+		if err := t.framer.writer.Flush(); err != nil {
+			return err
+		}
+		if i.closeTr {
+			return ErrConnClosing
+		}
+		return nil
	case *ping:
		if !i.ack {
			t.bdpEst.timesnap(i.data)
@@ -1155,7 +1163,7 @@ func (t *http2Server) closeStream(s *Stream) {
		t.idle = time.Now()
	}
	if t.state == draining && len(t.activeStreams) == 0 {
-		defer t.Close()
+		defer t.controlBuf.put(&flushIO{closeTr: true})
	}
	t.mu.Unlock()
	// In case stream sending and receiving are invoked in separate
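A recurring change in the server transport above is that connection teardown is no longer done inline with t.Close(): a flushIO{closeTr: true} item is queued instead, so the control buffer is flushed before the writer loop reports ErrConnClosing and the owning goroutine closes the connection. A schematic sketch of that pattern, independent of the real grpc types (every name here is illustrative):

```go
package main

import (
	"errors"
	"fmt"
)

var errConnClosing = errors.New("transport is closing")

// workItem mimics the control-buffer items: an optional payload, an optional
// flush, and a flag saying the connection should close after the flush.
type workItem struct {
	payload string
	flush   bool
	closeTr bool
}

// writerLoop drains queued work in order; only after flushing does it signal
// that the connection must be torn down, mirroring flushIO{closeTr: true}.
func writerLoop(items []workItem) error {
	for _, it := range items {
		if it.payload != "" {
			fmt.Println("write:", it.payload)
		}
		if it.flush {
			fmt.Println("flush buffered frames")
			if it.closeTr {
				return errConnClosing
			}
		}
	}
	return nil
}

func main() {
	_ = writerLoop([]workItem{
		{payload: "GOAWAY"},
		{flush: true, closeTr: true},
	})
}
```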
65 vendor/google.golang.org/grpc/transport/http_util.go generated vendored
@@ -46,6 +46,12 @@ const (
	// http2IOBufSize specifies the buffer size for sending frames.
	defaultWriteBufSize = 32 * 1024
	defaultReadBufSize  = 32 * 1024
+	// baseContentType is the base content-type for gRPC. This is a valid
+	// content-type on it's own, but can also include a content-subtype such as
+	// "proto" as a suffix after "+" or ";". See
+	// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests
+	// for more details.
+	baseContentType = "application/grpc"
)

var (
@@ -64,7 +70,7 @@ var (
		http2.ErrCodeConnect:            codes.Internal,
		http2.ErrCodeEnhanceYourCalm:    codes.ResourceExhausted,
		http2.ErrCodeInadequateSecurity: codes.PermissionDenied,
-		http2.ErrCodeHTTP11Required:     codes.FailedPrecondition,
+		http2.ErrCodeHTTP11Required:     codes.Internal,
	}
	statusCodeConvTab = map[codes.Code]http2.ErrCode{
		codes.Internal: http2.ErrCodeInternal,
@@ -114,6 +120,7 @@ type decodeState struct {
	mdata      map[string][]string
	statsTags  []byte
	statsTrace []byte
+	contentSubtype string
}

// isReservedHeader checks whether hdr belongs to HTTP2 headers
@@ -149,17 +156,44 @@ func isWhitelistedPseudoHeader(hdr string) bool {
	}
}

-func validContentType(t string) bool {
-	e := "application/grpc"
-	if !strings.HasPrefix(t, e) {
-		return false
-	}
-	// Support variations on the content-type
-	// (e.g. "application/grpc+blah", "application/grpc;blah").
-	if len(t) > len(e) && t[len(e)] != '+' && t[len(e)] != ';' {
-		return false
-	}
-	return true
+// contentSubtype returns the content-subtype for the given content-type. The
+// given content-type must be a valid content-type that starts with
+// "application/grpc". A content-subtype will follow "application/grpc" after a
+// "+" or ";". See
+// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
+// more details.
+//
+// If contentType is not a valid content-type for gRPC, the boolean
+// will be false, otherwise true. If content-type == "application/grpc",
+// "application/grpc+", or "application/grpc;", the boolean will be true,
+// but no content-subtype will be returned.
+//
+// contentType is assumed to be lowercase already.
+func contentSubtype(contentType string) (string, bool) {
+	if contentType == baseContentType {
+		return "", true
+	}
+	if !strings.HasPrefix(contentType, baseContentType) {
+		return "", false
+	}
+	// guaranteed since != baseContentType and has baseContentType prefix
+	switch contentType[len(baseContentType)] {
+	case '+', ';':
+		// this will return true for "application/grpc+" or "application/grpc;"
+		// which the previous validContentType function tested to be valid, so we
+		// just say that no content-subtype is specified in this case
+		return contentType[len(baseContentType)+1:], true
+	default:
+		return "", false
+	}
+}
+
+// contentSubtype is assumed to be lowercase
+func contentType(contentSubtype string) string {
+	if contentSubtype == "" {
+		return baseContentType
+	}
+	return baseContentType + "+" + contentSubtype
}

func (d *decodeState) status() *status.Status {
@@ -247,9 +281,16 @@ func (d *decodeState) addMetadata(k, v string) {
func (d *decodeState) processHeaderField(f hpack.HeaderField) error {
	switch f.Name {
	case "content-type":
-		if !validContentType(f.Value) {
-			return streamErrorf(codes.FailedPrecondition, "transport: received the unexpected content-type %q", f.Value)
+		contentSubtype, validContentType := contentSubtype(f.Value)
+		if !validContentType {
+			return streamErrorf(codes.Internal, "transport: received the unexpected content-type %q", f.Value)
		}
+		d.contentSubtype = contentSubtype
+		// TODO: do we want to propagate the whole content-type in the metadata,
+		// or come up with a way to just propagate the content-subtype if it was set?
+		// ie {"content-type": "application/grpc+proto"} or {"content-subtype": "proto"}
+		// in the metadata?
+		d.addMetadata(f.Name, f.Value)
	case "grpc-encoding":
		d.encoding = f.Value
	case "grpc-status":
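The new contentSubtype/contentType helpers above are unexported, but their documented behaviour is easy to pin down with a standalone replica. The function below mirrors the rules quoted in the hunk and is only an illustration, not the vendored code itself:

```go
package main

import (
	"fmt"
	"strings"
)

const baseContentType = "application/grpc"

// parseContentSubtype mirrors the documented rules: "application/grpc" and
// "application/grpc{+,;}" are valid with an empty subtype, and anything after
// the '+' or ';' separator is returned as the subtype.
func parseContentSubtype(contentType string) (string, bool) {
	if contentType == baseContentType {
		return "", true
	}
	if !strings.HasPrefix(contentType, baseContentType) {
		return "", false
	}
	switch contentType[len(baseContentType)] {
	case '+', ';':
		return contentType[len(baseContentType)+1:], true
	default:
		return "", false
	}
}

func main() {
	for _, ct := range []string{"application/grpc", "application/grpc+proto", "application/json"} {
		sub, ok := parseContentSubtype(ct)
		fmt.Printf("%-25s valid=%-5v subtype=%q\n", ct, ok, sub)
	}
}
```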
90 vendor/google.golang.org/grpc/transport/transport.go generated vendored
@@ -26,7 +26,6 @@ import (
	"io"
	"net"
	"sync"
-	"time"

	"golang.org/x/net/context"
	"golang.org/x/net/http2"
@@ -247,13 +246,34 @@ type Stream struct {

	bytesReceived bool // indicates whether any bytes have been received on this stream
	unprocessed   bool // set if the server sends a refused stream or GOAWAY including this stream

+	// contentSubtype is the content-subtype for requests.
+	// this must be lowercase or the behavior is undefined.
+	contentSubtype string
+}
+
+func (s *Stream) waitOnHeader() error {
+	if s.headerChan == nil {
+		// On the server headerChan is always nil since a stream originates
+		// only after having received headers.
+		return nil
+	}
+	wc := s.waiters
+	select {
+	case <-wc.ctx.Done():
+		return ContextErr(wc.ctx.Err())
+	case <-wc.goAway:
+		return errStreamDrain
+	case <-s.headerChan:
+		return nil
+	}
}

// RecvCompress returns the compression algorithm applied to the inbound
// message. It is empty string if there is no compression applied.
func (s *Stream) RecvCompress() string {
-	if s.headerChan != nil {
-		<-s.headerChan
+	if err := s.waitOnHeader(); err != nil {
+		return ""
	}
	return s.recvCompress
}
@@ -279,15 +299,7 @@ func (s *Stream) GoAway() <-chan struct{} {
// is available. It blocks until i) the metadata is ready or ii) there is no
// header metadata or iii) the stream is canceled/expired.
func (s *Stream) Header() (metadata.MD, error) {
-	var err error
-	select {
-	case <-s.ctx.Done():
-		err = ContextErr(s.ctx.Err())
-	case <-s.goAway:
-		err = errStreamDrain
-	case <-s.headerChan:
-		return s.header.Copy(), nil
-	}
+	err := s.waitOnHeader()
	// Even if the stream is closed, header is returned if available.
	select {
	case <-s.headerChan:
@@ -313,6 +325,15 @@ func (s *Stream) ServerTransport() ServerTransport {
	return s.st
}

+// ContentSubtype returns the content-subtype for a request. For example, a
+// content-subtype of "proto" will result in a content-type of
+// "application/grpc+proto". This will always be lowercase. See
+// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
+// more details.
+func (s *Stream) ContentSubtype() string {
+	return s.contentSubtype
+}
+
// Context returns the context of the stream.
func (s *Stream) Context() context.Context {
	return s.ctx
@@ -345,6 +366,14 @@ func (s *Stream) SetHeader(md metadata.MD) error {
	return nil
}

+// SendHeader sends the given header metadata. The given metadata is
+// combined with any metadata set by previous calls to SetHeader and
+// then written to the transport stream.
+func (s *Stream) SendHeader(md metadata.MD) error {
+	t := s.ServerTransport()
+	return t.WriteHeader(s, md)
+}
+
// SetTrailer sets the trailer metadata which will be sent with the RPC status
// by the server. This can be called multiple times. Server side only.
func (s *Stream) SetTrailer(md metadata.MD) error {
@@ -424,21 +453,6 @@ func (s *Stream) GoString() string {
	return fmt.Sprintf("<stream: %p, %v>", s, s.method)
}

-// The key to save transport.Stream in the context.
-type streamKey struct{}
-
-// newContextWithStream creates a new context from ctx and attaches stream
-// to it.
-func newContextWithStream(ctx context.Context, stream *Stream) context.Context {
-	return context.WithValue(ctx, streamKey{}, stream)
-}
-
-// StreamFromContext returns the stream saved in ctx.
-func StreamFromContext(ctx context.Context) (s *Stream, ok bool) {
-	s, ok = ctx.Value(streamKey{}).(*Stream)
-	return
-}
-
// state of transport
type transportState int

@@ -506,8 +520,8 @@ type TargetInfo struct {

// NewClientTransport establishes the transport with the required ConnectOptions
// and returns it to the caller.
-func NewClientTransport(ctx context.Context, target TargetInfo, opts ConnectOptions, timeout time.Duration) (ClientTransport, error) {
-	return newHTTP2Client(ctx, target, opts, timeout)
+func NewClientTransport(connectCtx, ctx context.Context, target TargetInfo, opts ConnectOptions, onSuccess func()) (ClientTransport, error) {
+	return newHTTP2Client(connectCtx, ctx, target, opts, onSuccess)
}

// Options provides additional hints and information for message
@@ -545,6 +559,14 @@ type CallHdr struct {
	// for performance purposes.
	// If it's false, new stream will never be flushed.
	Flush bool

+	// ContentSubtype specifies the content-subtype for a request. For example, a
+	// content-subtype of "proto" will result in a content-type of
+	// "application/grpc+proto". The value of ContentSubtype must be all
+	// lowercase, otherwise the behavior is undefined. See
+	// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests
+	// for more details.
+	ContentSubtype string
}

// ClientTransport is the common interface for all gRPC client-side transport
@@ -668,13 +690,13 @@ func (e ConnectionError) Origin() error {
var (
	// ErrConnClosing indicates that the transport is closing.
	ErrConnClosing = connectionErrorf(true, nil, "transport is closing")
-	// errStreamDrain indicates that the stream is rejected by the server because
-	// the server stops accepting new RPCs.
-	// TODO: delete this error; it is no longer necessary.
-	errStreamDrain = streamErrorf(codes.Unavailable, "the server stops accepting new RPCs")
+	// errStreamDrain indicates that the stream is rejected because the
+	// connection is draining. This could be caused by goaway or balancer
+	// removing the address.
+	errStreamDrain = streamErrorf(codes.Unavailable, "the connection is draining")
	// StatusGoAway indicates that the server sent a GOAWAY that included this
	// stream's ID in unprocessed RPCs.
-	statusGoAway = status.New(codes.Unavailable, "the server stopped accepting new RPCs")
+	statusGoAway = status.New(codes.Unavailable, "the stream is rejected because server is draining the connection")
)

// TODO: See if we can replace StreamError with status package errors.
@@ -31,6 +31,8 @@ import (
	"github.com/sirupsen/logrus"
	"golang.org/x/net/context"
	golangGrpc "google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
+	grpcStatus "google.golang.org/grpc/status"
)

var (
@@ -1313,7 +1315,7 @@ func (k *kataAgent) disconnect() error {
		return nil
	}

-	if err := k.client.Close(); err != nil && err != golangGrpc.ErrClientConnClosing {
+	if err := k.client.Close(); err != nil && grpcStatus.Convert(err).Code() != codes.Canceled {
		return err
	}

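The runtime-side hunk above (kataAgent.disconnect) stops comparing against the removed grpc.ErrClientConnClosing sentinel and checks the error's status code instead. A minimal sketch of the same pattern; the closeConn helper and the dial target are illustrative:

```go
package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	grpcStatus "google.golang.org/grpc/status"
)

// closeConn ignores the Canceled status that closing an already-closing
// client connection can return, mirroring the updated disconnect logic.
func closeConn(conn *grpc.ClientConn) error {
	if err := conn.Close(); err != nil && grpcStatus.Convert(err).Code() != codes.Canceled {
		return err
	}
	return nil
}

func main() {
	// Placeholder target; the runtime dials the agent over a VM channel.
	conn, err := grpc.Dial("localhost:50051", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	if err := closeConn(conn); err != nil {
		log.Fatal(err)
	}
}
```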
|
|||||||
@@ -237,6 +237,10 @@ func (p *gRPCProxy) ReseedRandomDev(ctx context.Context, req *pb.ReseedRandomDev
|
|||||||
return emptyResp, nil
|
return emptyResp, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (p *gRPCProxy) GetGuestDetails(ctx context.Context, req *pb.GuestDetailsRequest) (*pb.GuestDetailsResponse, error) {
|
||||||
|
return &pb.GuestDetailsResponse{}, nil
|
||||||
|
}
|
||||||
|
|
||||||
func gRPCRegister(s *grpc.Server, srv interface{}) {
|
func gRPCRegister(s *grpc.Server, srv interface{}) {
|
||||||
switch g := srv.(type) {
|
switch g := srv.(type) {
|
||||||
case *gRPCProxy:
|
case *gRPCProxy:
|
||||||
|
|||||||
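The final hunk teaches the gRPCProxy test mock about the agent's new GetGuestDetails RPC, returning an empty GuestDetailsResponse. A hedged sketch of calling that RPC through the generated client; the dial target is a placeholder and the empty request mirrors what the mock accepts:

```go
package main

import (
	"log"

	pb "github.com/kata-containers/agent/protocols/grpc"
	"golang.org/x/net/context"
	"google.golang.org/grpc"
)

func main() {
	// Placeholder address: the runtime normally reaches the agent over a
	// VM serial or vsock channel, not plain TCP.
	conn, err := grpc.Dial("localhost:50051", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	c := pb.NewAgentServiceClient(conn)
	resp, err := c.GetGuestDetails(context.Background(), &pb.GuestDetailsRequest{})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("guest details: %+v", resp)
}
```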